From 2997a91250c4af915be70d2be38df1b3889c4c2d Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 30 Jul 2019 23:14:00 -0400 Subject: add changelog file --- changelog.d/5759.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5759.misc (limited to 'changelog.d') diff --git a/changelog.d/5759.misc b/changelog.d/5759.misc new file mode 100644 index 0000000000..c0bc566c4c --- /dev/null +++ b/changelog.d/5759.misc @@ -0,0 +1 @@ +Allow devices to be marked as hidden, for use by features such as cross-signing. \ No newline at end of file -- cgit 1.4.1 From d28d1e2d1b056a0c9e2b9f2c92013515a56dd9fb Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Thu, 1 Aug 2019 21:52:35 -0400 Subject: add changelog --- changelog.d/5769.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5769.feature (limited to 'changelog.d') diff --git a/changelog.d/5769.feature b/changelog.d/5769.feature new file mode 100644 index 0000000000..c34257cb8f --- /dev/null +++ b/changelog.d/5769.feature @@ -0,0 +1 @@ +allow uploading of cross-signing keys \ No newline at end of file -- cgit 1.4.1 From 8c9adcc95dee892f90d6acbbe5c54acbf621720b Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Thu, 1 Aug 2019 22:09:05 -0400 Subject: fix formatting --- changelog.d/5769.feature | 2 +- tests/handlers/test_e2e_keys.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'changelog.d') diff --git a/changelog.d/5769.feature b/changelog.d/5769.feature index c34257cb8f..bf994ca327 100644 --- a/changelog.d/5769.feature +++ b/changelog.d/5769.feature @@ -1 +1 @@ -allow uploading of cross-signing keys \ No newline at end of file +Allow uploading of cross-signing keys. \ No newline at end of file diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 9ae4cb6ea2..a62c52eefa 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -176,7 +176,9 @@ class E2eKeysHandlerTestCase(unittest.TestCase): } yield self.handler.upload_signing_keys_for_user(local_user, keys2) - devices = yield self.handler.query_devices({"device_keys": {local_user: []}}, 0, local_user) + devices = yield self.handler.query_devices( + {"device_keys": {local_user: []}}, 0, local_user + ) self.assertDictEqual(devices["master_keys"], {local_user: keys2["master_key"]}) @defer.inlineCallbacks -- cgit 1.4.1 From f63ba7a7955d077224d4d602cd33bb31fad92fbc Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Mon, 12 Aug 2019 15:14:37 -0700 Subject: Cross-signing [1/4] -- hidden devices (#5759) * allow devices to be marked as "hidden" This is a prerequisite for cross-signing, as it allows us to create other things that live within the device namespace, so they can be used for signatures. --- changelog.d/5759.misc | 1 + synapse/storage/devices.py | 38 +++++++++++++++++----- synapse/storage/end_to_end_keys.py | 2 +- synapse/storage/schema/delta/56/hidden_devices.sql | 18 ++++++++++ 4 files changed, 49 insertions(+), 10 deletions(-) create mode 100644 changelog.d/5759.misc create mode 100644 synapse/storage/schema/delta/56/hidden_devices.sql (limited to 'changelog.d') diff --git a/changelog.d/5759.misc b/changelog.d/5759.misc new file mode 100644 index 0000000000..c0bc566c4c --- /dev/null +++ b/changelog.d/5759.misc @@ -0,0 +1 @@ +Allow devices to be marked as hidden, for use by features such as cross-signing. 
\ No newline at end of file diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 8f72d92895..991e28ea24 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd +# Copyright 2019 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,7 +22,7 @@ from canonicaljson import json from twisted.internet import defer -from synapse.api.errors import StoreError +from synapse.api.errors import Codes, StoreError from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import Cache, SQLBaseStore, db_to_json from synapse.storage.background_updates import BackgroundUpdateStore @@ -36,7 +38,8 @@ DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = ( class DeviceWorkerStore(SQLBaseStore): def get_device(self, user_id, device_id): - """Retrieve a device. + """Retrieve a device. Only returns devices that are not marked as + hidden. Args: user_id (str): The ID of the user which owns the device @@ -48,14 +51,15 @@ class DeviceWorkerStore(SQLBaseStore): """ return self._simple_select_one( table="devices", - keyvalues={"user_id": user_id, "device_id": device_id}, + keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False}, retcols=("user_id", "device_id", "display_name"), desc="get_device", ) @defer.inlineCallbacks def get_devices_by_user(self, user_id): - """Retrieve all of a user's registered devices. + """Retrieve all of a user's registered devices. Only returns devices + that are not marked as hidden. Args: user_id (str): @@ -66,7 +70,7 @@ class DeviceWorkerStore(SQLBaseStore): """ devices = yield self._simple_select_list( table="devices", - keyvalues={"user_id": user_id}, + keyvalues={"user_id": user_id, "hidden": False}, retcols=("user_id", "device_id", "display_name"), desc="get_devices_by_user", ) @@ -540,6 +544,8 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): Returns: defer.Deferred: boolean whether the device was inserted or an existing device existed with that ID. 
+ Raises: + StoreError: if the device is already in use """ key = (user_id, device_id) if self.device_id_exists_cache.get(key, None): @@ -552,12 +558,25 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): "user_id": user_id, "device_id": device_id, "display_name": initial_device_display_name, + "hidden": False, }, desc="store_device", or_ignore=True, ) + if not inserted: + # if the device already exists, check if it's a real device, or + # if the device ID is reserved by something else + hidden = yield self._simple_select_one_onecol( + "devices", + keyvalues={"user_id": user_id, "device_id": device_id}, + retcol="hidden", + ) + if hidden: + raise StoreError(400, "The device ID is in use", Codes.FORBIDDEN) self.device_id_exists_cache.prefill(key, True) return inserted + except StoreError: + raise except Exception as e: logger.error( "store_device with device_id=%s(%r) user_id=%s(%r)" @@ -584,7 +603,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): """ yield self._simple_delete_one( table="devices", - keyvalues={"user_id": user_id, "device_id": device_id}, + keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False}, desc="delete_device", ) @@ -604,14 +623,15 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): table="devices", column="device_id", iterable=device_ids, - keyvalues={"user_id": user_id}, + keyvalues={"user_id": user_id, "hidden": False}, desc="delete_devices", ) for device_id in device_ids: self.device_id_exists_cache.invalidate((user_id, device_id)) def update_device(self, user_id, device_id, new_display_name=None): - """Update a device. + """Update a device. Only updates the device if it is not marked as + hidden. Args: user_id (str): The ID of the user which owns the device @@ -630,7 +650,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): return defer.succeed(None) return self._simple_update_one( table="devices", - keyvalues={"user_id": user_id, "device_id": device_id}, + keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False}, updatevalues=updates, desc="update_device", ) diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 1e07474e70..6f524cedd9 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -85,7 +85,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): " k.key_json" " FROM devices d" " %s JOIN e2e_device_keys_json k USING (user_id, device_id)" - " WHERE %s" + " WHERE %s AND NOT d.hidden" ) % ( "LEFT" if include_all_devices else "INNER", " OR ".join("(" + q + ")" for q in query_clauses), diff --git a/synapse/storage/schema/delta/56/hidden_devices.sql b/synapse/storage/schema/delta/56/hidden_devices.sql new file mode 100644 index 0000000000..67f8b20297 --- /dev/null +++ b/synapse/storage/schema/delta/56/hidden_devices.sql @@ -0,0 +1,18 @@ +/* Copyright 2019 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- device list needs to know which ones are "real" devices, and which ones are +-- just used to avoid collisions +ALTER TABLE devices ADD COLUMN hidden BOOLEAN DEFAULT FALSE; -- cgit 1.4.1 From b9d57502da8ae4e11523a155e0fd608433e1025d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 13 Sep 2019 16:06:03 +0100 Subject: changelog --- changelog.d/6037.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6037.feature (limited to 'changelog.d') diff --git a/changelog.d/6037.feature b/changelog.d/6037.feature new file mode 100644 index 0000000000..95d82bd4d8 --- /dev/null +++ b/changelog.d/6037.feature @@ -0,0 +1 @@ +Handle userid clashes when authenticating via SAML by appending an integer suffix. \ No newline at end of file -- cgit 1.4.1 From 62e3ff92fd3228b5c34f6cee691e22f9b1f85c9e Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 19 Sep 2019 10:53:14 +0100 Subject: Remove POST method from password reset submit_token endpoint (#6056) Removes the POST method from `/password_reset//submit_token/` as it's only used by phone number verification which Synapse does not support yet. --- changelog.d/6056.bugfix | 1 + synapse/rest/client/v2_alpha/account.py | 17 ----------------- 2 files changed, 1 insertion(+), 17 deletions(-) create mode 100644 changelog.d/6056.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6056.bugfix b/changelog.d/6056.bugfix new file mode 100644 index 0000000000..4d9573a58d --- /dev/null +++ b/changelog.d/6056.bugfix @@ -0,0 +1 @@ +Remove POST method from password reset submit_token endpoint until we implement submit_url functionality. \ No newline at end of file diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 2ea515d2f6..afaaeeacdd 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -272,23 +272,6 @@ class PasswordResetSubmitTokenServlet(RestServlet): request.write(html.encode("utf-8")) finish_request(request) - @defer.inlineCallbacks - def on_POST(self, request, medium): - if medium != "email": - raise SynapseError( - 400, "This medium is currently not supported for password resets" - ) - - body = parse_json_object_from_request(request) - assert_params_in_dict(body, ["sid", "client_secret", "token"]) - - valid, _ = yield self.store.validate_threepid_session( - body["sid"], body["client_secret"], body["token"], self.clock.time_msec() - ) - response_code = 200 if valid else 400 - - return response_code, {"success": valid} - class PasswordRestServlet(RestServlet): PATTERNS = client_patterns("/account/password$") -- cgit 1.4.1 From 84a2743e2eaf5402cef8b68327efaf54daf64150 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 19 Sep 2019 10:55:43 +0100 Subject: Add changelog --- changelog.d/6064.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6064.misc (limited to 'changelog.d') diff --git a/changelog.d/6064.misc b/changelog.d/6064.misc new file mode 100644 index 0000000000..28dc89111b --- /dev/null +++ b/changelog.d/6064.misc @@ -0,0 +1 @@ +Clean up the sample config for SAML authentication. -- cgit 1.4.1 From bcd91328692555d85df346c4571085c9b41b8f6a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 19 Sep 2019 15:06:27 +0100 Subject: Undo the deletion of some tables (#6047) This is a partial revert of #5893. 
The problem is that if we drop these tables in the same release as removing the code that writes to them, it prevents users from being able to roll back to a previous release. So let's leave the tables in place for now, and remember to drop them in a subsequent release. (Note that these tables haven't been *read* for *years*, so any missing rows resulting from a temporary upgrade to vNext won't cause a problem.) --- changelog.d/5893.misc | 2 +- changelog.d/6047.misc | 2 ++ .../schema/delta/56/drop_unused_event_tables.sql | 20 -------------------- 3 files changed, 3 insertions(+), 21 deletions(-) create mode 100644 changelog.d/6047.misc delete mode 100644 synapse/storage/schema/delta/56/drop_unused_event_tables.sql (limited to 'changelog.d') diff --git a/changelog.d/5893.misc b/changelog.d/5893.misc index 07ee4888dc..5ef171cb3e 100644 --- a/changelog.d/5893.misc +++ b/changelog.d/5893.misc @@ -1 +1 @@ -Drop some unused tables. +Stop populating some unused tables. diff --git a/changelog.d/6047.misc b/changelog.d/6047.misc new file mode 100644 index 0000000000..a4cdb8abb3 --- /dev/null +++ b/changelog.d/6047.misc @@ -0,0 +1,2 @@ +Stop populating some unused tables. + diff --git a/synapse/storage/schema/delta/56/drop_unused_event_tables.sql b/synapse/storage/schema/delta/56/drop_unused_event_tables.sql deleted file mode 100644 index 9f09922c67..0000000000 --- a/synapse/storage/schema/delta/56/drop_unused_event_tables.sql +++ /dev/null @@ -1,20 +0,0 @@ -/* Copyright 2019 The Matrix.org Foundation C.I.C. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - --- these tables are never used. -DROP TABLE IF EXISTS room_names; -DROP TABLE IF EXISTS topics; -DROP TABLE IF EXISTS history_visibility; -DROP TABLE IF EXISTS guest_access; -- cgit 1.4.1 From 35ce3bda7aaa6281f02123225ca63d913fa12df1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 19 Sep 2019 15:06:48 +0100 Subject: Add some notes on rolling back to v1.3.1. (#6049) --- UPGRADE.rst | 25 +++++++++++++++++++++++++ changelog.d/6049.doc | 1 + 2 files changed, 26 insertions(+) create mode 100644 changelog.d/6049.doc (limited to 'changelog.d') diff --git a/UPGRADE.rst b/UPGRADE.rst index 5aaf804902..53f3af4ed1 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -99,6 +99,31 @@ Synapse will expect these files to exist inside the configured template director default templates, see `synapse/res/templates `_. +Rolling back to v1.3.1 +---------------------- + +If you encounter problems with v1.4.0, it should be possible to roll back to +v1.3.1, subject to the following: + +* The 'room statistics' engine was heavily reworked in this release (see + `#5971 <https://github.com/matrix-org/synapse/pull/5971>`_), including + significant changes to the database schema, which are not easily + reverted. This will cause the room statistics engine to stop updating when + you downgrade.
+ + The room statistics are essentially unused in v1.3.1 (in future versions of + Synapse, they will be used to populate the room directory), so there should + be no loss of functionality. However, the statistics engine will write errors + to the logs, which can be avoided by setting the following in `homeserver.yaml`: + + .. code:: yaml + + stats: + enabled: false + + Don't forget to re-enable it when you upgrade again, in preparation for its + use in the room directory! + Upgrading to v1.2.0 =================== diff --git a/changelog.d/6049.doc b/changelog.d/6049.doc new file mode 100644 index 0000000000..e0307bf5c1 --- /dev/null +++ b/changelog.d/6049.doc @@ -0,0 +1 @@ +Add some notes on rolling back to v1.3.1. -- cgit 1.4.1 From fe349b497e4b22bb409eb199b77479c5895af525 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 19 Sep 2019 18:20:01 +0100 Subject: Update the upgrade notes (#6050) * make it clear that if you installed from a package manager, you should use that to upgrade * Document the new way of getting the server version (cf #4878) * Write some words about downgrading. --- UPGRADE.rst | 80 ++++++++++++++++++++++++++++++++++------------------ changelog.d/6050.doc | 1 + 2 files changed, 54 insertions(+), 27 deletions(-) create mode 100644 changelog.d/6050.doc (limited to 'changelog.d') diff --git a/UPGRADE.rst b/UPGRADE.rst index 53f3af4ed1..4ede973a08 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -2,52 +2,78 @@ Upgrading Synapse ================= Before upgrading check if any special steps are required to upgrade from the -what you currently have installed to current version of synapse. The extra +what you currently have installed to current version of Synapse. The extra instructions that may be required are listed later in this document. -1. If synapse was installed in a virtualenv then activate that virtualenv before - upgrading. If synapse is installed in a virtualenv in ``~/synapse/env`` then - run: +* If Synapse was installed using `prebuilt packages + `_, you will need to follow the normal process + for upgrading those packages. - .. code:: bash +* If Synapse was installed from source, then: - source ~/synapse/env/bin/activate - -2. If synapse was installed using pip then upgrade to the latest version by - running: + 1. Activate the virtualenv before upgrading. For example, if Synapse is + installed in a virtualenv in ``~/synapse/env`` then run: - .. code:: bash + .. code:: bash - pip install --upgrade matrix-synapse[all] + source ~/synapse/env/bin/activate - # restart synapse - synctl restart + 2. If Synapse was installed using pip then upgrade to the latest version by + running: + .. code:: bash - If synapse was installed using git then upgrade to the latest version by - running: + pip install --upgrade matrix-synapse - .. code:: bash + If Synapse was installed using git then upgrade to the latest version by + running: - # Pull the latest version of the master branch. + .. code:: bash + git pull + pip install --upgrade . - # Update synapse and its python dependencies. - pip install --upgrade .[all] + 3. Restart Synapse: - # restart synapse - ./synctl restart + .. code:: bash + ./synctl restart -To check whether your update was successful, you can check the Server header -returned by the Client-Server API: +To check whether your update was successful, you can check the running server +version with: .. code:: bash - # replace with the hostname of your synapse homeserver. 
- # You may need to specify a port (eg, :8448) if your server is not - # configured on port 443. - curl -kv https:///_matrix/client/versions 2>&1 | grep "Server:" + # you may need to replace 'localhost:8008' if synapse is not configured + # to listen on port 8008. + + curl http://localhost:8008/_synapse/admin/v1/server_version + +Rolling back to older versions +------------------------------ + +Rolling back to previous releases can be difficult, due to database schema +changes between releases. Where we have been able to test the rollback process, +this will be noted below. + +In general, you will need to undo any changes made during the upgrade process, +for example: + +* pip: + + .. code:: bash + + source env/bin/activate + # replace `1.3.0` accordingly: + pip install matrix-synapse==1.3.0 + +* Debian: + + .. code:: bash + + # replace `1.3.0` and `stretch` accordingly: + wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb + dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb Upgrading to v1.4.0 =================== diff --git a/changelog.d/6050.doc b/changelog.d/6050.doc new file mode 100644 index 0000000000..3d19c69bc4 --- /dev/null +++ b/changelog.d/6050.doc @@ -0,0 +1 @@ +Update the upgrade notes. -- cgit 1.4.1 From 599f786e4ee98050f399ff7f530a7208ce14468d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 19 Sep 2019 18:52:17 +0100 Subject: Update 6037.feature --- changelog.d/6037.feature | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'changelog.d') diff --git a/changelog.d/6037.feature b/changelog.d/6037.feature index 95d82bd4d8..85553d2da0 100644 --- a/changelog.d/6037.feature +++ b/changelog.d/6037.feature @@ -1 +1 @@ -Handle userid clashes when authenticating via SAML by appending an integer suffix. \ No newline at end of file +Make the process for mapping SAML2 users to matrix IDs more flexible. -- cgit 1.4.1 From b74606ea2262a717193f08bb6876459c1ee2d97d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 19 Sep 2019 20:29:11 +0100 Subject: Fix a bug with saml attribute maps. Fixes a bug where the default attribute maps were prioritised over user-specified ones, resulting in incorrect mappings. The problem is that if you call SPConfig.load() multiple times, it adds new attribute mappers to a list. So by calling it with the default config first, and then the user-specified config, we would always get the default mappers before the user-specified mappers. To solve this, let's merge the config dicts first, and then pass them to SPConfig. --- changelog.d/6069.bugfix | 1 + synapse/config/saml2_config.py | 34 ++++++++++++++++++++++++++++------ synapse/util/module_loader.py | 20 +++++++++++++++++++- 3 files changed, 48 insertions(+), 7 deletions(-) create mode 100644 changelog.d/6069.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6069.bugfix b/changelog.d/6069.bugfix new file mode 100644 index 0000000000..a437ac41a9 --- /dev/null +++ b/changelog.d/6069.bugfix @@ -0,0 +1 @@ +Fix a bug which caused SAML attribute maps to be overridden by defaults. diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index 6a8161547a..14539fdb2a 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,11 +13,29 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from synapse.python_dependencies import DependencyException, check_requirements +from synapse.util.module_loader import load_python_module from ._base import Config, ConfigError +def _dict_merge(merge_dict, into_dct): + for k, v in merge_dict.items(): + if k not in into_dct: + into_dct[k] = v + continue + + current_val = into_dct[k] + + if isinstance(v, dict) and isinstance(current_val, dict): + _dict_merge(v, current_val) + continue + + # otherwise we just overwrite + into_dct[k] = v + + class SAML2Config(Config): def read_config(self, config, **kwargs): self.saml2_enabled = False @@ -33,15 +52,18 @@ class SAML2Config(Config): self.saml2_enabled = True - import saml2.config - - self.saml2_sp_config = saml2.config.SPConfig() - self.saml2_sp_config.load(self._default_saml_config_dict()) - self.saml2_sp_config.load(saml2_config.get("sp_config", {})) + saml2_config_dict = self._default_saml_config_dict() + _dict_merge(saml2_config.get("sp_config", {}), saml2_config_dict) config_path = saml2_config.get("config_path", None) if config_path is not None: - self.saml2_sp_config.load_file(config_path) + mod = load_python_module(config_path) + _dict_merge(mod.CONFIG, saml2_config_dict) + + import saml2.config + + self.saml2_sp_config = saml2.config.SPConfig() + self.saml2_sp_config.load(saml2_config_dict) # session lifetime: in milliseconds self.saml2_session_lifetime = self.parse_duration( diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index 522acd5aa8..7ff7eb1e4d 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -14,12 +14,13 @@ # limitations under the License. import importlib +import importlib.util from synapse.config._base import ConfigError def load_module(provider): - """ Loads a module with its config + """ Loads a synapse module with its config Take a dict with keys 'module' (the module name) and 'config' (the config dict). @@ -38,3 +39,20 @@ def load_module(provider): raise ConfigError("Failed to parse config for %r: %r" % (provider["module"], e)) return provider_class, provider_config + + +def load_python_module(location: str): + """Load a python module, and return a reference to its global namespace + + Args: + location (str): path to the module + + Returns: + python module object + """ + spec = importlib.util.spec_from_file_location(location, location) + if spec is None: + raise Exception("Unable to load module at %s" % (location,)) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + return mod -- cgit 1.4.1 From 36015d68efccd2520ac0a569a5f8714544f6568c Mon Sep 17 00:00:00 2001 From: "J. 
Ryan Stinnett" Date: Thu, 19 Sep 2019 22:28:30 +0100 Subject: Use unstable prefix for 3PID unbind API (#6062) --- changelog.d/5980.feature | 2 +- changelog.d/6062.bugfix | 1 + synapse/rest/client/v2_alpha/account.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6062.bugfix (limited to 'changelog.d') diff --git a/changelog.d/5980.feature b/changelog.d/5980.feature index f25d8d81d9..e20117cf1c 100644 --- a/changelog.d/5980.feature +++ b/changelog.d/5980.feature @@ -1 +1 @@ -Add POST /_matrix/client/r0/account/3pid/unbind endpoint from MSC2140 for unbinding a 3PID from an identity server without removing it from the homeserver user account. \ No newline at end of file +Add POST /_matrix/client/unstable/account/3pid/unbind endpoint from MSC2140 for unbinding a 3PID from an identity server without removing it from the homeserver user account. diff --git a/changelog.d/6062.bugfix b/changelog.d/6062.bugfix new file mode 100644 index 0000000000..e20117cf1c --- /dev/null +++ b/changelog.d/6062.bugfix @@ -0,0 +1 @@ +Add POST /_matrix/client/unstable/account/3pid/unbind endpoint from MSC2140 for unbinding a 3PID from an identity server without removing it from the homeserver user account. diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index afaaeeacdd..ce1487dbc5 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -529,7 +529,7 @@ class ThreepidRestServlet(RestServlet): class ThreepidUnbindRestServlet(RestServlet): - PATTERNS = client_patterns("/account/3pid/unbind$") + PATTERNS = client_patterns("/account/3pid/unbind$", releases=(), unstable=True) def __init__(self, hs): super(ThreepidUnbindRestServlet, self).__init__() -- cgit 1.4.1 From 2def5ea0da4b8134384adcd48e1e312f2f7e65c9 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Thu, 19 Sep 2019 22:29:47 +0100 Subject: Docker: support SYNAPSE_WORKER envvar (#6058) * Allow passing SYNAPSE_WORKER envvar * changelog.d * Document SYNAPSE_WORKER. Attempting to imply that you don't need to change this default unless you're in worker mode. Also aware that there's a bigger problem of attempting to document a complete working configuration of workers using docker, as we currently only document to use `synctl` for worker mode, and synctl doesn't work that way in docker. --- changelog.d/6058.docker | 1 + docker/README.md | 2 ++ docker/start.py | 3 ++- 3 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6058.docker (limited to 'changelog.d') diff --git a/changelog.d/6058.docker b/changelog.d/6058.docker new file mode 100644 index 0000000000..30be6933c9 --- /dev/null +++ b/changelog.d/6058.docker @@ -0,0 +1 @@ +Provide SYNAPSE_WORKER envvar to specify python module. diff --git a/docker/README.md b/docker/README.md index d5879c2f2c..4b712f3f5c 100644 --- a/docker/README.md +++ b/docker/README.md @@ -89,6 +89,8 @@ The following environment variables are supported in run mode: `/data`. * `SYNAPSE_CONFIG_PATH`: path to the config file. Defaults to `/homeserver.yaml`. +* `SYNAPSE_WORKER`: module to execute, used when running synapse with workers. + Defaults to `synapse.app.homeserver`, which is suitable for non-worker mode. * `UID`, `GID`: the user and group id to run Synapse as. Defaults to `991`, `991`. * `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`. 
diff --git a/docker/start.py b/docker/start.py index 260f2d9943..e41ea20e70 100755 --- a/docker/start.py +++ b/docker/start.py @@ -182,6 +182,7 @@ def main(args, environ): mode = args[1] if len(args) > 1 else None desired_uid = int(environ.get("UID", "991")) desired_gid = int(environ.get("GID", "991")) + synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver") if (desired_uid == os.getuid()) and (desired_gid == os.getgid()): ownership = None else: @@ -245,7 +246,7 @@ def main(args, environ): log("Starting synapse with config file " + config_path) - args = ["python", "-m", "synapse.app.homeserver", "--config-path", config_path] + args = ["python", "-m", synapse_worker, "--config-path", config_path] if ownership is not None: args = ["su-exec", ownership] + args os.execv("/sbin/su-exec", args) -- cgit 1.4.1 From 3ac614eb6c294b7f77dde123f85ddaf3a389e3b8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 20 Sep 2019 10:46:34 +0100 Subject: Drop support for bind param on POST /account/3pid (MSC2290) (#6067) As per [MSC2290](https://github.com/matrix-org/matrix-doc/pull/2290/files#diff-05cde9463e9209b701312b3baf2fb2ebR151), we're dropping the bind parameter from `/account/3pid`. This endpoint can now only be used for adding threepid's to the user's account on the homeserver. --- changelog.d/6067.feature | 1 + synapse/rest/client/v2_alpha/account.py | 4 ---- sytest-blacklist | 9 +++++++++ 3 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6067.feature (limited to 'changelog.d') diff --git a/changelog.d/6067.feature b/changelog.d/6067.feature new file mode 100644 index 0000000000..72685961c9 --- /dev/null +++ b/changelog.d/6067.feature @@ -0,0 +1 @@ +Remove `bind` parameter from Client Server POST `/account` endpoint as per [MSC2290](https://github.com/matrix-org/matrix-doc/pull/2290/). 
\ No newline at end of file diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index ce1487dbc5..1791f4d79b 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -521,10 +521,6 @@ class ThreepidRestServlet(RestServlet): user_id, threepid["medium"], threepid["address"], threepid["validated_at"] ) - if "bind" in body and body["bind"]: - logger.debug("Binding threepid %s to %s", threepid, user_id) - yield self.identity_handler.bind_threepid(threepid_creds, user_id) - return 200, {} diff --git a/sytest-blacklist b/sytest-blacklist index 11785fd43f..04698cb068 100644 --- a/sytest-blacklist +++ b/sytest-blacklist @@ -29,3 +29,12 @@ Enabling an unknown default rule fails with 404 # Blacklisted due to https://github.com/matrix-org/synapse/issues/1663 New federated private chats get full presence information (SYN-115) + +# Blacklisted temporarily due to https://github.com/matrix-org/matrix-doc/pull/2290 +# These sytests need to be updated with new endpoints, which will come in a later PR +# That PR will also remove this blacklist +Can bind 3PID via home server +Can bind and unbind 3PID via homeserver +3PIDs are unbound after account deactivation +Can bind and unbind 3PID via /unbind by specifying the identity server +Can bind and unbind 3PID via /unbind without specifying the identity server -- cgit 1.4.1 From aeb40f355c8590855eeca05b49bfff2b91faa85b Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 20 Sep 2019 10:46:59 +0100 Subject: Ensure email validation link parameters are URL-encoded (#6063) The validation links sent via email had their query parameters inserted without any URL-encoding. Surprisingly this didn't seem to cause any issues, but if a user were to put a `/` in their client_secret it could lead to problems. --- changelog.d/6063.bugfix | 1 + synapse/push/mailer.py | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6063.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6063.bugfix b/changelog.d/6063.bugfix new file mode 100644 index 0000000000..7485e32a2c --- /dev/null +++ b/changelog.d/6063.bugfix @@ -0,0 +1 @@ +Ensure query parameters in email validation links are URL-encoded. 
\ No newline at end of file diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 3dfd527849..2437235dc4 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -136,10 +136,11 @@ class Mailer(object): group together multiple email sending attempts sid (str): The generated session ID """ + params = {"token": token, "client_secret": client_secret, "sid": sid} link = ( self.hs.config.public_baseurl - + "_matrix/client/unstable/password_reset/email/submit_token" - "?token=%s&client_secret=%s&sid=%s" % (token, client_secret, sid) + + "_matrix/client/unstable/password_reset/email/submit_token?%s" + % urllib.parse.urlencode(params) ) template_vars = {"link": link} @@ -163,10 +164,11 @@ class Mailer(object): group together multiple email sending attempts sid (str): The generated session ID """ + params = {"token": token, "client_secret": client_secret, "sid": sid} link = ( self.hs.config.public_baseurl - + "_matrix/client/unstable/registration/email/submit_token" - "?token=%s&client_secret=%s&sid=%s" % (token, client_secret, sid) + + "_matrix/client/unstable/registration/email/submit_token?%s" + % urllib.parse.urlencode(params) ) template_vars = {"link": link} -- cgit 1.4.1 From 9d94313209fdb2141189c927cb1f81fea6feb5e4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 20 Sep 2019 12:05:00 +0100 Subject: Fix exception when resetting retry timings Fixes: > TypeError: set_destination_retry_timings() missing 1 required positional argument: 'retry_interval' Introduced in #6016. --- changelog.d/6072.misc | 1 + synapse/federation/transport/server.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6072.misc (limited to 'changelog.d') diff --git a/changelog.d/6072.misc b/changelog.d/6072.misc new file mode 100644 index 0000000000..91cf164714 --- /dev/null +++ b/changelog.d/6072.misc @@ -0,0 +1 @@ +Add a 'failure_ts' column to the 'destinations' database table. diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 7dc696c7ae..7f8a16e355 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -165,7 +165,7 @@ class Authenticator(object): async def _reset_retry_timings(self, origin): try: logger.info("Marking origin %r as up", origin) - await self.store.set_destination_retry_timings(origin, 0, 0) + await self.store.set_destination_retry_timings(origin, None, 0, 0) except Exception: logger.exception("Error resetting retry timings on %s", origin) -- cgit 1.4.1 From 7763dd3e9592909cfe3d7763f4a68b8135fc2bdc Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 20 Sep 2019 14:58:37 +0100 Subject: Remove trailing slash ability from password reset's submit_token endpoint (#6074) Remove trailing slash ability from the password reset submit_token endpoint. Since we provide the link in an email, and have never sent it with a trailing slash, there's no point for us to accept them on the endpoint. --- changelog.d/6074.feature | 1 + synapse/rest/client/v2_alpha/account.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6074.feature (limited to 'changelog.d') diff --git a/changelog.d/6074.feature b/changelog.d/6074.feature new file mode 100644 index 0000000000..b7aa9c99d8 --- /dev/null +++ b/changelog.d/6074.feature @@ -0,0 +1 @@ +Prevent password reset's submit_token endpoint from accepting trailing slashes. 
\ No newline at end of file diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 1791f4d79b..3c5b23dc80 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -200,7 +200,7 @@ class PasswordResetSubmitTokenServlet(RestServlet): """Handles 3PID validation token submission""" PATTERNS = client_patterns( - "/password_reset/(?P<medium>[^/]*)/submit_token/*$", releases=(), unstable=True + "/password_reset/(?P<medium>[^/]*)/submit_token$", releases=(), unstable=True ) def __init__(self, hs): -- cgit 1.4.1 From df3401a71d78088da36a03c73d35bc116c712df6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 20 Sep 2019 15:21:30 +0100 Subject: Allow HS to send emails when adding an email to the HS (#6042) --- changelog.d/6042.feature | 1 + docs/sample_config.yaml | 12 ++ synapse/config/emailconfig.py | 36 ++++ synapse/handlers/identity.py | 17 +- synapse/push/mailer.py | 29 +++ synapse/res/templates/add_threepid.html | 9 + synapse/res/templates/add_threepid.txt | 6 + synapse/res/templates/add_threepid_failure.html | 8 + synapse/res/templates/add_threepid_success.html | 6 + synapse/rest/client/v2_alpha/account.py | 252 ++++++++++++++++++++---- synapse/rest/client/v2_alpha/register.py | 24 +-- synapse/storage/registration.py | 31 ++- 12 files changed, 359 insertions(+), 72 deletions(-) create mode 100644 changelog.d/6042.feature create mode 100644 synapse/res/templates/add_threepid.html create mode 100644 synapse/res/templates/add_threepid.txt create mode 100644 synapse/res/templates/add_threepid_failure.html create mode 100644 synapse/res/templates/add_threepid_success.html (limited to 'changelog.d') diff --git a/changelog.d/6042.feature b/changelog.d/6042.feature new file mode 100644 index 0000000000..a737760363 --- /dev/null +++ b/changelog.d/6042.feature @@ -0,0 +1 @@ +Allow homeserver to handle or delegate email validation when adding an email to a user's account.
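To see the end-to-end flow this patch implements, here is a sketch of the client's side of adding an email. It is illustrative only: the homeserver URL, address, secret and access token are invented, and the endpoint prefixes are assumptions based on the servlet patterns in the diffs below.

.. code:: python

    import requests

    HS = "http://localhost:8008"
    secret = "a_random_client_secret"  # generated by the client

    # 1. Ask the homeserver to validate the new address. Depending on
    #    threepid_behaviour_email it either sends the add_threepid template
    #    mail itself or delegates to account_threepid_delegate_email.
    r = requests.post(
        HS + "/_matrix/client/r0/account/3pid/email/requestToken",
        json={"client_secret": secret, "email": "alice@example.com", "send_attempt": 1},
    )
    sid = r.json()["sid"]

    # 2. The user clicks the emailed link, which hits
    #    /_matrix/client/unstable/add_threepid/email/submit_token and marks
    #    the session as validated.

    # 3. Hand the validated session back to attach the address to the account.
    requests.post(
        HS + "/_matrix/client/r0/account/3pid",
        headers={"Authorization": "Bearer MY_ACCESS_TOKEN"},
        json={"three_pid_creds": {"client_secret": secret, "sid": sid}},
    )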
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 3e4edc6b0b..61d9f09a99 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1261,6 +1261,12 @@ password_config: # #registration_template_html: registration.html # #registration_template_text: registration.txt # +# # Templates for validation emails sent by the homeserver when adding an email to +# # your user account +# # +# #add_threepid_template_html: add_threepid.html +# #add_threepid_template_text: add_threepid.txt +# # # Templates for password reset success and failure pages that a user # # will see after attempting to reset their password # # @@ -1272,6 +1278,12 @@ password_config: # # # #registration_template_success_html: registration_success.html # #registration_template_failure_html: registration_failure.html +# +# # Templates for success and failure pages that a user will see after attempting +# # to add an email or phone to their account +# # +# #add_threepid_success_html: add_threepid_success.html +# #add_threepid_failure_html: add_threepid_failure.html #password_providers: diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index e5de768b0c..d9b43de660 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -169,12 +169,22 @@ class EmailConfig(Config): self.email_registration_template_text = email_config.get( "registration_template_text", "registration.txt" ) + self.email_add_threepid_template_html = email_config.get( + "add_threepid_template_html", "add_threepid.html" + ) + self.email_add_threepid_template_text = email_config.get( + "add_threepid_template_text", "add_threepid.txt" + ) + self.email_password_reset_template_failure_html = email_config.get( "password_reset_template_failure_html", "password_reset_failure.html" ) self.email_registration_template_failure_html = email_config.get( "registration_template_failure_html", "registration_failure.html" ) + self.email_add_threepid_template_failure_html = email_config.get( + "add_threepid_template_failure_html", "add_threepid_failure.html" + ) # These templates do not support any placeholder variables, so we # will read them from disk once during setup @@ -184,6 +194,9 @@ class EmailConfig(Config): email_registration_template_success_html = email_config.get( "registration_template_success_html", "registration_success.html" ) + email_add_threepid_template_success_html = email_config.get( + "add_threepid_template_success_html", "add_threepid_success.html" + ) # Check templates exist for f in [ @@ -191,9 +204,14 @@ class EmailConfig(Config): self.email_password_reset_template_text, self.email_registration_template_html, self.email_registration_template_text, + self.email_add_threepid_template_html, + self.email_add_threepid_template_text, self.email_password_reset_template_failure_html, + self.email_registration_template_failure_html, + self.email_add_threepid_template_failure_html, email_password_reset_template_success_html, email_registration_template_success_html, + email_add_threepid_template_success_html, ]: p = os.path.join(self.email_template_dir, f) if not os.path.isfile(p): @@ -212,6 +230,12 @@ class EmailConfig(Config): self.email_registration_template_success_html_content = self.read_file( filepath, "email.registration_template_success_html" ) + filepath = os.path.join( + self.email_template_dir, email_add_threepid_template_success_html + ) + self.email_add_threepid_template_success_html_content = self.read_file( + filepath, "email.add_threepid_template_success_html" + ) if 
self.email_enable_notifs: required = [ @@ -328,6 +352,12 @@ class EmailConfig(Config): # #registration_template_html: registration.html # #registration_template_text: registration.txt # + # # Templates for validation emails sent by the homeserver when adding an email to + # # your user account + # # + # #add_threepid_template_html: add_threepid.html + # #add_threepid_template_text: add_threepid.txt + # # # Templates for password reset success and failure pages that a user # # will see after attempting to reset their password # # @@ -339,6 +369,12 @@ class EmailConfig(Config): # # # #registration_template_success_html: registration_success.html # #registration_template_failure_html: registration_failure.html + # + # # Templates for success and failure pages that a user will see after attempting + # # to add an email or phone to their account + # # + # #add_threepid_success_html: add_threepid_success.html + # #add_threepid_failure_html: add_threepid_failure.html """ diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 512f38e5a6..156719e308 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -81,11 +81,10 @@ class IdentityHandler(BaseHandler): given identity server Args: - id_server (str|None): The identity server to validate 3PIDs against. If None, - we will attempt to extract id_server creds + id_server (str): The identity server to validate 3PIDs against. Must be a + complete URL including the protocol (http(s)://) creds (dict[str, str]): Dictionary containing the following keys: - * id_server|idServer: An optional domain name of an identity server * client_secret|clientSecret: A unique secret str provided by the client * sid: The ID of the validation session @@ -104,20 +103,10 @@ class IdentityHandler(BaseHandler): raise SynapseError( 400, "Missing param session_id in creds", errcode=Codes.MISSING_PARAM ) - if not id_server: - # Attempt to get the id_server from the creds dict - id_server = creds.get("id_server") or creds.get("idServer") - if not id_server: - raise SynapseError( - 400, "Missing param id_server in creds", errcode=Codes.MISSING_PARAM - ) query_params = {"sid": session_id, "client_secret": client_secret} - url = "https://%s%s" % ( - id_server, - "/_matrix/identity/api/v1/3pid/getValidated3pid", - ) + url = id_server + "/_matrix/identity/api/v1/3pid/getValidated3pid" data = yield self.http_client.get_json(url, query_params) return data if "medium" in data else None diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 2437235dc4..5a4fc78b4c 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -179,6 +179,35 @@ class Mailer(object): template_vars, ) + @defer.inlineCallbacks + def send_add_threepid_mail(self, email_address, token, client_secret, sid): + """Send an email with a validation link to a user for adding a 3pid to their account + + Args: + email_address (str): Email address we're sending the validation link to + + token (str): Unique token generated by the server to verify the email was received + + client_secret (str): Unique token generated by the client to group together + multiple email sending attempts + + sid (str): The generated session ID + """ + params = {"token": token, "client_secret": client_secret, "sid": sid} + link = ( + self.hs.config.public_baseurl + + "_matrix/client/unstable/add_threepid/email/submit_token?%s" + % urllib.parse.urlencode(params) + ) + + template_vars = {"link": link} + + yield self.send_email( + email_address, + "[%s] Validate Your Email" % 
self.hs.config.server_name,
+            template_vars,
+        )
+
     @defer.inlineCallbacks
     def send_notification_mail(
         self, app_id, user_id, email_address, push_actions, reason
diff --git a/synapse/res/templates/add_threepid.html b/synapse/res/templates/add_threepid.html
new file mode 100644
index 0000000000..cc4ab07e09
--- /dev/null
+++ b/synapse/res/templates/add_threepid.html
@@ -0,0 +1,9 @@
+<html>
+<body>
+    <p>A request to add an email address to your Matrix account has been received. If this was you, please click the link below to confirm adding this email:</p>
+
+    <a href="{{ link }}">{{ link }}</a>
+
+    <p>If this was not you, you can safely ignore this email. Thank you.</p>
+</body>
+</html>
diff --git a/synapse/res/templates/add_threepid.txt b/synapse/res/templates/add_threepid.txt
new file mode 100644
index 0000000000..a60c1ff659
--- /dev/null
+++ b/synapse/res/templates/add_threepid.txt
@@ -0,0 +1,6 @@
+A request to add an email address to your Matrix account has been received. If this was you,
+please click the link below to confirm adding this email:
+
+{{ link }}
+
+If this was not you, you can safely ignore this email. Thank you.
diff --git a/synapse/res/templates/add_threepid_failure.html b/synapse/res/templates/add_threepid_failure.html
new file mode 100644
index 0000000000..441d11c846
--- /dev/null
+++ b/synapse/res/templates/add_threepid_failure.html
@@ -0,0 +1,8 @@
+<html>
+<head></head>
+<body>
+<p>The request failed for the following reason: {{ failure_reason }}.</p>
+
+<p>No changes have been made to your account.</p>
+</body>
+</html>
diff --git a/synapse/res/templates/add_threepid_success.html b/synapse/res/templates/add_threepid_success.html
new file mode 100644
index 0000000000..fbd6e4018f
--- /dev/null
+++ b/synapse/res/templates/add_threepid_success.html
@@ -0,0 +1,6 @@
+<html>
+<head></head>
+<body>
+<p>Your email has now been validated, please return to your client. You may now close this window.</p>
+ + diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 3c5b23dc80..1139bb156c 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -21,7 +21,12 @@ from six.moves import http_client from twisted.internet import defer from synapse.api.constants import LoginType -from synapse.api.errors import Codes, SynapseError, ThreepidValidationError +from synapse.api.errors import ( + Codes, + HttpResponseException, + SynapseError, + ThreepidValidationError, +) from synapse.config.emailconfig import ThreepidBehaviour from synapse.http.server import finish_request from synapse.http.servlet import ( @@ -103,16 +108,9 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - # Have the configured identity server handle the request - if not self.hs.config.account_threepid_delegate_email: - logger.warn( - "No upstream email account_threepid_delegate configured on the server to " - "handle this request" - ) - raise SynapseError( - 400, "Password reset by email is not supported on this homeserver" - ) + assert self.hs.config.account_threepid_delegate_email + # Have the configured identity server handle the request ret = yield self.identity_handler.requestEmailToken( self.hs.config.account_threepid_delegate_email, email, @@ -214,6 +212,11 @@ class PasswordResetSubmitTokenServlet(RestServlet): self.config = hs.config self.clock = hs.get_clock() self.store = hs.get_datastore() + if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + self.failure_email_template, = load_jinja2_templates( + self.config.email_template_dir, + [self.config.email_password_reset_template_failure_html], + ) @defer.inlineCallbacks def on_GET(self, request, medium): @@ -261,13 +264,8 @@ class PasswordResetSubmitTokenServlet(RestServlet): request.setResponseCode(e.code) # Show a failure page with a reason - html_template, = load_jinja2_templates( - self.config.email_template_dir, - [self.config.email_password_reset_template_failure_html], - ) - template_vars = {"failure_reason": e.msg} - html = html_template.render(**template_vars) + html = self.failure_email_template.render(**template_vars) request.write(html.encode("utf-8")) finish_request(request) @@ -399,13 +397,35 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): self.identity_handler = hs.get_handlers().identity_handler self.store = self.hs.get_datastore() + if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + template_html, template_text = load_jinja2_templates( + self.config.email_template_dir, + [ + self.config.email_add_threepid_template_html, + self.config.email_add_threepid_template_text, + ], + public_baseurl=self.config.public_baseurl, + ) + self.mailer = Mailer( + hs=self.hs, + app_name=self.config.email_app_name, + template_html=template_html, + template_text=template_text, + ) + @defer.inlineCallbacks def on_POST(self, request): + if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.local_threepid_handling_disabled_due_to_email_config: + logger.warn( + "Adding emails have been disabled due to lack of an email config" + ) + raise SynapseError( + 400, "Adding an email to your account is disabled on this server" + ) + body = parse_json_object_from_request(request) - assert_params_in_dict( - body, ["id_server", "client_secret", "email", "send_attempt"] - ) - id_server = 
"https://" + body["id_server"] # Assume https + assert_params_in_dict(body, ["client_secret", "email", "send_attempt"]) client_secret = body["client_secret"] email = body["email"] send_attempt = body["send_attempt"] @@ -425,9 +445,30 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): if existing_user_id is not None: raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - ret = yield self.identity_handler.requestEmailToken( - id_server, email, client_secret, send_attempt, next_link - ) + if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + assert self.hs.config.account_threepid_delegate_email + + # Have the configured identity server handle the request + ret = yield self.identity_handler.requestEmailToken( + self.hs.config.account_threepid_delegate_email, + email, + client_secret, + send_attempt, + next_link, + ) + else: + # Send threepid validation emails from Synapse + sid = yield self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_add_threepid_mail, + next_link, + ) + + # Wrap the session id in a JSON object + ret = {"sid": sid} + return 200, ret @@ -471,9 +512,86 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): ret = yield self.identity_handler.requestMsisdnToken( id_server, country, phone_number, client_secret, send_attempt, next_link ) + return 200, ret +class AddThreepidSubmitTokenServlet(RestServlet): + """Handles 3PID validation token submission for adding an email to a user's account""" + + PATTERNS = client_patterns( + "/add_threepid/email/submit_token$", releases=(), unstable=True + ) + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super().__init__() + self.config = hs.config + self.clock = hs.get_clock() + self.store = hs.get_datastore() + if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + self.failure_email_template, = load_jinja2_templates( + self.config.email_template_dir, + [self.config.email_add_threepid_template_failure_html], + ) + + @defer.inlineCallbacks + def on_GET(self, request): + if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.local_threepid_handling_disabled_due_to_email_config: + logger.warn( + "Adding emails have been disabled due to lack of an email config" + ) + raise SynapseError( + 400, "Adding an email to your account is disabled on this server" + ) + elif self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + raise SynapseError( + 400, + "This homeserver is not validating threepids. 
Use an identity server " + "instead.", + ) + + sid = parse_string(request, "sid", required=True) + client_secret = parse_string(request, "client_secret", required=True) + token = parse_string(request, "token", required=True) + + # Attempt to validate a 3PID session + try: + # Mark the session as valid + next_link = yield self.store.validate_threepid_session( + sid, client_secret, token, self.clock.time_msec() + ) + + # Perform a 302 redirect if next_link is set + if next_link: + if next_link.startswith("file:///"): + logger.warn( + "Not redirecting to next_link as it is a local file: address" + ) + else: + request.setResponseCode(302) + request.setHeader("Location", next_link) + finish_request(request) + return None + + # Otherwise show the success template + html = self.config.email_add_threepid_template_success_html_content + request.setResponseCode(200) + except ThreepidValidationError as e: + request.setResponseCode(e.code) + + # Show a failure page with a reason + template_vars = {"failure_reason": e.msg} + html = self.failure_email_template.render(**template_vars) + + request.write(html.encode("utf-8")) + finish_request(request) + + class ThreepidRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid$") @@ -495,6 +613,8 @@ class ThreepidRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() body = parse_json_object_from_request(request) threepid_creds = body.get("threePidCreds") or body.get("three_pid_creds") @@ -502,26 +622,85 @@ class ThreepidRestServlet(RestServlet): raise SynapseError( 400, "Missing param three_pid_creds", Codes.MISSING_PARAM ) + assert_params_in_dict(threepid_creds, ["client_secret", "sid"]) - requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + client_secret = threepid_creds["client_secret"] + sid = threepid_creds["sid"] - # Specify None as the identity server to retrieve it from the request body instead - threepid = yield self.identity_handler.threepid_from_creds(None, threepid_creds) + # We don't actually know which medium this 3PID is. Thus we first assume it's email, + # and if validation fails we try msisdn + validation_session = None - if not threepid: - raise SynapseError(400, "Failed to auth 3pid", Codes.THREEPID_AUTH_FAILED) + # Try to validate as email + if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + # Ask our delegated email identity server + try: + validation_session = yield self.identity_handler.threepid_from_creds( + self.hs.config.account_threepid_delegate_email, threepid_creds + ) + except HttpResponseException: + logger.debug( + "%s reported non-validated threepid: %s", + self.hs.config.account_threepid_delegate_email, + threepid_creds, + ) + elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + # Get a validated session matching these details + validation_session = yield self.datastore.get_threepid_validation_session( + "email", client_secret, sid=sid, validated=True + ) - for reqd in ["medium", "address", "validated_at"]: - if reqd not in threepid: - logger.warn("Couldn't add 3pid: invalid response from ID server") - raise SynapseError(500, "Invalid response from ID Server") + # Old versions of Sydent return a 200 http code even on a failed validation check. 
+ # Thus, in addition to the HttpResponseException check above (which checks for + # non-200 errors), we need to make sure validation_session isn't actually an error, + # identified by containing an "error" key + # See https://github.com/matrix-org/sydent/issues/215 for details + if validation_session and "error" not in validation_session: + yield self._add_threepid_to_account(user_id, validation_session) + return 200, {} - yield self.auth_handler.add_threepid( - user_id, threepid["medium"], threepid["address"], threepid["validated_at"] + # Try to validate as msisdn + if self.hs.config.account_threepid_delegate_msisdn: + # Ask our delegated msisdn identity server + try: + validation_session = yield self.identity_handler.threepid_from_creds( + self.hs.config.account_threepid_delegate_msisdn, threepid_creds + ) + except HttpResponseException: + logger.debug( + "%s reported non-validated threepid: %s", + self.hs.config.account_threepid_delegate_email, + threepid_creds, + ) + + # Check that validation_session isn't actually an error due to old Sydent instances + # See explanatory comment above + if validation_session and "error" not in validation_session: + yield self._add_threepid_to_account(user_id, validation_session) + return 200, {} + + raise SynapseError( + 400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED ) - return 200, {} + @defer.inlineCallbacks + def _add_threepid_to_account(self, user_id, validation_session): + """Add a threepid wrapped in a validation_session dict to an account + + Args: + user_id (str): The mxid of the user to add this 3PID to + + validation_session (dict): A dict containing the following: + * medium - medium of the threepid + * address - address of the threepid + * validated_at - timestamp of when the validation occurred + """ + yield self.auth_handler.add_threepid( + user_id, + validation_session["medium"], + validation_session["address"], + validation_session["validated_at"], + ) class ThreepidUnbindRestServlet(RestServlet): @@ -613,6 +792,7 @@ def register_servlets(hs, http_server): DeactivateAccountRestServlet(hs).register(http_server) EmailThreepidRequestTokenRestServlet(hs).register(http_server) MsisdnThreepidRequestTokenRestServlet(hs).register(http_server) + AddThreepidSubmitTokenServlet(hs).register(http_server) ThreepidRestServlet(hs).register(http_server) ThreepidUnbindRestServlet(hs).register(http_server) ThreepidDeleteRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 5c7a5f3579..34276ea3fa 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -131,15 +131,9 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - if not self.hs.config.account_threepid_delegate_email: - logger.warn( - "No upstream email account_threepid_delegate configured on the server to " - "handle this request" - ) - raise SynapseError( - 400, "Registration by email is not supported on this homeserver" - ) + assert self.hs.config.account_threepid_delegate_email + # Have the configured identity server handle the request ret = yield self.identity_handler.requestEmailToken( self.hs.config.account_threepid_delegate_email, email, @@ -246,6 +240,12 @@ class RegistrationSubmitTokenServlet(RestServlet): self.clock = hs.get_clock() self.store = hs.get_datastore() + if 
self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + self.failure_email_template, = load_jinja2_templates( + self.config.email_template_dir, + [self.config.email_registration_template_failure_html], + ) + @defer.inlineCallbacks def on_GET(self, request, medium): if medium != "email": @@ -289,17 +289,11 @@ class RegistrationSubmitTokenServlet(RestServlet): request.setResponseCode(200) except ThreepidValidationError as e: - # Show a failure page with a reason request.setResponseCode(e.code) # Show a failure page with a reason - html_template, = load_jinja2_templates( - self.config.email_template_dir, - [self.config.email_registration_template_failure_html], - ) - template_vars = {"failure_reason": e.msg} - html = html_template.render(**template_vars) + html = self.failure_email_template.render(**template_vars) request.write(html.encode("utf-8")) finish_request(request) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 109052fa41..da27ad76b6 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -24,7 +24,7 @@ from six.moves import range from twisted.internet import defer from synapse.api.constants import UserTypes -from synapse.api.errors import Codes, StoreError, ThreepidValidationError +from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage import background_updates from synapse.storage._base import SQLBaseStore @@ -661,18 +661,31 @@ class RegistrationWorkerStore(SQLBaseStore): medium (str|None): The medium of the 3PID address (str|None): The address of the 3PID sid (str|None): The ID of the validation session - client_secret (str|None): A unique string provided by the client to - help identify this validation attempt + client_secret (str): A unique string provided by the client to help identify this + validation attempt validated (bool|None): Whether sessions should be filtered by whether they have been validated already or not. None to perform no filtering Returns: - deferred {str, int}|None: A dict containing the - latest session_id and send_attempt count for this 3PID. - Otherwise None if there hasn't been a previous attempt + Deferred[dict|None]: A dict containing the following: + * address - address of the 3pid + * medium - medium of the 3pid + * client_secret - a secret provided by the client for this validation session + * session_id - ID of the validation session + * send_attempt - a number serving to dedupe send attempts for this session + * validated_at - timestamp of when this session was validated if so + + Otherwise None if a validation session is not found """ - keyvalues = {"medium": medium, "client_secret": client_secret} + if not client_secret: + raise SynapseError( + 400, "Missing parameter: client_secret", errcode=Codes.MISSING_PARAM + ) + + keyvalues = {"client_secret": client_secret} + if medium: + keyvalues["medium"] = medium if address: keyvalues["address"] = address if sid: @@ -1209,6 +1222,10 @@ class RegistrationStore( current_ts (int): The current unix time in milliseconds. Used for checking token expiry status + Raises: + ThreepidValidationError: if a matching validation token was not found or has + expired + Returns: deferred str|None: A str representing a link to redirect the user to if there is one. 
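The lookup rule this storage change settles on is simple enough to state on its own: client_secret becomes mandatory, while medium, address and sid only narrow the query when supplied. A minimal standalone sketch of that filter-building pattern — the helper name, the plain ValueError, and the "session_id" key are illustrative assumptions, not Synapse's actual storage API:

    # Illustrative sketch of the keyvalues-construction pattern added above.
    # Synapse raises a 400 SynapseError with M_MISSING_PARAM; a ValueError
    # stands in for it here. The "session_id" key name is an assumption.
    def build_validation_session_filter(
        client_secret, medium=None, address=None, sid=None
    ):
        if not client_secret:
            raise ValueError("Missing parameter: client_secret")

        keyvalues = {"client_secret": client_secret}
        if medium:
            keyvalues["medium"] = medium
        if address:
            keyvalues["address"] = address
        if sid:
            keyvalues["session_id"] = sid
        return keyvalues

    # e.g. build_validation_session_filter("abc123", sid="42")
    #   -> {"client_secret": "abc123", "session_id": "42"}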
-- cgit 1.4.1 From 885a4726b7f9cdf02187b92b43f639e2cbfbb12e Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 23 Sep 2019 15:37:23 +0200 Subject: Return timeout error to user for identity server calls (#6073) --- changelog.d/6073.feature | 1 + synapse/handlers/identity.py | 16 ++++++++++++++-- synapse/handlers/room_member.py | 32 +++++++++++++++++++++++--------- 3 files changed, 38 insertions(+), 11 deletions(-) create mode 100644 changelog.d/6073.feature (limited to 'changelog.d') diff --git a/changelog.d/6073.feature b/changelog.d/6073.feature new file mode 100644 index 0000000000..15d9933891 --- /dev/null +++ b/changelog.d/6073.feature @@ -0,0 +1 @@ +Return a clearer error message when a timeout occurs when attempting to contact an identity server. \ No newline at end of file diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 156719e308..cd4700b521 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -22,6 +22,7 @@ import logging from canonicaljson import json from twisted.internet import defer +from twisted.internet.error import TimeoutError from synapse.api.errors import ( CodeMessageException, @@ -108,7 +109,10 @@ class IdentityHandler(BaseHandler): url = id_server + "/_matrix/identity/api/v1/3pid/getValidated3pid" - data = yield self.http_client.get_json(url, query_params) + try: + data = yield self.http_client.get_json(url, query_params) + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") return data if "medium" in data else None @defer.inlineCallbacks @@ -171,6 +175,8 @@ class IdentityHandler(BaseHandler): if e.code != 404 or not use_v2: logger.error("3PID bind failed with Matrix error: %r", e) raise e.to_synapse_error() + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") except CodeMessageException as e: data = json.loads(e.msg) # XXX WAT? 
return data @@ -261,7 +267,9 @@ class IdentityHandler(BaseHandler): logger.warn("Received %d response while unbinding threepid", e.code) else: logger.error("Failed to unbind threepid on identity server: %s", e) - raise SynapseError(502, "Failed to contact identity server") + raise SynapseError(500, "Failed to contact identity server") + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") yield self.store.remove_user_bound_threepid( user_id=mxid, @@ -394,6 +402,8 @@ class IdentityHandler(BaseHandler): except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") @defer.inlineCallbacks def requestMsisdnToken( @@ -446,6 +456,8 @@ class IdentityHandler(BaseHandler): except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") def create_id_access_token_header(id_access_token): diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 35450feb6f..39df0f128d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -25,6 +25,7 @@ from signedjson.sign import verify_signed_json from unpaddedbase64 import decode_base64 from twisted.internet import defer +from twisted.internet.error import TimeoutError from synapse import types from synapse.api.constants import EventTypes, Membership @@ -756,7 +757,8 @@ class RoomMemberHandler(object): raise AuthError(401, "No signatures on 3pid binding") yield self._verify_any_signature(data, id_server) return data["mxid"] - + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") except IOError as e: logger.warning("Error from v1 identity server lookup: %s" % (e,)) @@ -777,10 +779,13 @@ class RoomMemberHandler(object): Deferred[str|None]: the matrix ID of the 3pid, or None if it is not recognised. 
""" # Check what hashing details are supported by this identity server - hash_details = yield self.simple_http_client.get_json( - "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), - {"access_token": id_access_token}, - ) + try: + hash_details = yield self.simple_http_client.get_json( + "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), + {"access_token": id_access_token}, + ) + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") if not isinstance(hash_details, dict): logger.warning( @@ -851,6 +856,8 @@ class RoomMemberHandler(object): }, headers=headers, ) + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") except Exception as e: logger.warning("Error when performing a v2 3pid lookup: %s", e) raise SynapseError( @@ -873,10 +880,13 @@ class RoomMemberHandler(object): if server_hostname not in data["signatures"]: raise AuthError(401, "No signature from server %s" % (server_hostname,)) for key_name, signature in data["signatures"][server_hostname].items(): - key_data = yield self.simple_http_client.get_json( - "%s%s/_matrix/identity/api/v1/pubkey/%s" - % (id_server_scheme, server_hostname, key_name) - ) + try: + key_data = yield self.simple_http_client.get_json( + "%s%s/_matrix/identity/api/v1/pubkey/%s" + % (id_server_scheme, server_hostname, key_name) + ) + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") if "public_key" not in key_data: raise AuthError( 401, "No public key named %s from %s" % (key_name, server_hostname) @@ -1051,6 +1061,8 @@ class RoomMemberHandler(object): invite_config, {"Authorization": create_id_access_token_header(id_access_token)}, ) + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: if e.code != 404: logger.info("Failed to POST %s with JSON: %s", url, e) @@ -1067,6 +1079,8 @@ class RoomMemberHandler(object): data = yield self.simple_http_client.post_json_get_json( url, invite_config ) + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: logger.warning( "Error trying to call /store-invite on %s%s: %s", -- cgit 1.4.1 From 1c9feadf4bf0755162d0d210bea398a3fb690ab6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 23 Sep 2019 15:38:19 +0200 Subject: Generalize email sending logging (#6075) In ancient times Synapse would only send emails when it was notifying a user about a message they received... Now it can do all sorts of neat things! Change the logging so it's not just about notifications. --- changelog.d/6075.misc | 1 + synapse/push/mailer.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6075.misc (limited to 'changelog.d') diff --git a/changelog.d/6075.misc b/changelog.d/6075.misc new file mode 100644 index 0000000000..914e56bcfe --- /dev/null +++ b/changelog.d/6075.misc @@ -0,0 +1 @@ +Change mailer logging to reflect Synapse doesn't just do chat notifications by email now. 
\ No newline at end of file diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 5a4fc78b4c..5b16ab4ae8 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -311,7 +311,7 @@ class Mailer(object): multipart_msg.attach(text_part) multipart_msg.attach(html_part) - logger.info("Sending email notification to %s" % email_address) + logger.info("Sending email to %s" % email_address) yield make_deferred_yieldable( self.sendmail( -- cgit 1.4.1 From 1b519e0272a13649d442aad2a10c9a3b39c2d200 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 23 Sep 2019 16:38:38 +0200 Subject: Disable /register/available if registration is disabled (#6082) Fixes #6066 This register endpoint should be disabled if registration is disabled, otherwise we're giving anyone the ability to check if a username exists on a server when we don't need to be. Error code is 403 (Forbidden) as that's the same returned by /register when registration is disabled. --- changelog.d/6082.feature | 1 + synapse/rest/client/v2_alpha/register.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/6082.feature (limited to 'changelog.d') diff --git a/changelog.d/6082.feature b/changelog.d/6082.feature new file mode 100644 index 0000000000..c30662b608 --- /dev/null +++ b/changelog.d/6082.feature @@ -0,0 +1 @@ +Return 403 on `/register/available` if registration has been disabled. \ No newline at end of file diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 34276ea3fa..e99b1f5c45 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -328,6 +328,11 @@ class UsernameAvailabilityRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request): + if not self.hs.config.enable_registration: + raise SynapseError( + 403, "Registration has been disabled", errcode=Codes.FORBIDDEN + ) + ip = self.hs.get_ip_from_request(request) with self.ratelimiter.ratelimit(ip) as wait_deferred: yield wait_deferred -- cgit 1.4.1 From 30af161af27146cc44152292060c7005a6b8546b Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 23 Sep 2019 17:50:27 +0200 Subject: Implement MSC2290 (#6043) Implements MSC2290. This PR adds two new endpoints, /unstable/account/3pid/add and /unstable/account/3pid/bind. Depending on the progress of that MSC the unstable prefix may go away. This PR also removes the blacklist on some 3PID tests which occurs in #6042, as the corresponding Sytest PR changes them to use the new endpoints. Finally, it also modifies the account deactivation code such that it doesn't just try to deactivate 3PIDs that were bound to the user's account, but any 3PIDs that were bound through the homeserver on that user's account. 
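Before the diff itself, a sketch of how a client might drive the two endpoints this commit introduces. This is hypothetical usage, not part of the patch: the homeserver URL, access token and secrets are placeholders, and the third-party `requests` library is used purely for brevity.

    # Hypothetical client-side calls against the new unstable endpoints.
    # All values below are placeholders; the endpoint paths and body fields
    # come from the servlet definitions in the diff that follows.
    import requests

    BASE = "https://homeserver.example/_matrix/client/unstable"
    HEADERS = {"Authorization": "Bearer ACCESS_TOKEN"}

    # Add a validated 3PID to the local account only, without binding it
    # to an identity server.
    requests.post(
        BASE + "/account/3pid/add",
        json={"client_secret": "SECRET", "sid": "SESSION_ID"},
        headers=HEADERS,
    ).raise_for_status()

    # Separately, bind an already-validated 3PID to an identity server.
    requests.post(
        BASE + "/account/3pid/bind",
        json={
            "id_server": "id.example.org",
            "id_access_token": "IS_TOKEN",  # optional, per the servlet below
            "client_secret": "SECRET",
            "sid": "SESSION_ID",
        },
        headers=HEADERS,
    ).raise_for_status()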
--- changelog.d/6043.feature | 1 + synapse/handlers/deactivate_account.py | 4 +- synapse/handlers/identity.py | 134 +++++++++++++++---------- synapse/rest/client/v2_alpha/account.py | 161 +++++++++++++++++-------------- synapse/rest/client/v2_alpha/register.py | 6 ++ synapse/storage/registration.py | 22 ++++- sytest-blacklist | 9 -- 7 files changed, 203 insertions(+), 134 deletions(-) create mode 100644 changelog.d/6043.feature (limited to 'changelog.d') diff --git a/changelog.d/6043.feature b/changelog.d/6043.feature new file mode 100644 index 0000000000..cd27b0400b --- /dev/null +++ b/changelog.d/6043.feature @@ -0,0 +1 @@ +Implement new Client Server API endpoints `/account/3pid/add` and `/account/3pid/bind` as per [MSC2290](https://github.com/matrix-org/matrix-doc/pull/2290). \ No newline at end of file diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 5f804d1f13..d83912c9a4 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -73,7 +73,9 @@ class DeactivateAccountHandler(BaseHandler): # unbinding identity_server_supports_unbinding = True - threepids = yield self.store.user_get_threepids(user_id) + # Retrieve the 3PIDs this user has bound to an identity server + threepids = yield self.store.user_get_bound_threepids(user_id) + for threepid in threepids: try: result = yield self._identity_handler.try_unbind_threepid( diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index cd4700b521..d50d485e06 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -30,6 +30,7 @@ from synapse.api.errors import ( HttpResponseException, SynapseError, ) +from synapse.config.emailconfig import ThreepidBehaviour from synapse.util.stringutils import random_string from ._base import BaseHandler @@ -45,36 +46,6 @@ class IdentityHandler(BaseHandler): self.federation_http_client = hs.get_http_client() self.hs = hs - def _extract_items_from_creds_dict(self, creds): - """ - Retrieve entries from a "credentials" dictionary - - Args: - creds (dict[str, str]): Dictionary of credentials that contain the following keys: - * client_secret|clientSecret: A unique secret str provided by the client - * id_server|idServer: the domain of the identity server to query - * id_access_token: The access token to authenticate to the identity - server with. - - Returns: - tuple(str, str, str|None): A tuple containing the client_secret, the id_server, - and the id_access_token value if available. 
- """ - client_secret = creds.get("client_secret") or creds.get("clientSecret") - if not client_secret: - raise SynapseError( - 400, "No client_secret in creds", errcode=Codes.MISSING_PARAM - ) - - id_server = creds.get("id_server") or creds.get("idServer") - if not id_server: - raise SynapseError( - 400, "No id_server in creds", errcode=Codes.MISSING_PARAM - ) - - id_access_token = creds.get("id_access_token") - return client_secret, id_server, id_access_token - @defer.inlineCallbacks def threepid_from_creds(self, id_server, creds): """ @@ -113,35 +84,50 @@ class IdentityHandler(BaseHandler): data = yield self.http_client.get_json(url, query_params) except TimeoutError: raise SynapseError(500, "Timed out contacting identity server") - return data if "medium" in data else None + except HttpResponseException as e: + logger.info( + "%s returned %i for threepid validation for: %s", + id_server, + e.code, + creds, + ) + return None + + # Old versions of Sydent return a 200 http code even on a failed validation + # check. Thus, in addition to the HttpResponseException check above (which + # checks for non-200 errors), we need to make sure validation_session isn't + # actually an error, identified by the absence of a "medium" key + # See https://github.com/matrix-org/sydent/issues/215 for details + if "medium" in data: + return data + + logger.info("%s reported non-validated threepid: %s", id_server, creds) + return None @defer.inlineCallbacks - def bind_threepid(self, creds, mxid, use_v2=True): + def bind_threepid( + self, client_secret, sid, mxid, id_server, id_access_token=None, use_v2=True + ): """Bind a 3PID to an identity server Args: - creds (dict[str, str]): Dictionary of credentials that contain the following keys: - * client_secret|clientSecret: A unique secret str provided by the client - * id_server|idServer: the domain of the identity server to query - * id_access_token: The access token to authenticate to the identity - server with. Required if use_v2 is true + client_secret (str): A unique secret provided by the client + + sid (str): The ID of the validation session + mxid (str): The MXID to bind the 3PID to - use_v2 (bool): Whether to use v2 Identity Service API endpoints + + id_server (str): The domain of the identity server to query + + id_access_token (str): The access token to authenticate to the identity + server with, if necessary. Required if use_v2 is true + + use_v2 (bool): Whether to use v2 Identity Service API endpoints. 
Defaults to True Returns: Deferred[dict]: The response from the identity server """ - logger.debug("binding threepid %r to %s", creds, mxid) - - client_secret, id_server, id_access_token = self._extract_items_from_creds_dict( - creds - ) - - sid = creds.get("sid") - if not sid: - raise SynapseError( - 400, "No sid in three_pid_creds", errcode=Codes.MISSING_PARAM - ) + logger.debug("Proxying threepid bind request for %s to %s", mxid, id_server) # If an id_access_token is not supplied, force usage of v1 if id_access_token is None: @@ -160,7 +146,6 @@ class IdentityHandler(BaseHandler): data = yield self.http_client.post_json_get_json( bind_url, bind_data, headers=headers ) - logger.debug("bound threepid %r to %s", creds, mxid) # Remember where we bound the threepid yield self.store.add_user_bound_threepid( @@ -182,7 +167,10 @@ class IdentityHandler(BaseHandler): return data logger.info("Got 404 when POSTing JSON %s, falling back to v1 URL", bind_url) - return (yield self.bind_threepid(creds, mxid, use_v2=False)) + res = yield self.bind_threepid( + client_secret, sid, mxid, id_server, id_access_token, use_v2=False + ) + return res @defer.inlineCallbacks def try_unbind_threepid(self, mxid, threepid): @@ -459,6 +447,50 @@ class IdentityHandler(BaseHandler): except TimeoutError: raise SynapseError(500, "Timed out contacting identity server") + @defer.inlineCallbacks + def validate_threepid_session(self, client_secret, sid): + """Validates a threepid session with only the client secret and session ID + Tries validating against any configured account_threepid_delegates as well as locally. + + Args: + client_secret (str): A secret provided by the client + + sid (str): The ID of the session + + Returns: + Dict[str, str|int] if validation was successful, otherwise None + """ + # XXX: We shouldn't need to keep wrapping and unwrapping this value + threepid_creds = {"client_secret": client_secret, "sid": sid} + + # We don't actually know which medium this 3PID is. 
Thus we first assume it's email, + # and if validation fails we try msisdn + validation_session = None + + # Try to validate as email + if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + # Ask our delegated email identity server + validation_session = yield self.threepid_from_creds( + self.hs.config.account_threepid_delegate_email, threepid_creds + ) + elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + # Get a validated session matching these details + validation_session = yield self.store.get_threepid_validation_session( + "email", client_secret, sid=sid, validated=True + ) + + if validation_session: + return validation_session + + # Try to validate as msisdn + if self.hs.config.account_threepid_delegate_msisdn: + # Ask our delegated msisdn identity server + validation_session = yield self.threepid_from_creds( + self.hs.config.account_threepid_delegate_msisdn, threepid_creds + ) + + return validation_session + def create_id_access_token_header(id_access_token): """Create an Authorization header for passing to SimpleHttpClient as the header value diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 1139bb156c..b8c48dc8f1 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -21,12 +21,7 @@ from six.moves import http_client from twisted.internet import defer from synapse.api.constants import LoginType -from synapse.api.errors import ( - Codes, - HttpResponseException, - SynapseError, - ThreepidValidationError, -) +from synapse.api.errors import Codes, SynapseError, ThreepidValidationError from synapse.config.emailconfig import ThreepidBehaviour from synapse.http.server import finish_request from synapse.http.servlet import ( @@ -485,10 +480,8 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): def on_POST(self, request): body = parse_json_object_from_request(request) assert_params_in_dict( - body, - ["id_server", "client_secret", "country", "phone_number", "send_attempt"], + body, ["client_secret", "country", "phone_number", "send_attempt"] ) - id_server = "https://" + body["id_server"] # Assume https client_secret = body["client_secret"] country = body["country"] phone_number = body["phone_number"] @@ -509,8 +502,23 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): if existing_user_id is not None: raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) + if not self.hs.config.account_threepid_delegate_msisdn: + logger.warn( + "No upstream msisdn account_threepid_delegate configured on the server to " + "handle this request" + ) + raise SynapseError( + 400, + "Adding phone numbers to user account is not supported by this homeserver", + ) + ret = yield self.identity_handler.requestMsisdnToken( - id_server, country, phone_number, client_secret, send_attempt, next_link + self.hs.config.account_threepid_delegate_msisdn, + country, + phone_number, + client_secret, + send_attempt, + next_link, ) return 200, ret @@ -627,81 +635,88 @@ class ThreepidRestServlet(RestServlet): client_secret = threepid_creds["client_secret"] sid = threepid_creds["sid"] - # We don't actually know which medium this 3PID is. 
Thus we first assume it's email, - # and if validation fails we try msisdn - validation_session = None - - # Try to validate as email - if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - # Ask our delegated email identity server - try: - validation_session = yield self.identity_handler.threepid_from_creds( - self.hs.config.account_threepid_delegate_email, threepid_creds - ) - except HttpResponseException: - logger.debug( - "%s reported non-validated threepid: %s", - self.hs.config.account_threepid_delegate_email, - threepid_creds, - ) - elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: - # Get a validated session matching these details - validation_session = yield self.datastore.get_threepid_validation_session( - "email", client_secret, sid=sid, validated=True - ) - - # Old versions of Sydent return a 200 http code even on a failed validation check. - # Thus, in addition to the HttpResponseException check above (which checks for - # non-200 errors), we need to make sure validation_session isn't actually an error, - # identified by containing an "error" key - # See https://github.com/matrix-org/sydent/issues/215 for details - if validation_session and "error" not in validation_session: - yield self._add_threepid_to_account(user_id, validation_session) + validation_session = yield self.identity_handler.validate_threepid_session( + client_secret, sid + ) + if validation_session: + yield self.auth_handler.add_threepid( + user_id, + validation_session["medium"], + validation_session["address"], + validation_session["validated_at"], + ) return 200, {} - # Try to validate as msisdn - if self.hs.config.account_threepid_delegate_msisdn: - # Ask our delegated msisdn identity server - try: - validation_session = yield self.identity_handler.threepid_from_creds( - self.hs.config.account_threepid_delegate_msisdn, threepid_creds - ) - except HttpResponseException: - logger.debug( - "%s reported non-validated threepid: %s", - self.hs.config.account_threepid_delegate_email, - threepid_creds, - ) + raise SynapseError( + 400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED + ) + - # Check that validation_session isn't actually an error due to old Sydent instances - # See explanatory comment above - if validation_session and "error" not in validation_session: - yield self._add_threepid_to_account(user_id, validation_session) - return 200, {} +class ThreepidAddRestServlet(RestServlet): + PATTERNS = client_patterns("/account/3pid/add$", releases=(), unstable=True) + + def __init__(self, hs): + super(ThreepidAddRestServlet, self).__init__() + self.hs = hs + self.identity_handler = hs.get_handlers().identity_handler + self.auth = hs.get_auth() + self.auth_handler = hs.get_auth_handler() + + @defer.inlineCallbacks + def on_POST(self, request): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + body = parse_json_object_from_request(request) + + assert_params_in_dict(body, ["client_secret", "sid"]) + client_secret = body["client_secret"] + sid = body["sid"] + + validation_session = yield self.identity_handler.validate_threepid_session( + client_secret, sid + ) + if validation_session: + yield self.auth_handler.add_threepid( + user_id, + validation_session["medium"], + validation_session["address"], + validation_session["validated_at"], + ) + return 200, {} raise SynapseError( 400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED ) + +class ThreepidBindRestServlet(RestServlet): + PATTERNS = 
client_patterns("/account/3pid/bind$", releases=(), unstable=True) + + def __init__(self, hs): + super(ThreepidBindRestServlet, self).__init__() + self.hs = hs + self.identity_handler = hs.get_handlers().identity_handler + self.auth = hs.get_auth() + @defer.inlineCallbacks - def _add_threepid_to_account(self, user_id, validation_session): - """Add a threepid wrapped in a validation_session dict to an account + def on_POST(self, request): + body = parse_json_object_from_request(request) - Args: - user_id (str): The mxid of the user to add this 3PID to + assert_params_in_dict(body, ["id_server", "sid", "client_secret"]) + id_server = body["id_server"] + sid = body["sid"] + client_secret = body["client_secret"] + id_access_token = body.get("id_access_token") # optional - validation_session (dict): A dict containing the following: - * medium - medium of the threepid - * address - address of the threepid - * validated_at - timestamp of when the validation occurred - """ - yield self.auth_handler.add_threepid( - user_id, - validation_session["medium"], - validation_session["address"], - validation_session["validated_at"], + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + yield self.identity_handler.bind_threepid( + client_secret, sid, user_id, id_server, id_access_token ) + return 200, {} + class ThreepidUnbindRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/unbind$", releases=(), unstable=True) @@ -794,6 +809,8 @@ def register_servlets(hs, http_server): MsisdnThreepidRequestTokenRestServlet(hs).register(http_server) AddThreepidSubmitTokenServlet(hs).register(http_server) ThreepidRestServlet(hs).register(http_server) + ThreepidAddRestServlet(hs).register(http_server) + ThreepidBindRestServlet(hs).register(http_server) ThreepidUnbindRestServlet(hs).register(http_server) ThreepidDeleteRestServlet(hs).register(http_server) WhoamiRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index e99b1f5c45..135a70808f 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -246,6 +246,12 @@ class RegistrationSubmitTokenServlet(RestServlet): [self.config.email_registration_template_failure_html], ) + if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + self.failure_email_template, = load_jinja2_templates( + self.config.email_template_dir, + [self.config.email_registration_template_failure_html], + ) + @defer.inlineCallbacks def on_GET(self, request, medium): if medium != "email": diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index da27ad76b6..805411a6b2 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -586,6 +586,26 @@ class RegistrationWorkerStore(SQLBaseStore): desc="add_user_bound_threepid", ) + def user_get_bound_threepids(self, user_id): + """Get the threepids that a user has bound to an identity server through the homeserver + The homeserver remembers where binds to an identity server occurred. Using this + method can retrieve those threepids. 
+ + Args: + user_id (str): The ID of the user to retrieve threepids for + + Returns: + Deferred[list[dict]]: List of dictionaries containing the following: + medium (str): The medium of the threepid (e.g "email") + address (str): The address of the threepid (e.g "bob@example.com") + """ + return self._simple_select_list( + table="user_threepid_id_server", + keyvalues={"user_id": user_id}, + retcols=["medium", "address"], + desc="user_get_bound_threepids", + ) + def remove_user_bound_threepid(self, user_id, medium, address, id_server): """The server proxied an unbind request to the given identity server on behalf of the given user, so we remove the mapping of threepid to @@ -655,7 +675,7 @@ class RegistrationWorkerStore(SQLBaseStore): self, medium, client_secret, address=None, sid=None, validated=True ): """Gets a session_id and last_send_attempt (if available) for a - client_secret/medium/(address|session_id) combo + combination of validation metadata Args: medium (str|None): The medium of the 3PID diff --git a/sytest-blacklist b/sytest-blacklist index 04698cb068..11785fd43f 100644 --- a/sytest-blacklist +++ b/sytest-blacklist @@ -29,12 +29,3 @@ Enabling an unknown default rule fails with 404 # Blacklisted due to https://github.com/matrix-org/synapse/issues/1663 New federated private chats get full presence information (SYN-115) - -# Blacklisted temporarily due to https://github.com/matrix-org/matrix-doc/pull/2290 -# These sytests need to be updated with new endpoints, which will come in a later PR -# That PR will also remove this blacklist -Can bind 3PID via home server -Can bind and unbind 3PID via homeserver -3PIDs are unbound after account deactivation -Can bind and unbind 3PID via /unbind by specifying the identity server -Can bind and unbind 3PID via /unbind without specifying the identity server -- cgit 1.4.1 From 691a70190b76aa29481f6299580b71160068ef8e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 23 Sep 2019 16:04:41 +0100 Subject: Newsfile --- changelog.d/6089.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6089.misc (limited to 'changelog.d') diff --git a/changelog.d/6089.misc b/changelog.d/6089.misc new file mode 100644 index 0000000000..fa3c197c54 --- /dev/null +++ b/changelog.d/6089.misc @@ -0,0 +1 @@ +Move last seen info into devices table. -- cgit 1.4.1 From a2a09d42dd0ac45eb56ea1df6d1416baf961228b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 23 Sep 2019 17:22:55 +0100 Subject: Changelog --- changelog.d/6092.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6092.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6092.bugfix b/changelog.d/6092.bugfix new file mode 100644 index 0000000000..01a7498ec6 --- /dev/null +++ b/changelog.d/6092.bugfix @@ -0,0 +1 @@ +Fix the logged number of updated items for the users_set_deactivated_flag background update. 
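Staying with account deactivation: combining the new `user_get_bound_threepids` storage method above with the `deactivate_account.py` change earlier in the MSC2290 commit, the revised flow has roughly the following shape. This is a condensed, illustrative sketch only — the real handler also tracks per-3PID failures:

    # Condensed sketch of the revised deactivation flow: unbind every 3PID the
    # homeserver remembers binding for this user, not just those currently on
    # the local account. Error handling is omitted for brevity.
    from twisted.internet import defer

    @defer.inlineCallbacks
    def unbind_remembered_threepids(store, identity_handler, user_id):
        threepids = yield store.user_get_bound_threepids(user_id)

        supports_unbind = True
        for threepid in threepids:
            # try_unbind_threepid reports whether the identity server
            # supports unbinding at all.
            result = yield identity_handler.try_unbind_threepid(
                user_id,
                {"medium": threepid["medium"], "address": threepid["address"]},
            )
            supports_unbind = supports_unbind and result
        return supports_unbind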
-- cgit 1.4.1 From 2c99c634532a62fa3479c1f90929b3eabe7880bc Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 23 Sep 2019 18:49:00 +0200 Subject: Add POST submit_token endpoint for MSISDN (#6078) First part of solving #6076 --- changelog.d/6078.feature | 1 + synapse/handlers/identity.py | 34 ++++++++++++++++++++++++ synapse/rest/client/v2_alpha/account.py | 47 +++++++++++++++++++++++++++++++-- 3 files changed, 80 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6078.feature (limited to 'changelog.d') diff --git a/changelog.d/6078.feature b/changelog.d/6078.feature new file mode 100644 index 0000000000..fae1e52322 --- /dev/null +++ b/changelog.d/6078.feature @@ -0,0 +1 @@ +Add `POST /add_threepid/msisdn/submit_token` endpoint for proxying submitToken on an account_threepid_handler. \ No newline at end of file diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index d50d485e06..af6f591942 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -491,6 +491,40 @@ class IdentityHandler(BaseHandler): return validation_session + @defer.inlineCallbacks + def proxy_msisdn_submit_token(self, id_server, client_secret, sid, token): + """Proxy a POST submitToken request to an identity server for verification purposes + + Args: + id_server (str): The identity server URL to contact + + client_secret (str): Secret provided by the client + + sid (str): The ID of the session + + token (str): The verification token + + Raises: + SynapseError: If we failed to contact the identity server + + Returns: + Deferred[dict]: The response dict from the identity server + """ + body = {"client_secret": client_secret, "sid": sid, "token": token} + + try: + return ( + yield self.http_client.post_json_get_json( + id_server + "/_matrix/identity/api/v1/validate/msisdn/submitToken", + body, + ) + ) + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") + except HttpResponseException as e: + logger.warning("Error contacting msisdn account_threepid_delegate: %s", e) + raise SynapseError(400, "Error contacting the identity server") + def create_id_access_token_header(id_access_token): """Create an Authorization header for passing to SimpleHttpClient as the header value diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index b8c48dc8f1..f99676fd30 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -524,7 +524,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): return 200, ret -class AddThreepidSubmitTokenServlet(RestServlet): +class AddThreepidEmailSubmitTokenServlet(RestServlet): """Handles 3PID validation token submission for adding an email to a user's account""" PATTERNS = client_patterns( @@ -600,6 +600,48 @@ class AddThreepidSubmitTokenServlet(RestServlet): finish_request(request) +class AddThreepidMsisdnSubmitTokenServlet(RestServlet): + """Handles 3PID validation token submission for adding a phone number to a user's + account + """ + + PATTERNS = client_patterns( + "/add_threepid/msisdn/submit_token$", releases=(), unstable=True + ) + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super().__init__() + self.config = hs.config + self.clock = hs.get_clock() + self.store = hs.get_datastore() + self.identity_handler = hs.get_handlers().identity_handler + + @defer.inlineCallbacks + def on_POST(self, request): + if not 
self.config.account_threepid_delegate_msisdn:
+            raise SynapseError(
+                400,
+                "This homeserver is not validating phone numbers. Use an identity server "
+                "instead.",
+            )
+
+        body = parse_json_object_from_request(request)
+        assert_params_in_dict(body, ["client_secret", "sid", "token"])
+
+        # Proxy submit_token request to msisdn threepid delegate
+        response = yield self.identity_handler.proxy_msisdn_submit_token(
+            self.config.account_threepid_delegate_msisdn,
+            body["client_secret"],
+            body["sid"],
+            body["token"],
+        )
+        return 200, response
+
+
 class ThreepidRestServlet(RestServlet):
     PATTERNS = client_patterns("/account/3pid$")

@@ -807,7 +849,8 @@ def register_servlets(hs, http_server):
     DeactivateAccountRestServlet(hs).register(http_server)
     EmailThreepidRequestTokenRestServlet(hs).register(http_server)
     MsisdnThreepidRequestTokenRestServlet(hs).register(http_server)
-    AddThreepidSubmitTokenServlet(hs).register(http_server)
+    AddThreepidEmailSubmitTokenServlet(hs).register(http_server)
+    AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server)
     ThreepidRestServlet(hs).register(http_server)
     ThreepidAddRestServlet(hs).register(http_server)
     ThreepidBindRestServlet(hs).register(http_server)
-- cgit 1.4.1


From b38aa82b83334573e40cb56f076eaf820c51c9ba Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 23 Sep 2019 19:52:43 +0200
Subject: Add m.require_identity_server to /versions unstable_flags (#5972)

As MSC2263 states, m.require_identity_server must be set to false when it
does not require an identity server to be provided by the client for the
purposes of email registration or password reset.

Adds an m.require_identity_server flag to /versions's unstable_flags section.
This will advertise that Synapse no longer needs id_server as a parameter.
---
 changelog.d/5972.misc           |  1 +
 synapse/rest/client/versions.py | 14 +++++++++++++-
 2 files changed, 14 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/5972.misc

(limited to 'changelog.d')

diff --git a/changelog.d/5972.misc b/changelog.d/5972.misc
new file mode 100644
index 0000000000..1dc217e899
--- /dev/null
+++ b/changelog.d/5972.misc
@@ -0,0 +1 @@
+Add m.require_identity_server flag to /version's unstable_features.
\ No newline at end of file
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 0058b6b459..3c9ec59d72 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -48,7 +48,19 @@ class VersionsRestServlet(RestServlet):
                     "r0.5.0",
                 ],
                 # as per MSC1497:
-                "unstable_features": {"m.lazy_load_members": True},
+                "unstable_features": {
+                    "m.lazy_load_members": True,
+                    # Advertise to clients that they need not include an `id_server`
+                    # parameter during registration or password reset, as Synapse now decides
+                    # itself which identity server to use (or none at all).
+                    #
+                    # This is also used by a client when they wish to bind a 3PID to their
+                    # account, but not bind it to an identity server, the endpoint for which
+                    # also requires `id_server`. If the homeserver is handling 3PID
+                    # verification itself, there is no need to ask the user for `id_server` to
+                    # be supplied.
+ "m.require_identity_server": False, + }, }, ) -- cgit 1.4.1 From 1ea3ed76201de678c8c19c568bb3456ae4989a97 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 23 Sep 2019 21:19:25 +0200 Subject: Add m.id_access_token to /versions unstable_features (MSC2264) (#5974) Adds a flag to /versions' unstable_features section indicating that this Synapse understands what an id_access_token is, as per MSC2264. Fixes #5927 --- changelog.d/5974.feature | 1 + synapse/rest/client/versions.py | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/5974.feature (limited to 'changelog.d') diff --git a/changelog.d/5974.feature b/changelog.d/5974.feature new file mode 100644 index 0000000000..387a444fc4 --- /dev/null +++ b/changelog.d/5974.feature @@ -0,0 +1 @@ +Add m.id_access_token to unstable_features in /versions as per MSC2264. \ No newline at end of file diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 3c9ec59d72..fdab0ddb42 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -50,6 +50,9 @@ class VersionsRestServlet(RestServlet): # as per MSC1497: "unstable_features": { "m.lazy_load_members": True, + # as per MSC2190, as amended by MSC2264 + # to be removed in r0.6.0 + "m.id_access_token": True, # Advertise to clients that they need not include an `id_server` # parameter during registration or password reset, as Synapse now decides # itself which identity server to use (or none at all). -- cgit 1.4.1 From e08ea43463bacd5efacbf6c790c6be0f3cd06ce6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 23 Sep 2019 21:23:20 +0200 Subject: Use the federation blacklist for requests to untrusted Identity Servers (#6000) Uses a SimpleHttpClient instance equipped with the federation_ip_range_blacklist list for requests to identity servers provided by user input. Does not use a blacklist when contacting identity servers specified by account_threepid_delegates. The homeserver trusts the latter and we don't want to prevent homeserver admins from specifying delegates that are on internal IP addresses. Fixes #5935 --- changelog.d/6000.feature | 1 + docs/sample_config.yaml | 3 +++ synapse/config/server.py | 3 +++ synapse/handlers/identity.py | 18 +++++++++++++++--- synapse/handlers/room_member.py | 7 ++++++- 5 files changed, 28 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6000.feature (limited to 'changelog.d') diff --git a/changelog.d/6000.feature b/changelog.d/6000.feature new file mode 100644 index 0000000000..0a159bd10d --- /dev/null +++ b/changelog.d/6000.feature @@ -0,0 +1 @@ +Apply the federation blacklist to requests to identity servers. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 61d9f09a99..e53b979c35 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -110,6 +110,9 @@ pid_file: DATADIR/homeserver.pid # blacklist IP address CIDR ranges. If this option is not specified, or # specified with an empty list, no ip range blacklist will be enforced. # +# As of Synapse v1.4.0 this option also affects any outbound requests to identity +# servers provided by user input. +# # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly # listed here, since they correspond to unroutable addresses.) 
# diff --git a/synapse/config/server.py b/synapse/config/server.py index 7f8d315954..419787a89c 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -545,6 +545,9 @@ class ServerConfig(Config): # blacklist IP address CIDR ranges. If this option is not specified, or # specified with an empty list, no ip range blacklist will be enforced. # + # As of Synapse v1.4.0 this option also affects any outbound requests to identity + # servers provided by user input. + # # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly # listed here, since they correspond to unroutable addresses.) # diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index af6f591942..264bdc2189 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -31,6 +31,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.config.emailconfig import ThreepidBehaviour +from synapse.http.client import SimpleHttpClient from synapse.util.stringutils import random_string from ._base import BaseHandler @@ -42,7 +43,12 @@ class IdentityHandler(BaseHandler): def __init__(self, hs): super(IdentityHandler, self).__init__(hs) - self.http_client = hs.get_simple_http_client() + self.http_client = SimpleHttpClient(hs) + # We create a blacklisting instance of SimpleHttpClient for contacting identity + # servers specified by clients + self.blacklisting_http_client = SimpleHttpClient( + hs, ip_blacklist=hs.config.federation_ip_range_blacklist + ) self.federation_http_client = hs.get_http_client() self.hs = hs @@ -143,7 +149,9 @@ class IdentityHandler(BaseHandler): bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,) try: - data = yield self.http_client.post_json_get_json( + # Use the blacklisting http client as this call is only to identity servers + # provided by a client + data = yield self.blacklisting_http_client.post_json_get_json( bind_url, bind_data, headers=headers ) @@ -246,7 +254,11 @@ class IdentityHandler(BaseHandler): headers = {b"Authorization": auth_headers} try: - yield self.http_client.post_json_get_json(url, content, headers) + # Use the blacklisting http client as this call is only to identity servers + # provided by a client + yield self.blacklisting_http_client.post_json_get_json( + url, content, headers + ) changed = True except HttpResponseException as e: changed = False diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 39df0f128d..94cd0cf3ef 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -31,6 +31,7 @@ from synapse import types from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError from synapse.handlers.identity import LookupAlgorithm, create_id_access_token_header +from synapse.http.client import SimpleHttpClient from synapse.types import RoomID, UserID from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room, user_left_room @@ -62,7 +63,11 @@ class RoomMemberHandler(object): self.auth = hs.get_auth() self.state_handler = hs.get_state_handler() self.config = hs.config - self.simple_http_client = hs.get_simple_http_client() + # We create a blacklisting instance of SimpleHttpClient for contacting identity + # servers specified by clients + self.simple_http_client = SimpleHttpClient( + hs, ip_blacklist=hs.config.federation_ip_range_blacklist + ) self.federation_handler = hs.get_handlers().federation_handler 
self.directory_handler = hs.get_handlers().directory_handler -- cgit 1.4.1 From 2b071a2ff1ce59c5b7a4930c471470c739c5efe2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 23 Sep 2019 21:46:34 +0200 Subject: Add an unstable feature flag for separate add/bind 3pid APIs (#6044) Add a m.separate_add_and_bind flag set to True. See MSC2290's Backward Compatibility section for details. --- changelog.d/6044.feature | 1 + synapse/rest/client/versions.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/6044.feature (limited to 'changelog.d') diff --git a/changelog.d/6044.feature b/changelog.d/6044.feature new file mode 100644 index 0000000000..7dc05d4845 --- /dev/null +++ b/changelog.d/6044.feature @@ -0,0 +1 @@ +Add an unstable feature flag for separate add/bind 3pid APIs. \ No newline at end of file diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index fdab0ddb42..1044ae7b4e 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -63,6 +63,8 @@ class VersionsRestServlet(RestServlet): # verification itself, there is no need to ask the user for `id_server` to # be supplied. "m.require_identity_server": False, + # as per MSC2290 + "m.separate_add_and_bind": True, }, }, ) -- cgit 1.4.1 From 50776261e1565afe45a1cfd4a991c24110c2e519 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 23 Sep 2019 22:21:03 +0200 Subject: Add submit_url response parameter to msisdn /requestToken (#6079) Second part of solving #6076 Fixes #6076 We return a submit_url parameter on calls to POST */msisdn/requestToken so that clients know where to submit token information to. --- changelog.d/6079.feature | 1 + docs/sample_config.yaml | 2 ++ synapse/config/registration.py | 2 ++ synapse/handlers/identity.py | 12 +++++++++++- 4 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6079.feature (limited to 'changelog.d') diff --git a/changelog.d/6079.feature b/changelog.d/6079.feature new file mode 100644 index 0000000000..bcbb49ac58 --- /dev/null +++ b/changelog.d/6079.feature @@ -0,0 +1 @@ +Add `submit_url` response parameter to `*/msisdn/requestToken` endpoints. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index bd208b17dd..46af6edf1f 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -940,6 +940,8 @@ uploads_path: "DATADIR/uploads" # by the Matrix Identity Service API specification: # https://matrix.org/docs/spec/identity_service/latest # +# If a delegate is specified, the config option public_baseurl must also be filled out. +# account_threepid_delegates: #email: https://example.com # Delegate email sending to example.org #msisdn: http://localhost:8090 # Delegate SMS sending to this local process diff --git a/synapse/config/registration.py b/synapse/config/registration.py index d4654e99b3..bef89e2bf4 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -293,6 +293,8 @@ class RegistrationConfig(Config): # by the Matrix Identity Service API specification: # https://matrix.org/docs/spec/identity_service/latest # + # If a delegate is specified, the config option public_baseurl must also be filled out. 
+ # account_threepid_delegates: #email: https://example.com # Delegate email sending to example.org #msisdn: http://localhost:8090 # Delegate SMS sending to this local process diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 264bdc2189..1f16afd14e 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -452,13 +452,23 @@ class IdentityHandler(BaseHandler): id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken", params, ) - return data except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() except TimeoutError: raise SynapseError(500, "Timed out contacting identity server") + assert self.hs.config.public_baseurl + + # we need to tell the client to send the token back to us, since it doesn't + # otherwise know where to send it, so add submit_url response parameter + # (see also MSC2078) + data["submit_url"] = ( + self.hs.config.public_baseurl + + "_matrix/client/unstable/add_threepid/msisdn/submit_token" + ) + return data + @defer.inlineCallbacks def validate_threepid_session(self, client_secret, sid): """Validates a threepid session with only the client secret and session ID -- cgit 1.4.1 From 40fb00f5b7a8a9df15169900df218df19423b93e Mon Sep 17 00:00:00 2001 From: "J. Ryan Stinnett" Date: Tue, 24 Sep 2019 14:39:50 +0100 Subject: Add sid to next_link for email validation (#6097) --- changelog.d/6097.bugfix | 1 + synapse/handlers/identity.py | 10 ++++++++++ 2 files changed, 11 insertions(+) create mode 100644 changelog.d/6097.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6097.bugfix b/changelog.d/6097.bugfix new file mode 100644 index 0000000000..750a8ecf0a --- /dev/null +++ b/changelog.d/6097.bugfix @@ -0,0 +1 @@ +Add sid to next_link for email validation. diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 1f16afd14e..6d42a1aed8 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -18,6 +18,7 @@ """Utilities for interacting with Identity Servers""" import logging +import urllib from canonicaljson import json @@ -328,6 +329,15 @@ class IdentityHandler(BaseHandler): # Generate a session id session_id = random_string(16) + if next_link: + # Manipulate the next_link to add the sid, because the caller won't get + # it until we send a response, by which time we've sent the mail. + if "?" in next_link: + next_link += "&" + else: + next_link += "?" + next_link += "sid=" + urllib.parse.quote(session_id) + # Generate a new validation token token = random_string(32) -- cgit 1.4.1 From f8b02c54207e5e99fcd95cb3e19a11423768e696 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 Sep 2019 15:59:43 +0100 Subject: Newsfile --- changelog.d/6098.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6098.feature (limited to 'changelog.d') diff --git a/changelog.d/6098.feature b/changelog.d/6098.feature new file mode 100644 index 0000000000..f3c693c06b --- /dev/null +++ b/changelog.d/6098.feature @@ -0,0 +1 @@ +Add support for pruning old rows in `user_ips` table. 
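Stepping back to #6097 above: the `next_link` manipulation it adds can be restated as a standalone helper. The function name and example values are illustrative only:

    # Standalone restatement of the #6097 change: append the validation session
    # id to next_link, using "&" when a query string is already present.
    import urllib.parse

    def append_sid_to_next_link(next_link, session_id):
        next_link += "&" if "?" in next_link else "?"
        return next_link + "sid=" + urllib.parse.quote(session_id)

    # e.g. append_sid_to_next_link("https://app.example/verify?flow=add", "abc 123")
    #   -> "https://app.example/verify?flow=add&sid=abc%20123"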
-- cgit 1.4.1 From 566ac40939404649d58c053e97dba75810f95339 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 24 Sep 2019 17:01:09 +0100 Subject: remove unused parameter to get_user_id_by_threepid (#6099) Added in #5377, apparently in error --- changelog.d/6099.misc | 1 + synapse/storage/registration.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6099.misc (limited to 'changelog.d') diff --git a/changelog.d/6099.misc b/changelog.d/6099.misc new file mode 100644 index 0000000000..8415c6759b --- /dev/null +++ b/changelog.d/6099.misc @@ -0,0 +1 @@ +Remove unused parameter to get_user_id_by_threepid. diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 805411a6b2..5cf2c893aa 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -495,7 +495,7 @@ class RegistrationWorkerStore(SQLBaseStore): ) @defer.inlineCallbacks - def get_user_id_by_threepid(self, medium, address, require_verified=False): + def get_user_id_by_threepid(self, medium, address): """Returns user id from threepid Args: -- cgit 1.4.1 From 8004d6ca2faf0f2f843fcdcaf225d7bcab847503 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 25 Sep 2019 11:32:05 +0100 Subject: Refactor code for calculating registration flows (#6106) because, frankly, it looked like it was written by an axe-murderer. This should be a non-functional change, except that where `m.login.dummy` was previously advertised *before* `m.login.terms`, it will now be advertised afterwards. AFAICT that should have no effect, and will be more consistent with the flows that involve passing a 3pid. --- changelog.d/6106.misc | 1 + synapse/rest/client/v2_alpha/register.py | 124 ++++++++++++++-------------- tests/rest/client/v2_alpha/test_register.py | 79 +++++++++++++++--- tests/test_terms_auth.py | 24 ++++-- 4 files changed, 145 insertions(+), 83 deletions(-) create mode 100644 changelog.d/6106.misc (limited to 'changelog.d') diff --git a/changelog.d/6106.misc b/changelog.d/6106.misc new file mode 100644 index 0000000000..d732091779 --- /dev/null +++ b/changelog.d/6106.misc @@ -0,0 +1 @@ +Refactor code for calculating registration flows. 
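The register.py diff below extracts the flow-list calculation into a
standalone, testable function. As a rough sketch of the shape it computes (a
flow is an ordered list of UI-auth stage names; the keyword flags are
simplified stand-ins for the real homeserver config object, so treat this as
an illustration rather than the exact implementation)::

    RECAPTCHA = "m.login.recaptcha"
    TERMS = "m.login.terms"
    DUMMY = "m.login.dummy"
    EMAIL = "m.login.email.identity"
    MSISDN = "m.login.msisdn"

    def calculate_flows(require_email=False, require_msisdn=False,
                        show_msisdn=True, captcha=False, consent=False):
        flows = []
        # 3PID-less registration is only allowed if no 3PID is required.
        if not require_email and not require_msisdn:
            flows.append([DUMMY])
        # The email-only flow is allowed unless an msisdn is required.
        if not require_msisdn:
            flows.append([EMAIL])
        # The msisdn-only flow is allowed unless an email is required.
        if show_msisdn and not require_email:
            flows.append([MSISDN])
        # Users may always supply both msisdn and email.
        if show_msisdn:
            flows.append([MSISDN, EMAIL])
        # Terms, then recaptcha, are prepended to every flow, so recaptcha
        # ends up first and terms second.
        if consent:
            for flow in flows:
                flow.insert(0, TERMS)
        if captcha:
            for flow in flows:
                flow.insert(0, RECAPTCHA)
        return flows

    # calculate_flows(captcha=True, consent=True)[0]
    # == ["m.login.recaptcha", "m.login.terms", "m.login.dummy"]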
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 135a70808f..e3f3d9126f 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -16,6 +16,7 @@ import hmac import logging +from typing import List, Union from six import string_types @@ -31,8 +32,11 @@ from synapse.api.errors import ( ThreepidValidationError, UnrecognizedRequestError, ) +from synapse.config.captcha import CaptchaConfig +from synapse.config.consent_config import ConsentConfig from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.ratelimiting import FederationRateLimitConfig +from synapse.config.registration import RegistrationConfig from synapse.config.server import is_threepid_reserved from synapse.http.server import finish_request from synapse.http.servlet import ( @@ -371,6 +375,8 @@ class RegisterRestServlet(RestServlet): self.ratelimiter = hs.get_registration_ratelimiter() self.clock = hs.get_clock() + self._registration_flows = _calculate_registration_flows(hs.config) + @interactive_auth_handler @defer.inlineCallbacks def on_POST(self, request): @@ -491,69 +497,8 @@ class RegisterRestServlet(RestServlet): assigned_user_id=registered_user_id, ) - # FIXME: need a better error than "no auth flow found" for scenarios - # where we required 3PID for registration but the user didn't give one - require_email = "email" in self.hs.config.registrations_require_3pid - require_msisdn = "msisdn" in self.hs.config.registrations_require_3pid - - show_msisdn = True - if self.hs.config.disable_msisdn_registration: - show_msisdn = False - require_msisdn = False - - flows = [] - if self.hs.config.enable_registration_captcha: - # only support 3PIDless registration if no 3PIDs are required - if not require_email and not require_msisdn: - # Also add a dummy flow here, otherwise if a client completes - # recaptcha first we'll assume they were going for this flow - # and complete the request, when they could have been trying to - # complete one of the flows with email/msisdn auth. 
- flows.extend([[LoginType.RECAPTCHA, LoginType.DUMMY]]) - # only support the email-only flow if we don't require MSISDN 3PIDs - if not require_msisdn: - flows.extend([[LoginType.RECAPTCHA, LoginType.EMAIL_IDENTITY]]) - - if show_msisdn: - # only support the MSISDN-only flow if we don't require email 3PIDs - if not require_email: - flows.extend([[LoginType.RECAPTCHA, LoginType.MSISDN]]) - # always let users provide both MSISDN & email - flows.extend( - [[LoginType.RECAPTCHA, LoginType.MSISDN, LoginType.EMAIL_IDENTITY]] - ) - else: - # only support 3PIDless registration if no 3PIDs are required - if not require_email and not require_msisdn: - flows.extend([[LoginType.DUMMY]]) - # only support the email-only flow if we don't require MSISDN 3PIDs - if not require_msisdn: - flows.extend([[LoginType.EMAIL_IDENTITY]]) - - if show_msisdn: - # only support the MSISDN-only flow if we don't require email 3PIDs - if not require_email or require_msisdn: - flows.extend([[LoginType.MSISDN]]) - # always let users provide both MSISDN & email - flows.extend([[LoginType.MSISDN, LoginType.EMAIL_IDENTITY]]) - - # Append m.login.terms to all flows if we're requiring consent - if self.hs.config.user_consent_at_registration: - new_flows = [] - for flow in flows: - inserted = False - # m.login.terms should go near the end but before msisdn or email auth - for i, stage in enumerate(flow): - if stage == LoginType.EMAIL_IDENTITY or stage == LoginType.MSISDN: - flow.insert(i, LoginType.TERMS) - inserted = True - break - if not inserted: - flow.append(LoginType.TERMS) - flows.extend(new_flows) - auth_result, params, session_id = yield self.auth_handler.check_auth( - flows, body, self.hs.get_ip_from_request(request) + self._registration_flows, body, self.hs.get_ip_from_request(request) ) # Check that we're not trying to register a denied 3pid. @@ -716,6 +661,61 @@ class RegisterRestServlet(RestServlet): ) +def _calculate_registration_flows( + # technically `config` has to provide *all* of these interfaces, not just one + config: Union[RegistrationConfig, ConsentConfig, CaptchaConfig], +) -> List[List[str]]: + """Get a suitable flows list for registration + + Args: + config: server configuration + + Returns: a list of supported flows + """ + # FIXME: need a better error than "no auth flow found" for scenarios + # where we required 3PID for registration but the user didn't give one + require_email = "email" in config.registrations_require_3pid + require_msisdn = "msisdn" in config.registrations_require_3pid + + show_msisdn = True + if config.disable_msisdn_registration: + show_msisdn = False + require_msisdn = False + + flows = [] + + # only support 3PIDless registration if no 3PIDs are required + if not require_email and not require_msisdn: + # Add a dummy step here, otherwise if a client completes + # recaptcha first we'll assume they were going for this flow + # and complete the request, when they could have been trying to + # complete one of the flows with email/msisdn auth. 
+ flows.append([LoginType.DUMMY]) + + # only support the email-only flow if we don't require MSISDN 3PIDs + if not require_msisdn: + flows.append([LoginType.EMAIL_IDENTITY]) + + # only support the MSISDN-only flow if we don't require email 3PIDs + if show_msisdn and not require_email: + flows.append([LoginType.MSISDN]) + + if show_msisdn: + flows.append([LoginType.MSISDN, LoginType.EMAIL_IDENTITY]) + + # Prepend m.login.terms to all flows if we're requiring consent + if config.user_consent_at_registration: + for flow in flows: + flow.insert(0, LoginType.TERMS) + + # Prepend recaptcha to all flows if we're requiring captcha + if config.enable_registration_captcha: + for flow in flows: + flow.insert(0, LoginType.RECAPTCHA) + + return flows + + def register_servlets(hs, http_server): EmailRegisterRequestTokenRestServlet(hs).register(http_server) MsisdnRegisterRequestTokenRestServlet(hs).register(http_server) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index ab4d7d70d0..bc2dc47973 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -34,19 +34,12 @@ from tests import unittest class RegisterRestServletTestCase(unittest.HomeserverTestCase): servlets = [register.register_servlets] + url = b"/_matrix/client/r0/register" - def make_homeserver(self, reactor, clock): - - self.url = b"/_matrix/client/r0/register" - - self.hs = self.setup_test_homeserver() - self.hs.config.enable_registration = True - self.hs.config.registrations_require_3pid = [] - self.hs.config.auto_join_rooms = [] - self.hs.config.enable_registration_captcha = False - self.hs.config.allow_guest_access = True - - return self.hs + def default_config(self, name="test"): + config = super().default_config(name) + config["allow_guest_access"] = True + return config def test_POST_appservice_registration_valid(self): user_id = "@as_user_kermit:test" @@ -199,6 +192,68 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.assertEquals(channel.result["code"], b"200", channel.result) + def test_advertised_flows(self): + request, channel = self.make_request(b"POST", self.url, b"{}") + self.render(request) + self.assertEquals(channel.result["code"], b"401", channel.result) + flows = channel.json_body["flows"] + + # with the stock config, we expect all four combinations of 3pid + self.assertCountEqual( + [ + ["m.login.dummy"], + ["m.login.email.identity"], + ["m.login.msisdn"], + ["m.login.msisdn", "m.login.email.identity"], + ], + (f["stages"] for f in flows), + ) + + @unittest.override_config( + { + "enable_registration_captcha": True, + "user_consent": { + "version": "1", + "template_dir": "/", + "require_at_registration": True, + }, + } + ) + def test_advertised_flows_captcha_and_terms(self): + request, channel = self.make_request(b"POST", self.url, b"{}") + self.render(request) + self.assertEquals(channel.result["code"], b"401", channel.result) + flows = channel.json_body["flows"] + + self.assertCountEqual( + [ + ["m.login.recaptcha", "m.login.terms", "m.login.dummy"], + ["m.login.recaptcha", "m.login.terms", "m.login.email.identity"], + ["m.login.recaptcha", "m.login.terms", "m.login.msisdn"], + [ + "m.login.recaptcha", + "m.login.terms", + "m.login.msisdn", + "m.login.email.identity", + ], + ], + (f["stages"] for f in flows), + ) + + @unittest.override_config( + {"registrations_require_3pid": ["email"], "disable_msisdn_registration": True} + ) + def test_advertised_flows_no_msisdn_email_required(self): + 
request, channel = self.make_request(b"POST", self.url, b"{}")
+        self.render(request)
+        self.assertEquals(channel.result["code"], b"401", channel.result)
+        flows = channel.json_body["flows"]
+
+        # with email required and msisdn registration disabled, we expect
+        # only the email flow
+        self.assertCountEqual(
+            [["m.login.email.identity"]], (f["stages"] for f in flows)
+        )
+
 
 class AccountValidityTestCase(unittest.HomeserverTestCase):
 
diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py
index 52739fbabc..5ec5d2b358 100644
--- a/tests/test_terms_auth.py
+++ b/tests/test_terms_auth.py
@@ -28,6 +28,21 @@ from tests import unittest
 class TermsTestCase(unittest.HomeserverTestCase):
     servlets = [register_servlets]
 
+    def default_config(self, name="test"):
+        config = super().default_config(name)
+        config.update(
+            {
+                "public_baseurl": "https://example.org/",
+                "user_consent": {
+                    "version": "1.0",
+                    "policy_name": "My Cool Privacy Policy",
+                    "template_dir": "/",
+                    "require_at_registration": True,
+                },
+            }
+        )
+        return config
+
     def prepare(self, reactor, clock, hs):
         self.clock = MemoryReactorClock()
         self.hs_clock = Clock(self.clock)
@@ -35,17 +50,8 @@ class TermsTestCase(unittest.HomeserverTestCase):
         self.registration_handler = Mock()
         self.auth_handler = Mock()
         self.device_handler = Mock()
-        hs.config.enable_registration = True
-        hs.config.registrations_require_3pid = []
-        hs.config.auto_join_rooms = []
-        hs.config.enable_registration_captcha = False
 
     def test_ui_auth(self):
-        self.hs.config.user_consent_at_registration = True
-        self.hs.config.user_consent_policy_name = "My Cool Privacy Policy"
-        self.hs.config.public_baseurl = "https://example.org/"
-        self.hs.config.user_consent_version = "1.0"
-
         # Do a UI auth request
         request, channel = self.make_request(b"POST", self.url, b"{}")
         self.render(request)
-- 
cgit 1.4.1


From 2cd98812ba338eefe83fee4ae2390d00f5499de9 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 25 Sep 2019 11:33:03 +0100
Subject: Refactor the user-interactive auth handling (#6105)

Pull the checkers out to their own classes, rather than having them lost in a
massive 1000-line class which does everything.

This is also preparation for some more intelligent advertising of flows, as
per #6100
---
 changelog.d/6105.misc                   |   1 +
 synapse/handlers/auth.py                | 141 ++-------------------
 synapse/handlers/ui_auth/__init__.py    |  22 ++++
 synapse/handlers/ui_auth/checkers.py    | 216 ++++++++++++++++++++++++++++++++
 tests/rest/client/v2_alpha/test_auth.py |  26 ++--
 5 files changed, 265 insertions(+), 141 deletions(-)
 create mode 100644 changelog.d/6105.misc
 create mode 100644 synapse/handlers/ui_auth/__init__.py
 create mode 100644 synapse/handlers/ui_auth/checkers.py

(limited to 'changelog.d')

diff --git a/changelog.d/6105.misc b/changelog.d/6105.misc
new file mode 100644
index 0000000000..2e838a35c6
--- /dev/null
+++ b/changelog.d/6105.misc
@@ -0,0 +1 @@
+Refactor the user-interactive auth handling.
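The diffs below pull each user-interactive auth stage out into its own checker
class. The dispatch pattern is simple enough to sketch standalone (the class
and attribute names mirror the patch, but this is an illustration with
simplified types, not the real interfaces)::

    class DummyAuthChecker:
        # Each checker declares the stage it handles via AUTH_TYPE.
        AUTH_TYPE = "m.login.dummy"

        def check_auth(self, authdict, clientip):
            # The dummy stage always succeeds; real checkers validate
            # captcha responses, threepid tokens, and so on.
            return True

    INTERACTIVE_AUTH_CHECKERS = [DummyAuthChecker]

    # The auth handler builds a dict keyed by stage name from a module-level
    # list of checker classes, then dispatches each incoming auth dict to the
    # matching instance.
    checkers = {cls.AUTH_TYPE: cls() for cls in INTERACTIVE_AUTH_CHECKERS}
    assert checkers["m.login.dummy"].check_auth({}, "127.0.0.1")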
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 374372b69e..f920c2f6c1 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -21,10 +21,8 @@ import unicodedata import attr import bcrypt import pymacaroons -from canonicaljson import json from twisted.internet import defer -from twisted.web.client import PartialDownloadError import synapse.util.stringutils as stringutils from synapse.api.constants import LoginType @@ -38,7 +36,8 @@ from synapse.api.errors import ( UserDeactivatedError, ) from synapse.api.ratelimiting import Ratelimiter -from synapse.config.emailconfig import ThreepidBehaviour +from synapse.handlers.ui_auth import INTERACTIVE_AUTH_CHECKERS +from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker from synapse.logging.context import defer_to_thread from synapse.module_api import ModuleApi from synapse.types import UserID @@ -58,13 +57,12 @@ class AuthHandler(BaseHandler): hs (synapse.server.HomeServer): """ super(AuthHandler, self).__init__(hs) - self.checkers = { - LoginType.RECAPTCHA: self._check_recaptcha, - LoginType.EMAIL_IDENTITY: self._check_email_identity, - LoginType.MSISDN: self._check_msisdn, - LoginType.DUMMY: self._check_dummy_auth, - LoginType.TERMS: self._check_terms_auth, - } + + self.checkers = {} # type: dict[str, UserInteractiveAuthChecker] + for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: + inst = auth_checker_class(hs) + self.checkers[inst.AUTH_TYPE] = inst + self.bcrypt_rounds = hs.config.bcrypt_rounds # This is not a cache per se, but a store of all current sessions that @@ -292,7 +290,7 @@ class AuthHandler(BaseHandler): sess["creds"] = {} creds = sess["creds"] - result = yield self.checkers[stagetype](authdict, clientip) + result = yield self.checkers[stagetype].check_auth(authdict, clientip) if result: creds[stagetype] = result self._save_session(sess) @@ -363,7 +361,7 @@ class AuthHandler(BaseHandler): login_type = authdict["type"] checker = self.checkers.get(login_type) if checker is not None: - res = yield checker(authdict, clientip=clientip) + res = yield checker.check_auth(authdict, clientip=clientip) return res # build a v1-login-style dict out of the authdict and fall back to the @@ -376,125 +374,6 @@ class AuthHandler(BaseHandler): (canonical_id, callback) = yield self.validate_login(user_id, authdict) return canonical_id - @defer.inlineCallbacks - def _check_recaptcha(self, authdict, clientip, **kwargs): - try: - user_response = authdict["response"] - except KeyError: - # Client tried to provide captcha but didn't give the parameter: - # bad request. - raise LoginError( - 400, "Captcha response is required", errcode=Codes.CAPTCHA_NEEDED - ) - - logger.info( - "Submitting recaptcha response %s with remoteip %s", user_response, clientip - ) - - # TODO: get this from the homeserver rather than creating a new one for - # each request - try: - client = self.hs.get_simple_http_client() - resp_body = yield client.post_urlencoded_get_json( - self.hs.config.recaptcha_siteverify_api, - args={ - "secret": self.hs.config.recaptcha_private_key, - "response": user_response, - "remoteip": clientip, - }, - ) - except PartialDownloadError as pde: - # Twisted is silly - data = pde.response - resp_body = json.loads(data) - - if "success" in resp_body: - # Note that we do NOT check the hostname here: we explicitly - # intend the CAPTCHA to be presented by whatever client the - # user is using, we just care that they have completed a CAPTCHA. 
- logger.info( - "%s reCAPTCHA from hostname %s", - "Successful" if resp_body["success"] else "Failed", - resp_body.get("hostname"), - ) - if resp_body["success"]: - return True - raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) - - def _check_email_identity(self, authdict, **kwargs): - return self._check_threepid("email", authdict, **kwargs) - - def _check_msisdn(self, authdict, **kwargs): - return self._check_threepid("msisdn", authdict) - - def _check_dummy_auth(self, authdict, **kwargs): - return defer.succeed(True) - - def _check_terms_auth(self, authdict, **kwargs): - return defer.succeed(True) - - @defer.inlineCallbacks - def _check_threepid(self, medium, authdict, **kwargs): - if "threepid_creds" not in authdict: - raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM) - - threepid_creds = authdict["threepid_creds"] - - identity_handler = self.hs.get_handlers().identity_handler - - logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,)) - if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - if medium == "email": - threepid = yield identity_handler.threepid_from_creds( - self.hs.config.account_threepid_delegate_email, threepid_creds - ) - elif medium == "msisdn": - threepid = yield identity_handler.threepid_from_creds( - self.hs.config.account_threepid_delegate_msisdn, threepid_creds - ) - else: - raise SynapseError(400, "Unrecognized threepid medium: %s" % (medium,)) - elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: - row = yield self.store.get_threepid_validation_session( - medium, - threepid_creds["client_secret"], - sid=threepid_creds["sid"], - validated=True, - ) - - threepid = ( - { - "medium": row["medium"], - "address": row["address"], - "validated_at": row["validated_at"], - } - if row - else None - ) - - if row: - # Valid threepid returned, delete from the db - yield self.store.delete_threepid_session(threepid_creds["sid"]) - else: - raise SynapseError( - 400, "Password resets are not enabled on this homeserver" - ) - - if not threepid: - raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) - - if threepid["medium"] != medium: - raise LoginError( - 401, - "Expecting threepid of type '%s', got '%s'" - % (medium, threepid["medium"]), - errcode=Codes.UNAUTHORIZED, - ) - - threepid["threepid_creds"] = authdict["threepid_creds"] - - return threepid - def _get_params_recaptcha(self): return {"public_key": self.hs.config.recaptcha_public_key} diff --git a/synapse/handlers/ui_auth/__init__.py b/synapse/handlers/ui_auth/__init__.py new file mode 100644 index 0000000000..824f37f8f8 --- /dev/null +++ b/synapse/handlers/ui_auth/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module implements user-interactive auth verification. + +TODO: move more stuff out of AuthHandler in here. 
+ +""" + +from synapse.handlers.ui_auth.checkers import INTERACTIVE_AUTH_CHECKERS # noqa: F401 diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py new file mode 100644 index 0000000000..fd633b7b0e --- /dev/null +++ b/synapse/handlers/ui_auth/checkers.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging + +from canonicaljson import json + +from twisted.internet import defer +from twisted.web.client import PartialDownloadError + +from synapse.api.constants import LoginType +from synapse.api.errors import Codes, LoginError, SynapseError +from synapse.config.emailconfig import ThreepidBehaviour + +logger = logging.getLogger(__name__) + + +class UserInteractiveAuthChecker: + """Abstract base class for an interactive auth checker""" + + def __init__(self, hs): + pass + + def check_auth(self, authdict, clientip): + """Given the authentication dict from the client, attempt to check this step + + Args: + authdict (dict): authentication dictionary from the client + clientip (str): The IP address of the client. + + Raises: + SynapseError if authentication failed + + Returns: + Deferred: the result of authentication (to pass back to the client?) + """ + raise NotImplementedError() + + +class DummyAuthChecker(UserInteractiveAuthChecker): + AUTH_TYPE = LoginType.DUMMY + + def check_auth(self, authdict, clientip): + return defer.succeed(True) + + +class TermsAuthChecker(UserInteractiveAuthChecker): + AUTH_TYPE = LoginType.TERMS + + def check_auth(self, authdict, clientip): + return defer.succeed(True) + + +class RecaptchaAuthChecker(UserInteractiveAuthChecker): + AUTH_TYPE = LoginType.RECAPTCHA + + def __init__(self, hs): + super().__init__(hs) + self._http_client = hs.get_simple_http_client() + self._url = hs.config.recaptcha_siteverify_api + self._secret = hs.config.recaptcha_private_key + + @defer.inlineCallbacks + def check_auth(self, authdict, clientip): + try: + user_response = authdict["response"] + except KeyError: + # Client tried to provide captcha but didn't give the parameter: + # bad request. + raise LoginError( + 400, "Captcha response is required", errcode=Codes.CAPTCHA_NEEDED + ) + + logger.info( + "Submitting recaptcha response %s with remoteip %s", user_response, clientip + ) + + # TODO: get this from the homeserver rather than creating a new one for + # each request + try: + resp_body = yield self._http_client.post_urlencoded_get_json( + self._url, + args={ + "secret": self._secret, + "response": user_response, + "remoteip": clientip, + }, + ) + except PartialDownloadError as pde: + # Twisted is silly + data = pde.response + resp_body = json.loads(data) + + if "success" in resp_body: + # Note that we do NOT check the hostname here: we explicitly + # intend the CAPTCHA to be presented by whatever client the + # user is using, we just care that they have completed a CAPTCHA. 
+ logger.info( + "%s reCAPTCHA from hostname %s", + "Successful" if resp_body["success"] else "Failed", + resp_body.get("hostname"), + ) + if resp_body["success"]: + return True + raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) + + +class _BaseThreepidAuthChecker: + def __init__(self, hs): + self.hs = hs + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def _check_threepid(self, medium, authdict): + if "threepid_creds" not in authdict: + raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM) + + threepid_creds = authdict["threepid_creds"] + + identity_handler = self.hs.get_handlers().identity_handler + + logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,)) + if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + if medium == "email": + threepid = yield identity_handler.threepid_from_creds( + self.hs.config.account_threepid_delegate_email, threepid_creds + ) + elif medium == "msisdn": + threepid = yield identity_handler.threepid_from_creds( + self.hs.config.account_threepid_delegate_msisdn, threepid_creds + ) + else: + raise SynapseError(400, "Unrecognized threepid medium: %s" % (medium,)) + elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + row = yield self.store.get_threepid_validation_session( + medium, + threepid_creds["client_secret"], + sid=threepid_creds["sid"], + validated=True, + ) + + threepid = ( + { + "medium": row["medium"], + "address": row["address"], + "validated_at": row["validated_at"], + } + if row + else None + ) + + if row: + # Valid threepid returned, delete from the db + yield self.store.delete_threepid_session(threepid_creds["sid"]) + else: + raise SynapseError( + 400, "Password resets are not enabled on this homeserver" + ) + + if not threepid: + raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) + + if threepid["medium"] != medium: + raise LoginError( + 401, + "Expecting threepid of type '%s', got '%s'" + % (medium, threepid["medium"]), + errcode=Codes.UNAUTHORIZED, + ) + + threepid["threepid_creds"] = authdict["threepid_creds"] + + return threepid + + +class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker): + AUTH_TYPE = LoginType.EMAIL_IDENTITY + + def __init__(self, hs): + UserInteractiveAuthChecker.__init__(self, hs) + _BaseThreepidAuthChecker.__init__(self, hs) + + def check_auth(self, authdict, clientip): + return self._check_threepid("email", authdict) + + +class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker): + AUTH_TYPE = LoginType.MSISDN + + def __init__(self, hs): + UserInteractiveAuthChecker.__init__(self, hs) + _BaseThreepidAuthChecker.__init__(self, hs) + + def check_auth(self, authdict, clientip): + return self._check_threepid("msisdn", authdict) + + +INTERACTIVE_AUTH_CHECKERS = [ + DummyAuthChecker, + TermsAuthChecker, + RecaptchaAuthChecker, + EmailIdentityAuthChecker, + MsisdnAuthChecker, +] +"""A list of UserInteractiveAuthChecker classes""" diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index b9ef46e8fb..b6df1396ad 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -18,11 +18,22 @@ from twisted.internet.defer import succeed import synapse.rest.admin from synapse.api.constants import LoginType +from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker from synapse.rest.client.v2_alpha import auth, register from tests import unittest +class 
DummyRecaptchaChecker(UserInteractiveAuthChecker): + def __init__(self, hs): + super().__init__(hs) + self.recaptcha_attempts = [] + + def check_auth(self, authdict, clientip): + self.recaptcha_attempts.append((authdict, clientip)) + return succeed(True) + + class FallbackAuthTests(unittest.HomeserverTestCase): servlets = [ @@ -44,15 +55,9 @@ class FallbackAuthTests(unittest.HomeserverTestCase): return hs def prepare(self, reactor, clock, hs): + self.recaptcha_checker = DummyRecaptchaChecker(hs) auth_handler = hs.get_auth_handler() - - self.recaptcha_attempts = [] - - def _recaptcha(authdict, clientip): - self.recaptcha_attempts.append((authdict, clientip)) - return succeed(True) - - auth_handler.checkers[LoginType.RECAPTCHA] = _recaptcha + auth_handler.checkers[LoginType.RECAPTCHA] = self.recaptcha_checker @unittest.INFO def test_fallback_captcha(self): @@ -89,8 +94,9 @@ class FallbackAuthTests(unittest.HomeserverTestCase): self.assertEqual(request.code, 200) # The recaptcha handler is called with the response given - self.assertEqual(len(self.recaptcha_attempts), 1) - self.assertEqual(self.recaptcha_attempts[0][0]["response"], "a") + attempts = self.recaptcha_checker.recaptcha_attempts + self.assertEqual(len(attempts), 1) + self.assertEqual(attempts[0][0]["response"], "a") # also complete the dummy auth request, channel = self.make_request( -- cgit 1.4.1 From 5c1af6d1b8ea8bad770fe8a70d9badb28dcfb9b9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 25 Sep 2019 11:42:00 +0100 Subject: Newsfile --- changelog.d/6108.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6108.misc (limited to 'changelog.d') diff --git a/changelog.d/6108.misc b/changelog.d/6108.misc new file mode 100644 index 0000000000..6c3f9460e9 --- /dev/null +++ b/changelog.d/6108.misc @@ -0,0 +1 @@ +Remove `get_user_by_req` opentracing span and add some tags. -- cgit 1.4.1 From 990928abde4f3ccd7d43e6214abd7d36434953a9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 25 Sep 2019 12:10:26 +0100 Subject: Stop advertising unsupported flows for registration (#6107) If email or msisdn verification aren't supported, let's stop advertising them for registration. Fixes #6100. --- changelog.d/6107.bugfix | 1 + synapse/handlers/auth.py | 11 +++++++++- synapse/handlers/ui_auth/checkers.py | 26 +++++++++++++++++++++++ synapse/rest/client/v2_alpha/register.py | 32 ++++++++++++++++++++++++++--- tests/rest/client/v2_alpha/test_register.py | 29 +++++++++++++++----------- 5 files changed, 83 insertions(+), 16 deletions(-) create mode 100644 changelog.d/6107.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6107.bugfix b/changelog.d/6107.bugfix new file mode 100644 index 0000000000..d4b9516ac7 --- /dev/null +++ b/changelog.d/6107.bugfix @@ -0,0 +1 @@ +Ensure that servers which are not configured to support email address verification do not offer it in the registration flows. 
\ No newline at end of file diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index f920c2f6c1..333eb30625 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -61,7 +61,8 @@ class AuthHandler(BaseHandler): self.checkers = {} # type: dict[str, UserInteractiveAuthChecker] for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: inst = auth_checker_class(hs) - self.checkers[inst.AUTH_TYPE] = inst + if inst.is_enabled(): + self.checkers[inst.AUTH_TYPE] = inst self.bcrypt_rounds = hs.config.bcrypt_rounds @@ -156,6 +157,14 @@ class AuthHandler(BaseHandler): return params + def get_enabled_auth_types(self): + """Return the enabled user-interactive authentication types + + Returns the UI-Auth types which are supported by the homeserver's current + config. + """ + return self.checkers.keys() + @defer.inlineCallbacks def check_auth(self, flows, clientdict, clientip): """ diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index fd633b7b0e..ee69223243 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -32,6 +32,13 @@ class UserInteractiveAuthChecker: def __init__(self, hs): pass + def is_enabled(self): + """Check if the configuration of the homeserver allows this checker to work + + Returns: + bool: True if this login type is enabled. + """ + def check_auth(self, authdict, clientip): """Given the authentication dict from the client, attempt to check this step @@ -51,6 +58,9 @@ class UserInteractiveAuthChecker: class DummyAuthChecker(UserInteractiveAuthChecker): AUTH_TYPE = LoginType.DUMMY + def is_enabled(self): + return True + def check_auth(self, authdict, clientip): return defer.succeed(True) @@ -58,6 +68,9 @@ class DummyAuthChecker(UserInteractiveAuthChecker): class TermsAuthChecker(UserInteractiveAuthChecker): AUTH_TYPE = LoginType.TERMS + def is_enabled(self): + return True + def check_auth(self, authdict, clientip): return defer.succeed(True) @@ -67,10 +80,14 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker): def __init__(self, hs): super().__init__(hs) + self._enabled = bool(hs.config.recaptcha_private_key) self._http_client = hs.get_simple_http_client() self._url = hs.config.recaptcha_siteverify_api self._secret = hs.config.recaptcha_private_key + def is_enabled(self): + return self._enabled + @defer.inlineCallbacks def check_auth(self, authdict, clientip): try: @@ -191,6 +208,12 @@ class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChec UserInteractiveAuthChecker.__init__(self, hs) _BaseThreepidAuthChecker.__init__(self, hs) + def is_enabled(self): + return self.hs.config.threepid_behaviour_email in ( + ThreepidBehaviour.REMOTE, + ThreepidBehaviour.LOCAL, + ) + def check_auth(self, authdict, clientip): return self._check_threepid("email", authdict) @@ -202,6 +225,9 @@ class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker): UserInteractiveAuthChecker.__init__(self, hs) _BaseThreepidAuthChecker.__init__(self, hs) + def is_enabled(self): + return bool(self.hs.config.account_threepid_delegate_msisdn) + def check_auth(self, authdict, clientip): return self._check_threepid("msisdn", authdict) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index e3f3d9126f..4f24a124a6 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -32,12 +32,14 @@ from synapse.api.errors import ( ThreepidValidationError, UnrecognizedRequestError, ) +from 
synapse.config import ConfigError from synapse.config.captcha import CaptchaConfig from synapse.config.consent_config import ConsentConfig from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.config.registration import RegistrationConfig from synapse.config.server import is_threepid_reserved +from synapse.handlers.auth import AuthHandler from synapse.http.server import finish_request from synapse.http.servlet import ( RestServlet, @@ -375,7 +377,9 @@ class RegisterRestServlet(RestServlet): self.ratelimiter = hs.get_registration_ratelimiter() self.clock = hs.get_clock() - self._registration_flows = _calculate_registration_flows(hs.config) + self._registration_flows = _calculate_registration_flows( + hs.config, self.auth_handler + ) @interactive_auth_handler @defer.inlineCallbacks @@ -664,11 +668,13 @@ class RegisterRestServlet(RestServlet): def _calculate_registration_flows( # technically `config` has to provide *all* of these interfaces, not just one config: Union[RegistrationConfig, ConsentConfig, CaptchaConfig], + auth_handler: AuthHandler, ) -> List[List[str]]: """Get a suitable flows list for registration Args: config: server configuration + auth_handler: authorization handler Returns: a list of supported flows """ @@ -678,10 +684,29 @@ def _calculate_registration_flows( require_msisdn = "msisdn" in config.registrations_require_3pid show_msisdn = True + show_email = True + if config.disable_msisdn_registration: show_msisdn = False require_msisdn = False + enabled_auth_types = auth_handler.get_enabled_auth_types() + if LoginType.EMAIL_IDENTITY not in enabled_auth_types: + show_email = False + if require_email: + raise ConfigError( + "Configuration requires email address at registration, but email " + "validation is not configured" + ) + + if LoginType.MSISDN not in enabled_auth_types: + show_msisdn = False + if require_msisdn: + raise ConfigError( + "Configuration requires msisdn at registration, but msisdn " + "validation is not configured" + ) + flows = [] # only support 3PIDless registration if no 3PIDs are required @@ -693,14 +718,15 @@ def _calculate_registration_flows( flows.append([LoginType.DUMMY]) # only support the email-only flow if we don't require MSISDN 3PIDs - if not require_msisdn: + if show_email and not require_msisdn: flows.append([LoginType.EMAIL_IDENTITY]) # only support the MSISDN-only flow if we don't require email 3PIDs if show_msisdn and not require_email: flows.append([LoginType.MSISDN]) - if show_msisdn: + if show_email and show_msisdn: + # always let users provide both MSISDN & email flows.append([LoginType.MSISDN, LoginType.EMAIL_IDENTITY]) # Prepend m.login.terms to all flows if we're requiring consent diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index bc2dc47973..dab87e5edf 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -198,16 +198,8 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.assertEquals(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] - # with the stock config, we expect all four combinations of 3pid - self.assertCountEqual( - [ - ["m.login.dummy"], - ["m.login.email.identity"], - ["m.login.msisdn"], - ["m.login.msisdn", "m.login.email.identity"], - ], - (f["stages"] for f in flows), - ) + # with the stock config, we only expect the dummy flow + self.assertCountEqual([["m.login.dummy"]], 
(f["stages"] for f in flows)) @unittest.override_config( { @@ -217,9 +209,13 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "template_dir": "/", "require_at_registration": True, }, + "account_threepid_delegates": { + "email": "https://id_server", + "msisdn": "https://id_server", + }, } ) - def test_advertised_flows_captcha_and_terms(self): + def test_advertised_flows_captcha_and_terms_and_3pids(self): request, channel = self.make_request(b"POST", self.url, b"{}") self.render(request) self.assertEquals(channel.result["code"], b"401", channel.result) @@ -241,7 +237,16 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ) @unittest.override_config( - {"registrations_require_3pid": ["email"], "disable_msisdn_registration": True} + { + "public_baseurl": "https://test_server", + "registrations_require_3pid": ["email"], + "disable_msisdn_registration": True, + "email": { + "smtp_host": "mail_server", + "smtp_port": 2525, + "notif_from": "sender@host", + }, + } ) def test_advertised_flows_no_msisdn_email_required(self): request, channel = self.make_request(b"POST", self.url, b"{}") -- cgit 1.4.1 From 77dc7093a738ec4e172c92b7a53d58aa41bfec0a Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 25 Sep 2019 12:29:35 +0100 Subject: Threepid validity checks on msisdns should not be dependent on 'threepid_behaviour_email'. (#6104) Fixes #6103 --- changelog.d/6104.bugfix | 1 + synapse/handlers/ui_auth/checkers.py | 63 +++++++++++++++++++----------------- 2 files changed, 35 insertions(+), 29 deletions(-) create mode 100644 changelog.d/6104.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6104.bugfix b/changelog.d/6104.bugfix new file mode 100644 index 0000000000..41114a66ef --- /dev/null +++ b/changelog.d/6104.bugfix @@ -0,0 +1 @@ +Threepid validity checks on msisdns should not be dependent on 'threepid_behaviour_email'. diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index ee69223243..29aa1e5aaf 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -148,42 +148,47 @@ class _BaseThreepidAuthChecker: identity_handler = self.hs.get_handlers().identity_handler logger.info("Getting validated threepid. 
threepidcreds: %r", (threepid_creds,)) - if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - if medium == "email": + + # msisdns are currently always ThreepidBehaviour.REMOTE + if medium == "msisdn": + if not self.hs.config.account_threepid_delegate_msisdn: + raise SynapseError( + 400, "Phone number verification is not enabled on this homeserver" + ) + threepid = yield identity_handler.threepid_from_creds( + self.hs.config.account_threepid_delegate_msisdn, threepid_creds + ) + elif medium == "email": + if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + assert self.hs.config.account_threepid_delegate_email threepid = yield identity_handler.threepid_from_creds( self.hs.config.account_threepid_delegate_email, threepid_creds ) - elif medium == "msisdn": - threepid = yield identity_handler.threepid_from_creds( - self.hs.config.account_threepid_delegate_msisdn, threepid_creds + elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + threepid = None + row = yield self.store.get_threepid_validation_session( + medium, + threepid_creds["client_secret"], + sid=threepid_creds["sid"], + validated=True, ) - else: - raise SynapseError(400, "Unrecognized threepid medium: %s" % (medium,)) - elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: - row = yield self.store.get_threepid_validation_session( - medium, - threepid_creds["client_secret"], - sid=threepid_creds["sid"], - validated=True, - ) - threepid = ( - { - "medium": row["medium"], - "address": row["address"], - "validated_at": row["validated_at"], - } - if row - else None - ) + if row: + threepid = { + "medium": row["medium"], + "address": row["address"], + "validated_at": row["validated_at"], + } - if row: - # Valid threepid returned, delete from the db - yield self.store.delete_threepid_session(threepid_creds["sid"]) + # Valid threepid returned, delete from the db + yield self.store.delete_threepid_session(threepid_creds["sid"]) + else: + raise SynapseError( + 400, "Email address verification is not enabled on this homeserver" + ) else: - raise SynapseError( - 400, "Password resets are not enabled on this homeserver" - ) + # this can't happen! + raise AssertionError("Unrecognized threepid medium: %s" % (medium,)) if not threepid: raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) -- cgit 1.4.1 From a4f3ca48b5250a1c2c4de8a363f69bbeb0adeefd Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 25 Sep 2019 17:27:35 +0100 Subject: Enable cleaning up extremities with dummy events by default to prevent undue build up of forward extremities. (#5884) --- changelog.d/5884.feature | 1 + synapse/config/server.py | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5884.feature (limited to 'changelog.d') diff --git a/changelog.d/5884.feature b/changelog.d/5884.feature new file mode 100644 index 0000000000..bfd0489392 --- /dev/null +++ b/changelog.d/5884.feature @@ -0,0 +1 @@ +Enable cleaning up extremities with dummy events by default to prevent undue build up of forward extremities. diff --git a/synapse/config/server.py b/synapse/config/server.py index 419787a89c..3a7a49bc91 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -355,10 +355,8 @@ class ServerConfig(Config): _check_resource_config(self.listeners) - # An experimental option to try and periodically clean up extremities - # by sending dummy events. 
self.cleanup_extremities_with_dummy_events = config.get( - "cleanup_extremities_with_dummy_events", False + "cleanup_extremities_with_dummy_events", True ) def has_tls_listener(self): -- cgit 1.4.1 From 034db2ba2115d935ce62b641b4051e477a454eac Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 26 Sep 2019 11:47:53 +0100 Subject: Fix dummy event insertion consent bug (#6053) Fixes #5905 --- changelog.d/6053.bugfix | 1 + synapse/handlers/message.py | 99 ++++++++++++++++------ synapse/storage/event_federation.py | 18 +++- tests/storage/test_cleanup_extrems.py | 147 +++++++++++++++++++++++++++++++-- tests/storage/test_event_federation.py | 40 +++++++++ 5 files changed, 266 insertions(+), 39 deletions(-) create mode 100644 changelog.d/6053.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6053.bugfix b/changelog.d/6053.bugfix new file mode 100644 index 0000000000..6311157bf6 --- /dev/null +++ b/changelog.d/6053.bugfix @@ -0,0 +1 @@ +Prevent exceptions being logged when extremity-cleanup events fail due to lack of user consent to the terms of service. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 1f8272784e..0f8cce8ffe 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -222,6 +222,13 @@ class MessageHandler(object): } +# The duration (in ms) after which rooms should be removed +# `_rooms_to_exclude_from_dummy_event_insertion` (with the effect that we will try +# to generate a dummy event for them once more) +# +_DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY = 7 * 24 * 60 * 60 * 1000 + + class EventCreationHandler(object): def __init__(self, hs): self.hs = hs @@ -258,6 +265,13 @@ class EventCreationHandler(object): self.config.block_events_without_consent_error ) + # Rooms which should be excluded from dummy insertion. (For instance, + # those without local users who can send events into the room). + # + # map from room id to time-of-last-attempt. + # + self._rooms_to_exclude_from_dummy_event_insertion = {} # type: dict[str, int] + # we need to construct a ConsentURIBuilder here, as it checks that the necessary # config options, but *only* if we have a configuration for which we are # going to need it. @@ -888,9 +902,11 @@ class EventCreationHandler(object): """Background task to send dummy events into rooms that have a large number of extremities """ - + self._expire_rooms_to_exclude_from_dummy_event_insertion() room_ids = yield self.store.get_rooms_with_many_extremities( - min_count=10, limit=5 + min_count=10, + limit=5, + room_id_filter=self._rooms_to_exclude_from_dummy_event_insertion.keys(), ) for room_id in room_ids: @@ -904,32 +920,61 @@ class EventCreationHandler(object): members = yield self.state.get_current_users_in_room( room_id, latest_event_ids=latest_event_ids ) + dummy_event_sent = False + for user_id in members: + if not self.hs.is_mine_id(user_id): + continue + requester = create_requester(user_id) + try: + event, context = yield self.create_event( + requester, + { + "type": "org.matrix.dummy_event", + "content": {}, + "room_id": room_id, + "sender": user_id, + }, + prev_events_and_hashes=prev_events_and_hashes, + ) - user_id = None - for member in members: - if self.hs.is_mine_id(member): - user_id = member - break - - if not user_id: - # We don't have a joined user. - # TODO: We should do something here to stop the room from - # appearing next time. 
- continue + event.internal_metadata.proactively_send = False - requester = create_requester(user_id) + yield self.send_nonmember_event( + requester, event, context, ratelimit=False + ) + dummy_event_sent = True + break + except ConsentNotGivenError: + logger.info( + "Failed to send dummy event into room %s for user %s due to " + "lack of consent. Will try another user" % (room_id, user_id) + ) + except AuthError: + logger.info( + "Failed to send dummy event into room %s for user %s due to " + "lack of power. Will try another user" % (room_id, user_id) + ) - event, context = yield self.create_event( - requester, - { - "type": "org.matrix.dummy_event", - "content": {}, - "room_id": room_id, - "sender": user_id, - }, - prev_events_and_hashes=prev_events_and_hashes, + if not dummy_event_sent: + # Did not find a valid user in the room, so remove from future attempts + # Exclusion is time limited, so the room will be rechecked in the future + # dependent on _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY + logger.info( + "Failed to send dummy event into room %s. Will exclude it from " + "future attempts until cache expires" % (room_id,) + ) + now = self.clock.time_msec() + self._rooms_to_exclude_from_dummy_event_insertion[room_id] = now + + def _expire_rooms_to_exclude_from_dummy_event_insertion(self): + expire_before = self.clock.time_msec() - _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY + to_expire = set() + for room_id, time in self._rooms_to_exclude_from_dummy_event_insertion.items(): + if time < expire_before: + to_expire.add(room_id) + for room_id in to_expire: + logger.debug( + "Expiring room id %s from dummy event insertion exclusion cache", + room_id, ) - - event.internal_metadata.proactively_send = False - - yield self.send_nonmember_event(requester, event, context, ratelimit=False) + del self._rooms_to_exclude_from_dummy_event_insertion[room_id] diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 4f500d893e..f5e8c39262 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import itertools import logging import random @@ -190,12 +191,13 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas room_id, ) - def get_rooms_with_many_extremities(self, min_count, limit): + def get_rooms_with_many_extremities(self, min_count, limit, room_id_filter): """Get the top rooms with at least N extremities. Args: min_count (int): The minimum number of extremities limit (int): The maximum number of rooms to return. + room_id_filter (iterable[str]): room_ids to exclude from the results Returns: Deferred[list]: At most `limit` room IDs that have at least @@ -203,15 +205,25 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas """ def _get_rooms_with_many_extremities_txn(txn): + where_clause = "1=1" + if room_id_filter: + where_clause = "room_id NOT IN (%s)" % ( + ",".join("?" for _ in room_id_filter), + ) + sql = """ SELECT room_id FROM event_forward_extremities + WHERE %s GROUP BY room_id HAVING count(*) > ? ORDER BY count(*) DESC LIMIT ? 
-            """
+            """ % (
+                where_clause,
+            )
 
-            txn.execute(sql, (min_count, limit))
+            query_args = list(itertools.chain(room_id_filter, [min_count, limit]))
+            txn.execute(sql, query_args)
             return [room_id for room_id, in txn]
 
         return self.runInteraction(
diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py
index e9e2d5337c..34f9c72709 100644
--- a/tests/storage/test_cleanup_extrems.py
+++ b/tests/storage/test_cleanup_extrems.py
@@ -14,7 +14,13 @@
 # limitations under the License.
 
 import os.path
+from unittest.mock import patch
 
+from mock import Mock
+
+import synapse.rest.admin
+from synapse.api.constants import EventTypes
+from synapse.rest.client.v1 import login, room
 from synapse.storage import prepare_database
 from synapse.types import Requester, UserID
 
@@ -225,6 +231,14 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
 
 
 class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
+    CONSENT_VERSION = "1"
+    EXTREMITIES_COUNT = 50
+    servlets = [
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
     def make_homeserver(self, reactor, clock):
         config = self.default_config()
         config["cleanup_extremities_with_dummy_events"] = True
@@ -233,33 +247,148 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
     def prepare(self, reactor, clock, homeserver):
         self.store = homeserver.get_datastore()
         self.room_creator = homeserver.get_room_creation_handler()
+        self.event_creator_handler = homeserver.get_event_creation_handler()
 
         # Create a test user and room
-        self.user = UserID("alice", "test")
+        self.user = UserID.from_string(self.register_user("user1", "password"))
+        self.token1 = self.login("user1", "password")
         self.requester = Requester(self.user, None, False, None, None)
         info = self.get_success(self.room_creator.create_room(self.requester, {}))
         self.room_id = info["room_id"]
+        self.event_creator = homeserver.get_event_creation_handler()
+        homeserver.config.user_consent_version = self.CONSENT_VERSION
 
     def test_send_dummy_event(self):
-        # Create a bushy graph with 50 extremities.
-
-        event_id_start = self.create_and_send_event(self.room_id, self.user)
+        self._create_extremity_rich_graph()
 
-        for _ in range(50):
-            self.create_and_send_event(
-                self.room_id, self.user, prev_event_ids=[event_id_start]
-            )
+        # Pump the reactor repeatedly so that the background updates have a
+        # chance to run.
+        self.pump(10 * 60)
 
         latest_event_ids = self.get_success(
            self.store.get_latest_event_ids_in_room(self.room_id)
         )
-        self.assertEqual(len(latest_event_ids), 50)
+        self.assertTrue(len(latest_event_ids) < 10, len(latest_event_ids))
 
+    @patch("synapse.handlers.message._DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY", new=0)
+    def test_send_dummy_events_when_insufficient_power(self):
+        self._create_extremity_rich_graph()
+        # Cripple power levels
+        self.helper.send_state(
+            self.room_id,
+            EventTypes.PowerLevels,
+            body={"users": {str(self.user): -1}},
+            tok=self.token1,
+        )
+        # Pump the reactor repeatedly so that the background updates have a
+        # chance to run.
self.pump(10 * 60) + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + # Check that the room has not been pruned + self.assertTrue(len(latest_event_ids) > 10) + + # New user with regular levels + user2 = self.register_user("user2", "password") + token2 = self.login("user2", "password") + self.helper.join(self.room_id, user2, tok=token2) + self.pump(10 * 60) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertTrue(len(latest_event_ids) < 10, len(latest_event_ids)) + + @patch("synapse.handlers.message._DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY", new=0) + def test_send_dummy_event_without_consent(self): + self._create_extremity_rich_graph() + self._enable_consent_checking() + + # Pump the reactor repeatedly so that the background updates have a + # chance to run. Attempt to add dummy event with user that has not consented + # Check that dummy event send fails. + self.pump(10 * 60) + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertTrue(len(latest_event_ids) == self.EXTREMITIES_COUNT) + + # Create new user, and add consent + user2 = self.register_user("user2", "password") + token2 = self.login("user2", "password") + self.get_success( + self.store.user_set_consent_version(user2, self.CONSENT_VERSION) + ) + self.helper.join(self.room_id, user2, tok=token2) + + # Background updates should now cause a dummy event to be added to the graph + self.pump(10 * 60) + latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) self.assertTrue(len(latest_event_ids) < 10, len(latest_event_ids)) + + @patch("synapse.handlers.message._DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY", new=250) + def test_expiry_logic(self): + """Simple test to ensure that _expire_rooms_to_exclude_from_dummy_event_insertion() + expires old entries correctly. 
+ """ + self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ + "1" + ] = 100000 + self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ + "2" + ] = 200000 + self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ + "3" + ] = 300000 + self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion() + # All entries within time frame + self.assertEqual( + len( + self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion + ), + 3, + ) + # Oldest room to expire + self.pump(1) + self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion() + self.assertEqual( + len( + self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion + ), + 2, + ) + # All rooms to expire + self.pump(2) + self.assertEqual( + len( + self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion + ), + 0, + ) + + def _create_extremity_rich_graph(self): + """Helper method to create bushy graph on demand""" + + event_id_start = self.create_and_send_event(self.room_id, self.user) + + for _ in range(self.EXTREMITIES_COUNT): + self.create_and_send_event( + self.room_id, self.user, prev_event_ids=[event_id_start] + ) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(len(latest_event_ids), 50) + + def _enable_consent_checking(self): + """Helper method to enable consent checking""" + self.event_creator._block_events_without_consent_error = "No consent from user" + consent_uri_builder = Mock() + consent_uri_builder.build_user_consent_uri.return_value = "http://example.com" + self.event_creator._consent_uri_builder = consent_uri_builder diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 86c7ac350d..b58386994e 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -75,3 +75,43 @@ class EventFederationWorkerStoreTestCase(tests.unittest.TestCase): el = r[i] depth = el[2] self.assertLessEqual(5, depth) + + @defer.inlineCallbacks + def test_get_rooms_with_many_extremities(self): + room1 = "#room1" + room2 = "#room2" + room3 = "#room3" + + def insert_event(txn, i, room_id): + event_id = "$event_%i:local" % i + txn.execute( + ( + "INSERT INTO event_forward_extremities (room_id, event_id) " + "VALUES (?, ?)" + ), + (room_id, event_id), + ) + + for i in range(0, 20): + yield self.store.runInteraction("insert", insert_event, i, room1) + yield self.store.runInteraction("insert", insert_event, i, room2) + yield self.store.runInteraction("insert", insert_event, i, room3) + + # Test simple case + r = yield self.store.get_rooms_with_many_extremities(5, 5, []) + self.assertEqual(len(r), 3) + + # Does filter work? + + r = yield self.store.get_rooms_with_many_extremities(5, 5, [room1]) + self.assertTrue(room2 in r) + self.assertTrue(room3 in r) + self.assertEqual(len(r), 2) + + r = yield self.store.get_rooms_with_many_extremities(5, 5, [room1, room2]) + self.assertEqual(r, [room3]) + + # Does filter and limit work? 
+ + r = yield self.store.get_rooms_with_many_extremities(5, 1, [room1]) + self.assertTrue(r == [room2] or r == [room3]) -- cgit 1.4.1 From 1b23f991abb99c50908aca7c4ccfdea0c789c900 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 26 Sep 2019 12:30:10 +0100 Subject: Clarify upgrade notes ahead of 1.4.0 release --- UPGRADE.rst | 193 ++++++++++++++++++++++++++++++++++++++++----------- changelog.d/6027.doc | 1 + 2 files changed, 152 insertions(+), 42 deletions(-) create mode 100644 changelog.d/6027.doc (limited to 'changelog.d') diff --git a/UPGRADE.rst b/UPGRADE.rst index 4ede973a08..9562114d59 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -78,52 +78,160 @@ for example: Upgrading to v1.4.0 =================== -Config options --------------- - -**Note: Registration by email address or phone number will not work in this release unless -some config options are changed from their defaults.** - -This is due to Synapse v1.4.0 now defaulting to sending registration and password reset tokens -itself. This is for security reasons as well as putting less reliance on identity servers. -However, currently Synapse only supports sending emails, and does not have support for -phone-based password reset or account registration. If Synapse is configured to handle these on -its own, phone-based password resets and registration will be disabled. For Synapse to send -emails, the ``email`` block of the config must be filled out. If not, then password resets and -registration via email will be disabled entirely. - -This release also deprecates the ``email.trust_identity_server_for_password_resets`` option and -replaces it with the ``account_threepid_delegates`` dictionary. This option defines whether the -homeserver should delegate an external server (typically an `identity server -`_) to handle sending password reset or -registration messages via email and SMS. - -If ``email.trust_identity_server_for_password_resets`` is set to ``true``, and -``account_threepid_delegates.email`` is not set, then the first entry in -``trusted_third_party_id_servers`` will be used as the account threepid delegate for email. -This is to ensure compatibility with existing Synapse installs that set up external server -handling for these tasks before v1.4.0. If ``email.trust_identity_server_for_password_resets`` -is ``true`` and no trusted identity server domains are configured, Synapse will throw an error. +New custom templates +-------------------- -If ``email.trust_identity_server_for_password_resets`` is ``false`` or absent and a threepid -type in ``account_threepid_delegates`` is not set to a domain, then Synapse will attempt to -send password reset and registration messages for that type. +If you have configured a custom template directory with the +``email.template_dir`` option, be aware that there are new templates regarding +registration and threepid management (see below) that must be included. -Email templates ---------------- +* ``registration.html`` and ``registration.txt`` +* ``registration_success.html`` and ``registration_failure.html`` +* ``add_threepid.html`` and ``add_threepid.txt`` +* ``add_threepid_failure.html`` and ``add_threepid_success.html`` -If you have configured a custom template directory with the ``email.template_dir`` option, be -aware that there are new templates regarding registration. ``registration.html`` and -``registration.txt`` have been added and contain the content that is sent to a client upon -registering via an email address. 
+Synapse will expect these files to exist inside the configured template
+directory, and **will fail to start** if they are absent.
+To view the default templates, see `synapse/res/templates
+`_.
-``registration_success.html`` and ``registration_failure.html`` are also new HTML templates
-that will be shown to the user when they click the link in their registration emai , either
-showing them a success or failure page (assuming a redirect URL is not configured).
+3pid verification changes
+-------------------------
+
+**Note: As of this release, users will be unable to add phone numbers or email
+addresses to their accounts without changes to the Synapse configuration. This
+includes adding an email address during registration.**
+
+It is possible for a user to associate an email address or phone number
+with their account, for a number of reasons:
+
+* for use when logging in, as an alternative to the user id.
+* in the case of email, as an alternative contact to help with account recovery.
+* in the case of email, to receive notifications of missed messages.
+
+Before an email address or phone number can be added to a user's account,
+or before such an address is used to carry out a password-reset, Synapse must
+confirm the operation with the owner of the email address or phone number.
+It does this by sending an email or text giving the user a link or token to confirm
+receipt. This process is known as '3pid verification'. ('3pid', or 'threepid',
+stands for third-party identifier, and we use it to refer to external
+identifiers such as email addresses and phone numbers.)
+
+Previous versions of Synapse delegated the task of 3pid verification to an
+identity server by default. In most cases this server is ``vector.im`` or
+``matrix.org``.
+
+In Synapse 1.4.0, for security and privacy reasons, the homeserver will no
+longer delegate this task to an identity server by default. Instead,
+the server administrator will need to explicitly decide how they would like the
+verification messages to be sent.
+
+In the medium term, the ``vector.im`` and ``matrix.org`` identity servers will
+disable support for delegated 3pid verification entirely. However, in order to
+ease the transition, they will retain the capability for a limited
+period. Delegated email verification will be disabled on Monday 2nd December
+2019 (giving roughly 2 months' notice). Disabling delegated SMS verification
+will follow some time after that once SMS verification support lands in
+Synapse.
+
+Once delegated 3pid verification support has been disabled in the ``vector.im`` and
+``matrix.org`` identity servers, all Synapse versions that depend on those
+instances will be unable to verify email and phone numbers through them. There
+are no imminent plans to remove delegated 3pid verification from Sydent
+generally. (Sydent is the identity server project that backs the ``vector.im`` and
+``matrix.org`` instances).
-Synapse will expect these files to exist inside the configured template directory. To view the
-default templates, see `synapse/res/templates
-`_.
+Email
+~~~~~
+Following upgrade, to continue verifying email (e.g. as part of the
+registration process), admins can either:
+
+* Configure Synapse to use an email server.
+* Run or choose an identity server which allows delegated email verification
+  and delegate to it.
+
+Configure SMTP in Synapse
+++++++++++++++++++++++++++
+
+To configure an SMTP server for Synapse, modify the configuration section
+headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``,
+and ``notif_from`` fields filled out.
+
+You may also need to set ``smtp_user``, ``smtp_pass``, and
+``require_transport_security``.
+
+See the `sample configuration file `_ for more details
+on these settings. A combined example covering both email and phone-number
+verification is shown at the end of this section.
+
+Delegate email to an identity server
++++++++++++++++++++++++++++++++++++++
+
+Some admins will wish to continue using email verification as part of the
+registration process, but will not immediately have an appropriate SMTP server
+at hand.
+
+To this end, we will continue to support email verification delegation via the
+``vector.im`` and ``matrix.org`` identity servers for two months. Support for
+delegated email verification will be disabled on Monday 2nd December.
+
+The ``account_threepid_delegates`` dictionary defines whether the homeserver
+should delegate an external server (typically an `identity server
+`_) to handle sending
+confirmation messages via email and SMS.
+
+So to delegate email verification, in ``homeserver.yaml``, set
+``account_threepid_delegates.email`` to the base URL of an identity server. For
+example:
+
+.. code:: yaml
+
+   account_threepid_delegates:
+       email: https://example.com     # Delegate email sending to example.com
+
+Note that ``account_threepid_delegates.email`` replaces the deprecated
+``email.trust_identity_server_for_password_resets``: if
+``email.trust_identity_server_for_password_resets`` is set to ``true``, and
+``account_threepid_delegates.email`` is not set, then the first entry in
+``trusted_third_party_id_servers`` will be used as the
+``account_threepid_delegate`` for email. This is to ensure compatibility with
+existing Synapse installs that set up external server handling for these tasks
+before v1.4.0. If ``email.trust_identity_server_for_password_resets`` is
+``true`` and no trusted identity server domains are configured, Synapse will
+report an error and refuse to start.
+
+If ``email.trust_identity_server_for_password_resets`` is ``false`` or absent
+and no ``email`` delegate is configured in ``account_threepid_delegates``,
+then Synapse will send email verification messages itself, using the configured
+SMTP server (see above).
+
+Phone numbers
+~~~~~~~~~~~~~
+
+Synapse does not support phone-number verification itself, so the only way to
+maintain the ability for users to add phone numbers to their accounts will be
+by continuing to delegate phone number verification to the ``matrix.org`` and
+``vector.im`` identity servers (or another identity server that supports SMS
+sending).
+
+The ``account_threepid_delegates`` dictionary defines whether the homeserver
+should delegate an external server (typically an `identity server
+`_) to handle sending
+confirmation messages via email and SMS.
+
+So to delegate phone number verification, in ``homeserver.yaml``, set
+``account_threepid_delegates.msisdn`` to the base URL of an identity
+server. For example:
+
+.. code:: yaml
+
+   account_threepid_delegates:
+       msisdn: https://example.com     # Delegate SMS sending to example.com
+
+The ``matrix.org`` and ``vector.im`` identity servers will continue to support
+delegated phone number verification via SMS until such time as it is possible
+for admins to configure their servers to perform phone number verification
+directly. More details will follow in a future release.
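+
+As an illustration, a homeserver which sends its own verification emails but
+delegates SMS verification might combine the settings above along the following
+lines. This is a sketch only: the hostname, credentials and identity server
+below are placeholders, and the full set of ``email`` options is described in
+the sample configuration file.
+
+.. code:: yaml
+
+   email:
+      smtp_host: mail.example.com        # placeholder SMTP server
+      smtp_port: 587
+      smtp_user: "synapse"               # only needed if your SMTP server
+      smtp_pass: "secret"                # requires authentication
+      require_transport_security: true
+      notif_from: "noreply@example.com"
+
+   account_threepid_delegates:
+       msisdn: https://example.com       # placeholder identity server for SMS
+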
Rolling back to v1.3.1 ---------------------- @@ -140,7 +248,8 @@ v1.3.1, subject to the following: The room statistics are essentially unused in v1.3.1 (in future versions of Synapse, they will be used to populate the room directory), so there should be no loss of functionality. However, the statistics engine will write errors - to the logs, which can be avoided by setting the following in `homeserver.yaml`: + to the logs, which can be avoided by setting the following in + `homeserver.yaml`: .. code:: yaml diff --git a/changelog.d/6027.doc b/changelog.d/6027.doc new file mode 100644 index 0000000000..f0af68f3b1 --- /dev/null +++ b/changelog.d/6027.doc @@ -0,0 +1 @@ +Clarify Synapse 1.4.0 upgrade notes. -- cgit 1.4.1 From 8b8f8c7b3c6136ea777265fff8052afed2b7031e Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 26 Sep 2019 12:57:01 +0100 Subject: Explicitly log when a homeserver does not have a trusted key server configured (#6090) --- changelog.d/6090.feature | 1 + docs/sample_config.yaml | 14 ++++++++++---- synapse/config/key.py | 48 ++++++++++++++++++++++++++++++++++++++++++++---- synapse/config/server.py | 16 ++++++++-------- 4 files changed, 63 insertions(+), 16 deletions(-) create mode 100644 changelog.d/6090.feature (limited to 'changelog.d') diff --git a/changelog.d/6090.feature b/changelog.d/6090.feature new file mode 100644 index 0000000000..a6da448a1a --- /dev/null +++ b/changelog.d/6090.feature @@ -0,0 +1 @@ +Explicitly log when a homeserver does not have the 'trusted_key_servers' config field configured. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8f801daf35..254e1b17b4 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1072,6 +1072,10 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key" # This setting supercedes an older setting named `perspectives`. The old format # is still supported for backwards-compatibility, but it is deprecated. # +# 'trusted_key_servers' defaults to matrix.org, but using it will generate a +# warning on start-up. To suppress this warning, set +# 'suppress_key_server_warning' to true. +# # Options for each entry in the list include: # # server_name: the name of the server. required. @@ -1096,11 +1100,13 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key" # "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr" # - server_name: "my_other_trusted_server.example.com" # -# The default configuration is: -# -#trusted_key_servers: -# - server_name: "matrix.org" +trusted_key_servers: + - server_name: "matrix.org" + +# Uncomment the following to disable the warning that is emitted when the +# trusted_key_servers include 'matrix.org'. See above. # +#suppress_key_server_warning: true # The signing keys to use when acting as a trusted key server. If not specified # defaults to the server signing key. diff --git a/synapse/config/key.py b/synapse/config/key.py index ba2199bceb..f039f96e9c 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -50,6 +50,33 @@ and you should enable 'federation_verify_certificates' in your configuration. If you are *sure* you want to do this, set 'accept_keys_insecurely' on the trusted_key_server configuration.""" +TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN = """\ +Synapse requires that a list of trusted key servers are specified in order to +provide signing keys for other servers in the federation. + +This homeserver does not have a trusted key server configured in +homeserver.yaml and will fall back to the default of 'matrix.org'. 
+ +Trusted key servers should be long-lived and stable which makes matrix.org a +good choice for many admins, but some admins may wish to choose another. To +suppress this warning, the admin should set 'trusted_key_servers' in +homeserver.yaml to their desired key server and 'suppress_key_server_warning' +to 'true'. + +In a future release the software-defined default will be removed entirely and +the trusted key server will be defined exclusively by the value of +'trusted_key_servers'. +--------------------------------------------------------------------------------""" + +TRUSTED_KEY_SERVER_CONFIGURED_AS_M_ORG_WARN = """\ +This server is configured to use 'matrix.org' as its trusted key server via the +'trusted_key_servers' config option. 'matrix.org' is a good choice for a key +server since it is long-lived, stable and trusted. However, some admins may +wish to use another server for this purpose. + +To suppress this warning and continue using 'matrix.org', admins should set +'suppress_key_server_warning' to 'true' in homeserver.yaml. +--------------------------------------------------------------------------------""" logger = logging.getLogger(__name__) @@ -85,6 +112,7 @@ class KeyConfig(Config): config.get("key_refresh_interval", "1d") ) + suppress_key_server_warning = config.get("suppress_key_server_warning", False) key_server_signing_keys_path = config.get("key_server_signing_keys_path") if key_server_signing_keys_path: self.key_server_signing_keys = self.read_signing_keys( @@ -95,6 +123,7 @@ class KeyConfig(Config): # if neither trusted_key_servers nor perspectives are given, use the default. if "perspectives" not in config and "trusted_key_servers" not in config: + logger.warn(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN) key_servers = [{"server_name": "matrix.org"}] else: key_servers = config.get("trusted_key_servers", []) @@ -108,6 +137,11 @@ class KeyConfig(Config): # merge the 'perspectives' config into the 'trusted_key_servers' config. key_servers.extend(_perspectives_to_key_servers(config)) + if not suppress_key_server_warning and "matrix.org" in ( + s["server_name"] for s in key_servers + ): + logger.warning(TRUSTED_KEY_SERVER_CONFIGURED_AS_M_ORG_WARN) + # list of TrustedKeyServer objects self.key_servers = list( _parse_key_servers(key_servers, self.federation_verify_certificates) @@ -190,6 +224,10 @@ class KeyConfig(Config): # This setting supercedes an older setting named `perspectives`. The old format # is still supported for backwards-compatibility, but it is deprecated. # + # 'trusted_key_servers' defaults to matrix.org, but using it will generate a + # warning on start-up. To suppress this warning, set + # 'suppress_key_server_warning' to true. + # # Options for each entry in the list include: # # server_name: the name of the server. required. @@ -214,11 +252,13 @@ class KeyConfig(Config): # "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr" # - server_name: "my_other_trusted_server.example.com" # - # The default configuration is: - # - #trusted_key_servers: - # - server_name: "matrix.org" + trusted_key_servers: + - server_name: "matrix.org" + + # Uncomment the following to disable the warning that is emitted when the + # trusted_key_servers include 'matrix.org'. See above. # + #suppress_key_server_warning: true # The signing keys to use when acting as a trusted key server. If not specified # defaults to the server signing key. 
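For illustration, both warnings above can be avoided by explicitly configuring
a trusted key server in homeserver.yaml. A minimal sketch (the server name is a
placeholder; admins who keep matrix.org would instead set
suppress_key_server_warning):

.. code:: yaml

   trusted_key_servers:
     - server_name: "keys.example.com"   # placeholder: a long-lived key server
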
diff --git a/synapse/config/server.py b/synapse/config/server.py index 9d3f1b5bfc..5ad7ee911d 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -48,6 +48,13 @@ ROOM_COMPLEXITY_TOO_GREAT = ( "to join this room." ) +METRICS_PORT_WARNING = """\ +The metrics_port configuration option is deprecated in Synapse 0.31 in favour of +a listener. Please see +https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md +on how to configure the new listener. +--------------------------------------------------------------------------------""" + class ServerConfig(Config): def read_config(self, config, **kwargs): @@ -341,14 +348,7 @@ class ServerConfig(Config): metrics_port = config.get("metrics_port") if metrics_port: - logger.warn( - ( - "The metrics_port configuration option is deprecated in Synapse 0.31 " - "in favour of a listener. Please see " - "http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md" - " on how to configure the new listener." - ) - ) + logger.warning(METRICS_PORT_WARNING) self.listeners.append( { -- cgit 1.4.1 From 3fbca80a8da753e07dcf6c9539978c45c06cd1e1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Sep 2019 12:34:35 +0100 Subject: changelog --- CHANGES.md | 156 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/5633.bugfix | 1 - changelog.d/5680.misc | 1 - changelog.d/5771.feature | 1 - changelog.d/5776.misc | 1 - changelog.d/5835.feature | 1 - changelog.d/5844.misc | 1 - changelog.d/5845.feature | 1 - changelog.d/5849.doc | 1 - changelog.d/5850.feature | 1 - changelog.d/5852.feature | 1 - changelog.d/5853.feature | 1 - changelog.d/5855.misc | 1 - changelog.d/5856.feature | 1 - changelog.d/5857.bugfix | 1 - changelog.d/5859.feature | 1 - changelog.d/5860.misc | 1 - changelog.d/5863.bugfix | 1 - changelog.d/5864.feature | 1 - changelog.d/5868.feature | 1 - changelog.d/5875.misc | 1 - changelog.d/5876.feature | 1 - changelog.d/5877.removal | 1 - changelog.d/5878.feature | 1 - changelog.d/5884.feature | 1 - changelog.d/5885.bugfix | 1 - changelog.d/5886.misc | 1 - changelog.d/5892.misc | 1 - changelog.d/5893.misc | 1 - changelog.d/5894.misc | 1 - changelog.d/5895.feature | 1 - changelog.d/5896.misc | 1 - changelog.d/5897.feature | 1 - changelog.d/5900.feature | 1 - changelog.d/5902.feature | 1 - changelog.d/5904.feature | 1 - changelog.d/5906.feature | 1 - changelog.d/5909.misc | 1 - changelog.d/5911.misc | 1 - changelog.d/5914.feature | 1 - changelog.d/5915.bugfix | 1 - changelog.d/5920.bugfix | 1 - changelog.d/5922.misc | 1 - changelog.d/5926.misc | 1 - changelog.d/5931.misc | 1 - changelog.d/5934.feature | 1 - changelog.d/5938.misc | 1 - changelog.d/5940.feature | 1 - changelog.d/5943.misc | 1 - changelog.d/5953.misc | 1 - changelog.d/5962.misc | 1 - changelog.d/5963.misc | 1 - changelog.d/5964.feature | 1 - changelog.d/5966.bugfix | 1 - changelog.d/5967.bugfix | 1 - changelog.d/5969.feature | 1 - changelog.d/5970.docker | 1 - changelog.d/5971.bugfix | 1 - changelog.d/5972.misc | 1 - changelog.d/5974.feature | 1 - changelog.d/5975.misc | 1 - changelog.d/5979.feature | 1 - changelog.d/5980.feature | 1 - changelog.d/5981.feature | 1 - changelog.d/5982.bugfix | 1 - changelog.d/5983.feature | 1 - changelog.d/5984.bugfix | 1 - changelog.d/5985.feature | 1 - changelog.d/5986.feature | 1 - changelog.d/5988.bugfix | 1 - changelog.d/5989.misc | 1 - changelog.d/5991.bugfix | 1 - changelog.d/5992.feature | 1 - changelog.d/5993.feature | 1 - changelog.d/5994.feature | 1 - changelog.d/5995.bugfix | 1 - 
changelog.d/5996.bugfix | 1 - changelog.d/5998.bugfix | 1 - changelog.d/6000.feature | 1 - changelog.d/6003.misc | 1 - changelog.d/6004.bugfix | 1 - changelog.d/6005.feature | 1 - changelog.d/6009.misc | 1 - changelog.d/6010.misc | 1 - changelog.d/6011.feature | 1 - changelog.d/6012.feature | 1 - changelog.d/6013.misc | 1 - changelog.d/6015.feature | 1 - changelog.d/6016.misc | 1 - changelog.d/6017.misc | 1 - changelog.d/6020.bugfix | 1 - changelog.d/6023.misc | 1 - changelog.d/6024.bugfix | 1 - changelog.d/6025.bugfix | 1 - changelog.d/6026.feature | 1 - changelog.d/6027.doc | 1 - changelog.d/6028.feature | 1 - changelog.d/6029.bugfix | 1 - changelog.d/6032.misc | 1 - changelog.d/6037.feature | 1 - changelog.d/6042.feature | 1 - changelog.d/6043.feature | 1 - changelog.d/6044.feature | 1 - changelog.d/6047.misc | 2 - changelog.d/6049.doc | 1 - changelog.d/6050.doc | 1 - changelog.d/6053.bugfix | 1 - changelog.d/6056.bugfix | 1 - changelog.d/6058.docker | 1 - changelog.d/6059.bugfix | 1 - changelog.d/6062.bugfix | 1 - changelog.d/6063.bugfix | 1 - changelog.d/6064.misc | 1 - changelog.d/6067.feature | 1 - changelog.d/6069.bugfix | 1 - changelog.d/6072.misc | 1 - changelog.d/6073.feature | 1 - changelog.d/6074.feature | 1 - changelog.d/6075.misc | 1 - changelog.d/6078.feature | 1 - changelog.d/6079.feature | 1 - changelog.d/6082.feature | 1 - changelog.d/6089.misc | 1 - changelog.d/6090.feature | 1 - changelog.d/6092.bugfix | 1 - changelog.d/6097.bugfix | 1 - changelog.d/6098.feature | 1 - changelog.d/6099.misc | 1 - changelog.d/6104.bugfix | 1 - changelog.d/6105.misc | 1 - changelog.d/6106.misc | 1 - changelog.d/6107.bugfix | 1 - 132 files changed, 156 insertions(+), 132 deletions(-) delete mode 100644 changelog.d/5633.bugfix delete mode 100644 changelog.d/5680.misc delete mode 100644 changelog.d/5771.feature delete mode 100644 changelog.d/5776.misc delete mode 100644 changelog.d/5835.feature delete mode 100644 changelog.d/5844.misc delete mode 100644 changelog.d/5845.feature delete mode 100644 changelog.d/5849.doc delete mode 100644 changelog.d/5850.feature delete mode 100644 changelog.d/5852.feature delete mode 100644 changelog.d/5853.feature delete mode 100644 changelog.d/5855.misc delete mode 100644 changelog.d/5856.feature delete mode 100644 changelog.d/5857.bugfix delete mode 100644 changelog.d/5859.feature delete mode 100644 changelog.d/5860.misc delete mode 100644 changelog.d/5863.bugfix delete mode 100644 changelog.d/5864.feature delete mode 100644 changelog.d/5868.feature delete mode 100644 changelog.d/5875.misc delete mode 100644 changelog.d/5876.feature delete mode 100644 changelog.d/5877.removal delete mode 100644 changelog.d/5878.feature delete mode 100644 changelog.d/5884.feature delete mode 100644 changelog.d/5885.bugfix delete mode 100644 changelog.d/5886.misc delete mode 100644 changelog.d/5892.misc delete mode 100644 changelog.d/5893.misc delete mode 100644 changelog.d/5894.misc delete mode 100644 changelog.d/5895.feature delete mode 100644 changelog.d/5896.misc delete mode 100644 changelog.d/5897.feature delete mode 100644 changelog.d/5900.feature delete mode 100644 changelog.d/5902.feature delete mode 100644 changelog.d/5904.feature delete mode 100644 changelog.d/5906.feature delete mode 100644 changelog.d/5909.misc delete mode 100644 changelog.d/5911.misc delete mode 100644 changelog.d/5914.feature delete mode 100644 changelog.d/5915.bugfix delete mode 100644 changelog.d/5920.bugfix delete mode 100644 changelog.d/5922.misc delete mode 100644 changelog.d/5926.misc 
delete mode 100644 changelog.d/5931.misc delete mode 100644 changelog.d/5934.feature delete mode 100644 changelog.d/5938.misc delete mode 100644 changelog.d/5940.feature delete mode 100644 changelog.d/5943.misc delete mode 100644 changelog.d/5953.misc delete mode 100644 changelog.d/5962.misc delete mode 100644 changelog.d/5963.misc delete mode 100644 changelog.d/5964.feature delete mode 100644 changelog.d/5966.bugfix delete mode 100644 changelog.d/5967.bugfix delete mode 100644 changelog.d/5969.feature delete mode 100644 changelog.d/5970.docker delete mode 100644 changelog.d/5971.bugfix delete mode 100644 changelog.d/5972.misc delete mode 100644 changelog.d/5974.feature delete mode 100644 changelog.d/5975.misc delete mode 100644 changelog.d/5979.feature delete mode 100644 changelog.d/5980.feature delete mode 100644 changelog.d/5981.feature delete mode 100644 changelog.d/5982.bugfix delete mode 100644 changelog.d/5983.feature delete mode 100644 changelog.d/5984.bugfix delete mode 100644 changelog.d/5985.feature delete mode 100644 changelog.d/5986.feature delete mode 100644 changelog.d/5988.bugfix delete mode 100644 changelog.d/5989.misc delete mode 100644 changelog.d/5991.bugfix delete mode 100644 changelog.d/5992.feature delete mode 100644 changelog.d/5993.feature delete mode 100644 changelog.d/5994.feature delete mode 100644 changelog.d/5995.bugfix delete mode 100644 changelog.d/5996.bugfix delete mode 100644 changelog.d/5998.bugfix delete mode 100644 changelog.d/6000.feature delete mode 100644 changelog.d/6003.misc delete mode 100644 changelog.d/6004.bugfix delete mode 100644 changelog.d/6005.feature delete mode 100644 changelog.d/6009.misc delete mode 100644 changelog.d/6010.misc delete mode 100644 changelog.d/6011.feature delete mode 100644 changelog.d/6012.feature delete mode 100644 changelog.d/6013.misc delete mode 100644 changelog.d/6015.feature delete mode 100644 changelog.d/6016.misc delete mode 100644 changelog.d/6017.misc delete mode 100644 changelog.d/6020.bugfix delete mode 100644 changelog.d/6023.misc delete mode 100644 changelog.d/6024.bugfix delete mode 100644 changelog.d/6025.bugfix delete mode 100644 changelog.d/6026.feature delete mode 100644 changelog.d/6027.doc delete mode 100644 changelog.d/6028.feature delete mode 100644 changelog.d/6029.bugfix delete mode 100644 changelog.d/6032.misc delete mode 100644 changelog.d/6037.feature delete mode 100644 changelog.d/6042.feature delete mode 100644 changelog.d/6043.feature delete mode 100644 changelog.d/6044.feature delete mode 100644 changelog.d/6047.misc delete mode 100644 changelog.d/6049.doc delete mode 100644 changelog.d/6050.doc delete mode 100644 changelog.d/6053.bugfix delete mode 100644 changelog.d/6056.bugfix delete mode 100644 changelog.d/6058.docker delete mode 100644 changelog.d/6059.bugfix delete mode 100644 changelog.d/6062.bugfix delete mode 100644 changelog.d/6063.bugfix delete mode 100644 changelog.d/6064.misc delete mode 100644 changelog.d/6067.feature delete mode 100644 changelog.d/6069.bugfix delete mode 100644 changelog.d/6072.misc delete mode 100644 changelog.d/6073.feature delete mode 100644 changelog.d/6074.feature delete mode 100644 changelog.d/6075.misc delete mode 100644 changelog.d/6078.feature delete mode 100644 changelog.d/6079.feature delete mode 100644 changelog.d/6082.feature delete mode 100644 changelog.d/6089.misc delete mode 100644 changelog.d/6090.feature delete mode 100644 changelog.d/6092.bugfix delete mode 100644 changelog.d/6097.bugfix delete mode 100644 changelog.d/6098.feature 
delete mode 100644 changelog.d/6099.misc
delete mode 100644 changelog.d/6104.bugfix
delete mode 100644 changelog.d/6105.misc
delete mode 100644 changelog.d/6106.misc
delete mode 100644 changelog.d/6107.bugfix
(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index f25c7d0c1a..9f610e4c12 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,159 @@
+Synapse 1.4.0rc1 (2019-09-26)
+=============================
+
+Note that this release includes significant changes around 3pid
+verification. Administrators are reminded to review the [upgrade notes](UPGRADE.rst#upgrading-to-v140).
+
+Features
+--------
+
+- Changes to 3pid verification:
+  - Add the ability to send registration emails from the homeserver rather than delegating to an identity server. ([\#5835](https://github.com/matrix-org/synapse/issues/5835), [\#5940](https://github.com/matrix-org/synapse/issues/5940), [\#5993](https://github.com/matrix-org/synapse/issues/5993), [\#5994](https://github.com/matrix-org/synapse/issues/5994), [\#5868](https://github.com/matrix-org/synapse/issues/5868))
+  - Replace `trust_identity_server_for_password_resets` config option with `account_threepid_delegates`. ([\#5876](https://github.com/matrix-org/synapse/issues/5876), [\#5969](https://github.com/matrix-org/synapse/issues/5969), [\#6028](https://github.com/matrix-org/synapse/issues/6028))
+  - Switch to using the v2 Identity Service `/lookup` API where available, with fallback to v1. (Implements [MSC2134](https://github.com/matrix-org/matrix-doc/pull/2134) plus id_access_token authentication for v2 Identity Service APIs from [MSC2140](https://github.com/matrix-org/matrix-doc/pull/2140)). ([\#5897](https://github.com/matrix-org/synapse/issues/5897))
+  - Remove `bind_email` and `bind_msisdn` parameters from `/register` à la [MSC2140](https://github.com/matrix-org/matrix-doc/pull/2140). ([\#5964](https://github.com/matrix-org/synapse/issues/5964))
+  - Add `m.id_access_token` to `unstable_features` in `/versions` as per [MSC2264](https://github.com/matrix-org/matrix-doc/pull/2264). ([\#5974](https://github.com/matrix-org/synapse/issues/5974))
+  - Use the v2 Identity Service API for 3PID invites. ([\#5979](https://github.com/matrix-org/synapse/issues/5979))
+  - Add `POST /_matrix/client/unstable/account/3pid/unbind` endpoint from [MSC2140](https://github.com/matrix-org/matrix-doc/pull/2140) for unbinding a 3PID from an identity server without removing it from the homeserver user account. ([\#5980](https://github.com/matrix-org/synapse/issues/5980), [\#6062](https://github.com/matrix-org/synapse/issues/6062))
+  - Use `account_threepid_delegate.email` and `account_threepid_delegate.msisdn` for validating threepid sessions. ([\#6011](https://github.com/matrix-org/synapse/issues/6011))
+  - Allow homeserver to handle or delegate email validation when adding an email to a user's account. ([\#6042](https://github.com/matrix-org/synapse/issues/6042))
+  - Implement new Client Server API endpoints `/account/3pid/add` and `/account/3pid/bind` as per [MSC2290](https://github.com/matrix-org/matrix-doc/pull/2290). ([\#6043](https://github.com/matrix-org/synapse/issues/6043))
+  - Add an unstable feature flag for separate add/bind 3pid APIs. ([\#6044](https://github.com/matrix-org/synapse/issues/6044))
+  - Remove `bind` parameter from Client Server POST `/account` endpoint as per [MSC2290](https://github.com/matrix-org/matrix-doc/pull/2290/).
([\#6067](https://github.com/matrix-org/synapse/issues/6067))
+  - Add `POST /add_threepid/msisdn/submit_token` endpoint for proxying submitToken on an account_threepid_handler. ([\#6078](https://github.com/matrix-org/synapse/issues/6078))
+  - Add `submit_url` response parameter to `*/msisdn/requestToken` endpoints. ([\#6079](https://github.com/matrix-org/synapse/issues/6079))
+  - Add `m.require_identity_server` flag to /version's unstable_features. ([\#5972](https://github.com/matrix-org/synapse/issues/5972))
+- Enhancements to OpenTracing support:
+  - Make OpenTracing work in worker mode. ([\#5771](https://github.com/matrix-org/synapse/issues/5771))
+  - Pass OpenTracing contexts between servers when transmitting EDUs. ([\#5852](https://github.com/matrix-org/synapse/issues/5852))
+  - OpenTracing for device list updates. ([\#5853](https://github.com/matrix-org/synapse/issues/5853))
+  - Add a tag recording a request's authenticated entity and corresponding servlet in OpenTracing. ([\#5856](https://github.com/matrix-org/synapse/issues/5856))
+  - Add minimum OpenTracing for client servlets. ([\#5983](https://github.com/matrix-org/synapse/issues/5983))
+  - Check at setup that OpenTracing is installed if it's enabled in the config. ([\#5985](https://github.com/matrix-org/synapse/issues/5985))
+  - Trace replication send times. ([\#5986](https://github.com/matrix-org/synapse/issues/5986))
+  - Include missing OpenTracing contexts in outbound replication requests. ([\#5982](https://github.com/matrix-org/synapse/issues/5982))
+  - Fix sending of EDUs when OpenTracing is enabled with an empty whitelist. ([\#5984](https://github.com/matrix-org/synapse/issues/5984))
+  - Fix invalid references to None while OpenTracing is active if the log context slips. ([\#5988](https://github.com/matrix-org/synapse/issues/5988), [\#5991](https://github.com/matrix-org/synapse/issues/5991))
+  - OpenTracing for room and e2e keys. ([\#5855](https://github.com/matrix-org/synapse/issues/5855))
+  - Add OpenTracing span over HTTP push processing. ([\#6003](https://github.com/matrix-org/synapse/issues/6003))
+- Add an admin API to purge old rooms from the database. ([\#5845](https://github.com/matrix-org/synapse/issues/5845))
+- Retry well-known lookups if we have recently seen a valid well-known record for the server. ([\#5850](https://github.com/matrix-org/synapse/issues/5850))
+- Add support for filtered room-directory search requests over federation ([MSC2197](https://github.com/matrix-org/matrix-doc/pull/2197)), in order to allow upcoming room directory query performance improvements. ([\#5859](https://github.com/matrix-org/synapse/issues/5859))
+- Correctly retry all hosts returned from SRV when we fail to connect. ([\#5864](https://github.com/matrix-org/synapse/issues/5864))
+- Add admin API endpoint for setting whether or not a user is a server administrator. ([\#5878](https://github.com/matrix-org/synapse/issues/5878))
+- Enable cleaning up extremities with dummy events by default to prevent undue build-up of forward extremities. ([\#5884](https://github.com/matrix-org/synapse/issues/5884))
+- Add config option to sign remote key query responses with a separate key. ([\#5895](https://github.com/matrix-org/synapse/issues/5895))
+- Add support for config templating. ([\#5900](https://github.com/matrix-org/synapse/issues/5900))
+- Users with the type of "support" or "bot" are no longer required to consent. ([\#5902](https://github.com/matrix-org/synapse/issues/5902))
+- Let synctl accept a directory of config files.
([\#5904](https://github.com/matrix-org/synapse/issues/5904))
+- Increase max display name size to 256. ([\#5906](https://github.com/matrix-org/synapse/issues/5906))
+- Add admin API endpoint for getting whether or not a user is a server administrator. ([\#5914](https://github.com/matrix-org/synapse/issues/5914))
+- Redact events in the database that have been redacted for a month. ([\#5934](https://github.com/matrix-org/synapse/issues/5934))
+- New Prometheus metrics:
+  - `synapse_federation_known_servers`: represents the total number of servers your server knows about (i.e. is in rooms with), including itself. Enable by setting `metrics_flags.known_servers` to True in the configuration. ([\#5981](https://github.com/matrix-org/synapse/issues/5981))
+  - `synapse_build_info`: exposes the Python version, OS version, and Synapse version of the running server. ([\#6005](https://github.com/matrix-org/synapse/issues/6005))
+- Give appropriate exit codes when synctl fails. ([\#5992](https://github.com/matrix-org/synapse/issues/5992))
+- Apply the federation blacklist to requests to identity servers. ([\#6000](https://github.com/matrix-org/synapse/issues/6000))
+- Add `report_stats_endpoint` option to configure where stats are reported to, if enabled. Contributed by @Sorunome. ([\#6012](https://github.com/matrix-org/synapse/issues/6012))
+- Add config option to increase ratelimits for room admins redacting messages. ([\#6015](https://github.com/matrix-org/synapse/issues/6015))
+- Stop sending federation transactions to servers which have been down for a long time. ([\#6026](https://github.com/matrix-org/synapse/issues/6026))
+- Make the process for mapping SAML2 users to Matrix IDs more flexible. ([\#6037](https://github.com/matrix-org/synapse/issues/6037))
+- Return a clearer error message when a timeout occurs when attempting to contact an identity server. ([\#6073](https://github.com/matrix-org/synapse/issues/6073))
+- Prevent password reset's submit_token endpoint from accepting trailing slashes. ([\#6074](https://github.com/matrix-org/synapse/issues/6074))
+- Return 403 on `/register/available` if registration has been disabled. ([\#6082](https://github.com/matrix-org/synapse/issues/6082))
+- Explicitly log when a homeserver does not have the `trusted_key_servers` config field configured. ([\#6090](https://github.com/matrix-org/synapse/issues/6090))
+- Add support for pruning old rows in `user_ips` table. ([\#6098](https://github.com/matrix-org/synapse/issues/6098))
+
+Bugfixes
+--------
+
+- Don't create broken room when `power_level_content_override.users` does not contain `creator_id`. ([\#5633](https://github.com/matrix-org/synapse/issues/5633))
+- Fix database index so that different backup versions can have the same sessions. ([\#5857](https://github.com/matrix-org/synapse/issues/5857))
+- Fix Synapse looking for config options `password_reset_failure_template` and `password_reset_success_template`, when they are actually `password_reset_template_failure_html`, `password_reset_template_success_html`. ([\#5863](https://github.com/matrix-org/synapse/issues/5863))
+- Fix stack overflow when recovering an appservice which had an outage. ([\#5885](https://github.com/matrix-org/synapse/issues/5885))
+- Fix error message which referred to `public_base_url` instead of `public_baseurl`. Thanks to @aaronraimist for the fix!
([\#5909](https://github.com/matrix-org/synapse/issues/5909))
+- Fix 404 for thumbnail download when `dynamic_thumbnails` is `false` and the thumbnail was dynamically generated. Fix reported by rkfg. ([\#5915](https://github.com/matrix-org/synapse/issues/5915))
+- Fix a cache-invalidation bug for worker-based deployments. ([\#5920](https://github.com/matrix-org/synapse/issues/5920))
+- Fix admin API for listing media in a room not being available with an external media repo. ([\#5966](https://github.com/matrix-org/synapse/issues/5966))
+- Fix list media admin API always returning an error. ([\#5967](https://github.com/matrix-org/synapse/issues/5967))
+- Fix room and user stats tracking. ([\#5971](https://github.com/matrix-org/synapse/issues/5971), [\#5998](https://github.com/matrix-org/synapse/issues/5998), [\#6029](https://github.com/matrix-org/synapse/issues/6029))
+- Return an `M_MISSING_PARAM` if `sid` is not provided to `/account/3pid`. ([\#5995](https://github.com/matrix-org/synapse/issues/5995))
+- `federation_certificate_verification_whitelist` will no longer cause `TypeErrors` to be raised (a regression in 1.3). Additionally, it now supports internationalised domain names in their non-canonical representation. ([\#5996](https://github.com/matrix-org/synapse/issues/5996))
+- Only count real users when checking for auto-creation of auto-join room. ([\#6004](https://github.com/matrix-org/synapse/issues/6004))
+- Ensure support users can be registered even if MAU limit is reached. ([\#6020](https://github.com/matrix-org/synapse/issues/6020))
+- Fix bug where login error was shown incorrectly on SSO fallback login. ([\#6024](https://github.com/matrix-org/synapse/issues/6024))
+- Fix bug in calculating the federation retry backoff period. ([\#6025](https://github.com/matrix-org/synapse/issues/6025))
+- Prevent exceptions being logged when extremity-cleanup events fail due to lack of user consent to the terms of service. ([\#6053](https://github.com/matrix-org/synapse/issues/6053))
+- Remove POST method from password-reset `submit_token` endpoint until we implement `submit_url` functionality. ([\#6056](https://github.com/matrix-org/synapse/issues/6056))
+- Fix logcontext spam on non-Linux platforms. ([\#6059](https://github.com/matrix-org/synapse/issues/6059))
+- Ensure query parameters in email validation links are URL-encoded. ([\#6063](https://github.com/matrix-org/synapse/issues/6063))
+- Fix a bug which caused SAML attribute maps to be overridden by defaults. ([\#6069](https://github.com/matrix-org/synapse/issues/6069))
+- Fix the logged number of updated items for the users_set_deactivated_flag background update. ([\#6092](https://github.com/matrix-org/synapse/issues/6092))
+- Add sid to `next_link` for email validation. ([\#6097](https://github.com/matrix-org/synapse/issues/6097))
+- Threepid validity checks on msisdns should not be dependent on `threepid_behaviour_email`. ([\#6104](https://github.com/matrix-org/synapse/issues/6104))
+- Ensure that servers which are not configured to support email address verification do not offer it in the registration flows. ([\#6107](https://github.com/matrix-org/synapse/issues/6107))
+
+
+Updates to the Docker image
+---------------------------
+
+- Avoid changing UID/GID if they are already correct. ([\#5970](https://github.com/matrix-org/synapse/issues/5970))
+- Provide SYNAPSE_WORKER envvar to specify python module.
([\#6058](https://github.com/matrix-org/synapse/issues/6058))
+
+
+Improved Documentation
+----------------------
+
+- Convert documentation to Markdown (from RST). ([\#5849](https://github.com/matrix-org/synapse/issues/5849))
+- Update `INSTALL.md` to say that Python 2 is no longer supported. ([\#5953](https://github.com/matrix-org/synapse/issues/5953))
+- Add developer documentation for using SAML2. ([\#6032](https://github.com/matrix-org/synapse/issues/6032))
+- Add some notes on rolling back to v1.3.1. ([\#6049](https://github.com/matrix-org/synapse/issues/6049))
+- Update the upgrade notes. ([\#6050](https://github.com/matrix-org/synapse/issues/6050))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove shared-secret registration from `/_matrix/client/r0/register` endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5877](https://github.com/matrix-org/synapse/issues/5877))
+- Deprecate the `trusted_third_party_id_servers` option. ([\#5875](https://github.com/matrix-org/synapse/issues/5875))
+
+
+Internal Changes
+----------------
+
+- Lay the groundwork for structured logging output. ([\#5680](https://github.com/matrix-org/synapse/issues/5680))
+- Retry well-known lookup before the cache expires, giving a grace period where the remote well-known can be down but we still use the old result. ([\#5844](https://github.com/matrix-org/synapse/issues/5844))
+- Remove log line for debugging issue #5407. ([\#5860](https://github.com/matrix-org/synapse/issues/5860))
+- Refactor the Appservice scheduler code. ([\#5886](https://github.com/matrix-org/synapse/issues/5886))
+- Compatibility with v2 Identity Service APIs other than /lookup. ([\#5892](https://github.com/matrix-org/synapse/issues/5892), [\#6013](https://github.com/matrix-org/synapse/issues/6013))
+- Stop populating some unused tables. ([\#5893](https://github.com/matrix-org/synapse/issues/5893), [\#6047](https://github.com/matrix-org/synapse/issues/6047))
+- Add missing index on users_in_public_rooms to improve the performance of directory queries. ([\#5894](https://github.com/matrix-org/synapse/issues/5894))
+- Improve the logging when we encounter an error fetching signing keys. ([\#5896](https://github.com/matrix-org/synapse/issues/5896))
+- Add support for database engine-specific schema deltas, based on file extension. ([\#5911](https://github.com/matrix-org/synapse/issues/5911))
+- Update Buildkite pipeline to use plugins instead of buildkite-agent commands. ([\#5922](https://github.com/matrix-org/synapse/issues/5922))
+- Add link in sample config to the logging config schema. ([\#5926](https://github.com/matrix-org/synapse/issues/5926))
+- Remove unnecessary parentheses in return statements. ([\#5931](https://github.com/matrix-org/synapse/issues/5931))
+- Remove unused `jenkins/prepare_sytest.sh` file. ([\#5938](https://github.com/matrix-org/synapse/issues/5938))
+- Move Buildkite pipeline config to the pipelines repo. ([\#5943](https://github.com/matrix-org/synapse/issues/5943))
+- Remove unnecessary return statements in the codebase which were the result of a regex run. ([\#5962](https://github.com/matrix-org/synapse/issues/5962))
+- Remove left-over methods from v1 registration API. ([\#5963](https://github.com/matrix-org/synapse/issues/5963))
+- Clean up event auth type initialisation. ([\#5975](https://github.com/matrix-org/synapse/issues/5975))
+- Clean up dependency checking at setup.
([\#5989](https://github.com/matrix-org/synapse/issues/5989))
+- Update OpenTracing docs to use the unified `trace` method. ([\#5776](https://github.com/matrix-org/synapse/issues/5776))
+- Small refactor of function arguments and docstrings in `RoomMemberHandler`. ([\#6009](https://github.com/matrix-org/synapse/issues/6009))
+- Remove unused `origin` argument on `FederationHandler.add_display_name_to_third_party_invite`. ([\#6010](https://github.com/matrix-org/synapse/issues/6010))
+- Add a `failure_ts` column to the `destinations` database table. ([\#6016](https://github.com/matrix-org/synapse/issues/6016), [\#6072](https://github.com/matrix-org/synapse/issues/6072))
+- Clean up some code in the retry logic. ([\#6017](https://github.com/matrix-org/synapse/issues/6017))
+- Fix the structured logging tests stomping on the global log configuration for subsequent tests. ([\#6023](https://github.com/matrix-org/synapse/issues/6023))
+- Clean up the sample config for SAML authentication. ([\#6064](https://github.com/matrix-org/synapse/issues/6064))
+- Change mailer logging to reflect Synapse doesn't just do chat notifications by email now. ([\#6075](https://github.com/matrix-org/synapse/issues/6075))
+- Move last-seen info into devices table. ([\#6089](https://github.com/matrix-org/synapse/issues/6089))
+- Remove unused parameter to `get_user_id_by_threepid`. ([\#6099](https://github.com/matrix-org/synapse/issues/6099))
+- Refactor the user-interactive auth handling. ([\#6105](https://github.com/matrix-org/synapse/issues/6105))
+- Refactor code for calculating registration flows. ([\#6106](https://github.com/matrix-org/synapse/issues/6106))
+
+
 Synapse 1.3.1 (2019-08-17)
 ==========================
 
diff --git a/changelog.d/5633.bugfix b/changelog.d/5633.bugfix
deleted file mode 100644
index b2ff803b9d..0000000000
--- a/changelog.d/5633.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Don't create broken room when power_level_content_override.users does not contain creator_id.
\ No newline at end of file
diff --git a/changelog.d/5680.misc b/changelog.d/5680.misc
deleted file mode 100644
index 46a403a188..0000000000
--- a/changelog.d/5680.misc
+++ /dev/null
@@ -1 +0,0 @@
-Lay the groundwork for structured logging output.
diff --git a/changelog.d/5771.feature b/changelog.d/5771.feature
deleted file mode 100644
index f2f4de1fdd..0000000000
--- a/changelog.d/5771.feature
+++ /dev/null
@@ -1 +0,0 @@
-Make Opentracing work in worker mode.
diff --git a/changelog.d/5776.misc b/changelog.d/5776.misc
deleted file mode 100644
index 1fb1b9c152..0000000000
--- a/changelog.d/5776.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update opentracing docs to use the unified `trace` method.
diff --git a/changelog.d/5835.feature b/changelog.d/5835.feature
deleted file mode 100644
index 3e8bf5068d..0000000000
--- a/changelog.d/5835.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add the ability to send registration emails from the homeserver rather than delegating to an identity server.
diff --git a/changelog.d/5844.misc b/changelog.d/5844.misc
deleted file mode 100644
index a0826af0d2..0000000000
--- a/changelog.d/5844.misc
+++ /dev/null
@@ -1 +0,0 @@
-Retry well-known lookup before the cache expires, giving a grace period where the remote well-known can be down but we still use the old result.
diff --git a/changelog.d/5845.feature b/changelog.d/5845.feature
deleted file mode 100644
index 7b0dc9a95e..0000000000
--- a/changelog.d/5845.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add an admin API to purge old rooms from the database.
diff --git a/changelog.d/5849.doc b/changelog.d/5849.doc deleted file mode 100644 index fbe62e8633..0000000000 --- a/changelog.d/5849.doc +++ /dev/null @@ -1 +0,0 @@ -Convert documentation to markdown (from rst) diff --git a/changelog.d/5850.feature b/changelog.d/5850.feature deleted file mode 100644 index b565929a54..0000000000 --- a/changelog.d/5850.feature +++ /dev/null @@ -1 +0,0 @@ -Add retry to well-known lookups if we have recently seen a valid well-known record for the server. diff --git a/changelog.d/5852.feature b/changelog.d/5852.feature deleted file mode 100644 index 4a0fc6c542..0000000000 --- a/changelog.d/5852.feature +++ /dev/null @@ -1 +0,0 @@ -Pass opentracing contexts between servers when transmitting EDUs. diff --git a/changelog.d/5853.feature b/changelog.d/5853.feature deleted file mode 100644 index 80a04ae2ee..0000000000 --- a/changelog.d/5853.feature +++ /dev/null @@ -1 +0,0 @@ -Opentracing for device list updates. diff --git a/changelog.d/5855.misc b/changelog.d/5855.misc deleted file mode 100644 index 32db7fbe37..0000000000 --- a/changelog.d/5855.misc +++ /dev/null @@ -1 +0,0 @@ -Opentracing for room and e2e keys. diff --git a/changelog.d/5856.feature b/changelog.d/5856.feature deleted file mode 100644 index f4310b9244..0000000000 --- a/changelog.d/5856.feature +++ /dev/null @@ -1 +0,0 @@ -Add a tag recording a request's authenticated entity and corresponding servlet in opentracing. diff --git a/changelog.d/5857.bugfix b/changelog.d/5857.bugfix deleted file mode 100644 index 008799ccbb..0000000000 --- a/changelog.d/5857.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix database index so that different backup versions can have the same sessions. diff --git a/changelog.d/5859.feature b/changelog.d/5859.feature deleted file mode 100644 index 52df7fc81b..0000000000 --- a/changelog.d/5859.feature +++ /dev/null @@ -1 +0,0 @@ -Add unstable support for MSC2197 (filtered search requests over federation), in order to allow upcoming room directory query performance improvements. diff --git a/changelog.d/5860.misc b/changelog.d/5860.misc deleted file mode 100644 index f9960b17b4..0000000000 --- a/changelog.d/5860.misc +++ /dev/null @@ -1 +0,0 @@ -Remove log line for debugging issue #5407. diff --git a/changelog.d/5863.bugfix b/changelog.d/5863.bugfix deleted file mode 100644 index bceae5be67..0000000000 --- a/changelog.d/5863.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix Synapse looking for config options `password_reset_failure_template` and `password_reset_success_template`, when they are actually `password_reset_template_failure_html`, `password_reset_template_success_html`. diff --git a/changelog.d/5864.feature b/changelog.d/5864.feature deleted file mode 100644 index 40ac11db64..0000000000 --- a/changelog.d/5864.feature +++ /dev/null @@ -1 +0,0 @@ -Correctly retry all hosts returned from SRV when we fail to connect. diff --git a/changelog.d/5868.feature b/changelog.d/5868.feature deleted file mode 100644 index 69605c1ae1..0000000000 --- a/changelog.d/5868.feature +++ /dev/null @@ -1 +0,0 @@ -Add `m.require_identity_server` key to `/versions`'s `unstable_features` section. \ No newline at end of file diff --git a/changelog.d/5875.misc b/changelog.d/5875.misc deleted file mode 100644 index e188c28d2f..0000000000 --- a/changelog.d/5875.misc +++ /dev/null @@ -1 +0,0 @@ -Deprecate the `trusted_third_party_id_servers` option. 
\ No newline at end of file diff --git a/changelog.d/5876.feature b/changelog.d/5876.feature deleted file mode 100644 index df88193fbd..0000000000 --- a/changelog.d/5876.feature +++ /dev/null @@ -1 +0,0 @@ -Replace `trust_identity_server_for_password_resets` config option with `account_threepid_delegates`. \ No newline at end of file diff --git a/changelog.d/5877.removal b/changelog.d/5877.removal deleted file mode 100644 index b6d84fb401..0000000000 --- a/changelog.d/5877.removal +++ /dev/null @@ -1 +0,0 @@ -Remove shared secret registration from client/r0/register endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. diff --git a/changelog.d/5878.feature b/changelog.d/5878.feature deleted file mode 100644 index d9d6df880e..0000000000 --- a/changelog.d/5878.feature +++ /dev/null @@ -1 +0,0 @@ -Add admin API endpoint for setting whether or not a user is a server administrator. diff --git a/changelog.d/5884.feature b/changelog.d/5884.feature deleted file mode 100644 index bfd0489392..0000000000 --- a/changelog.d/5884.feature +++ /dev/null @@ -1 +0,0 @@ -Enable cleaning up extremities with dummy events by default to prevent undue build up of forward extremities. diff --git a/changelog.d/5885.bugfix b/changelog.d/5885.bugfix deleted file mode 100644 index 411d925fd4..0000000000 --- a/changelog.d/5885.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix stack overflow when recovering an appservice which had an outage. diff --git a/changelog.d/5886.misc b/changelog.d/5886.misc deleted file mode 100644 index 22adba3d85..0000000000 --- a/changelog.d/5886.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor the Appservice scheduler code. diff --git a/changelog.d/5892.misc b/changelog.d/5892.misc deleted file mode 100644 index 939fe8c655..0000000000 --- a/changelog.d/5892.misc +++ /dev/null @@ -1 +0,0 @@ -Compatibility with v2 Identity Service APIs other than /lookup. \ No newline at end of file diff --git a/changelog.d/5893.misc b/changelog.d/5893.misc deleted file mode 100644 index 5ef171cb3e..0000000000 --- a/changelog.d/5893.misc +++ /dev/null @@ -1 +0,0 @@ -Stop populating some unused tables. diff --git a/changelog.d/5894.misc b/changelog.d/5894.misc deleted file mode 100644 index fca4485ff7..0000000000 --- a/changelog.d/5894.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing index on users_in_public_rooms to improve the performance of directory queries. diff --git a/changelog.d/5895.feature b/changelog.d/5895.feature deleted file mode 100644 index c394a3772c..0000000000 --- a/changelog.d/5895.feature +++ /dev/null @@ -1 +0,0 @@ -Add config option to sign remote key query responses with a separate key. diff --git a/changelog.d/5896.misc b/changelog.d/5896.misc deleted file mode 100644 index ed47c747bd..0000000000 --- a/changelog.d/5896.misc +++ /dev/null @@ -1 +0,0 @@ -Improve the logging when we have an error when fetching signing keys. diff --git a/changelog.d/5897.feature b/changelog.d/5897.feature deleted file mode 100644 index 1557e559e8..0000000000 --- a/changelog.d/5897.feature +++ /dev/null @@ -1 +0,0 @@ -Switch to using the v2 Identity Service `/lookup` API where available, with fallback to v1. (Implements [MSC2134](https://github.com/matrix-org/matrix-doc/pull/2134) plus id_access_token authentication for v2 Identity Service APIs from [MSC2140](https://github.com/matrix-org/matrix-doc/pull/2140)). 
diff --git a/changelog.d/5900.feature b/changelog.d/5900.feature deleted file mode 100644 index b62d88a76b..0000000000 --- a/changelog.d/5900.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for config templating. diff --git a/changelog.d/5902.feature b/changelog.d/5902.feature deleted file mode 100644 index 0660f65cfa..0000000000 --- a/changelog.d/5902.feature +++ /dev/null @@ -1 +0,0 @@ -Users with the type of "support" or "bot" are no longer required to consent. \ No newline at end of file diff --git a/changelog.d/5904.feature b/changelog.d/5904.feature deleted file mode 100644 index 43b5304f39..0000000000 --- a/changelog.d/5904.feature +++ /dev/null @@ -1 +0,0 @@ -Let synctl accept a directory of config files. diff --git a/changelog.d/5906.feature b/changelog.d/5906.feature deleted file mode 100644 index 7c789510a6..0000000000 --- a/changelog.d/5906.feature +++ /dev/null @@ -1 +0,0 @@ -Increase max display name size to 256. diff --git a/changelog.d/5909.misc b/changelog.d/5909.misc deleted file mode 100644 index 03d0c4367b..0000000000 --- a/changelog.d/5909.misc +++ /dev/null @@ -1 +0,0 @@ -Fix error message which referred to public_base_url instead of public_baseurl. Thanks to @aaronraimist for the fix! diff --git a/changelog.d/5911.misc b/changelog.d/5911.misc deleted file mode 100644 index fe5a8fd59c..0000000000 --- a/changelog.d/5911.misc +++ /dev/null @@ -1 +0,0 @@ -Add support for database engine-specific schema deltas, based on file extension. \ No newline at end of file diff --git a/changelog.d/5914.feature b/changelog.d/5914.feature deleted file mode 100644 index 85c7bf5963..0000000000 --- a/changelog.d/5914.feature +++ /dev/null @@ -1 +0,0 @@ -Add admin API endpoint for getting whether or not a user is a server administrator. diff --git a/changelog.d/5915.bugfix b/changelog.d/5915.bugfix deleted file mode 100644 index bf5b99fedc..0000000000 --- a/changelog.d/5915.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 404 for thumbnail download when `dynamic_thumbnails` is `false` and the thumbnail was dynamically generated. Fix reported by rkfg. diff --git a/changelog.d/5920.bugfix b/changelog.d/5920.bugfix deleted file mode 100644 index e45eb0ffee..0000000000 --- a/changelog.d/5920.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a cache-invalidation bug for worker-based deployments. diff --git a/changelog.d/5922.misc b/changelog.d/5922.misc deleted file mode 100644 index 2cc864897e..0000000000 --- a/changelog.d/5922.misc +++ /dev/null @@ -1 +0,0 @@ -Update Buildkite pipeline to use plugins instead of buildkite-agent commands. diff --git a/changelog.d/5926.misc b/changelog.d/5926.misc deleted file mode 100644 index 4383c302ec..0000000000 --- a/changelog.d/5926.misc +++ /dev/null @@ -1 +0,0 @@ -Add link in sample config to the logging config schema. diff --git a/changelog.d/5931.misc b/changelog.d/5931.misc deleted file mode 100644 index ac8e74f5b9..0000000000 --- a/changelog.d/5931.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary parentheses in return statements. \ No newline at end of file diff --git a/changelog.d/5934.feature b/changelog.d/5934.feature deleted file mode 100644 index eae969a52a..0000000000 --- a/changelog.d/5934.feature +++ /dev/null @@ -1 +0,0 @@ -Redact events in the database that have been redacted for a month. diff --git a/changelog.d/5938.misc b/changelog.d/5938.misc deleted file mode 100644 index b5a3b6ee3b..0000000000 --- a/changelog.d/5938.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused jenkins/prepare_sytest.sh file. 
diff --git a/changelog.d/5940.feature b/changelog.d/5940.feature deleted file mode 100644 index 5b69b97fe7..0000000000 --- a/changelog.d/5940.feature +++ /dev/null @@ -1 +0,0 @@ -Add the ability to send registration emails from the homeserver rather than delegating to an identity server. \ No newline at end of file diff --git a/changelog.d/5943.misc b/changelog.d/5943.misc deleted file mode 100644 index 6545e1244a..0000000000 --- a/changelog.d/5943.misc +++ /dev/null @@ -1 +0,0 @@ -Move Buildkite pipeline config to the pipelines repo. diff --git a/changelog.d/5953.misc b/changelog.d/5953.misc deleted file mode 100644 index 38e885f42a..0000000000 --- a/changelog.d/5953.misc +++ /dev/null @@ -1 +0,0 @@ -Update INSTALL.md to say that Python 2 is no longer supported. diff --git a/changelog.d/5962.misc b/changelog.d/5962.misc deleted file mode 100644 index d97d376c36..0000000000 --- a/changelog.d/5962.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary return statements in the codebase which were the result of a regex run. \ No newline at end of file diff --git a/changelog.d/5963.misc b/changelog.d/5963.misc deleted file mode 100644 index 0d6c3c3d65..0000000000 --- a/changelog.d/5963.misc +++ /dev/null @@ -1 +0,0 @@ -Remove left-over methods from C/S registration API. \ No newline at end of file diff --git a/changelog.d/5964.feature b/changelog.d/5964.feature deleted file mode 100644 index 273c9df026..0000000000 --- a/changelog.d/5964.feature +++ /dev/null @@ -1 +0,0 @@ -Remove `bind_email` and `bind_msisdn` parameters from /register ala MSC2140. \ No newline at end of file diff --git a/changelog.d/5966.bugfix b/changelog.d/5966.bugfix deleted file mode 100644 index b8ef5a7819..0000000000 --- a/changelog.d/5966.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix admin API for listing media in a room not being available with an external media repo. diff --git a/changelog.d/5967.bugfix b/changelog.d/5967.bugfix deleted file mode 100644 index 8d7bf5c2e9..0000000000 --- a/changelog.d/5967.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix list media admin API always returning an error. diff --git a/changelog.d/5969.feature b/changelog.d/5969.feature deleted file mode 100644 index cf603fa0c6..0000000000 --- a/changelog.d/5969.feature +++ /dev/null @@ -1 +0,0 @@ -Replace `trust_identity_server_for_password_resets` config option with `account_threepid_delegates`. diff --git a/changelog.d/5970.docker b/changelog.d/5970.docker deleted file mode 100644 index c9d04da9cd..0000000000 --- a/changelog.d/5970.docker +++ /dev/null @@ -1 +0,0 @@ -Avoid changing UID/GID if they are already correct. diff --git a/changelog.d/5971.bugfix b/changelog.d/5971.bugfix deleted file mode 100644 index 9ea095103b..0000000000 --- a/changelog.d/5971.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix room and user stats tracking. diff --git a/changelog.d/5972.misc b/changelog.d/5972.misc deleted file mode 100644 index 1dc217e899..0000000000 --- a/changelog.d/5972.misc +++ /dev/null @@ -1 +0,0 @@ -Add m.require_identity_server flag to /version's unstable_features. \ No newline at end of file diff --git a/changelog.d/5974.feature b/changelog.d/5974.feature deleted file mode 100644 index 387a444fc4..0000000000 --- a/changelog.d/5974.feature +++ /dev/null @@ -1 +0,0 @@ -Add m.id_access_token to unstable_features in /versions as per MSC2264. 
\ No newline at end of file diff --git a/changelog.d/5975.misc b/changelog.d/5975.misc deleted file mode 100644 index 5fcd229b89..0000000000 --- a/changelog.d/5975.misc +++ /dev/null @@ -1 +0,0 @@ -Cleanup event auth type initialisation. \ No newline at end of file diff --git a/changelog.d/5979.feature b/changelog.d/5979.feature deleted file mode 100644 index 94888aa2d3..0000000000 --- a/changelog.d/5979.feature +++ /dev/null @@ -1 +0,0 @@ -Use the v2 Identity Service API for 3PID invites. \ No newline at end of file diff --git a/changelog.d/5980.feature b/changelog.d/5980.feature deleted file mode 100644 index e20117cf1c..0000000000 --- a/changelog.d/5980.feature +++ /dev/null @@ -1 +0,0 @@ -Add POST /_matrix/client/unstable/account/3pid/unbind endpoint from MSC2140 for unbinding a 3PID from an identity server without removing it from the homeserver user account. diff --git a/changelog.d/5981.feature b/changelog.d/5981.feature deleted file mode 100644 index e39514273d..0000000000 --- a/changelog.d/5981.feature +++ /dev/null @@ -1 +0,0 @@ -Setting metrics_flags.known_servers to True in the configuration will publish the synapse_federation_known_servers metric over Prometheus. This represents the total number of servers your server knows about (i.e. is in rooms with), including itself. diff --git a/changelog.d/5982.bugfix b/changelog.d/5982.bugfix deleted file mode 100644 index 3ea281a3a0..0000000000 --- a/changelog.d/5982.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include missing opentracing contexts in outbout replication requests. diff --git a/changelog.d/5983.feature b/changelog.d/5983.feature deleted file mode 100644 index aa23ee6dcd..0000000000 --- a/changelog.d/5983.feature +++ /dev/null @@ -1 +0,0 @@ -Add minimum opentracing for client servlets. diff --git a/changelog.d/5984.bugfix b/changelog.d/5984.bugfix deleted file mode 100644 index 3387bf82bb..0000000000 --- a/changelog.d/5984.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix sending of EDUs when opentracing is enabled with an empty whitelist. diff --git a/changelog.d/5985.feature b/changelog.d/5985.feature deleted file mode 100644 index e5e29504af..0000000000 --- a/changelog.d/5985.feature +++ /dev/null @@ -1 +0,0 @@ -Check at setup that opentracing is installed if it's enabled in the config. diff --git a/changelog.d/5986.feature b/changelog.d/5986.feature deleted file mode 100644 index f56aec1b32..0000000000 --- a/changelog.d/5986.feature +++ /dev/null @@ -1 +0,0 @@ -Trace replication send times. diff --git a/changelog.d/5988.bugfix b/changelog.d/5988.bugfix deleted file mode 100644 index 5c3597cb53..0000000000 --- a/changelog.d/5988.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix invalid references to None while opentracing if the log context slips. diff --git a/changelog.d/5989.misc b/changelog.d/5989.misc deleted file mode 100644 index 9f2525fd3e..0000000000 --- a/changelog.d/5989.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up dependency checking at setup. diff --git a/changelog.d/5991.bugfix b/changelog.d/5991.bugfix deleted file mode 100644 index 5c3597cb53..0000000000 --- a/changelog.d/5991.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix invalid references to None while opentracing if the log context slips. diff --git a/changelog.d/5992.feature b/changelog.d/5992.feature deleted file mode 100644 index 31866c2925..0000000000 --- a/changelog.d/5992.feature +++ /dev/null @@ -1 +0,0 @@ -Give appropriate exit codes when synctl fails. 
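An aside on the 5981 entry above: it describes a Prometheus gauge counting the servers a homeserver shares rooms with. As a rough illustration of what publishing such a gauge involves — a sketch using prometheus_client, not Synapse's actual implementation; the help string and update mechanism are assumptions:

    from prometheus_client import Gauge

    # Metric name taken from the changelog entry; help string is illustrative.
    known_servers_gauge = Gauge(
        "synapse_federation_known_servers",
        "Number of servers this homeserver is in rooms with, including itself",
    )

    def publish_known_servers(count: int) -> None:
        # In Synapse this would be fed by a periodic database count of
        # distinct server names seen in joined rooms; here the count is
        # simply whatever the caller computed.
        known_servers_gauge.set(count)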
diff --git a/changelog.d/5993.feature b/changelog.d/5993.feature deleted file mode 100644 index 3e8bf5068d..0000000000 --- a/changelog.d/5993.feature +++ /dev/null @@ -1 +0,0 @@ -Add the ability to send registration emails from the homeserver rather than delegating to an identity server. diff --git a/changelog.d/5994.feature b/changelog.d/5994.feature deleted file mode 100644 index 5b69b97fe7..0000000000 --- a/changelog.d/5994.feature +++ /dev/null @@ -1 +0,0 @@ -Add the ability to send registration emails from the homeserver rather than delegating to an identity server. \ No newline at end of file diff --git a/changelog.d/5995.bugfix b/changelog.d/5995.bugfix deleted file mode 100644 index e03ab98bc6..0000000000 --- a/changelog.d/5995.bugfix +++ /dev/null @@ -1 +0,0 @@ -Return a M_MISSING_PARAM if `sid` is not provided to `/account/3pid`. \ No newline at end of file diff --git a/changelog.d/5996.bugfix b/changelog.d/5996.bugfix deleted file mode 100644 index 05e31faaa2..0000000000 --- a/changelog.d/5996.bugfix +++ /dev/null @@ -1 +0,0 @@ -federation_certificate_verification_whitelist now will not cause TypeErrors to be raised (a regression in 1.3). Additionally, it now supports internationalised domain names in their non-canonical representation. diff --git a/changelog.d/5998.bugfix b/changelog.d/5998.bugfix deleted file mode 100644 index 9ea095103b..0000000000 --- a/changelog.d/5998.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix room and user stats tracking. diff --git a/changelog.d/6000.feature b/changelog.d/6000.feature deleted file mode 100644 index 0a159bd10d..0000000000 --- a/changelog.d/6000.feature +++ /dev/null @@ -1 +0,0 @@ -Apply the federation blacklist to requests to identity servers. \ No newline at end of file diff --git a/changelog.d/6003.misc b/changelog.d/6003.misc deleted file mode 100644 index 4152d05f87..0000000000 --- a/changelog.d/6003.misc +++ /dev/null @@ -1 +0,0 @@ -Add opentracing span over HTTP push processing. diff --git a/changelog.d/6004.bugfix b/changelog.d/6004.bugfix deleted file mode 100644 index 45c179c8fd..0000000000 --- a/changelog.d/6004.bugfix +++ /dev/null @@ -1 +0,0 @@ -Only count real users when checking for auto-creation of auto-join room. diff --git a/changelog.d/6005.feature b/changelog.d/6005.feature deleted file mode 100644 index ed6491d3e4..0000000000 --- a/changelog.d/6005.feature +++ /dev/null @@ -1 +0,0 @@ -The new Prometheus metric `synapse_build_info` exposes the Python version, OS version, and Synapse version of the running server. diff --git a/changelog.d/6009.misc b/changelog.d/6009.misc deleted file mode 100644 index fea479e1dd..0000000000 --- a/changelog.d/6009.misc +++ /dev/null @@ -1 +0,0 @@ -Small refactor of function arguments and docstrings in RoomMemberHandler. \ No newline at end of file diff --git a/changelog.d/6010.misc b/changelog.d/6010.misc deleted file mode 100644 index 0659f12ebd..0000000000 --- a/changelog.d/6010.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused `origin` argument on FederationHandler.add_display_name_to_third_party_invite. \ No newline at end of file diff --git a/changelog.d/6011.feature b/changelog.d/6011.feature deleted file mode 100644 index ad16acb12b..0000000000 --- a/changelog.d/6011.feature +++ /dev/null @@ -1 +0,0 @@ -Use account_threepid_delegate.email and account_threepid_delegate.msisdn for validating threepid sessions. 
\ No newline at end of file diff --git a/changelog.d/6012.feature b/changelog.d/6012.feature deleted file mode 100644 index 25425510c6..0000000000 --- a/changelog.d/6012.feature +++ /dev/null @@ -1 +0,0 @@ -Add report_stats_endpoint option to configure where stats are reported to, if enabled. Contributed by @Sorunome. diff --git a/changelog.d/6013.misc b/changelog.d/6013.misc deleted file mode 100644 index 939fe8c655..0000000000 --- a/changelog.d/6013.misc +++ /dev/null @@ -1 +0,0 @@ -Compatibility with v2 Identity Service APIs other than /lookup. \ No newline at end of file diff --git a/changelog.d/6015.feature b/changelog.d/6015.feature deleted file mode 100644 index 42aaffced9..0000000000 --- a/changelog.d/6015.feature +++ /dev/null @@ -1 +0,0 @@ -Add config option to increase ratelimits for room admins redacting messages. diff --git a/changelog.d/6016.misc b/changelog.d/6016.misc deleted file mode 100644 index 91cf164714..0000000000 --- a/changelog.d/6016.misc +++ /dev/null @@ -1 +0,0 @@ -Add a 'failure_ts' column to the 'destinations' database table. diff --git a/changelog.d/6017.misc b/changelog.d/6017.misc deleted file mode 100644 index 5ccab9c6ca..0000000000 --- a/changelog.d/6017.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some code in the retry logic. diff --git a/changelog.d/6020.bugfix b/changelog.d/6020.bugfix deleted file mode 100644 index 58a7deba9d..0000000000 --- a/changelog.d/6020.bugfix +++ /dev/null @@ -1 +0,0 @@ -Ensure support users can be registered even if MAU limit is reached. diff --git a/changelog.d/6023.misc b/changelog.d/6023.misc deleted file mode 100644 index d80410c22c..0000000000 --- a/changelog.d/6023.misc +++ /dev/null @@ -1 +0,0 @@ -Fix the structured logging tests stomping on the global log configuration for subsequent tests. diff --git a/changelog.d/6024.bugfix b/changelog.d/6024.bugfix deleted file mode 100644 index ddad34595b..0000000000 --- a/changelog.d/6024.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where login error was shown incorrectly on SSO fallback login. diff --git a/changelog.d/6025.bugfix b/changelog.d/6025.bugfix deleted file mode 100644 index 50d7f9aab5..0000000000 --- a/changelog.d/6025.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in calculating the federation retry backoff period. \ No newline at end of file diff --git a/changelog.d/6026.feature b/changelog.d/6026.feature deleted file mode 100644 index 2489ff09b5..0000000000 --- a/changelog.d/6026.feature +++ /dev/null @@ -1 +0,0 @@ -Stop sending federation transactions to servers which have been down for a long time. diff --git a/changelog.d/6027.doc b/changelog.d/6027.doc deleted file mode 100644 index f0af68f3b1..0000000000 --- a/changelog.d/6027.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify Synapse 1.4.0 upgrade notes. diff --git a/changelog.d/6028.feature b/changelog.d/6028.feature deleted file mode 100644 index cf603fa0c6..0000000000 --- a/changelog.d/6028.feature +++ /dev/null @@ -1 +0,0 @@ -Replace `trust_identity_server_for_password_resets` config option with `account_threepid_delegates`. diff --git a/changelog.d/6029.bugfix b/changelog.d/6029.bugfix deleted file mode 100644 index 9ea095103b..0000000000 --- a/changelog.d/6029.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix room and user stats tracking. diff --git a/changelog.d/6032.misc b/changelog.d/6032.misc deleted file mode 100644 index ec5b5eb881..0000000000 --- a/changelog.d/6032.misc +++ /dev/null @@ -1 +0,0 @@ -Add developer documentation for using SAML2. 
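The 6016, 6025 and 6026 entries above all concern one mechanism: a per-destination retry schedule with a failure timestamp, a growing backoff interval, and a point past which transactions stop being sent. Schematically the interval calculation is a capped geometric progression; in this sketch the starting interval, multiplier and cap are assumptions, not Synapse's actual constants:

    MIN_RETRY_INTERVAL_MS = 10 * 60 * 1000       # assumed: 10 minutes
    MAX_RETRY_INTERVAL_MS = 24 * 60 * 60 * 1000  # assumed cap: 1 day

    def next_retry_interval_ms(last_interval_ms: int, multiplier: int = 5) -> int:
        # The first failure starts the schedule; later failures grow it
        # geometrically, but never past the cap. Keeping the stored value
        # bounded avoids the kind of problem the 6146 entry further down fixes.
        if last_interval_ms == 0:
            return MIN_RETRY_INTERVAL_MS
        return min(last_interval_ms * multiplier, MAX_RETRY_INTERVAL_MS)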
diff --git a/changelog.d/6037.feature b/changelog.d/6037.feature deleted file mode 100644 index 85553d2da0..0000000000 --- a/changelog.d/6037.feature +++ /dev/null @@ -1 +0,0 @@ -Make the process for mapping SAML2 users to matrix IDs more flexible. diff --git a/changelog.d/6042.feature b/changelog.d/6042.feature deleted file mode 100644 index a737760363..0000000000 --- a/changelog.d/6042.feature +++ /dev/null @@ -1 +0,0 @@ -Allow homeserver to handle or delegate email validation when adding an email to a user's account. diff --git a/changelog.d/6043.feature b/changelog.d/6043.feature deleted file mode 100644 index cd27b0400b..0000000000 --- a/changelog.d/6043.feature +++ /dev/null @@ -1 +0,0 @@ -Implement new Client Server API endpoints `/account/3pid/add` and `/account/3pid/bind` as per [MSC2290](https://github.com/matrix-org/matrix-doc/pull/2290). \ No newline at end of file diff --git a/changelog.d/6044.feature b/changelog.d/6044.feature deleted file mode 100644 index 7dc05d4845..0000000000 --- a/changelog.d/6044.feature +++ /dev/null @@ -1 +0,0 @@ -Add an unstable feature flag for separate add/bind 3pid APIs. \ No newline at end of file diff --git a/changelog.d/6047.misc b/changelog.d/6047.misc deleted file mode 100644 index a4cdb8abb3..0000000000 --- a/changelog.d/6047.misc +++ /dev/null @@ -1,2 +0,0 @@ -Stop populating some unused tables. - diff --git a/changelog.d/6049.doc b/changelog.d/6049.doc deleted file mode 100644 index e0307bf5c1..0000000000 --- a/changelog.d/6049.doc +++ /dev/null @@ -1 +0,0 @@ -Add some notes on rolling back to v1.3.1. diff --git a/changelog.d/6050.doc b/changelog.d/6050.doc deleted file mode 100644 index 3d19c69bc4..0000000000 --- a/changelog.d/6050.doc +++ /dev/null @@ -1 +0,0 @@ -Update the upgrade notes. diff --git a/changelog.d/6053.bugfix b/changelog.d/6053.bugfix deleted file mode 100644 index 6311157bf6..0000000000 --- a/changelog.d/6053.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent exceptions being logged when extremity-cleanup events fail due to lack of user consent to the terms of service. diff --git a/changelog.d/6056.bugfix b/changelog.d/6056.bugfix deleted file mode 100644 index 4d9573a58d..0000000000 --- a/changelog.d/6056.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove POST method from password reset submit_token endpoint until we implement submit_url functionality. \ No newline at end of file diff --git a/changelog.d/6058.docker b/changelog.d/6058.docker deleted file mode 100644 index 30be6933c9..0000000000 --- a/changelog.d/6058.docker +++ /dev/null @@ -1 +0,0 @@ -Provide SYNAPSE_WORKER envvar to specify python module. diff --git a/changelog.d/6059.bugfix b/changelog.d/6059.bugfix deleted file mode 100644 index 49d5bd3fa0..0000000000 --- a/changelog.d/6059.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix logcontext spam on non-Linux platforms. diff --git a/changelog.d/6062.bugfix b/changelog.d/6062.bugfix deleted file mode 100644 index e20117cf1c..0000000000 --- a/changelog.d/6062.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add POST /_matrix/client/unstable/account/3pid/unbind endpoint from MSC2140 for unbinding a 3PID from an identity server without removing it from the homeserver user account. diff --git a/changelog.d/6063.bugfix b/changelog.d/6063.bugfix deleted file mode 100644 index 7485e32a2c..0000000000 --- a/changelog.d/6063.bugfix +++ /dev/null @@ -1 +0,0 @@ -Ensure query parameters in email validation links are URL-encoded. 
\ No newline at end of file diff --git a/changelog.d/6064.misc b/changelog.d/6064.misc deleted file mode 100644 index 28dc89111b..0000000000 --- a/changelog.d/6064.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up the sample config for SAML authentication. diff --git a/changelog.d/6067.feature b/changelog.d/6067.feature deleted file mode 100644 index 72685961c9..0000000000 --- a/changelog.d/6067.feature +++ /dev/null @@ -1 +0,0 @@ -Remove `bind` parameter from Client Server POST `/account` endpoint as per [MSC2290](https://github.com/matrix-org/matrix-doc/pull/2290/). \ No newline at end of file diff --git a/changelog.d/6069.bugfix b/changelog.d/6069.bugfix deleted file mode 100644 index a437ac41a9..0000000000 --- a/changelog.d/6069.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug which caused SAML attribute maps to be overridden by defaults. diff --git a/changelog.d/6072.misc b/changelog.d/6072.misc deleted file mode 100644 index 91cf164714..0000000000 --- a/changelog.d/6072.misc +++ /dev/null @@ -1 +0,0 @@ -Add a 'failure_ts' column to the 'destinations' database table. diff --git a/changelog.d/6073.feature b/changelog.d/6073.feature deleted file mode 100644 index 15d9933891..0000000000 --- a/changelog.d/6073.feature +++ /dev/null @@ -1 +0,0 @@ -Return a clearer error message when a timeout occurs when attempting to contact an identity server. \ No newline at end of file diff --git a/changelog.d/6074.feature b/changelog.d/6074.feature deleted file mode 100644 index b7aa9c99d8..0000000000 --- a/changelog.d/6074.feature +++ /dev/null @@ -1 +0,0 @@ -Prevent password reset's submit_token endpoint from accepting trailing slashes. \ No newline at end of file diff --git a/changelog.d/6075.misc b/changelog.d/6075.misc deleted file mode 100644 index 914e56bcfe..0000000000 --- a/changelog.d/6075.misc +++ /dev/null @@ -1 +0,0 @@ -Change mailer logging to reflect Synapse doesn't just do chat notifications by email now. \ No newline at end of file diff --git a/changelog.d/6078.feature b/changelog.d/6078.feature deleted file mode 100644 index fae1e52322..0000000000 --- a/changelog.d/6078.feature +++ /dev/null @@ -1 +0,0 @@ -Add `POST /add_threepid/msisdn/submit_token` endpoint for proxying submitToken on an account_threepid_handler. \ No newline at end of file diff --git a/changelog.d/6079.feature b/changelog.d/6079.feature deleted file mode 100644 index bcbb49ac58..0000000000 --- a/changelog.d/6079.feature +++ /dev/null @@ -1 +0,0 @@ -Add `submit_url` response parameter to `*/msisdn/requestToken` endpoints. diff --git a/changelog.d/6082.feature b/changelog.d/6082.feature deleted file mode 100644 index c30662b608..0000000000 --- a/changelog.d/6082.feature +++ /dev/null @@ -1 +0,0 @@ -Return 403 on `/register/available` if registration has been disabled. \ No newline at end of file diff --git a/changelog.d/6089.misc b/changelog.d/6089.misc deleted file mode 100644 index fa3c197c54..0000000000 --- a/changelog.d/6089.misc +++ /dev/null @@ -1 +0,0 @@ -Move last seen info into devices table. diff --git a/changelog.d/6090.feature b/changelog.d/6090.feature deleted file mode 100644 index a6da448a1a..0000000000 --- a/changelog.d/6090.feature +++ /dev/null @@ -1 +0,0 @@ -Explicitly log when a homeserver does not have the 'trusted_key_servers' config field configured. 
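The 6043 entry above introduces the MSC2290 split between adding a threepid to the local account and binding it at an identity server. A hedged sketch of what a client call to the bind half might look like — the request fields follow MSC2290 as I read it, and the unstable URL prefix, homeserver address and all token values are placeholders:

    import requests  # any HTTP client would do

    def bind_threepid(hs_url, access_token, sid, client_secret,
                      id_server, id_access_token):
        # Ask the homeserver to bind an already-validated threepid at an
        # identity server (POST /account/3pid/bind per MSC2290).
        resp = requests.post(
            hs_url + "/_matrix/client/unstable/account/3pid/bind",
            headers={"Authorization": "Bearer " + access_token},
            json={
                "sid": sid,                      # session from requestToken
                "client_secret": client_secret,
                "id_server": id_server,
                "id_access_token": id_access_token,
            },
        )
        resp.raise_for_status()
        return resp.json()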
diff --git a/changelog.d/6092.bugfix b/changelog.d/6092.bugfix deleted file mode 100644 index 01a7498ec6..0000000000 --- a/changelog.d/6092.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix the logged number of updated items for the users_set_deactivated_flag background update. diff --git a/changelog.d/6097.bugfix b/changelog.d/6097.bugfix deleted file mode 100644 index 750a8ecf0a..0000000000 --- a/changelog.d/6097.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add sid to next_link for email validation. diff --git a/changelog.d/6098.feature b/changelog.d/6098.feature deleted file mode 100644 index f3c693c06b..0000000000 --- a/changelog.d/6098.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for pruning old rows in `user_ips` table. diff --git a/changelog.d/6099.misc b/changelog.d/6099.misc deleted file mode 100644 index 8415c6759b..0000000000 --- a/changelog.d/6099.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused parameter to get_user_id_by_threepid. diff --git a/changelog.d/6104.bugfix b/changelog.d/6104.bugfix deleted file mode 100644 index 41114a66ef..0000000000 --- a/changelog.d/6104.bugfix +++ /dev/null @@ -1 +0,0 @@ -Threepid validity checks on msisdns should not be dependent on 'threepid_behaviour_email'. diff --git a/changelog.d/6105.misc b/changelog.d/6105.misc deleted file mode 100644 index 2e838a35c6..0000000000 --- a/changelog.d/6105.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor the user-interactive auth handling. diff --git a/changelog.d/6106.misc b/changelog.d/6106.misc deleted file mode 100644 index d732091779..0000000000 --- a/changelog.d/6106.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor code for calculating registration flows. diff --git a/changelog.d/6107.bugfix b/changelog.d/6107.bugfix deleted file mode 100644 index d4b9516ac7..0000000000 --- a/changelog.d/6107.bugfix +++ /dev/null @@ -1 +0,0 @@ -Ensure that servers which are not configured to support email address verification do not offer it in the registration flows. \ No newline at end of file -- cgit 1.4.1 From 54569c787b4abbc5674d9c23c012b56d8cc156ef Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 26 Sep 2019 15:38:25 +0100 Subject: Kill off half-implemented password-reset via sms (#6101) Doing a password reset via SMS has never worked, and in any case is a silly idea because msisdn recycling is a thing. See also matrix-org/matrix-doc#2303. --- changelog.d/6101.misc | 1 + synapse/rest/client/v2_alpha/account.py | 65 +-------------------------------- 2 files changed, 2 insertions(+), 64 deletions(-) create mode 100644 changelog.d/6101.misc (limited to 'changelog.d') diff --git a/changelog.d/6101.misc b/changelog.d/6101.misc new file mode 100644 index 0000000000..9743abb9e9 --- /dev/null +++ b/changelog.d/6101.misc @@ -0,0 +1 @@ +Kill off half-implemented password-reset via sms. 
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index f99676fd30..80cf7126a0 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -129,66 +129,6 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): return 200, ret -class MsisdnPasswordRequestTokenRestServlet(RestServlet): - PATTERNS = client_patterns("/account/password/msisdn/requestToken$") - - def __init__(self, hs): - super(MsisdnPasswordRequestTokenRestServlet, self).__init__() - self.hs = hs - self.datastore = self.hs.get_datastore() - self.identity_handler = hs.get_handlers().identity_handler - - @defer.inlineCallbacks - def on_POST(self, request): - body = parse_json_object_from_request(request) - - assert_params_in_dict( - body, ["client_secret", "country", "phone_number", "send_attempt"] - ) - client_secret = body["client_secret"] - country = body["country"] - phone_number = body["phone_number"] - send_attempt = body["send_attempt"] - next_link = body.get("next_link") # Optional param - - msisdn = phone_number_to_msisdn(country, phone_number) - - if not check_3pid_allowed(self.hs, "msisdn", msisdn): - raise SynapseError( - 403, - "Account phone numbers are not authorized on this server", - Codes.THREEPID_DENIED, - ) - - existing_user_id = yield self.datastore.get_user_id_by_threepid( - "msisdn", msisdn - ) - - if existing_user_id is None: - raise SynapseError(400, "MSISDN not found", Codes.THREEPID_NOT_FOUND) - - if not self.hs.config.account_threepid_delegate_msisdn: - logger.warn( - "No upstream msisdn account_threepid_delegate configured on the server to " - "handle this request" - ) - raise SynapseError( - 400, - "Password reset by phone number is not supported on this homeserver", - ) - - ret = yield self.identity_handler.requestMsisdnToken( - self.hs.config.account_threepid_delegate_msisdn, - country, - phone_number, - client_secret, - send_attempt, - next_link, - ) - - return 200, ret - - class PasswordResetSubmitTokenServlet(RestServlet): """Handles 3PID validation token submission""" @@ -301,9 +241,7 @@ class PasswordRestServlet(RestServlet): else: requester = None result, params, _ = yield self.auth_handler.check_auth( - [[LoginType.EMAIL_IDENTITY], [LoginType.MSISDN]], - body, - self.hs.get_ip_from_request(request), + [[LoginType.EMAIL_IDENTITY]], body, self.hs.get_ip_from_request(request) ) if LoginType.EMAIL_IDENTITY in result: @@ -843,7 +781,6 @@ class WhoamiRestServlet(RestServlet): def register_servlets(hs, http_server): EmailPasswordRequestTokenRestServlet(hs).register(http_server) - MsisdnPasswordRequestTokenRestServlet(hs).register(http_server) PasswordResetSubmitTokenServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server) -- cgit 1.4.1 From 9d99eade7c8152956d6f0198d4ede1baeadedfe7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 26 Sep 2019 16:46:21 +0100 Subject: Newsfile --- changelog.d/6117.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6117.misc (limited to 'changelog.d') diff --git a/changelog.d/6117.misc b/changelog.d/6117.misc new file mode 100644 index 0000000000..f8bdb58f41 --- /dev/null +++ b/changelog.d/6117.misc @@ -0,0 +1 @@ +Fix up sample config entry for `redaction_retention_period` option. 
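A note on the servlet removed above: the `phone_number_to_msisdn` call it relied on turns a nationally-formatted phone number into an MSISDN — the E.164 international form with the leading "+" dropped — which is also why MSISDN recycling makes SMS password resets risky. A minimal sketch of that conversion using the `phonenumbers` library (error handling omitted; Synapse's real helper also rejects unparseable numbers):

    import phonenumbers

    def phone_number_to_msisdn(country: str, number: str) -> str:
        # Parse the number against its country, render it as E.164
        # ("+447700900123"), then drop the "+".
        parsed = phonenumbers.parse(number, country)
        e164 = phonenumbers.format_number(
            parsed, phonenumbers.PhoneNumberFormat.E164
        )
        return e164[1:]

    # e.g. phone_number_to_msisdn("GB", "07700 900123") == "447700900123"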
-- cgit 1.4.1 From 8c27bc8b60d4b78c059ea727a78e78dc8cd3df7a Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 27 Sep 2019 10:36:20 +0100 Subject: Move lookup-related functions from RoomMemberHandler to IdentityHandler (#5978) Just to have all the methods that make calls to identity services in one place. --- changelog.d/5978.misc | 1 + synapse/handlers/identity.py | 353 ++++++++++++++++++++++++++++++++++++++ synapse/handlers/room_member.py | 370 +--------------------------------------- 3 files changed, 360 insertions(+), 364 deletions(-) create mode 100644 changelog.d/5978.misc (limited to 'changelog.d') diff --git a/changelog.d/5978.misc b/changelog.d/5978.misc new file mode 100644 index 0000000000..6d2b69b11b --- /dev/null +++ b/changelog.d/5978.misc @@ -0,0 +1 @@ +Move lookup-related functions from RoomMemberHandler to IdentityHandler. \ No newline at end of file diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 6d42a1aed8..ba99ddf76d 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -21,11 +21,15 @@ import logging import urllib from canonicaljson import json +from signedjson.key import decode_verify_key_bytes +from signedjson.sign import verify_signed_json +from unpaddedbase64 import decode_base64 from twisted.internet import defer from twisted.internet.error import TimeoutError from synapse.api.errors import ( + AuthError, CodeMessageException, Codes, HttpResponseException, @@ -33,12 +37,15 @@ from synapse.api.errors import ( ) from synapse.config.emailconfig import ThreepidBehaviour from synapse.http.client import SimpleHttpClient +from synapse.util.hash import sha256_and_url_safe_base64 from synapse.util.stringutils import random_string from ._base import BaseHandler logger = logging.getLogger(__name__) +id_server_scheme = "https://" + class IdentityHandler(BaseHandler): def __init__(self, hs): @@ -557,6 +564,352 @@ class IdentityHandler(BaseHandler): logger.warning("Error contacting msisdn account_threepid_delegate: %s", e) raise SynapseError(400, "Error contacting the identity server") + @defer.inlineCallbacks + def lookup_3pid(self, id_server, medium, address, id_access_token=None): + """Looks up a 3pid in the passed identity server. + + Args: + id_server (str): The server name (including port, if required) + of the identity server to use. + medium (str): The type of the third party identifier (e.g. "email"). + address (str): The third party identifier (e.g. "foo@example.com"). + id_access_token (str|None): The access token to authenticate to the identity + server with + + Returns: + str|None: the matrix ID of the 3pid, or None if it is not recognized. + """ + if id_access_token is not None: + try: + results = yield self._lookup_3pid_v2( + id_server, id_access_token, medium, address + ) + return results + + except Exception as e: + # Catch HttpResponseExcept for a non-200 response code + # Check if this identity server does not know about v2 lookups + if isinstance(e, HttpResponseException) and e.code == 404: + # This is an old identity server that does not yet support v2 lookups + logger.warning( + "Attempted v2 lookup on v1 identity server %s. 
Falling " + "back to v1", + id_server, + ) + else: + logger.warning("Error when looking up hashing details: %s", e) + return None + + return (yield self._lookup_3pid_v1(id_server, medium, address)) + + @defer.inlineCallbacks + def _lookup_3pid_v1(self, id_server, medium, address): + """Looks up a 3pid in the passed identity server using v1 lookup. + + Args: + id_server (str): The server name (including port, if required) + of the identity server to use. + medium (str): The type of the third party identifier (e.g. "email"). + address (str): The third party identifier (e.g. "foo@example.com"). + + Returns: + str: the matrix ID of the 3pid, or None if it is not recognized. + """ + try: + data = yield self.blacklisting_http_client.get_json( + "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server), + {"medium": medium, "address": address}, + ) + + if "mxid" in data: + if "signatures" not in data: + raise AuthError(401, "No signatures on 3pid binding") + yield self._verify_any_signature(data, id_server) + return data["mxid"] + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") + except IOError as e: + logger.warning("Error from v1 identity server lookup: %s" % (e,)) + + return None + + @defer.inlineCallbacks + def _lookup_3pid_v2(self, id_server, id_access_token, medium, address): + """Looks up a 3pid in the passed identity server using v2 lookup. + + Args: + id_server (str): The server name (including port, if required) + of the identity server to use. + id_access_token (str): The access token to authenticate to the identity server with + medium (str): The type of the third party identifier (e.g. "email"). + address (str): The third party identifier (e.g. "foo@example.com"). + + Returns: + Deferred[str|None]: the matrix ID of the 3pid, or None if it is not recognised. 
+ """ + # Check what hashing details are supported by this identity server + try: + hash_details = yield self.blacklisting_http_client.get_json( + "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), + {"access_token": id_access_token}, + ) + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") + + if not isinstance(hash_details, dict): + logger.warning( + "Got non-dict object when checking hash details of %s%s: %s", + id_server_scheme, + id_server, + hash_details, + ) + raise SynapseError( + 400, + "Non-dict object from %s%s during v2 hash_details request: %s" + % (id_server_scheme, id_server, hash_details), + ) + + # Extract information from hash_details + supported_lookup_algorithms = hash_details.get("algorithms") + lookup_pepper = hash_details.get("lookup_pepper") + if ( + not supported_lookup_algorithms + or not isinstance(supported_lookup_algorithms, list) + or not lookup_pepper + or not isinstance(lookup_pepper, str) + ): + raise SynapseError( + 400, + "Invalid hash details received from identity server %s%s: %s" + % (id_server_scheme, id_server, hash_details), + ) + + # Check if any of the supported lookup algorithms are present + if LookupAlgorithm.SHA256 in supported_lookup_algorithms: + # Perform a hashed lookup + lookup_algorithm = LookupAlgorithm.SHA256 + + # Hash address, medium and the pepper with sha256 + to_hash = "%s %s %s" % (address, medium, lookup_pepper) + lookup_value = sha256_and_url_safe_base64(to_hash) + + elif LookupAlgorithm.NONE in supported_lookup_algorithms: + # Perform a non-hashed lookup + lookup_algorithm = LookupAlgorithm.NONE + + # Combine together plaintext address and medium + lookup_value = "%s %s" % (address, medium) + + else: + logger.warning( + "None of the provided lookup algorithms of %s are supported: %s", + id_server, + supported_lookup_algorithms, + ) + raise SynapseError( + 400, + "Provided identity server does not support any v2 lookup " + "algorithms that this homeserver supports.", + ) + + # Authenticate with identity server given the access token from the client + headers = {"Authorization": create_id_access_token_header(id_access_token)} + + try: + lookup_results = yield self.blacklisting_http_client.post_json_get_json( + "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server), + { + "addresses": [lookup_value], + "algorithm": lookup_algorithm, + "pepper": lookup_pepper, + }, + headers=headers, + ) + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") + except Exception as e: + logger.warning("Error when performing a v2 3pid lookup: %s", e) + raise SynapseError( + 500, "Unknown error occurred during identity server lookup" + ) + + # Check for a mapping from what we looked up to an MXID + if "mappings" not in lookup_results or not isinstance( + lookup_results["mappings"], dict + ): + logger.warning("No results from 3pid lookup") + return None + + # Return the MXID if it's available, or None otherwise + mxid = lookup_results["mappings"].get(lookup_value) + return mxid + + @defer.inlineCallbacks + def _verify_any_signature(self, data, server_hostname): + if server_hostname not in data["signatures"]: + raise AuthError(401, "No signature from server %s" % (server_hostname,)) + for key_name, signature in data["signatures"][server_hostname].items(): + try: + key_data = yield self.blacklisting_http_client.get_json( + "%s%s/_matrix/identity/api/v1/pubkey/%s" + % (id_server_scheme, server_hostname, key_name) + ) + except TimeoutError: + raise 
SynapseError(500, "Timed out contacting identity server") + if "public_key" not in key_data: + raise AuthError( + 401, "No public key named %s from %s" % (key_name, server_hostname) + ) + verify_signed_json( + data, + server_hostname, + decode_verify_key_bytes( + key_name, decode_base64(key_data["public_key"]) + ), + ) + return + + @defer.inlineCallbacks + def ask_id_server_for_third_party_invite( + self, + requester, + id_server, + medium, + address, + room_id, + inviter_user_id, + room_alias, + room_avatar_url, + room_join_rules, + room_name, + inviter_display_name, + inviter_avatar_url, + id_access_token=None, + ): + """ + Asks an identity server for a third party invite. + + Args: + requester (Requester) + id_server (str): hostname + optional port for the identity server. + medium (str): The literal string "email". + address (str): The third party address being invited. + room_id (str): The ID of the room to which the user is invited. + inviter_user_id (str): The user ID of the inviter. + room_alias (str): An alias for the room, for cosmetic notifications. + room_avatar_url (str): The URL of the room's avatar, for cosmetic + notifications. + room_join_rules (str): The join rules of the email (e.g. "public"). + room_name (str): The m.room.name of the room. + inviter_display_name (str): The current display name of the + inviter. + inviter_avatar_url (str): The URL of the inviter's avatar. + id_access_token (str|None): The access token to authenticate to the identity + server with + + Returns: + A deferred tuple containing: + token (str): The token which must be signed to prove authenticity. + public_keys ([{"public_key": str, "key_validity_url": str}]): + public_key is a base64-encoded ed25519 public key. + fallback_public_key: One element from public_keys. + display_name (str): A user-friendly name to represent the invited + user. 
+ """ + invite_config = { + "medium": medium, + "address": address, + "room_id": room_id, + "room_alias": room_alias, + "room_avatar_url": room_avatar_url, + "room_join_rules": room_join_rules, + "room_name": room_name, + "sender": inviter_user_id, + "sender_display_name": inviter_display_name, + "sender_avatar_url": inviter_avatar_url, + } + + # Add the identity service access token to the JSON body and use the v2 + # Identity Service endpoints if id_access_token is present + data = None + base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server) + + if id_access_token: + key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % ( + id_server_scheme, + id_server, + ) + + # Attempt a v2 lookup + url = base_url + "/v2/store-invite" + try: + data = yield self.blacklisting_http_client.post_json_get_json( + url, + invite_config, + {"Authorization": create_id_access_token_header(id_access_token)}, + ) + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") + except HttpResponseException as e: + if e.code != 404: + logger.info("Failed to POST %s with JSON: %s", url, e) + raise e + + if data is None: + key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % ( + id_server_scheme, + id_server, + ) + url = base_url + "/api/v1/store-invite" + + try: + data = yield self.blacklisting_http_client.post_json_get_json( + url, invite_config + ) + except TimeoutError: + raise SynapseError(500, "Timed out contacting identity server") + except HttpResponseException as e: + logger.warning( + "Error trying to call /store-invite on %s%s: %s", + id_server_scheme, + id_server, + e, + ) + + if data is None: + # Some identity servers may only support application/x-www-form-urlencoded + # types. This is especially true with old instances of Sydent, see + # https://github.com/matrix-org/sydent/pull/170 + try: + data = yield self.blacklisting_http_client.post_urlencoded_get_json( + url, invite_config + ) + except HttpResponseException as e: + logger.warning( + "Error calling /store-invite on %s%s with fallback " + "encoding: %s", + id_server_scheme, + id_server, + e, + ) + raise e + + # TODO: Check for success + token = data["token"] + public_keys = data.get("public_keys", []) + if "public_key" in data: + fallback_public_key = { + "public_key": data["public_key"], + "key_validity_url": key_validity_url, + } + else: + fallback_public_key = public_keys[0] + + if not public_keys: + public_keys.append(fallback_public_key) + display_name = data["display_name"] + return token, public_keys, fallback_public_key, display_name + def create_id_access_token_header(id_access_token): """Create an Authorization header for passing to SimpleHttpClient as the header value diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 94cd0cf3ef..8abdb1b6e6 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -20,29 +20,19 @@ import logging from six.moves import http_client -from signedjson.key import decode_verify_key_bytes -from signedjson.sign import verify_signed_json -from unpaddedbase64 import decode_base64 - from twisted.internet import defer -from twisted.internet.error import TimeoutError from synapse import types from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError -from synapse.handlers.identity import LookupAlgorithm, create_id_access_token_header -from synapse.http.client import SimpleHttpClient +from synapse.api.errors import 
AuthError, Codes, SynapseError from synapse.types import RoomID, UserID from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room, user_left_room -from synapse.util.hash import sha256_and_url_safe_base64 from ._base import BaseHandler logger = logging.getLogger(__name__) -id_server_scheme = "https://" - class RoomMemberHandler(object): # TODO(paul): This handler currently contains a messy conflation of @@ -63,14 +53,10 @@ class RoomMemberHandler(object): self.auth = hs.get_auth() self.state_handler = hs.get_state_handler() self.config = hs.config - # We create a blacklisting instance of SimpleHttpClient for contacting identity - # servers specified by clients - self.simple_http_client = SimpleHttpClient( - hs, ip_blacklist=hs.config.federation_ip_range_blacklist - ) self.federation_handler = hs.get_handlers().federation_handler self.directory_handler = hs.get_handlers().directory_handler + self.identity_handler = hs.get_handlers().identity_handler self.registration_handler = hs.get_registration_handler() self.profile_handler = hs.get_profile_handler() self.event_creation_handler = hs.get_event_creation_handler() @@ -682,7 +668,9 @@ class RoomMemberHandler(object): 403, "Looking up third-party identifiers is denied from this server" ) - invitee = yield self._lookup_3pid(id_server, medium, address, id_access_token) + invitee = yield self.identity_handler.lookup_3pid( + id_server, medium, address, id_access_token + ) if invitee: yield self.update_membership( @@ -700,211 +688,6 @@ class RoomMemberHandler(object): id_access_token=id_access_token, ) - @defer.inlineCallbacks - def _lookup_3pid(self, id_server, medium, address, id_access_token=None): - """Looks up a 3pid in the passed identity server. - - Args: - id_server (str): The server name (including port, if required) - of the identity server to use. - medium (str): The type of the third party identifier (e.g. "email"). - address (str): The third party identifier (e.g. "foo@example.com"). - id_access_token (str|None): The access token to authenticate to the identity - server with - - Returns: - str|None: the matrix ID of the 3pid, or None if it is not recognized. - """ - if id_access_token is not None: - try: - results = yield self._lookup_3pid_v2( - id_server, id_access_token, medium, address - ) - return results - - except Exception as e: - # Catch HttpResponseExcept for a non-200 response code - # Check if this identity server does not know about v2 lookups - if isinstance(e, HttpResponseException) and e.code == 404: - # This is an old identity server that does not yet support v2 lookups - logger.warning( - "Attempted v2 lookup on v1 identity server %s. Falling " - "back to v1", - id_server, - ) - else: - logger.warning("Error when looking up hashing details: %s", e) - return None - - return (yield self._lookup_3pid_v1(id_server, medium, address)) - - @defer.inlineCallbacks - def _lookup_3pid_v1(self, id_server, medium, address): - """Looks up a 3pid in the passed identity server using v1 lookup. - - Args: - id_server (str): The server name (including port, if required) - of the identity server to use. - medium (str): The type of the third party identifier (e.g. "email"). - address (str): The third party identifier (e.g. "foo@example.com"). - - Returns: - str: the matrix ID of the 3pid, or None if it is not recognized. 
- """ - try: - data = yield self.simple_http_client.get_json( - "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server), - {"medium": medium, "address": address}, - ) - - if "mxid" in data: - if "signatures" not in data: - raise AuthError(401, "No signatures on 3pid binding") - yield self._verify_any_signature(data, id_server) - return data["mxid"] - except TimeoutError: - raise SynapseError(500, "Timed out contacting identity server") - except IOError as e: - logger.warning("Error from v1 identity server lookup: %s" % (e,)) - - return None - - @defer.inlineCallbacks - def _lookup_3pid_v2(self, id_server, id_access_token, medium, address): - """Looks up a 3pid in the passed identity server using v2 lookup. - - Args: - id_server (str): The server name (including port, if required) - of the identity server to use. - id_access_token (str): The access token to authenticate to the identity server with - medium (str): The type of the third party identifier (e.g. "email"). - address (str): The third party identifier (e.g. "foo@example.com"). - - Returns: - Deferred[str|None]: the matrix ID of the 3pid, or None if it is not recognised. - """ - # Check what hashing details are supported by this identity server - try: - hash_details = yield self.simple_http_client.get_json( - "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), - {"access_token": id_access_token}, - ) - except TimeoutError: - raise SynapseError(500, "Timed out contacting identity server") - - if not isinstance(hash_details, dict): - logger.warning( - "Got non-dict object when checking hash details of %s%s: %s", - id_server_scheme, - id_server, - hash_details, - ) - raise SynapseError( - 400, - "Non-dict object from %s%s during v2 hash_details request: %s" - % (id_server_scheme, id_server, hash_details), - ) - - # Extract information from hash_details - supported_lookup_algorithms = hash_details.get("algorithms") - lookup_pepper = hash_details.get("lookup_pepper") - if ( - not supported_lookup_algorithms - or not isinstance(supported_lookup_algorithms, list) - or not lookup_pepper - or not isinstance(lookup_pepper, str) - ): - raise SynapseError( - 400, - "Invalid hash details received from identity server %s%s: %s" - % (id_server_scheme, id_server, hash_details), - ) - - # Check if any of the supported lookup algorithms are present - if LookupAlgorithm.SHA256 in supported_lookup_algorithms: - # Perform a hashed lookup - lookup_algorithm = LookupAlgorithm.SHA256 - - # Hash address, medium and the pepper with sha256 - to_hash = "%s %s %s" % (address, medium, lookup_pepper) - lookup_value = sha256_and_url_safe_base64(to_hash) - - elif LookupAlgorithm.NONE in supported_lookup_algorithms: - # Perform a non-hashed lookup - lookup_algorithm = LookupAlgorithm.NONE - - # Combine together plaintext address and medium - lookup_value = "%s %s" % (address, medium) - - else: - logger.warning( - "None of the provided lookup algorithms of %s are supported: %s", - id_server, - supported_lookup_algorithms, - ) - raise SynapseError( - 400, - "Provided identity server does not support any v2 lookup " - "algorithms that this homeserver supports.", - ) - - # Authenticate with identity server given the access token from the client - headers = {"Authorization": create_id_access_token_header(id_access_token)} - - try: - lookup_results = yield self.simple_http_client.post_json_get_json( - "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server), - { - "addresses": [lookup_value], - "algorithm": lookup_algorithm, - 
"pepper": lookup_pepper, - }, - headers=headers, - ) - except TimeoutError: - raise SynapseError(500, "Timed out contacting identity server") - except Exception as e: - logger.warning("Error when performing a v2 3pid lookup: %s", e) - raise SynapseError( - 500, "Unknown error occurred during identity server lookup" - ) - - # Check for a mapping from what we looked up to an MXID - if "mappings" not in lookup_results or not isinstance( - lookup_results["mappings"], dict - ): - logger.warning("No results from 3pid lookup") - return None - - # Return the MXID if it's available, or None otherwise - mxid = lookup_results["mappings"].get(lookup_value) - return mxid - - @defer.inlineCallbacks - def _verify_any_signature(self, data, server_hostname): - if server_hostname not in data["signatures"]: - raise AuthError(401, "No signature from server %s" % (server_hostname,)) - for key_name, signature in data["signatures"][server_hostname].items(): - try: - key_data = yield self.simple_http_client.get_json( - "%s%s/_matrix/identity/api/v1/pubkey/%s" - % (id_server_scheme, server_hostname, key_name) - ) - except TimeoutError: - raise SynapseError(500, "Timed out contacting identity server") - if "public_key" not in key_data: - raise AuthError( - 401, "No public key named %s from %s" % (key_name, server_hostname) - ) - verify_signed_json( - data, - server_hostname, - decode_verify_key_bytes( - key_name, decode_base64(key_data["public_key"]) - ), - ) - return - @defer.inlineCallbacks def _make_and_store_3pid_invite( self, @@ -951,7 +734,7 @@ class RoomMemberHandler(object): room_avatar_url = room_avatar_event.content.get("url", "") token, public_keys, fallback_public_key, display_name = ( - yield self._ask_id_server_for_third_party_invite( + yield self.identity_handler.ask_id_server_for_third_party_invite( requester=requester, id_server=id_server, medium=medium, @@ -987,147 +770,6 @@ class RoomMemberHandler(object): txn_id=txn_id, ) - @defer.inlineCallbacks - def _ask_id_server_for_third_party_invite( - self, - requester, - id_server, - medium, - address, - room_id, - inviter_user_id, - room_alias, - room_avatar_url, - room_join_rules, - room_name, - inviter_display_name, - inviter_avatar_url, - id_access_token=None, - ): - """ - Asks an identity server for a third party invite. - - Args: - requester (Requester) - id_server (str): hostname + optional port for the identity server. - medium (str): The literal string "email". - address (str): The third party address being invited. - room_id (str): The ID of the room to which the user is invited. - inviter_user_id (str): The user ID of the inviter. - room_alias (str): An alias for the room, for cosmetic notifications. - room_avatar_url (str): The URL of the room's avatar, for cosmetic - notifications. - room_join_rules (str): The join rules of the email (e.g. "public"). - room_name (str): The m.room.name of the room. - inviter_display_name (str): The current display name of the - inviter. - inviter_avatar_url (str): The URL of the inviter's avatar. - id_access_token (str|None): The access token to authenticate to the identity - server with - - Returns: - A deferred tuple containing: - token (str): The token which must be signed to prove authenticity. - public_keys ([{"public_key": str, "key_validity_url": str}]): - public_key is a base64-encoded ed25519 public key. - fallback_public_key: One element from public_keys. - display_name (str): A user-friendly name to represent the invited - user. 
- """ - invite_config = { - "medium": medium, - "address": address, - "room_id": room_id, - "room_alias": room_alias, - "room_avatar_url": room_avatar_url, - "room_join_rules": room_join_rules, - "room_name": room_name, - "sender": inviter_user_id, - "sender_display_name": inviter_display_name, - "sender_avatar_url": inviter_avatar_url, - } - - # Add the identity service access token to the JSON body and use the v2 - # Identity Service endpoints if id_access_token is present - data = None - base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server) - - if id_access_token: - key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % ( - id_server_scheme, - id_server, - ) - - # Attempt a v2 lookup - url = base_url + "/v2/store-invite" - try: - data = yield self.simple_http_client.post_json_get_json( - url, - invite_config, - {"Authorization": create_id_access_token_header(id_access_token)}, - ) - except TimeoutError: - raise SynapseError(500, "Timed out contacting identity server") - except HttpResponseException as e: - if e.code != 404: - logger.info("Failed to POST %s with JSON: %s", url, e) - raise e - - if data is None: - key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % ( - id_server_scheme, - id_server, - ) - url = base_url + "/api/v1/store-invite" - - try: - data = yield self.simple_http_client.post_json_get_json( - url, invite_config - ) - except TimeoutError: - raise SynapseError(500, "Timed out contacting identity server") - except HttpResponseException as e: - logger.warning( - "Error trying to call /store-invite on %s%s: %s", - id_server_scheme, - id_server, - e, - ) - - if data is None: - # Some identity servers may only support application/x-www-form-urlencoded - # types. This is especially true with old instances of Sydent, see - # https://github.com/matrix-org/sydent/pull/170 - try: - data = yield self.simple_http_client.post_urlencoded_get_json( - url, invite_config - ) - except HttpResponseException as e: - logger.warning( - "Error calling /store-invite on %s%s with fallback " - "encoding: %s", - id_server_scheme, - id_server, - e, - ) - raise e - - # TODO: Check for success - token = data["token"] - public_keys = data.get("public_keys", []) - if "public_key" in data: - fallback_public_key = { - "public_key": data["public_key"], - "key_validity_url": key_validity_url, - } - else: - fallback_public_key = public_keys[0] - - if not public_keys: - public_keys.append(fallback_public_key) - display_name = data["display_name"] - return token, public_keys, fallback_public_key, display_name - @defer.inlineCallbacks def _is_host_in_room(self, current_state_ids): # Have we just created the room, and is this about to be the very -- cgit 1.4.1 From 0804a27c8c7c2cc9f0adbb0329bffcd8ce10e1bd Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 27 Sep 2019 15:14:34 +0100 Subject: Changelog --- changelog.d/6125.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6125.feature (limited to 'changelog.d') diff --git a/changelog.d/6125.feature b/changelog.d/6125.feature new file mode 100644 index 0000000000..432e255ad4 --- /dev/null +++ b/changelog.d/6125.feature @@ -0,0 +1 @@ +Reject all pending invite for a user during deactivation. 
-- cgit 1.4.1 From 25a0a36ad9b63aa2becabc5c311025cb612d466f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 27 Sep 2019 16:10:24 +0100 Subject: Update changelog.d/6125.feature Co-Authored-By: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/6125.feature | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'changelog.d') diff --git a/changelog.d/6125.feature b/changelog.d/6125.feature index 432e255ad4..cbe5f8d3c8 100644 --- a/changelog.d/6125.feature +++ b/changelog.d/6125.feature @@ -1 +1 @@ -Reject all pending invite for a user during deactivation. +Reject all pending invites for a user during deactivation. -- cgit 1.4.1 From f3451118a6dca1499daadf224c3eab801dad0c0c Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 27 Sep 2019 17:59:18 +0100 Subject: Edit SimpleHttpClient to reference that header keys can be passed as str or bytes (#6077) --- changelog.d/6077.misc | 1 + synapse/http/client.py | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 changelog.d/6077.misc (limited to 'changelog.d') diff --git a/changelog.d/6077.misc b/changelog.d/6077.misc new file mode 100644 index 0000000000..31ac5b97a4 --- /dev/null +++ b/changelog.d/6077.misc @@ -0,0 +1 @@ +Edit header dicts docstrings in SimpleHttpClient to note that `str` or `bytes` can be passed as header keys. diff --git a/synapse/http/client.py b/synapse/http/client.py index 51765ae3c0..cdf828a4ff 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -327,7 +327,7 @@ class SimpleHttpClient(object): Args: uri (str): args (dict[str, str|List[str]]): query params - headers (dict[str, List[str]]|None): If not None, a map from + headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from header name to a list of values for that header Returns: @@ -371,7 +371,7 @@ class SimpleHttpClient(object): Args: uri (str): post_json (object): - headers (dict[str, List[str]]|None): If not None, a map from + headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from header name to a list of values for that header Returns: @@ -414,7 +414,7 @@ class SimpleHttpClient(object): None. **Note**: The value of each key is assumed to be an iterable and *not* a string. - headers (dict[str, List[str]]|None): If not None, a map from + headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from header name to a list of values for that header Returns: Deferred: Succeeds when we get *any* 2xx HTTP response, with the @@ -438,7 +438,7 @@ class SimpleHttpClient(object): None. **Note**: The value of each key is assumed to be an iterable and *not* a string. - headers (dict[str, List[str]]|None): If not None, a map from + headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from header name to a list of values for that header Returns: Deferred: Succeeds when we get *any* 2xx HTTP response, with the @@ -482,7 +482,7 @@ class SimpleHttpClient(object): None. **Note**: The value of each key is assumed to be an iterable and *not* a string. - headers (dict[str, List[str]]|None): If not None, a map from + headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from header name to a list of values for that header Returns: Deferred: Succeeds when we get *any* 2xx HTTP response, with the @@ -516,7 +516,7 @@ class SimpleHttpClient(object): Args: url (str): The URL to GET output_stream (file): File to write the response body to. 
- headers (dict[str, List[str]]|None): If not None, a map from + headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from header name to a list of values for that header Returns: A (int,dict,string,int) tuple of the file length, dict of the response -- cgit 1.4.1 From 16cb9a71b8b46604d49944f0b9c316687becca93 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 30 Sep 2019 09:38:41 +0100 Subject: Drop unused tables (#6115) These tables are unused since #5893 (as amended by #6047), so we can now drop them. Fixes #6048. --- changelog.d/6115.misc | 1 + .../schema/delta/56/drop_unused_event_tables.sql | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 changelog.d/6115.misc create mode 100644 synapse/storage/schema/delta/56/drop_unused_event_tables.sql (limited to 'changelog.d') diff --git a/changelog.d/6115.misc b/changelog.d/6115.misc new file mode 100644 index 0000000000..b19e395a99 --- /dev/null +++ b/changelog.d/6115.misc @@ -0,0 +1 @@ +Drop some unused database tables. diff --git a/synapse/storage/schema/delta/56/drop_unused_event_tables.sql b/synapse/storage/schema/delta/56/drop_unused_event_tables.sql new file mode 100644 index 0000000000..9f09922c67 --- /dev/null +++ b/synapse/storage/schema/delta/56/drop_unused_event_tables.sql @@ -0,0 +1,20 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- these tables are never used. +DROP TABLE IF EXISTS room_names; +DROP TABLE IF EXISTS topics; +DROP TABLE IF EXISTS history_visibility; +DROP TABLE IF EXISTS guest_access; -- cgit 1.4.1 From 129343cd8a90f27142fbb0c7b723ae2cb16207b9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 30 Sep 2019 12:00:34 +0100 Subject: Newsfile --- changelog.d/6135.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6135.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6135.bugfix b/changelog.d/6135.bugfix new file mode 100644 index 0000000000..5f9f010cb1 --- /dev/null +++ b/changelog.d/6135.bugfix @@ -0,0 +1 @@ +Fix bug in background update that adds last seen information to the `devices` table, and improve its performance on Postgres. -- cgit 1.4.1 From 2b8352e6387a71f8bea8b512f1a491f1fedf06fc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 1 Oct 2019 13:36:29 +0100 Subject: Newsfile --- changelog.d/6141.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6141.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6141.bugfix b/changelog.d/6141.bugfix new file mode 100644 index 0000000000..c93920b7b5 --- /dev/null +++ b/changelog.d/6141.bugfix @@ -0,0 +1 @@ +Fix bad performance of censoring redactions background task. 
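The SimpleHttpClient docstring updates above record that header names and values may be passed as either str or bytes; in both cases each value is a list, one entry per header line. A usage sketch from inside an inlineCallbacks function, where client stands in for hs.get_simple_http_client():

    body = yield client.get_json(
        "https://example.com/info",
        args={"q": "synapse"},
        headers={b"Accept": [b"application/json"]},  # bytes keys and values
    )
    body = yield client.post_json_get_json(
        "https://example.com/submit",
        {"key": "value"},
        headers={"X-Custom-Header": ["one", "two"]},  # str keys and values
    )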
-- cgit 1.4.1 From 33d4ebdf78149705aa4b73cabe593337619ca2a7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Oct 2019 10:18:17 +0100 Subject: Newsfile --- changelog.d/6145.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6145.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6145.bugfix b/changelog.d/6145.bugfix new file mode 100644 index 0000000000..9e0eb5dd4c --- /dev/null +++ b/changelog.d/6145.bugfix @@ -0,0 +1 @@ +Fix fetching censored redactions from DB, which caused APIs like initial sync to fail if it tried to include the censored redaction. -- cgit 1.4.1 From 2bc027ab71c33960e216ec194612a616a4aa11b8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Oct 2019 10:41:29 +0100 Subject: Newsfile --- changelog.d/6146.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6146.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6146.bugfix b/changelog.d/6146.bugfix new file mode 100644 index 0000000000..1dad801836 --- /dev/null +++ b/changelog.d/6146.bugfix @@ -0,0 +1 @@ +Fix exceptions when storing large retry intervals for down remote servers. -- cgit 1.4.1 From de1823b521cd9b691d060dbdc477be16decdf2af Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Oct 2019 11:23:55 +0100 Subject: Newsfile --- changelog.d/6148.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6148.misc (limited to 'changelog.d') diff --git a/changelog.d/6148.misc b/changelog.d/6148.misc new file mode 100644 index 0000000000..1d5213345c --- /dev/null +++ b/changelog.d/6148.misc @@ -0,0 +1 @@ +Improve performance of `find_next_generated_user_id` DB query. -- cgit 1.4.1 From 2a1470cd05558d4a3bc69a0bc5e8969ba8631426 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 2 Oct 2019 12:04:22 +0100 Subject: Fix yields and copy instead of move push rules on room upgrade (#6144) Copy push rules during a room upgrade from the old room to the new room, instead of deleting them from the old room. For instance, we've defined upgrading of a room multiple times to be possible, and push rules won't be transferred on the second upgrade if they're deleted during the first. Also fix some missing yields that probably broke things quite a bit. --- changelog.d/6144.bugfix | 1 + synapse/handlers/room_member.py | 4 ++-- synapse/storage/push_rule.py | 16 ++++++---------- 3 files changed, 9 insertions(+), 12 deletions(-) create mode 100644 changelog.d/6144.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6144.bugfix b/changelog.d/6144.bugfix new file mode 100644 index 0000000000..eee63961e4 --- /dev/null +++ b/changelog.d/6144.bugfix @@ -0,0 +1 @@ +Prevent user push rules being deleted from a room when it is upgraded. 
\ No newline at end of file diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 8abdb1b6e6..95a244d86c 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -216,8 +216,8 @@ class RoomMemberHandler(object): self.copy_room_tags_and_direct_to_room( predecessor["room_id"], room_id, user_id ) - # Move over old push rules - self.store.move_push_rules_from_room_to_room_for_user( + # Copy over push rules + yield self.store.copy_push_rules_from_room_to_room_for_user( predecessor["room_id"], room_id, user_id ) elif event.membership == Membership.LEAVE: diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index a6517c4cf3..c4e24edff2 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -183,8 +183,8 @@ class PushRulesWorkerStore( return results @defer.inlineCallbacks - def move_push_rule_from_room_to_room(self, new_room_id, user_id, rule): - """Move a single push rule from one room to another for a specific user. + def copy_push_rule_from_room_to_room(self, new_room_id, user_id, rule): + """Copy a single push rule from one room to another for a specific user. Args: new_room_id (str): ID of the new room. @@ -209,14 +209,11 @@ class PushRulesWorkerStore( actions=rule["actions"], ) - # Delete push rule for the old room - yield self.delete_push_rule(user_id, rule["rule_id"]) - @defer.inlineCallbacks - def move_push_rules_from_room_to_room_for_user( + def copy_push_rules_from_room_to_room_for_user( self, old_room_id, new_room_id, user_id ): - """Move all of the push rules from one room to another for a specific + """Copy all of the push rules from one room to another for a specific user. Args: @@ -227,15 +224,14 @@ class PushRulesWorkerStore( # Retrieve push rules for this user user_push_rules = yield self.get_push_rules_for_user(user_id) - # Get rules relating to the old room, move them to the new room, then - # delete them from the old room + # Get rules relating to the old room and copy them to the new room for rule in user_push_rules: conditions = rule.get("conditions", []) if any( (c.get("key") == "room_id" and c.get("pattern") == old_room_id) for c in conditions ): - self.move_push_rule_from_room_to_room(new_room_id, user_id, rule) + yield self.copy_push_rule_from_room_to_room(new_room_id, user_id, rule) @defer.inlineCallbacks def bulk_get_push_rules_for_room(self, event, context): -- cgit 1.4.1 From 24efea338d8643ee15f03b30c080a53320926ee8 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 2 Oct 2019 12:20:03 +0100 Subject: Changelog --- changelog.d/6147.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6147.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6147.bugfix b/changelog.d/6147.bugfix new file mode 100644 index 0000000000..b0f936d280 --- /dev/null +++ b/changelog.d/6147.bugfix @@ -0,0 +1 @@ +Don't 500 when trying to exchange a revoked 3PID invite. 
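Besides renaming the move_* methods to copy_*, the patch above adds a previously missing yield on the store call in RoomMemberHandler. Under @defer.inlineCallbacks, invoking a Deferred-returning method without yield fires it off unawaited: failures are silently dropped and the work is not guaranteed to finish before the caller proceeds. A minimal illustration:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def on_room_upgrade(store, old_room_id, new_room_id, user_id):
        # Wrong: the returned Deferred is discarded, so errors vanish and
        # the copy may still be in flight when this function completes.
        #   store.copy_push_rules_from_room_to_room_for_user(...)
        # Right: yield it, so completion is awaited and failures propagate.
        yield store.copy_push_rules_from_room_to_room_for_user(
            old_room_id, new_room_id, user_id
        )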
-- cgit 1.4.1 From 864f14454322c6cba11476667ade8fc6cbea6f44 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 2 Oct 2019 05:29:01 -0700 Subject: Fix up some typechecking (#6150) * type checking fixes * changelog --- .gitignore | 1 + changelog.d/6150.misc | 1 + synapse/api/errors.py | 3 ++- synapse/api/room_versions.py | 5 ++++- synapse/app/_base.py | 4 +++- synapse/config/appservice.py | 5 +++-- synapse/config/consent_config.py | 4 ++-- synapse/config/password_auth_providers.py | 4 +++- synapse/config/repository.py | 5 +++-- synapse/config/server.py | 10 +++++++--- synapse/config/server_notices_config.py | 4 ++-- synapse/logging/opentracing.py | 9 +++++---- synapse/logging/utils.py | 20 ++++++++++++++++---- synapse/metrics/__init__.py | 4 ++-- synapse/metrics/_exposition.py | 4 ++-- synapse/python_dependencies.py | 17 +++++++++++++---- synapse/types.py | 3 ++- synapse/util/async_helpers.py | 10 +++++++--- synapse/util/caches/__init__.py | 3 ++- synapse/util/caches/descriptors.py | 22 ++++++++++++++++++++-- synapse/util/caches/treecache.py | 4 +++- synapse/util/module_loader.py | 2 +- 22 files changed, 104 insertions(+), 40 deletions(-) create mode 100644 changelog.d/6150.misc (limited to 'changelog.d') diff --git a/.gitignore b/.gitignore index e53d4908d5..747b8714d7 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ *.tac _trial_temp/ _trial_temp*/ +/out # stuff that is likely to exist when you run a server locally /*.db diff --git a/changelog.d/6150.misc b/changelog.d/6150.misc new file mode 100644 index 0000000000..a373c091ab --- /dev/null +++ b/changelog.d/6150.misc @@ -0,0 +1 @@ +Expand type-checking on modules imported by synapse.config. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index cf1ebf1af2..1bb2e86789 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -17,6 +17,7 @@ """Contains exceptions and error codes.""" import logging +from typing import Dict from six import iteritems from six.moves import http_client @@ -111,7 +112,7 @@ class ProxiedRequestError(SynapseError): def __init__(self, code, msg, errcode=Codes.UNKNOWN, additional_fields=None): super(ProxiedRequestError, self).__init__(code, msg, errcode) if additional_fields is None: - self._additional_fields = {} + self._additional_fields = {} # type: Dict else: self._additional_fields = dict(additional_fields) diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 95292b7dec..c6f50fd7b9 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -12,6 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from typing import Dict + import attr @@ -102,4 +105,4 @@ KNOWN_ROOM_VERSIONS = { RoomVersions.V4, RoomVersions.V5, ) -} # type: dict[str, RoomVersion] +} # type: Dict[str, RoomVersion] diff --git a/synapse/app/_base.py b/synapse/app/_base.py index c30fdeee9a..2ac7d5c064 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -263,7 +263,9 @@ def start(hs, listeners=None): refresh_certificate(hs) # Start the tracer - synapse.logging.opentracing.init_tracer(hs.config) + synapse.logging.opentracing.init_tracer( # type: ignore[attr-defined] # noqa + hs.config + ) # It is now safe to start your Synapse. 
hs.start_listening(listeners) diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 8387ff6805..28d36b1bc3 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +from typing import Dict from six import string_types from six.moves.urllib import parse as urlparse @@ -56,8 +57,8 @@ def load_appservices(hostname, config_files): return [] # Dicts of value -> filename - seen_as_tokens = {} - seen_ids = {} + seen_as_tokens = {} # type: Dict[str, str] + seen_ids = {} # type: Dict[str, str] appservices = [] diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py index 94916f3a49..48976e17b1 100644 --- a/synapse/config/consent_config.py +++ b/synapse/config/consent_config.py @@ -73,8 +73,8 @@ DEFAULT_CONFIG = """\ class ConsentConfig(Config): - def __init__(self): - super(ConsentConfig, self).__init__() + def __init__(self, *args): + super(ConsentConfig, self).__init__(*args) self.user_consent_version = None self.user_consent_template_dir = None diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py index 788c39c9fb..c50e244394 100644 --- a/synapse/config/password_auth_providers.py +++ b/synapse/config/password_auth_providers.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any, List + from synapse.util.module_loader import load_module from ._base import Config @@ -22,7 +24,7 @@ LDAP_PROVIDER = "ldap_auth_provider.LdapAuthProvider" class PasswordAuthProviderConfig(Config): def read_config(self, config, **kwargs): - self.password_providers = [] + self.password_providers = [] # type: List[Any] providers = [] # We want to be backwards compatible with the old `ldap_config` diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 52e014608a..14740891f3 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -15,6 +15,7 @@ import os from collections import namedtuple +from typing import Dict, List from synapse.python_dependencies import DependencyException, check_requirements from synapse.util.module_loader import load_module @@ -61,7 +62,7 @@ def parse_thumbnail_requirements(thumbnail_sizes): Dictionary mapping from media type string to list of ThumbnailRequirement tuples. """ - requirements = {} + requirements = {} # type: Dict[str, List] for size in thumbnail_sizes: width = size["width"] height = size["height"] @@ -130,7 +131,7 @@ class ContentRepositoryConfig(Config): # # We don't create the storage providers here as not all workers need # them to be started. - self.media_storage_providers = [] + self.media_storage_providers = [] # type: List[tuple] for provider_config in storage_providers: # We special case the module "file_system" so as not to need to diff --git a/synapse/config/server.py b/synapse/config/server.py index 536ee7f29c..709bd387e5 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -19,6 +19,7 @@ import logging import os.path import re from textwrap import indent +from typing import List import attr import yaml @@ -243,7 +244,7 @@ class ServerConfig(Config): # events with profile information that differ from the target's global profile. 
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True) - self.listeners = [] + self.listeners = [] # type: List[dict] for listener in config.get("listeners", []): if not isinstance(listener.get("port", None), int): raise ConfigError( @@ -287,7 +288,10 @@ class ServerConfig(Config): validator=attr.validators.instance_of(bool), default=False ) complexity = attr.ib( - validator=attr.validators.instance_of((int, float)), default=1.0 + validator=attr.validators.instance_of( + (float, int) # type: ignore[arg-type] # noqa + ), + default=1.0, ) complexity_error = attr.ib( validator=attr.validators.instance_of(str), @@ -366,7 +370,7 @@ class ServerConfig(Config): "cleanup_extremities_with_dummy_events", True ) - def has_tls_listener(self): + def has_tls_listener(self) -> bool: return any(l["tls"] for l in self.listeners) def generate_config_section( diff --git a/synapse/config/server_notices_config.py b/synapse/config/server_notices_config.py index eaac3d73bc..6d4285ef93 100644 --- a/synapse/config/server_notices_config.py +++ b/synapse/config/server_notices_config.py @@ -59,8 +59,8 @@ class ServerNoticesConfig(Config): None if server notices are not enabled. """ - def __init__(self): - super(ServerNoticesConfig, self).__init__() + def __init__(self, *args): + super(ServerNoticesConfig, self).__init__(*args) self.server_notices_mxid = None self.server_notices_mxid_display_name = None self.server_notices_mxid_avatar_url = None diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 308a27213b..cd1ff6a518 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -170,6 +170,7 @@ import inspect import logging import re from functools import wraps +from typing import Dict from canonicaljson import json @@ -547,7 +548,7 @@ def inject_active_span_twisted_headers(headers, destination, check_destination=T return span = opentracing.tracer.active_span - carrier = {} + carrier = {} # type: Dict[str, str] opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier) for key, value in carrier.items(): @@ -584,7 +585,7 @@ def inject_active_span_byte_dict(headers, destination, check_destination=True): span = opentracing.tracer.active_span - carrier = {} + carrier = {} # type: Dict[str, str] opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier) for key, value in carrier.items(): @@ -639,7 +640,7 @@ def get_active_span_text_map(destination=None): if destination and not whitelisted_homeserver(destination): return {} - carrier = {} + carrier = {} # type: Dict[str, str] opentracing.tracer.inject( opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier ) @@ -653,7 +654,7 @@ def active_span_context_as_string(): Returns: The active span context encoded as a string. """ - carrier = {} + carrier = {} # type: Dict[str, str] if opentracing: opentracing.tracer.inject( opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier diff --git a/synapse/logging/utils.py b/synapse/logging/utils.py index 7df0fa6087..6073fc2725 100644 --- a/synapse/logging/utils.py +++ b/synapse/logging/utils.py @@ -119,7 +119,11 @@ def trace_function(f): logger = logging.getLogger(name) level = logging.DEBUG - s = inspect.currentframe().f_back + frame = inspect.currentframe() + if frame is None: + raise Exception("Can't get current frame!") + + s = frame.f_back to_print = [ "\t%s:%s %s. 
Args: args=%s, kwargs=%s" @@ -144,7 +148,7 @@ def trace_function(f): pathname=pathname, lineno=lineno, msg=msg, - args=None, + args=tuple(), exc_info=None, ) @@ -157,7 +161,12 @@ def trace_function(f): def get_previous_frames(): - s = inspect.currentframe().f_back.f_back + + frame = inspect.currentframe() + if frame is None: + raise Exception("Can't get current frame!") + + s = frame.f_back.f_back to_return = [] while s: if s.f_globals["__name__"].startswith("synapse"): @@ -174,7 +183,10 @@ def get_previous_frames(): def get_previous_frame(ignore=[]): - s = inspect.currentframe().f_back.f_back + frame = inspect.currentframe() + if frame is None: + raise Exception("Can't get current frame!") + s = frame.f_back.f_back while s: if s.f_globals["__name__"].startswith("synapse"): diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index bec3b13397..0b45e1f52a 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -125,7 +125,7 @@ class InFlightGauge(object): ) # Counts number of in flight blocks for a given set of label values - self._registrations = {} + self._registrations = {} # type: Dict # Protects access to _registrations self._lock = threading.Lock() @@ -226,7 +226,7 @@ class BucketCollector(object): # Fetch the data -- this must be synchronous! data = self.data_collector() - buckets = {} + buckets = {} # type: Dict[float, int] res = [] for x in data.keys(): diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py index 74d9c3ecd3..a248103191 100644 --- a/synapse/metrics/_exposition.py +++ b/synapse/metrics/_exposition.py @@ -36,9 +36,9 @@ from twisted.web.resource import Resource try: from prometheus_client.samples import Sample except ImportError: - Sample = namedtuple( + Sample = namedtuple( # type: ignore[no-redef] # noqa "Sample", ["name", "labels", "value", "timestamp", "exemplar"] - ) # type: ignore + ) CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8") diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 0bd563edc7..aa7da1c543 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -15,7 +15,7 @@ # limitations under the License. import logging -from typing import Set +from typing import List, Set from pkg_resources import ( DistributionNotFound, @@ -73,6 +73,7 @@ REQUIREMENTS = [ "netaddr>=0.7.18", "Jinja2>=2.9", "bleach>=1.4.3", + "typing-extensions>=3.7.4", ] CONDITIONAL_REQUIREMENTS = { @@ -144,7 +145,11 @@ def check_requirements(for_feature=None): deps_needed.append(dependency) errors.append( "Needed %s, got %s==%s" - % (dependency, e.dist.project_name, e.dist.version) + % ( + dependency, + e.dist.project_name, # type: ignore[attr-defined] # noqa + e.dist.version, # type: ignore[attr-defined] # noqa + ) ) except DistributionNotFound: deps_needed.append(dependency) @@ -159,7 +164,7 @@ def check_requirements(for_feature=None): if not for_feature: # Check the optional dependencies are up to date. We allow them to not be # installed. 
- OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), []) + OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), []) # type: List[str] for dependency in OPTS: try: @@ -168,7 +173,11 @@ def check_requirements(for_feature=None): deps_needed.append(dependency) errors.append( "Needed optional %s, got %s==%s" - % (dependency, e.dist.project_name, e.dist.version) + % ( + dependency, + e.dist.project_name, # type: ignore[attr-defined] # noqa + e.dist.version, # type: ignore[attr-defined] # noqa + ) ) except DistributionNotFound: # If it's not found, we don't care diff --git a/synapse/types.py b/synapse/types.py index 51eadb6ad4..8f79797f17 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -318,6 +318,7 @@ class StreamToken( ) ): _SEPARATOR = "_" + START = None # type: StreamToken @classmethod def from_string(cls, string): @@ -402,7 +403,7 @@ class RoomStreamToken(namedtuple("_StreamToken", "topological stream")): followed by the "stream_ordering" id of the event it comes after. """ - __slots__ = [] + __slots__ = [] # type: list @classmethod def parse(cls, string): diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index f1c46836b1..0d3bdd88ce 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -13,9 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import collections import logging from contextlib import contextmanager +from typing import Dict, Sequence, Set, Union from six.moves import range @@ -213,7 +215,9 @@ class Linearizer(object): # the first element is the number of things executing, and # the second element is an OrderedDict, where the keys are deferreds for the # things blocked from executing. - self.key_to_defer = {} + self.key_to_defer = ( + {} + ) # type: Dict[str, Sequence[Union[int, Dict[defer.Deferred, int]]]] def queue(self, key): # we avoid doing defer.inlineCallbacks here, so that cancellation works correctly. @@ -340,10 +344,10 @@ class ReadWriteLock(object): def __init__(self): # Latest readers queued - self.key_to_current_readers = {} + self.key_to_current_readers = {} # type: Dict[str, Set[defer.Deferred]] # Latest writer queued - self.key_to_current_writer = {} + self.key_to_current_writer = {} # type: Dict[str, defer.Deferred] @defer.inlineCallbacks def read(self, key): diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index b50e3503f0..43fd65d693 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -16,6 +16,7 @@ import logging import os +from typing import Dict import six from six.moves import intern @@ -37,7 +38,7 @@ def get_cache_factor_for(cache_name): caches_by_name = {} -collectors_by_name = {} +collectors_by_name = {} # type: Dict cache_size = Gauge("synapse_util_caches_cache:size", "", ["name"]) cache_hits = Gauge("synapse_util_caches_cache:hits", "", ["name"]) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 43f66ec4be..5ac2530a6a 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -18,10 +18,12 @@ import inspect import logging import threading from collections import namedtuple +from typing import Any, cast from six import itervalues from prometheus_client import Gauge +from typing_extensions import Protocol from twisted.internet import defer @@ -37,6 +39,18 @@ from . 
import register_cache logger = logging.getLogger(__name__) +class _CachedFunction(Protocol): + invalidate = None # type: Any + invalidate_all = None # type: Any + invalidate_many = None # type: Any + prefill = None # type: Any + cache = None # type: Any + num_args = None # type: Any + + def __name__(self): + ... + + cache_pending_metric = Gauge( "synapse_util_caches_cache_pending", "Number of lookups currently pending for this cache", @@ -245,7 +259,9 @@ class Cache(object): class _CacheDescriptorBase(object): - def __init__(self, orig, num_args, inlineCallbacks, cache_context=False): + def __init__( + self, orig: _CachedFunction, num_args, inlineCallbacks, cache_context=False + ): self.orig = orig if inlineCallbacks: @@ -404,7 +420,7 @@ class CacheDescriptor(_CacheDescriptorBase): return tuple(get_cache_key_gen(args, kwargs)) @functools.wraps(self.orig) - def wrapped(*args, **kwargs): + def _wrapped(*args, **kwargs): # If we're passed a cache_context then we'll want to call its invalidate() # whenever we are invalidated invalidate_callback = kwargs.pop("on_invalidate", None) @@ -440,6 +456,8 @@ class CacheDescriptor(_CacheDescriptorBase): return make_deferred_yieldable(observer) + wrapped = cast(_CachedFunction, _wrapped) + if self.num_args == 1: wrapped.invalidate = lambda key: cache.invalidate(key[0]) wrapped.prefill = lambda key, val: cache.prefill(key[0], val) diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 9a72218d85..2ea4e4e911 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -1,3 +1,5 @@ +from typing import Dict + from six import itervalues SENTINEL = object() @@ -12,7 +14,7 @@ class TreeCache(object): def __init__(self): self.size = 0 - self.root = {} + self.root = {} # type: Dict def __setitem__(self, key, value): return self.set(key, value) diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index 7ff7eb1e4d..2705cbe5f8 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -54,5 +54,5 @@ def load_python_module(location: str): if spec is None: raise Exception("Unable to load module at %s" % (location,)) mod = importlib.util.module_from_spec(spec) - spec.loader.exec_module(mod) + spec.loader.exec_module(mod) # type: ignore return mod -- cgit 1.4.1 From a5166e4d5febc0e03ba9da9db99127a797a0bc4d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Oct 2019 14:08:35 +0100 Subject: Land improved room list based on room stats (#6019) Use room_stats and room_state for room directory search --- changelog.d/6019.misc | 1 + synapse/federation/transport/server.py | 8 + synapse/handlers/room_list.py | 323 ++++++--------------- synapse/rest/client/v1/room.py | 8 + synapse/storage/room.py | 228 ++++++++++----- .../schema/delta/56/public_room_list_idx.sql | 16 + tests/handlers/test_roomlist.py | 39 --- 7 files changed, 273 insertions(+), 350 deletions(-) create mode 100644 changelog.d/6019.misc create mode 100644 synapse/storage/schema/delta/56/public_room_list_idx.sql delete mode 100644 tests/handlers/test_roomlist.py (limited to 'changelog.d') diff --git a/changelog.d/6019.misc b/changelog.d/6019.misc new file mode 100644 index 0000000000..dfee73c28f --- /dev/null +++ b/changelog.d/6019.misc @@ -0,0 +1 @@ +Improve performance of the public room list directory. 
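The diffs that follow replace the old room-directory pagination, which sorted every public room in memory and paginated by list index, with keyset pagination over the room_stats tables: batch tokens now carry only the last room ID returned plus a direction. The accompanying "probe one row past the page size" trick, used to decide whether a next_batch token is needed at all, can be sketched independently of the storage layer (rows stands in for the ordered database results):

    def page(rows, limit):
        # Ask for limit + 1 rows; receiving all of them proves that at
        # least one more page exists after this one.
        probing_limit = limit + 1 if limit is not None else None
        results = list(rows[:probing_limit]) if probing_limit else list(rows)
        more_to_come = limit is not None and len(results) == probing_limit
        if limit is not None:
            results = results[:limit]
        next_batch = results[-1]["room_id"] if results and more_to_come else None
        return results, next_batch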
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 7f8a16e355..0f16f21c2d 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -765,6 +765,10 @@ class PublicRoomList(BaseFederationServlet): else: network_tuple = ThirdPartyInstanceID(None, None) + if limit == 0: + # zero is a special value which corresponds to no limit. + limit = None + data = await maybeDeferred( self.handler.get_local_public_room_list, limit, @@ -800,6 +804,10 @@ class PublicRoomList(BaseFederationServlet): if search_filter is None: logger.warning("Nonefilter") + if limit == 0: + # zero is a special value which corresponds to no limit. + limit = None + data = await self.handler.get_local_public_room_list( limit=limit, since_token=since_token, diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index a7e55f00e5..4e1cc5460f 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -16,8 +16,7 @@ import logging from collections import namedtuple -from six import PY3, iteritems -from six.moves import range +from six import iteritems import msgpack from unpaddedbase64 import decode_base64, encode_base64 @@ -27,7 +26,6 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules from synapse.api.errors import Codes, HttpResponseException from synapse.types import ThirdPartyInstanceID -from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.descriptors import cachedInlineCallbacks from synapse.util.caches.response_cache import ResponseCache @@ -37,7 +35,6 @@ logger = logging.getLogger(__name__) REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000 - # This is used to indicate we should only return rooms published to the main list. EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None) @@ -72,6 +69,8 @@ class RoomListHandler(BaseHandler): This can be (None, None) to indicate the main list, or a particular appservice and network id to use an appservice specific one. Setting to None returns all public rooms across all lists. + from_federation (bool): true iff the request comes from the federation + API """ if not self.enable_room_list_search: return defer.succeed({"chunk": [], "total_room_count_estimate": 0}) @@ -133,239 +132,109 @@ class RoomListHandler(BaseHandler): from_federation (bool): Whether this request originated from a federating server or a client. Used for room filtering. timeout (int|None): Amount of seconds to wait for a response before - timing out. + timing out. TODO """ - if since_token and since_token != "END": - since_token = RoomListNextBatch.from_token(since_token) - else: - since_token = None - rooms_to_order_value = {} - rooms_to_num_joined = {} + # Pagination tokens work by storing the room ID sent in the last batch, + # plus the direction (forwards or backwards). Next batch tokens always + # go forwards, prev batch tokens always go backwards. 
- newly_visible = [] - newly_unpublished = [] if since_token: - stream_token = since_token.stream_ordering - current_public_id = yield self.store.get_current_public_room_stream_id() - public_room_stream_id = since_token.public_room_stream_id - newly_visible, newly_unpublished = yield self.store.get_public_room_changes( - public_room_stream_id, current_public_id, network_tuple=network_tuple - ) - else: - stream_token = yield self.store.get_room_max_stream_ordering() - public_room_stream_id = yield self.store.get_current_public_room_stream_id() - - room_ids = yield self.store.get_public_room_ids_at_stream_id( - public_room_stream_id, network_tuple=network_tuple - ) - - # We want to return rooms in a particular order: the number of joined - # users. We then arbitrarily use the room_id as a tie breaker. - - @defer.inlineCallbacks - def get_order_for_room(room_id): - # Most of the rooms won't have changed between the since token and - # now (especially if the since token is "now"). So, we can ask what - # the current users are in a room (that will hit a cache) and then - # check if the room has changed since the since token. (We have to - # do it in that order to avoid races). - # If things have changed then fall back to getting the current state - # at the since token. - joined_users = yield self.store.get_users_in_room(room_id) - if self.store.has_room_changed_since(room_id, stream_token): - latest_event_ids = yield self.store.get_forward_extremeties_for_room( - room_id, stream_token - ) - - if not latest_event_ids: - return + batch_token = RoomListNextBatch.from_token(since_token) - joined_users = yield self.state_handler.get_current_users_in_room( - room_id, latest_event_ids - ) - - num_joined_users = len(joined_users) - rooms_to_num_joined[room_id] = num_joined_users + last_room_id = batch_token.last_room_id + forwards = batch_token.direction_is_forward + else: + batch_token = None - if num_joined_users == 0: - return + last_room_id = None + forwards = True - # We want larger rooms to be first, hence negating num_joined_users - rooms_to_order_value[room_id] = (-num_joined_users, room_id) + # we request one more than wanted to see if there are more pages to come + probing_limit = limit + 1 if limit is not None else None - logger.info( - "Getting ordering for %i rooms since %s", len(room_ids), stream_token + results = yield self.store.get_largest_public_rooms( + network_tuple, + search_filter, + probing_limit, + last_room_id=last_room_id, + forwards=forwards, + ignore_non_federatable=from_federation, ) - yield concurrently_execute(get_order_for_room, room_ids, 10) - sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1]) - sorted_rooms = [room_id for room_id, _ in sorted_entries] + def build_room_entry(room): + entry = { + "room_id": room["room_id"], + "name": room["name"], + "topic": room["topic"], + "canonical_alias": room["canonical_alias"], + "num_joined_members": room["joined_members"], + "avatar_url": room["avatar"], + "world_readable": room["history_visibility"] == "world_readable", + "guest_can_join": room["guest_access"] == "can_join", + } - # `sorted_rooms` should now be a list of all public room ids that is - # stable across pagination. Therefore, we can use indices into this - # list as our pagination tokens. 
+ # Filter out Nones – rather omit the field altogether + return {k: v for k, v in entry.items() if v is not None} - # Filter out rooms that we don't want to return - rooms_to_scan = [ - r - for r in sorted_rooms - if r not in newly_unpublished and rooms_to_num_joined[r] > 0 - ] + results = [build_room_entry(r) for r in results] - total_room_count = len(rooms_to_scan) + response = {} + num_results = len(results) + if limit is not None: + more_to_come = num_results == probing_limit - if since_token: - # Filter out rooms we've already returned previously - # `since_token.current_limit` is the index of the last room we - # sent down, so we exclude it and everything before/after it. - if since_token.direction_is_forward: - rooms_to_scan = rooms_to_scan[since_token.current_limit + 1 :] + # Depending on direction we trim either the front or back. + if forwards: + results = results[:limit] else: - rooms_to_scan = rooms_to_scan[: since_token.current_limit] - rooms_to_scan.reverse() - - logger.info("After sorting and filtering, %i rooms remain", len(rooms_to_scan)) - - # _append_room_entry_to_chunk will append to chunk but will stop if - # len(chunk) > limit - # - # Normally we will generate enough results on the first iteration here, - # but if there is a search filter, _append_room_entry_to_chunk may - # filter some results out, in which case we loop again. - # - # We don't want to scan over the entire range either as that - # would potentially waste a lot of work. - # - # XXX if there is no limit, we may end up DoSing the server with - # calls to get_current_state_ids for every single room on the - # server. Surely we should cap this somehow? - # - if limit: - step = limit + 1 + results = results[-limit:] else: - # step cannot be zero - step = len(rooms_to_scan) if len(rooms_to_scan) != 0 else 1 - - chunk = [] - for i in range(0, len(rooms_to_scan), step): - if timeout and self.clock.time() > timeout: - raise Exception("Timed out searching room directory") - - batch = rooms_to_scan[i : i + step] - logger.info("Processing %i rooms for result", len(batch)) - yield concurrently_execute( - lambda r: self._append_room_entry_to_chunk( - r, - rooms_to_num_joined[r], - chunk, - limit, - search_filter, - from_federation=from_federation, - ), - batch, - 5, - ) - logger.info("Now %i rooms in result", len(chunk)) - if len(chunk) >= limit + 1: - break - - chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"])) - - # Work out the new limit of the batch for pagination, or None if we - # know there are no more results that would be returned. - # i.e., [since_token.current_limit..new_limit] is the batch of rooms - # we've returned (or the reverse if we paginated backwards) - # We tried to pull out limit + 1 rooms above, so if we have <= limit - # then we know there are no more results to return - new_limit = None - if chunk and (not limit or len(chunk) > limit): - - if not since_token or since_token.direction_is_forward: - if limit: - chunk = chunk[:limit] - last_room_id = chunk[-1]["room_id"] + more_to_come = False + + if num_results > 0: + final_room_id = results[-1]["room_id"] + initial_room_id = results[0]["room_id"] + + if forwards: + if batch_token: + # If there was a token given then we assume that there + # must be previous results. 
+ response["prev_batch"] = RoomListNextBatch( + last_room_id=initial_room_id, direction_is_forward=False + ).to_token() + + if more_to_come: + response["next_batch"] = RoomListNextBatch( + last_room_id=final_room_id, direction_is_forward=True + ).to_token() else: - if limit: - chunk = chunk[-limit:] - last_room_id = chunk[0]["room_id"] - - new_limit = sorted_rooms.index(last_room_id) - - results = {"chunk": chunk, "total_room_count_estimate": total_room_count} - - if since_token: - results["new_rooms"] = bool(newly_visible) - - if not since_token or since_token.direction_is_forward: - if new_limit is not None: - results["next_batch"] = RoomListNextBatch( - stream_ordering=stream_token, - public_room_stream_id=public_room_stream_id, - current_limit=new_limit, - direction_is_forward=True, - ).to_token() - - if since_token: - results["prev_batch"] = since_token.copy_and_replace( - direction_is_forward=False, - current_limit=since_token.current_limit + 1, - ).to_token() - else: - if new_limit is not None: - results["prev_batch"] = RoomListNextBatch( - stream_ordering=stream_token, - public_room_stream_id=public_room_stream_id, - current_limit=new_limit, - direction_is_forward=False, - ).to_token() - - if since_token: - results["next_batch"] = since_token.copy_and_replace( - direction_is_forward=True, - current_limit=since_token.current_limit - 1, - ).to_token() - - return results - - @defer.inlineCallbacks - def _append_room_entry_to_chunk( - self, - room_id, - num_joined_users, - chunk, - limit, - search_filter, - from_federation=False, - ): - """Generate the entry for a room in the public room list and append it - to the `chunk` if it matches the search filter - - Args: - room_id (str): The ID of the room. - num_joined_users (int): The number of joined users in the room. - chunk (list) - limit (int|None): Maximum amount of rooms to display. Function will - return if length of chunk is greater than limit + 1. - search_filter (dict|None) - from_federation (bool): Whether this request originated from a - federating server or a client. Used for room filtering. - """ - if limit and len(chunk) > limit + 1: - # We've already got enough, so lets just drop it. - return + if batch_token: + response["next_batch"] = RoomListNextBatch( + last_room_id=final_room_id, direction_is_forward=True + ).to_token() + + if more_to_come: + response["prev_batch"] = RoomListNextBatch( + last_room_id=initial_room_id, direction_is_forward=False + ).to_token() + + for room in results: + # populate search result entries with additional fields, namely + # 'aliases' + room_id = room["room_id"] + + aliases = yield self.store.get_aliases_for_room(room_id) + if aliases: + room["aliases"] = aliases - result = yield self.generate_room_entry(room_id, num_joined_users) - if not result: - return + response["chunk"] = results - if from_federation and not result.get("m.federate", True): - # This is a room that other servers cannot join. Do not show them - # this room. 
- return + response["total_room_count_estimate"] = yield self.store.count_public_rooms( + network_tuple, ignore_non_federatable=from_federation + ) - if _matches_room_entry(result, search_filter): - chunk.append(result) + return response @cachedInlineCallbacks(num_args=1, cache_context=True) def generate_room_entry( @@ -580,32 +449,18 @@ class RoomListNextBatch( namedtuple( "RoomListNextBatch", ( - "stream_ordering", # stream_ordering of the first public room list - "public_room_stream_id", # public room stream id for first public room list - "current_limit", # The number of previous rooms returned + "last_room_id", # The room_id to get rooms after/before "direction_is_forward", # Bool if this is a next_batch, false if prev_batch ), ) ): - - KEY_DICT = { - "stream_ordering": "s", - "public_room_stream_id": "p", - "current_limit": "n", - "direction_is_forward": "d", - } + KEY_DICT = {"last_room_id": "r", "direction_is_forward": "d"} REVERSE_KEY_DICT = {v: k for k, v in KEY_DICT.items()} @classmethod def from_token(cls, token): - if PY3: - # The argument raw=False is only available on new versions of - # msgpack, and only really needed on Python 3. Gate it behind - # a PY3 check to avoid causing issues on Debian-packaged versions. - decoded = msgpack.loads(decode_base64(token), raw=False) - else: - decoded = msgpack.loads(decode_base64(token)) + decoded = msgpack.loads(decode_base64(token), raw=False) return RoomListNextBatch( **{cls.REVERSE_KEY_DICT[key]: val for key, val in decoded.items()} ) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 6bf924dedc..9c1d41421c 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -361,6 +361,10 @@ class PublicRoomListRestServlet(TransactionRestServlet): limit = parse_integer(request, "limit", 0) since_token = parse_string(request, "since", None) + if limit == 0: + # zero is a special value which corresponds to no limit. + limit = None + handler = self.hs.get_room_list_handler() if server: data = yield handler.get_remote_public_room_list( @@ -398,6 +402,10 @@ class PublicRoomListRestServlet(TransactionRestServlet): else: network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id) + if limit == 0: + # zero is a special value which corresponds to no limit. + limit = None + handler = self.hs.get_room_list_handler() if server: data = yield handler.get_remote_public_room_list( diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 08e13f3a3b..c02787a73d 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -63,103 +64,176 @@ class RoomWorkerStore(SQLBaseStore): desc="get_public_room_ids", ) - @cached(num_args=2, max_entries=100) - def get_public_room_ids_at_stream_id(self, stream_id, network_tuple): - """Get pulbic rooms for a particular list, or across all lists. + def count_public_rooms(self, network_tuple, ignore_non_federatable): + """Counts the number of public rooms as tracked in the room_stats_current + and room_stats_state table. Args: - stream_id (int) - network_tuple (ThirdPartyInstanceID): The list to use (None, None) - means the main list, None means all lsits. 
+ network_tuple (ThirdPartyInstanceID|None) + ignore_non_federatable (bool): If true filters out non-federatable rooms """ - return self.runInteraction( - "get_public_room_ids_at_stream_id", - self.get_public_room_ids_at_stream_id_txn, - stream_id, - network_tuple=network_tuple, - ) - - def get_public_room_ids_at_stream_id_txn(self, txn, stream_id, network_tuple): - return { - rm - for rm, vis in self.get_published_at_stream_id_txn( - txn, stream_id, network_tuple=network_tuple - ).items() - if vis - } - def get_published_at_stream_id_txn(self, txn, stream_id, network_tuple): - if network_tuple: - # We want to get from a particular list. No aggregation required. + def _count_public_rooms_txn(txn): + query_args = [] + + if network_tuple: + if network_tuple.appservice_id: + published_sql = """ + SELECT room_id from appservice_room_list + WHERE appservice_id = ? AND network_id = ? + """ + query_args.append(network_tuple.appservice_id) + query_args.append(network_tuple.network_id) + else: + published_sql = """ + SELECT room_id FROM rooms WHERE is_public + """ + else: + published_sql = """ + SELECT room_id FROM rooms WHERE is_public + UNION SELECT room_id from appservice_room_list + """ sql = """ - SELECT room_id, visibility FROM public_room_list_stream - INNER JOIN ( - SELECT room_id, max(stream_id) AS stream_id - FROM public_room_list_stream - WHERE stream_id <= ? %s - GROUP BY room_id - ) grouped USING (room_id, stream_id) - """ + SELECT + COALESCE(COUNT(*), 0) + FROM ( + %(published_sql)s + ) published + INNER JOIN room_stats_state USING (room_id) + INNER JOIN room_stats_current USING (room_id) + WHERE + ( + join_rules = 'public' OR history_visibility = 'world_readable' + ) + AND joined_members > 0 + """ % { + "published_sql": published_sql + } - if network_tuple.appservice_id is not None: - txn.execute( - sql % ("AND appservice_id = ? AND network_id = ?",), - (stream_id, network_tuple.appservice_id, network_tuple.network_id), - ) - else: - txn.execute(sql % ("AND appservice_id IS NULL",), (stream_id,)) - return dict(txn) - else: - # We want to get from all lists, so we need to aggregate the results + txn.execute(sql, query_args) + return txn.fetchone()[0] - logger.info("Executing full list") + return self.runInteraction("count_public_rooms", _count_public_rooms_txn) - sql = """ - SELECT room_id, visibility - FROM public_room_list_stream - INNER JOIN ( - SELECT - room_id, max(stream_id) AS stream_id, appservice_id, - network_id - FROM public_room_list_stream - WHERE stream_id <= ? - GROUP BY room_id, appservice_id, network_id - ) grouped USING (room_id, stream_id) - """ + @defer.inlineCallbacks + def get_largest_public_rooms( + self, + network_tuple, + search_filter, + limit, + last_room_id, + forwards, + ignore_non_federatable=False, + ): + """Gets the largest public rooms (where largest is in terms of joined + members, as tracked in the statistics table). - txn.execute(sql, (stream_id,)) + Args: + network_tuple (ThirdPartyInstanceID|None): + search_filter (dict|None): + limit (int|None): Maxmimum number of rows to return, unlimited otherwise. + last_room_id (str|None): if present, a room ID which bounds the + result set, and is always *excluded* from the result set. + forwards (bool): true iff going forwards, going backwards otherwise + ignore_non_federatable (bool): If true filters out non-federatable rooms. - results = {} - # A room is visible if its visible on any list. 
- for room_id, visibility in txn: - results[room_id] = bool(visibility) or results.get(room_id, False) + Returns: + Rooms in order: biggest number of joined users first. + We then arbitrarily use the room_id as a tie breaker. - return results + """ - def get_public_room_changes(self, prev_stream_id, new_stream_id, network_tuple): - def get_public_room_changes_txn(txn): - then_rooms = self.get_public_room_ids_at_stream_id_txn( - txn, prev_stream_id, network_tuple - ) + where_clauses = [] + query_args = [] - now_rooms_dict = self.get_published_at_stream_id_txn( - txn, new_stream_id, network_tuple - ) + if last_room_id: + if forwards: + where_clauses.append("room_id < ?") + else: + where_clauses.append("? < room_id") - now_rooms_visible = set(rm for rm, vis in now_rooms_dict.items() if vis) - now_rooms_not_visible = set( - rm for rm, vis in now_rooms_dict.items() if not vis + query_args += [last_room_id] + + if search_filter and search_filter.get("generic_search_term", None): + search_term = "%" + search_filter["generic_search_term"] + "%" + + where_clauses.append( + """ + ( + name LIKE ? + OR topic LIKE ? + OR canonical_alias LIKE ? + ) + """ ) + query_args += [search_term, search_term, search_term] + + if network_tuple: + if network_tuple.appservice_id: + published_sql = """ + SELECT room_id from appservice_room_list + WHERE appservice_id = ? AND network_id = ? + """ + query_args.append(network_tuple.appservice_id) + query_args.append(network_tuple.network_id) + else: + published_sql = """ + SELECT room_id FROM rooms WHERE is_public + """ + else: + published_sql = """ + SELECT room_id FROM rooms WHERE is_public + UNION SELECT room_id from appservice_room_list + """ - newly_visible = now_rooms_visible - then_rooms - newly_unpublished = now_rooms_not_visible & then_rooms + where_clause = "" + if where_clauses: + where_clause = " AND " + " AND ".join(where_clauses) + + sql = """ + SELECT + room_id, name, topic, canonical_alias, joined_members, + avatar, history_visibility, joined_members, guest_access + FROM ( + %(published_sql)s + ) published + INNER JOIN room_stats_state USING (room_id) + INNER JOIN room_stats_current USING (room_id) + WHERE + ( + join_rules = 'public' OR history_visibility = 'world_readable' + ) + AND joined_members > 0 + %(where_clause)s + ORDER BY joined_members %(dir)s, room_id %(dir)s + """ % { + "published_sql": published_sql, + "where_clause": where_clause, + "dir": "DESC" if forwards else "ASC", + } - return newly_visible, newly_unpublished + if limit is not None: + query_args.append(limit) - return self.runInteraction( - "get_public_room_changes", get_public_room_changes_txn + sql += """ + LIMIT ? + """ + + def _get_largest_public_rooms_txn(txn): + txn.execute(sql, query_args) + + results = self.cursor_to_dict(txn) + + if not forwards: + results.reverse() + + return results + + ret_val = yield self.runInteraction( + "get_largest_public_rooms", _get_largest_public_rooms_txn ) + defer.returnValue(ret_val) @cached(max_entries=10000) def is_room_blocked(self, room_id): diff --git a/synapse/storage/schema/delta/56/public_room_list_idx.sql b/synapse/storage/schema/delta/56/public_room_list_idx.sql new file mode 100644 index 0000000000..7be31ffebb --- /dev/null +++ b/synapse/storage/schema/delta/56/public_room_list_idx.sql @@ -0,0 +1,16 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE INDEX public_room_list_stream_network ON public_room_list_stream (appservice_id, network_id, room_id); diff --git a/tests/handlers/test_roomlist.py b/tests/handlers/test_roomlist.py deleted file mode 100644 index 61eebb6985..0000000000 --- a/tests/handlers/test_roomlist.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.handlers.room_list import RoomListNextBatch - -import tests.unittest -import tests.utils - - -class RoomListTestCase(tests.unittest.TestCase): - """ Tests RoomList's RoomListNextBatch. """ - - def setUp(self): - pass - - def test_check_read_batch_tokens(self): - batch_token = RoomListNextBatch( - stream_ordering="abcdef", - public_room_stream_id="123", - current_limit=20, - direction_is_forward=True, - ).to_token() - next_batch = RoomListNextBatch.from_token(batch_token) - self.assertEquals(next_batch.stream_ordering, "abcdef") - self.assertEquals(next_batch.public_room_stream_id, "123") - self.assertEquals(next_batch.current_limit, 20) - self.assertEquals(next_batch.direction_is_forward, True) -- cgit 1.4.1 From 8e32240e6b650746d73315178af9aeb6dfa9be94 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Oct 2019 15:12:17 +0100 Subject: Newsfile --- changelog.d/6152.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6152.misc (limited to 'changelog.d') diff --git a/changelog.d/6152.misc b/changelog.d/6152.misc new file mode 100644 index 0000000000..dfee73c28f --- /dev/null +++ b/changelog.d/6152.misc @@ -0,0 +1 @@ +Improve performance of the public room list directory. -- cgit 1.4.1 From ed73f04bef517eddebb3b0f0319d6e3322d1b7ec Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Oct 2019 15:24:33 +0100 Subject: Newsfile --- changelog.d/6153.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6153.misc (limited to 'changelog.d') diff --git a/changelog.d/6153.misc b/changelog.d/6153.misc new file mode 100644 index 0000000000..dfee73c28f --- /dev/null +++ b/changelog.d/6153.misc @@ -0,0 +1 @@ +Improve performance of the public room list directory. 
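The body of RoomListNextBatch.to_token() is not shown in the patch above, but a round-trip consistent with from_token() (msgpack the shortened keys, then unpadded base64) would look roughly like this sketch:

    import msgpack
    from unpaddedbase64 import decode_base64, encode_base64

    KEY_DICT = {"last_room_id": "r", "direction_is_forward": "d"}
    REVERSE_KEY_DICT = {v: k for k, v in KEY_DICT.items()}

    def to_token(last_room_id, direction_is_forward):
        return encode_base64(
            msgpack.dumps({"r": last_room_id, "d": direction_is_forward})
        )

    def from_token(token):
        decoded = msgpack.loads(decode_base64(token), raw=False)
        return {REVERSE_KEY_DICT[k]: v for k, v in decoded.items()}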
-- cgit 1.4.1 From 5be4083306c294ab5905683d32c5fa8c90219c95 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Oct 2019 15:48:36 +0100 Subject: Newsfile --- changelog.d/6154.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6154.misc (limited to 'changelog.d') diff --git a/changelog.d/6154.misc b/changelog.d/6154.misc new file mode 100644 index 0000000000..dfee73c28f --- /dev/null +++ b/changelog.d/6154.misc @@ -0,0 +1 @@ +Improve performance of the public room list directory. -- cgit 1.4.1 From 0ccf0ffc855f8d12f16598af77f356f236096994 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 Oct 2019 17:12:24 +0100 Subject: Newsfile --- changelog.d/6159.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6159.misc (limited to 'changelog.d') diff --git a/changelog.d/6159.misc b/changelog.d/6159.misc new file mode 100644 index 0000000000..06cc163f8b --- /dev/null +++ b/changelog.d/6159.misc @@ -0,0 +1 @@ +Add more caching to `_get_joined_users_from_context` DB query. -- cgit 1.4.1 From 4fc60f12deef19407e9b761f3d9c24c48384118c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 Oct 2019 17:35:50 +0100 Subject: Newsfile --- changelog.d/6161.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6161.misc (limited to 'changelog.d') diff --git a/changelog.d/6161.misc b/changelog.d/6161.misc new file mode 100644 index 0000000000..7c5d61cb86 --- /dev/null +++ b/changelog.d/6161.misc @@ -0,0 +1 @@ +Don't regenerate numeric user ID if registration fails. -- cgit 1.4.1 From 0186ec9df7e55e35fa9b6579869cd308dc178a3c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 Oct 2019 17:46:43 +0100 Subject: Fixup newsfile --- changelog.d/6161.bugfix | 1 + changelog.d/6161.misc | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/6161.bugfix delete mode 100644 changelog.d/6161.misc (limited to 'changelog.d') diff --git a/changelog.d/6161.bugfix b/changelog.d/6161.bugfix new file mode 100644 index 0000000000..a0e2adb979 --- /dev/null +++ b/changelog.d/6161.bugfix @@ -0,0 +1 @@ +Fix bug where guest account registration can wedge after restart. diff --git a/changelog.d/6161.misc b/changelog.d/6161.misc deleted file mode 100644 index 7c5d61cb86..0000000000 --- a/changelog.d/6161.misc +++ /dev/null @@ -1 +0,0 @@ -Don't regenerate numeric user ID if registration fails. -- cgit 1.4.1 From 66537e10ce77e47fac52e3f27569ac1ef0f1aaa3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 3 Oct 2019 17:47:20 +0100 Subject: add some metrics on the federation sender (#6160) --- changelog.d/6160.misc | 1 + synapse/federation/sender/__init__.py | 11 ++++++----- synapse/state/__init__.py | 24 ++++++++++++++++++------ synapse/storage/roommember.py | 21 +++++++++++++++------ synapse/util/metrics.py | 6 ++++-- 5 files changed, 44 insertions(+), 19 deletions(-) create mode 100644 changelog.d/6160.misc (limited to 'changelog.d') diff --git a/changelog.d/6160.misc b/changelog.d/6160.misc new file mode 100644 index 0000000000..3d7cce00e1 --- /dev/null +++ b/changelog.d/6160.misc @@ -0,0 +1 @@ +Add some metrics on the federation sender. 
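The diffs that follow instrument the federation sender's hot paths: measure_func now defaults its metric-block name to the wrapped function's name, and Measure is used directly as a context manager around inner loops. Both rely on the instance exposing self.clock. A usage sketch, with handle_event as a hypothetical stand-in for the real per-event work:

    from twisted.internet import defer

    from synapse.util.metrics import Measure, measure_func

    def handle_event(event):
        return defer.succeed(None)  # stand-in for the real handler

    class Worker(object):
        def __init__(self, hs):
            self.clock = hs.get_clock()

        @measure_func()  # metric block is named "process_events"
        @defer.inlineCallbacks
        def process_events(self, events):
            with Measure(self.clock, "handle_room_events"):
                for event in events:
                    yield handle_event(event)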
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index d46f4aaeb1..2b2ee8612a 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -38,7 +38,7 @@ from synapse.metrics import ( events_processed_counter, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util.metrics import measure_func +from synapse.util.metrics import Measure, measure_func logger = logging.getLogger(__name__) @@ -183,8 +183,8 @@ class FederationSender(object): # Otherwise if the last member on a server in a room is # banned then it won't receive the event because it won't # be in the room after the ban. - destinations = yield self.state.get_current_hosts_in_room( - event.room_id, latest_event_ids=event.prev_event_ids() + destinations = yield self.state.get_hosts_in_room_at_events( + event.room_id, event_ids=event.prev_event_ids() ) except Exception: logger.exception( @@ -207,8 +207,9 @@ class FederationSender(object): @defer.inlineCallbacks def handle_room_events(events): - for event in events: - yield handle_event(event) + with Measure(self.clock, "handle_room_events"): + for event in events: + yield handle_event(event) events_by_room = {} for event in events: diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 2b0f4c79ee..dc9f5a9008 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -33,7 +33,7 @@ from synapse.state import v1, v2 from synapse.util.async_helpers import Linearizer from synapse.util.caches import get_cache_factor_for from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.metrics import Measure +from synapse.util.metrics import Measure, measure_func logger = logging.getLogger(__name__) @@ -191,11 +191,22 @@ class StateHandler(object): return joined_users @defer.inlineCallbacks - def get_current_hosts_in_room(self, room_id, latest_event_ids=None): - if not latest_event_ids: - latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id) - logger.debug("calling resolve_state_groups from get_current_hosts_in_room") - entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids) + def get_current_hosts_in_room(self, room_id): + event_ids = yield self.store.get_latest_event_ids_in_room(room_id) + return (yield self.get_hosts_in_room_at_events(room_id, event_ids)) + + @defer.inlineCallbacks + def get_hosts_in_room_at_events(self, room_id, event_ids): + """Get the hosts that were in a room at the given event ids + + Args: + room_id (str): + event_ids (list[str]): + + Returns: + Deferred[list[str]]: the hosts in the room at the given events + """ + entry = yield self.resolve_state_groups_for_events(room_id, event_ids) joined_hosts = yield self.store.get_joined_hosts(room_id, entry) return joined_hosts @@ -344,6 +355,7 @@ class StateHandler(object): return context + @measure_func() @defer.inlineCallbacks def resolve_state_groups_for_events(self, room_id, event_ids): """ Given a list of event_ids this method fetches the state at each diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 4df8ebdacd..1550d827ba 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -33,6 +33,7 @@ from synapse.types import get_domain_from_id from synapse.util.async_helpers import Linearizer from synapse.util.caches import intern_string from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +from synapse.util.metrics import Measure from 
synapse.util.stringutils import to_ascii logger = logging.getLogger(__name__) @@ -483,6 +484,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): ) return result + @defer.inlineCallbacks def get_joined_users_from_state(self, room_id, state_entry): state_group = state_entry.state_group if not state_group: @@ -492,9 +494,12 @@ class RoomMemberWorkerStore(EventsWorkerStore): # To do this we set the state_group to a new object as object() != object() state_group = object() - return self._get_joined_users_from_context( - room_id, state_group, state_entry.state, context=state_entry - ) + with Measure(self._clock, "get_joined_users_from_state"): + return ( + yield self._get_joined_users_from_context( + room_id, state_group, state_entry.state, context=state_entry + ) + ) @cachedInlineCallbacks( num_args=2, cache_context=True, iterable=True, max_entries=100000 @@ -669,6 +674,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): return True + @defer.inlineCallbacks def get_joined_hosts(self, room_id, state_entry): state_group = state_entry.state_group if not state_group: @@ -678,9 +684,12 @@ class RoomMemberWorkerStore(EventsWorkerStore): # To do this we set the state_group to a new object as object() != object() state_group = object() - return self._get_joined_hosts( - room_id, state_group, state_entry.state, state_entry=state_entry - ) + with Measure(self._clock, "get_joined_hosts"): + return ( + yield self._get_joined_hosts( + room_id, state_group, state_entry.state, state_entry=state_entry + ) + ) @cachedInlineCallbacks(num_args=2, max_entries=10000, iterable=True) # @defer.inlineCallbacks diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 0910930c21..4b1bcdf23c 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -60,12 +60,14 @@ in_flight = InFlightGauge( ) -def measure_func(name): +def measure_func(name=None): def wrapper(func): + block_name = func.__name__ if name is None else name + @wraps(func) @defer.inlineCallbacks def measured_func(self, *args, **kwargs): - with Measure(self.clock, name): + with Measure(self.clock, block_name): r = yield func(self, *args, **kwargs) return r -- cgit 1.4.1 From 39b40d6d9989b09de39da5b6d3f85ee535e41138 Mon Sep 17 00:00:00 2001 From: Robert Swain Date: Fri, 4 Oct 2019 10:34:52 +0200 Subject: media/thumbnailer: Better quality for 1-bit / 8-bit color palette images (#2142) Pillow will use nearest neighbour as the resampling algorithm if the source image is either 1-bit or a color palette using 8 bits. If we convert to RGB before scaling, we'll probably get a better result. --- changelog.d/2142.feature | 1 + synapse/rest/media/v1/thumbnailer.py | 14 +++++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 changelog.d/2142.feature (limited to 'changelog.d') diff --git a/changelog.d/2142.feature b/changelog.d/2142.feature new file mode 100644 index 0000000000..e21e8325e1 --- /dev/null +++ b/changelog.d/2142.feature @@ -0,0 +1 @@ +Improve quality of thumbnails for 1-bit/8-bit color palette images. 
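The thumbnailer change below exists because Pillow falls back to nearest-neighbour resampling for mode "1" (1-bit) and mode "P" (8-bit palette) images; promoting them to RGB first lets a proper filter run. The same idea as a standalone sketch (the file name and sizes are illustrative):

    from PIL import Image

    def scale_smoothly(path, width, height):
        image = Image.open(path)
        # 1-bit and 8-bit palette images resample badly, so convert
        # to RGB before scaling, as the patched _resize() does.
        if image.mode in ["1", "P"]:
            image = image.convert("RGB")
        return image.resize((width, height), Image.ANTIALIAS)

    thumbnail = scale_smoothly("avatar.png", 96, 96)  # hypothetical input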
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index c995d7e043..8cf415e29d 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -82,13 +82,21 @@ class Thumbnailer(object): else: return (max_height * self.width) // self.height, max_height + def _resize(self, width, height): + # 1-bit or 8-bit color palette images need converting to RGB + # otherwise they will be scaled using nearest neighbour which + # looks awful + if self.image.mode in ["1", "P"]: + self.image = self.image.convert("RGB") + return self.image.resize((width, height), Image.ANTIALIAS) + def scale(self, width, height, output_type): """Rescales the image to the given dimensions. Returns: BytesIO: the bytes of the encoded image ready to be written to disk """ - scaled = self.image.resize((width, height), Image.ANTIALIAS) + scaled = self._resize(width, height) return self._encode_image(scaled, output_type) def crop(self, width, height, output_type): @@ -107,13 +115,13 @@ class Thumbnailer(object): """ if width * self.height > height * self.width: scaled_height = (width * self.height) // self.width - scaled_image = self.image.resize((width, scaled_height), Image.ANTIALIAS) + scaled_image = self._resize(width, scaled_height) crop_top = (scaled_height - height) // 2 crop_bottom = height + crop_top cropped = scaled_image.crop((0, crop_top, width, crop_bottom)) else: scaled_width = (height * self.width) // self.height - scaled_image = self.image.resize((scaled_width, height), Image.ANTIALIAS) + scaled_image = self._resize(scaled_width, height) crop_left = (scaled_width - width) // 2 crop_right = width + crop_left cropped = scaled_image.crop((crop_left, 0, crop_right, height)) -- cgit 1.4.1 From 13c4345c844e75b0d1a4ce66e4fb2eb9820cb7f6 Mon Sep 17 00:00:00 2001 From: Alexander Maznev Date: Fri, 4 Oct 2019 04:34:16 -0500 Subject: Update `user_filters` table to have a unique index, and non-null columns (#1172) --- changelog.d/1172.misc | 1 + .../schema/delta/56/unique_user_filter_index.py | 46 ++++++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 changelog.d/1172.misc create mode 100644 synapse/storage/schema/delta/56/unique_user_filter_index.py (limited to 'changelog.d') diff --git a/changelog.d/1172.misc b/changelog.d/1172.misc new file mode 100644 index 0000000000..30b3e56082 --- /dev/null +++ b/changelog.d/1172.misc @@ -0,0 +1 @@ +Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this. 
\ No newline at end of file diff --git a/synapse/storage/schema/delta/56/unique_user_filter_index.py b/synapse/storage/schema/delta/56/unique_user_filter_index.py new file mode 100644 index 0000000000..4efc1a586f --- /dev/null +++ b/synapse/storage/schema/delta/56/unique_user_filter_index.py @@ -0,0 +1,46 @@ +import logging + +from synapse.storage.engines import PostgresEngine + +logger = logging.getLogger(__name__) + + +def run_upgrade(cur, database_engine, *args, **kwargs): + if isinstance(database_engine, PostgresEngine): + select_clause = """ + CREATE TEMPORARY TABLE user_filters_migration AS + SELECT DISTINCT ON (user_id, filter_id) user_id, filter_id, filter_json + FROM user_filters; + """ + else: + select_clause = """ + CREATE TEMPORARY TABLE user_filters_migration AS + SELECT * FROM user_filters GROUP BY user_id, filter_id; + """ + sql = ( + """ + BEGIN; + %s + DROP INDEX user_filters_by_user_id_filter_id; + DELETE FROM user_filters; + ALTER TABLE user_filters + ALTER COLUMN user_id SET NOT NULL + ALTER COLUMN filter_id SET NOT NULL + ALTER COLUMN filter_json SET NOT NULL; + INSERT INTO user_filters(user_id, filter_id, filter_json) + SELECT * FROM user_filters_migration; + DROP TABLE user_filters_migration; + CREATE UNIQUE INDEX user_filters_by_user_id_filter_id_unique + ON user_filters(user_id, filter_id); + END; + """ + % select_clause + ) + if isinstance(database_engine, PostgresEngine): + cur.execute(sql) + else: + cur.executescript(sql) + + +def run_create(cur, database_engine, *args, **kwargs): + pass -- cgit 1.4.1 From aa7a003074e4e42c4ac8a571d2cd18ecfea3990f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 7 Oct 2019 13:16:54 +0100 Subject: Changelog --- changelog.d/6175.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6175.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6175.bugfix b/changelog.d/6175.bugfix new file mode 100644 index 0000000000..3cd9a99edf --- /dev/null +++ b/changelog.d/6175.bugfix @@ -0,0 +1 @@ +Fix syntax error in unique_user_filter_index schema update. -- cgit 1.4.1 From 276ae5c63eaef656d486e190298f7a5ec99a7a5b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 7 Oct 2019 14:41:39 +0100 Subject: add some logging to the rooms stats updates, to try to track down a flaky test (#6167) --- changelog.d/6167.misc | 1 + synapse/handlers/stats.py | 1 + synapse/storage/stats.py | 3 +++ 3 files changed, 5 insertions(+) create mode 100644 changelog.d/6167.misc (limited to 'changelog.d') diff --git a/changelog.d/6167.misc b/changelog.d/6167.misc new file mode 100644 index 0000000000..32c96b3681 --- /dev/null +++ b/changelog.d/6167.misc @@ -0,0 +1 @@ +Add some logging to the rooms stats updates, to try to track down a flaky test. 
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index cbac7c347a..c62b113115 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -293,6 +293,7 @@ class StatsHandler(StateDeltasHandler): room_state["guest_access"] = event_content.get("guest_access") for room_id, state in room_to_state_updates.items(): + logger.info("Updating room_stats_state for %s: %s", room_id, state) yield self.store.update_room_state(room_id, state) return room_to_stats_deltas, user_to_stats_deltas diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 09190d684e..7c224cd3d9 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -332,6 +332,9 @@ class StatsStore(StateDeltasStore): def _bulk_update_stats_delta_txn(txn): for stats_type, stats_updates in updates.items(): for stats_id, fields in stats_updates.items(): + logger.info( + "Updating %s stats for %s: %s", stats_type, stats_id, fields + ) self._update_stats_delta_txn( txn, ts=ts, -- cgit 1.4.1 From 1992f21a9fa00a37963bb6ac11d0e678cc08557e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 7 Oct 2019 14:54:36 +0100 Subject: Fix changelog for PR #6175 --- changelog.d/6175.bugfix | 1 - changelog.d/6175.misc | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/6175.bugfix create mode 100644 changelog.d/6175.misc (limited to 'changelog.d') diff --git a/changelog.d/6175.bugfix b/changelog.d/6175.bugfix deleted file mode 100644 index 3cd9a99edf..0000000000 --- a/changelog.d/6175.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix syntax error in unique_user_filter_index schema update. diff --git a/changelog.d/6175.misc b/changelog.d/6175.misc new file mode 100644 index 0000000000..5bb24f02fc --- /dev/null +++ b/changelog.d/6175.misc @@ -0,0 +1 @@ +Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this -- cgit 1.4.1 From dc795ba709f2ffe41671d25d94f21d4b31a5301d Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Mon, 7 Oct 2019 15:41:25 +0100 Subject: Log responder we are using. (#6139) This prevents us logging "Responding to media request with responder %s". --- changelog.d/6139.misc | 1 + synapse/rest/media/v1/_base.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6139.misc (limited to 'changelog.d') diff --git a/changelog.d/6139.misc b/changelog.d/6139.misc new file mode 100644 index 0000000000..d4b65e7af8 --- /dev/null +++ b/changelog.d/6139.misc @@ -0,0 +1 @@ +Log responder when responding to media request. 
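The one-line fix below (#6139) supplies the argument that the lazy %-style log call was missing, so the responder actually appears in the log instead of a literal placeholder:

    import logging

    logger = logging.getLogger(__name__)
    responder = "<FileResponder>"  # hypothetical value

    logger.debug("Responding to media request with responder %s")
    # logs the literal text: Responding to media request with responder %s

    logger.debug("Responding to media request with responder %s", responder)
    # logs: Responding to media request with responder <FileResponder>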
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 5fefee4dde..65bbf00073 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -195,7 +195,7 @@ def respond_with_responder(request, responder, media_type, file_size, upload_nam respond_404(request) return - logger.debug("Responding to media request with responder %s") + logger.debug("Responding to media request with responder %s", responder) add_file_headers(request, media_type, file_size, upload_name) try: with responder: -- cgit 1.4.1 From cc2e19ad4b4fb55306f060354f74d1750e4b6001 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 7 Oct 2019 17:37:55 +0100 Subject: fix changelog --- changelog.d/6175.misc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'changelog.d') diff --git a/changelog.d/6175.misc b/changelog.d/6175.misc index 5bb24f02fc..30b3e56082 100644 --- a/changelog.d/6175.misc +++ b/changelog.d/6175.misc @@ -1 +1 @@ -Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this +Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this. \ No newline at end of file -- cgit 1.4.1 From 21b5d8b1076354c7c6ee8849491f3fc886cc8189 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 7 Oct 2019 18:00:31 +0100 Subject: Changelog --- changelog.d/6178.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6178.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6178.bugfix b/changelog.d/6178.bugfix new file mode 100644 index 0000000000..cd288c2a44 --- /dev/null +++ b/changelog.d/6178.bugfix @@ -0,0 +1 @@ +Make the `synapse_port_db` script create the right indexes on a new PostgreSQL database. -- cgit 1.4.1 From b94a401852a5b6d87455285ea050c4e0731dd6ab Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 8 Oct 2019 09:35:37 +0100 Subject: Fix /federation/v1/state for recent room versions (#6170) * Fix /federation/v1/state for recent room versions Turns out this endpoint was completely broken for v3 rooms. Hopefully this re-signing code is irrelevant nowadays anyway. --- changelog.d/6170.bugfix | 1 + synapse/federation/federation_server.py | 13 ------------- 2 files changed, 1 insertion(+), 13 deletions(-) create mode 100644 changelog.d/6170.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6170.bugfix b/changelog.d/6170.bugfix new file mode 100644 index 0000000000..52f7ea233c --- /dev/null +++ b/changelog.d/6170.bugfix @@ -0,0 +1 @@ +Fix /federation/v1/state endpoint for recent room versions. 
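Some context on why the re-signing block removed below could not work for recent room versions — this is a hedged editorial gloss on the commit message, not text from the patch: `is_mine_id` inspects the domain part of an ID, but room-v3-and-later event IDs are plain hashes with no domain, so the check cannot succeed for them. Illustrative sketch with made-up IDs:

    def has_domain(event_id):
        # v1/v2 event IDs carry their origin server; v3+ IDs are hashes.
        return ":" in event_id

    has_domain("$1443719536791abcde:example.org")               # True  (v1/v2 style)
    has_domain("$CD66HAED5npg6074c6pDtLKalHjVfYb2q4Q3LZgrW6o")  # False (v3+ style)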
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index da06ab379d..21e52c9695 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -36,7 +36,6 @@ from synapse.api.errors import ( UnsupportedRoomVersionError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS -from synapse.crypto.event_signing import compute_event_signature from synapse.events import room_version_to_event_format from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.federation.persistence import TransactionActions @@ -322,18 +321,6 @@ class FederationServer(FederationBase): pdus = yield self.handler.get_state_for_pdu(room_id, event_id) auth_chain = yield self.store.get_auth_chain([pdu.event_id for pdu in pdus]) - for event in auth_chain: - # We sign these again because there was a bug where we - # incorrectly signed things the first time round - if self.hs.is_mine_id(event.event_id): - event.signatures.update( - compute_event_signature( - event.get_pdu_json(), - self.hs.hostname, - self.hs.config.signing_key[0], - ) - ) - return { "pdus": [pdu.get_pdu_json() for pdu in pdus], "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], -- cgit 1.4.1 From ea7d938bca2d5fa0d6a54412ecdf036c5a3fc3a7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 8 Oct 2019 13:51:25 +0100 Subject: Remove unused public room list timeout param (#6179) * Remove unused public room list timeout param * Add changelog --- changelog.d/6179.misc | 1 + synapse/handlers/room_list.py | 13 +------------ 2 files changed, 2 insertions(+), 12 deletions(-) create mode 100644 changelog.d/6179.misc (limited to 'changelog.d') diff --git a/changelog.d/6179.misc b/changelog.d/6179.misc new file mode 100644 index 0000000000..01c4e71ea3 --- /dev/null +++ b/changelog.d/6179.misc @@ -0,0 +1 @@ +Remove unused `timeout` parameter from `_get_public_room_list`. \ No newline at end of file diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index cfed344d4d..c615206df1 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -88,16 +88,8 @@ class RoomListHandler(BaseHandler): # appservice specific lists. logger.info("Bypassing cache as search request.") - # XXX: Quick hack to stop room directory queries taking too long. - # Timeout request after 60s. Probably want a more fundamental - # solution at some point - timeout = self.clock.time() + 60 return self._get_public_room_list( - limit, - since_token, - search_filter, - network_tuple=network_tuple, - timeout=timeout, + limit, since_token, search_filter, network_tuple=network_tuple ) key = (limit, since_token, network_tuple) @@ -118,7 +110,6 @@ class RoomListHandler(BaseHandler): search_filter=None, network_tuple=EMPTY_THIRD_PARTY_ID, from_federation=False, - timeout=None, ): """Generate a public room list. Args: @@ -131,8 +122,6 @@ class RoomListHandler(BaseHandler): Setting to None returns all public rooms across all lists. from_federation (bool): Whether this request originated from a federating server or a client. Used for room filtering. - timeout (int|None): Amount of seconds to wait for a response before - timing out. 
TODO """ # Pagination tokens work by storing the room ID sent in the last batch, -- cgit 1.4.1 From 474abf1eb6852ca488fbf86d3da0622a457efef1 Mon Sep 17 00:00:00 2001 From: Anshul Angaria Date: Tue, 8 Oct 2019 18:25:16 +0530 Subject: add M_TOO_LARGE error code for uploading a too large file (#6151) Fixes #6109 --- changelog.d/6109.bugfix | 1 + synapse/rest/media/v1/upload_resource.py | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6109.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6109.bugfix b/changelog.d/6109.bugfix new file mode 100644 index 0000000000..da7ac1be4e --- /dev/null +++ b/changelog.d/6109.bugfix @@ -0,0 +1 @@ +Fix bug when uploading a large file: Synapse responds with `M_UNKNOWN` while it should be `M_TOO_LARGE` according to spec. Contributed by Anshul Angaria. diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 5d76bbdf68..83d005812d 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -17,7 +17,7 @@ import logging from twisted.web.server import NOT_DONE_YET -from synapse.api.errors import SynapseError +from synapse.api.errors import Codes, SynapseError from synapse.http.server import ( DirectServeResource, respond_with_json, @@ -56,7 +56,11 @@ class UploadResource(DirectServeResource): if content_length is None: raise SynapseError(msg="Request must specify a Content-Length", code=400) if int(content_length) > self.max_upload_size: - raise SynapseError(msg="Upload request body is too large", code=413) + raise SynapseError( + msg="Upload request body is too large", + code=413, + errcode=Codes.TOO_LARGE, + ) upload_name = parse_string(request, b"filename", encoding=None) if upload_name: -- cgit 1.4.1 From 6b72508d15b2a074fb35e68cec45700f10ea09f2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Oct 2019 15:03:28 +0100 Subject: Newsfile --- changelog.d/6185.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6185.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6185.bugfix b/changelog.d/6185.bugfix new file mode 100644 index 0000000000..199ec69032 --- /dev/null +++ b/changelog.d/6185.bugfix @@ -0,0 +1 @@ +Fix bug where we were updating censored events as bytes rather than text, occaisonally causing invalid JSON being inserted breaking APIs that attempted to fetch such events. -- cgit 1.4.1 From de26678724cd5c19dcc77c0d55fd89320cee38d4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 Oct 2019 15:13:02 +0100 Subject: Update changelog.d/6185.bugfix Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/6185.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'changelog.d') diff --git a/changelog.d/6185.bugfix b/changelog.d/6185.bugfix index 199ec69032..9d1c669b88 100644 --- a/changelog.d/6185.bugfix +++ b/changelog.d/6185.bugfix @@ -1 +1 @@ -Fix bug where we were updating censored events as bytes rather than text, occaisonally causing invalid JSON being inserted breaking APIs that attempted to fetch such events. +Fix bug where redacted events were sometimes incorrectly censored in the database, breaking APIs that attempted to fetch such events. 
-- cgit 1.4.1 From c3b34dc32f85ed0b526dde1ed3d61316a8f461d8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 Oct 2019 16:32:04 +0100 Subject: Newsfile --- changelog.d/6186.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6186.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6186.bugfix b/changelog.d/6186.bugfix new file mode 100644 index 0000000000..199ec69032 --- /dev/null +++ b/changelog.d/6186.bugfix @@ -0,0 +1 @@ +Fix bug where we were updating censored events as bytes rather than text, occaisonally causing invalid JSON being inserted breaking APIs that attempted to fetch such events. -- cgit 1.4.1 From b46cc856ec9f8ac8c96199a5291dfa71cd37ee86 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Wed, 9 Oct 2019 18:03:40 -0400 Subject: add changelog --- changelog.d/6189.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6189.misc (limited to 'changelog.d') diff --git a/changelog.d/6189.misc b/changelog.d/6189.misc new file mode 100644 index 0000000000..a66eb384e6 --- /dev/null +++ b/changelog.d/6189.misc @@ -0,0 +1 @@ +Make `version` optional in body of `PUT /room_keys/version/{version}`, since it's redundant. -- cgit 1.4.1 From f743108a94658eb1dbaf168d39874272f756a386 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 10 Oct 2019 09:39:35 +0100 Subject: Refactor HomeserverConfig so it can be typechecked (#6137) --- changelog.d/6137.misc | 1 + mypy.ini | 16 ++- synapse/config/_base.py | 191 +++++++++++++++++++++++------- synapse/config/_base.pyi | 135 +++++++++++++++++++++ synapse/config/api.py | 2 + synapse/config/appservice.py | 2 + synapse/config/captcha.py | 2 + synapse/config/cas.py | 2 + synapse/config/consent_config.py | 3 + synapse/config/database.py | 2 + synapse/config/emailconfig.py | 2 + synapse/config/groups.py | 2 + synapse/config/homeserver.py | 68 +++++------ synapse/config/jwt_config.py | 2 + synapse/config/key.py | 2 + synapse/config/logger.py | 2 + synapse/config/metrics.py | 2 + synapse/config/password.py | 2 + synapse/config/password_auth_providers.py | 2 + synapse/config/push.py | 2 + synapse/config/ratelimiting.py | 2 + synapse/config/registration.py | 4 + synapse/config/repository.py | 2 + synapse/config/room_directory.py | 2 + synapse/config/saml2_config.py | 2 + synapse/config/server.py | 2 + synapse/config/server_notices_config.py | 2 + synapse/config/spam_checker.py | 2 + synapse/config/stats.py | 2 + synapse/config/third_party_event_rules.py | 2 + synapse/config/tls.py | 9 +- synapse/config/tracer.py | 2 + synapse/config/user_directory.py | 2 + synapse/config/voip.py | 2 + synapse/config/workers.py | 2 + tests/config/test_tls.py | 25 ++-- tox.ini | 3 +- 37 files changed, 415 insertions(+), 94 deletions(-) create mode 100644 changelog.d/6137.misc create mode 100644 synapse/config/_base.pyi (limited to 'changelog.d') diff --git a/changelog.d/6137.misc b/changelog.d/6137.misc new file mode 100644 index 0000000000..92a02e71c3 --- /dev/null +++ b/changelog.d/6137.misc @@ -0,0 +1 @@ +Refactor configuration loading to allow better typechecking. 
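The #6137 refactor below replaces the single multiply-inherited `HomeServerConfig` with a `RootConfig` that owns one `Config` object per named `section`, while `__getattr__` keeps legacy flat attribute lookups working. A minimal sketch of the resulting access pattern — the section name and key are hypothetical, and `read_config` is called by hand here instead of via `parse_config_dict`:

    from synapse.config._base import Config, RootConfig

    class ExampleConfig(Config):
        section = "example"  # hypothetical section name

        def read_config(self, config, **kwargs):
            self.example_value = config.get("example_value", 42)

    class ExampleRootConfig(RootConfig):
        config_classes = [ExampleConfig]

    root = ExampleRootConfig()
    root.example.read_config({})        # values live on the section object
    assert root.example.example_value == 42
    assert root.example_value == 42     # legacy flat lookup still resolves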
diff --git a/mypy.ini b/mypy.ini index 8788574ee3..ffadaddc0b 100644 --- a/mypy.ini +++ b/mypy.ini @@ -4,10 +4,6 @@ plugins=mypy_zope:plugin follow_imports=skip mypy_path=stubs -[mypy-synapse.config.homeserver] -# this is a mess because of the metaclass shenanigans -ignore_errors = True - [mypy-zope] ignore_missing_imports = True @@ -52,3 +48,15 @@ ignore_missing_imports = True [mypy-signedjson.*] ignore_missing_imports = True + +[mypy-prometheus_client.*] +ignore_missing_imports = True + +[mypy-service_identity.*] +ignore_missing_imports = True + +[mypy-daemonize] +ignore_missing_imports = True + +[mypy-sentry_sdk] +ignore_missing_imports = True diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 31f6530978..08619404bb 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -18,7 +18,9 @@ import argparse import errno import os +from collections import OrderedDict from textwrap import dedent +from typing import Any, MutableMapping, Optional from six import integer_types @@ -51,7 +53,56 @@ Missing mandatory `server_name` config option. """ +def path_exists(file_path): + """Check if a file exists + + Unlike os.path.exists, this throws an exception if there is an error + checking if the file exists (for example, if there is a perms error on + the parent dir). + + Returns: + bool: True if the file exists; False if not. + """ + try: + os.stat(file_path) + return True + except OSError as e: + if e.errno != errno.ENOENT: + raise e + return False + + class Config(object): + """ + A configuration section, containing configuration keys and values. + + Attributes: + section (str): The section title of this config object, such as + "tls" or "logger". This is used to refer to it on the root + logger (for example, `config.tls.some_option`). Must be + defined in subclasses. + """ + + section = None + + def __init__(self, root_config=None): + self.root = root_config + + def __getattr__(self, item: str) -> Any: + """ + Try and fetch a configuration option that does not exist on this class. + + This is so that existing configs that rely on `self.value`, where value + is actually from a different config section, continue to work. + """ + if item in ["generate_config_section", "read_config"]: + raise AttributeError(item) + + if self.root is None: + raise AttributeError(item) + else: + return self.root._get_unclassed_config(self.section, item) + @staticmethod def parse_size(value): if isinstance(value, integer_types): @@ -88,22 +139,7 @@ class Config(object): @classmethod def path_exists(cls, file_path): - """Check if a file exists - - Unlike os.path.exists, this throws an exception if there is an error - checking if the file exists (for example, if there is a perms error on - the parent dir). - - Returns: - bool: True if the file exists; False if not. - """ - try: - os.stat(file_path) - return True - except OSError as e: - if e.errno != errno.ENOENT: - raise e - return False + return path_exists(file_path) @classmethod def check_file(cls, file_path, config_name): @@ -136,42 +172,106 @@ class Config(object): with open(file_path) as file_stream: return file_stream.read() - def invoke_all(self, name, *args, **kargs): - """Invoke all instance methods with the given name and arguments in the - class's MRO. + +class RootConfig(object): + """ + Holder of an application's configuration. + + What configuration this object holds is defined by `config_classes`, a list + of Config classes that will be instantiated and given the contents of a + configuration file to read. 
They can then be accessed on this class by their + section name, defined in the Config or dynamically set to be the name of the + class, lower-cased and with "Config" removed. + """ + + config_classes = [] + + def __init__(self): + self._configs = OrderedDict() + + for config_class in self.config_classes: + if config_class.section is None: + raise ValueError("%r requires a section name" % (config_class,)) + + try: + conf = config_class(self) + except Exception as e: + raise Exception("Failed making %s: %r" % (config_class.section, e)) + self._configs[config_class.section] = conf + + def __getattr__(self, item: str) -> Any: + """ + Redirect lookups on this object either to config objects, or values on + config objects, so that `config.tls.blah` works, as well as legacy uses + of things like `config.server_name`. It will first look up the config + section name, and then values on those config classes. + """ + if item in self._configs.keys(): + return self._configs[item] + + return self._get_unclassed_config(None, item) + + def _get_unclassed_config(self, asking_section: Optional[str], item: str): + """ + Fetch a config value from one of the instantiated config classes that + has not been fetched directly. + + Args: + asking_section: If this check is coming from a Config child, which + one? This section will not be asked if it has the value. + item: The configuration value key. + + Raises: + AttributeError if no config classes have the config key. The body + will contain what sections were checked. + """ + for key, val in self._configs.items(): + if key == asking_section: + continue + + if item in dir(val): + return getattr(val, item) + + raise AttributeError(item, "not found in %s" % (list(self._configs.keys()),)) + + def invoke_all(self, func_name: str, *args, **kwargs) -> MutableMapping[str, Any]: + """ + Invoke a function on all instantiated config objects this RootConfig is + configured to use. Args: - name (str): Name of function to invoke + func_name: Name of function to invoke *args **kwargs - Returns: - list: The list of the return values from each method called + ordered dictionary of config section name and the result of the + function from it. """ - results = [] - for cls in type(self).mro(): - if name in cls.__dict__: - results.append(getattr(cls, name)(self, *args, **kargs)) - return results + res = OrderedDict() + + for name, config in self._configs.items(): + if hasattr(config, func_name): + res[name] = getattr(config, func_name)(*args, **kwargs) + + return res @classmethod - def invoke_all_static(cls, name, *args, **kargs): - """Invoke all static methods with the given name and arguments in the - class's MRO. + def invoke_all_static(cls, func_name: str, *args, **kwargs): + """ + Invoke a static function on config objects this RootConfig is + configured to use. Args: - name (str): Name of function to invoke + func_name: Name of function to invoke *args **kwargs - Returns: - list: The list of the return values from each method called + ordered dictionary of config section name and the result of the + function from it. 
""" - results = [] - for c in cls.mro(): - if name in c.__dict__: - results.append(getattr(c, name)(*args, **kargs)) - return results + for config in cls.config_classes: + if hasattr(config, func_name): + getattr(config, func_name)(*args, **kwargs) def generate_config( self, @@ -187,7 +287,8 @@ class Config(object): tls_private_key_path=None, acme_domain=None, ): - """Build a default configuration file + """ + Build a default configuration file This is used when the user explicitly asks us to generate a config file (eg with --generate_config). @@ -242,6 +343,7 @@ class Config(object): Returns: str: the yaml config file """ + return "\n\n".join( dedent(conf) for conf in self.invoke_all( @@ -257,7 +359,7 @@ class Config(object): tls_certificate_path=tls_certificate_path, tls_private_key_path=tls_private_key_path, acme_domain=acme_domain, - ) + ).values() ) @classmethod @@ -444,7 +546,7 @@ class Config(object): ) (config_path,) = config_files - if not cls.path_exists(config_path): + if not path_exists(config_path): print("Generating config file %s" % (config_path,)) if config_args.data_directory: @@ -469,7 +571,7 @@ class Config(object): open_private_ports=config_args.open_private_ports, ) - if not cls.path_exists(config_dir_path): + if not path_exists(config_dir_path): os.makedirs(config_dir_path) with open(config_path, "w") as config_file: config_file.write("# vim:ft=yaml\n\n") @@ -518,7 +620,7 @@ class Config(object): return obj - def parse_config_dict(self, config_dict, config_dir_path, data_dir_path): + def parse_config_dict(self, config_dict, config_dir_path=None, data_dir_path=None): """Read the information from the config dict into this Config object. Args: @@ -607,3 +709,6 @@ def find_config_files(search_paths): else: config_files.append(config_path) return config_files + + +__all__ = ["Config", "RootConfig"] diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi new file mode 100644 index 0000000000..86bc965ee4 --- /dev/null +++ b/synapse/config/_base.pyi @@ -0,0 +1,135 @@ +from typing import Any, List, Optional + +from synapse.config import ( + api, + appservice, + captcha, + cas, + consent_config, + database, + emailconfig, + groups, + jwt_config, + key, + logger, + metrics, + password, + password_auth_providers, + push, + ratelimiting, + registration, + repository, + room_directory, + saml2_config, + server, + server_notices_config, + spam_checker, + stats, + third_party_event_rules, + tls, + tracer, + user_directory, + voip, + workers, +) + +class ConfigError(Exception): ... + +MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS: str +MISSING_REPORT_STATS_SPIEL: str +MISSING_SERVER_NAME: str + +def path_exists(file_path: str): ... 
+ +class RootConfig: + server: server.ServerConfig + tls: tls.TlsConfig + database: database.DatabaseConfig + logging: logger.LoggingConfig + ratelimit: ratelimiting.RatelimitConfig + media: repository.ContentRepositoryConfig + captcha: captcha.CaptchaConfig + voip: voip.VoipConfig + registration: registration.RegistrationConfig + metrics: metrics.MetricsConfig + api: api.ApiConfig + appservice: appservice.AppServiceConfig + key: key.KeyConfig + saml2: saml2_config.SAML2Config + cas: cas.CasConfig + jwt: jwt_config.JWTConfig + password: password.PasswordConfig + email: emailconfig.EmailConfig + worker: workers.WorkerConfig + authproviders: password_auth_providers.PasswordAuthProviderConfig + push: push.PushConfig + spamchecker: spam_checker.SpamCheckerConfig + groups: groups.GroupsConfig + userdirectory: user_directory.UserDirectoryConfig + consent: consent_config.ConsentConfig + stats: stats.StatsConfig + servernotices: server_notices_config.ServerNoticesConfig + roomdirectory: room_directory.RoomDirectoryConfig + thirdpartyrules: third_party_event_rules.ThirdPartyRulesConfig + tracer: tracer.TracerConfig + + config_classes: List = ... + def __init__(self) -> None: ... + def invoke_all(self, func_name: str, *args: Any, **kwargs: Any): ... + @classmethod + def invoke_all_static(cls, func_name: str, *args: Any, **kwargs: Any) -> None: ... + def __getattr__(self, item: str): ... + def parse_config_dict( + self, + config_dict: Any, + config_dir_path: Optional[Any] = ..., + data_dir_path: Optional[Any] = ..., + ) -> None: ... + read_config: Any = ... + def generate_config( + self, + config_dir_path: str, + data_dir_path: str, + server_name: str, + generate_secrets: bool = ..., + report_stats: Optional[str] = ..., + open_private_ports: bool = ..., + listeners: Optional[Any] = ..., + database_conf: Optional[Any] = ..., + tls_certificate_path: Optional[str] = ..., + tls_private_key_path: Optional[str] = ..., + acme_domain: Optional[str] = ..., + ): ... + @classmethod + def load_or_generate_config(cls, description: Any, argv: Any): ... + @classmethod + def load_config(cls, description: Any, argv: Any): ... + @classmethod + def add_arguments_to_parser(cls, config_parser: Any) -> None: ... + @classmethod + def load_config_with_parser(cls, parser: Any, argv: Any): ... + def generate_missing_files( + self, config_dict: dict, config_dir_path: str + ) -> None: ... + +class Config: + root: RootConfig + def __init__(self, root_config: Optional[RootConfig] = ...) -> None: ... + def __getattr__(self, item: str, from_root: bool = ...): ... + @staticmethod + def parse_size(value: Any): ... + @staticmethod + def parse_duration(value: Any): ... + @staticmethod + def abspath(file_path: Optional[str]): ... + @classmethod + def path_exists(cls, file_path: str): ... + @classmethod + def check_file(cls, file_path: str, config_name: str): ... + @classmethod + def ensure_directory(cls, dir_path: str): ... + @classmethod + def read_file(cls, file_path: str, config_name: str): ... + +def read_config_files(config_files: List[str]): ... +def find_config_files(search_paths: List[str]): ... 
diff --git a/synapse/config/api.py b/synapse/config/api.py index dddea79a8a..74cd53a8ed 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -18,6 +18,8 @@ from ._base import Config class ApiConfig(Config): + section = "api" + def read_config(self, config, **kwargs): self.room_invite_state_types = config.get( "room_invite_state_types", diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 28d36b1bc3..9b4682222d 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -30,6 +30,8 @@ logger = logging.getLogger(__name__) class AppServiceConfig(Config): + section = "appservice" + def read_config(self, config, **kwargs): self.app_service_config_files = config.get("app_service_config_files", []) self.notify_appservices = config.get("notify_appservices", True) diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py index 8dac8152cf..44bd5c6799 100644 --- a/synapse/config/captcha.py +++ b/synapse/config/captcha.py @@ -16,6 +16,8 @@ from ._base import Config class CaptchaConfig(Config): + section = "captcha" + def read_config(self, config, **kwargs): self.recaptcha_private_key = config.get("recaptcha_private_key") self.recaptcha_public_key = config.get("recaptcha_public_key") diff --git a/synapse/config/cas.py b/synapse/config/cas.py index ebe34d933b..b916c3aa66 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -22,6 +22,8 @@ class CasConfig(Config): cas_server_url: URL of CAS server """ + section = "cas" + def read_config(self, config, **kwargs): cas_config = config.get("cas_config", None) if cas_config: diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py index 48976e17b1..62c4c44d60 100644 --- a/synapse/config/consent_config.py +++ b/synapse/config/consent_config.py @@ -73,6 +73,9 @@ DEFAULT_CONFIG = """\ class ConsentConfig(Config): + + section = "consent" + def __init__(self, *args): super(ConsentConfig, self).__init__(*args) diff --git a/synapse/config/database.py b/synapse/config/database.py index 118aafbd4a..0e2509f0b1 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -21,6 +21,8 @@ from ._base import Config class DatabaseConfig(Config): + section = "database" + def read_config(self, config, **kwargs): self.event_cache_size = self.parse_size(config.get("event_cache_size", "10K")) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index d9b43de660..658897a77e 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -28,6 +28,8 @@ from ._base import Config, ConfigError class EmailConfig(Config): + section = "email" + def read_config(self, config, **kwargs): # TODO: We should separate better the email configuration from the notification # and account validity config. diff --git a/synapse/config/groups.py b/synapse/config/groups.py index 2a522b5f44..d6862d9a64 100644 --- a/synapse/config/groups.py +++ b/synapse/config/groups.py @@ -17,6 +17,8 @@ from ._base import Config class GroupsConfig(Config): + section = "groups" + def read_config(self, config, **kwargs): self.enable_group_creation = config.get("enable_group_creation", False) self.group_creation_prefix = config.get("group_creation_prefix", "") diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 72acad4f18..6e348671c7 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from ._base import RootConfig from .api import ApiConfig from .appservice import AppServiceConfig from .captcha import CaptchaConfig @@ -46,36 +47,37 @@ from .voip import VoipConfig from .workers import WorkerConfig -class HomeServerConfig( - ServerConfig, - TlsConfig, - DatabaseConfig, - LoggingConfig, - RatelimitConfig, - ContentRepositoryConfig, - CaptchaConfig, - VoipConfig, - RegistrationConfig, - MetricsConfig, - ApiConfig, - AppServiceConfig, - KeyConfig, - SAML2Config, - CasConfig, - JWTConfig, - PasswordConfig, - EmailConfig, - WorkerConfig, - PasswordAuthProviderConfig, - PushConfig, - SpamCheckerConfig, - GroupsConfig, - UserDirectoryConfig, - ConsentConfig, - StatsConfig, - ServerNoticesConfig, - RoomDirectoryConfig, - ThirdPartyRulesConfig, - TracerConfig, -): - pass +class HomeServerConfig(RootConfig): + + config_classes = [ + ServerConfig, + TlsConfig, + DatabaseConfig, + LoggingConfig, + RatelimitConfig, + ContentRepositoryConfig, + CaptchaConfig, + VoipConfig, + RegistrationConfig, + MetricsConfig, + ApiConfig, + AppServiceConfig, + KeyConfig, + SAML2Config, + CasConfig, + JWTConfig, + PasswordConfig, + EmailConfig, + WorkerConfig, + PasswordAuthProviderConfig, + PushConfig, + SpamCheckerConfig, + GroupsConfig, + UserDirectoryConfig, + ConsentConfig, + StatsConfig, + ServerNoticesConfig, + RoomDirectoryConfig, + ThirdPartyRulesConfig, + TracerConfig, + ] diff --git a/synapse/config/jwt_config.py b/synapse/config/jwt_config.py index 36d87cef03..a568726985 100644 --- a/synapse/config/jwt_config.py +++ b/synapse/config/jwt_config.py @@ -23,6 +23,8 @@ MISSING_JWT = """Missing jwt library. This is required for jwt login. class JWTConfig(Config): + section = "jwt" + def read_config(self, config, **kwargs): jwt_config = config.get("jwt_config", None) if jwt_config: diff --git a/synapse/config/key.py b/synapse/config/key.py index f039f96e9c..ec5d430afb 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -92,6 +92,8 @@ class TrustedKeyServer(object): class KeyConfig(Config): + section = "key" + def read_config(self, config, config_dir_path, **kwargs): # the signing key can be specified inline or in a separate file if "signing_key" in config: diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 767ecfdf09..d609ec111b 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -84,6 +84,8 @@ root: class LoggingConfig(Config): + section = "logging" + def read_config(self, config, **kwargs): self.log_config = self.abspath(config.get("log_config")) self.no_redirect_stdio = config.get("no_redirect_stdio", False) diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py index ec35a6b868..282a43bddb 100644 --- a/synapse/config/metrics.py +++ b/synapse/config/metrics.py @@ -34,6 +34,8 @@ class MetricsFlags(object): class MetricsConfig(Config): + section = "metrics" + def read_config(self, config, **kwargs): self.enable_metrics = config.get("enable_metrics", False) self.report_stats = config.get("report_stats", None) diff --git a/synapse/config/password.py b/synapse/config/password.py index d5b5953f2f..2a634ac751 100644 --- a/synapse/config/password.py +++ b/synapse/config/password.py @@ -20,6 +20,8 @@ class PasswordConfig(Config): """Password login configuration """ + section = "password" + def read_config(self, config, **kwargs): password_config = config.get("password_config", {}) if password_config is None: diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py index c50e244394..9746bbc681 
100644 --- a/synapse/config/password_auth_providers.py +++ b/synapse/config/password_auth_providers.py @@ -23,6 +23,8 @@ LDAP_PROVIDER = "ldap_auth_provider.LdapAuthProvider" class PasswordAuthProviderConfig(Config): + section = "authproviders" + def read_config(self, config, **kwargs): self.password_providers = [] # type: List[Any] providers = [] diff --git a/synapse/config/push.py b/synapse/config/push.py index 1b932722a5..0910958649 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -18,6 +18,8 @@ from ._base import Config class PushConfig(Config): + section = "push" + def read_config(self, config, **kwargs): push_config = config.get("push", {}) self.push_include_content = push_config.get("include_content", True) diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 587e2862b7..947f653e03 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -36,6 +36,8 @@ class FederationRateLimitConfig(object): class RatelimitConfig(Config): + section = "ratelimiting" + def read_config(self, config, **kwargs): # Load the new-style messages config if it exists. Otherwise fall back diff --git a/synapse/config/registration.py b/synapse/config/registration.py index bef89e2bf4..b3e3e6dda2 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -24,6 +24,8 @@ from synapse.util.stringutils import random_string_with_symbols class AccountValidityConfig(Config): + section = "accountvalidity" + def __init__(self, config, synapse_config): self.enabled = config.get("enabled", False) self.renew_by_email_enabled = "renew_at" in config @@ -77,6 +79,8 @@ class AccountValidityConfig(Config): class RegistrationConfig(Config): + section = "registration" + def read_config(self, config, **kwargs): self.enable_registration = bool( strtobool(str(config.get("enable_registration", False))) diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 14740891f3..d0205e14b9 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -78,6 +78,8 @@ def parse_thumbnail_requirements(thumbnail_sizes): class ContentRepositoryConfig(Config): + section = "media" + def read_config(self, config, **kwargs): # Only enable the media repo if either the media repo is enabled or the diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index a92693017b..7c9f05bde4 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -19,6 +19,8 @@ from ._base import Config, ConfigError class RoomDirectoryConfig(Config): + section = "roomdirectory" + def read_config(self, config, **kwargs): self.enable_room_list_search = config.get("enable_room_list_search", True) diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index ab34b41ca8..c407e13680 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -55,6 +55,8 @@ def _dict_merge(merge_dict, into_dict): class SAML2Config(Config): + section = "saml2" + def read_config(self, config, **kwargs): self.saml2_enabled = False diff --git a/synapse/config/server.py b/synapse/config/server.py index 709bd387e5..afc4d6a4ab 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -58,6 +58,8 @@ on how to configure the new listener. 
class ServerConfig(Config): + section = "server" + def read_config(self, config, **kwargs): self.server_name = config["server_name"] self.server_context = config.get("server_context", None) diff --git a/synapse/config/server_notices_config.py b/synapse/config/server_notices_config.py index 6d4285ef93..6ea2ea8869 100644 --- a/synapse/config/server_notices_config.py +++ b/synapse/config/server_notices_config.py @@ -59,6 +59,8 @@ class ServerNoticesConfig(Config): None if server notices are not enabled. """ + section = "servernotices" + def __init__(self, *args): super(ServerNoticesConfig, self).__init__(*args) self.server_notices_mxid = None diff --git a/synapse/config/spam_checker.py b/synapse/config/spam_checker.py index e40797ab50..36e0ddab5c 100644 --- a/synapse/config/spam_checker.py +++ b/synapse/config/spam_checker.py @@ -19,6 +19,8 @@ from ._base import Config class SpamCheckerConfig(Config): + section = "spamchecker" + def read_config(self, config, **kwargs): self.spam_checker = None diff --git a/synapse/config/stats.py b/synapse/config/stats.py index b18ddbd1fa..62485189ea 100644 --- a/synapse/config/stats.py +++ b/synapse/config/stats.py @@ -25,6 +25,8 @@ class StatsConfig(Config): Configuration for the behaviour of synapse's stats engine """ + section = "stats" + def read_config(self, config, **kwargs): self.stats_enabled = True self.stats_bucket_size = 86400 * 1000 diff --git a/synapse/config/third_party_event_rules.py b/synapse/config/third_party_event_rules.py index b3431441b9..10a99c792e 100644 --- a/synapse/config/third_party_event_rules.py +++ b/synapse/config/third_party_event_rules.py @@ -19,6 +19,8 @@ from ._base import Config class ThirdPartyRulesConfig(Config): + section = "thirdpartyrules" + def read_config(self, config, **kwargs): self.third_party_event_rules = None diff --git a/synapse/config/tls.py b/synapse/config/tls.py index fc47ba3e9a..f06341eb67 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -18,6 +18,7 @@ import os import warnings from datetime import datetime from hashlib import sha256 +from typing import List import six @@ -33,7 +34,9 @@ logger = logging.getLogger(__name__) class TlsConfig(Config): - def read_config(self, config, config_dir_path, **kwargs): + section = "tls" + + def read_config(self, config: dict, config_dir_path: str, **kwargs): acme_config = config.get("acme", None) if acme_config is None: @@ -57,7 +60,7 @@ class TlsConfig(Config): self.tls_certificate_file = self.abspath(config.get("tls_certificate_path")) self.tls_private_key_file = self.abspath(config.get("tls_private_key_path")) - if self.has_tls_listener(): + if self.root.server.has_tls_listener(): if not self.tls_certificate_file: raise ConfigError( "tls_certificate_path must be specified if TLS-enabled listeners are " @@ -108,7 +111,7 @@ class TlsConfig(Config): ) # Support globs (*) in whitelist values - self.federation_certificate_verification_whitelist = [] + self.federation_certificate_verification_whitelist = [] # type: List[str] for entry in fed_whitelist_entries: try: entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii")) diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index 85d99a3166..8be1346113 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -19,6 +19,8 @@ from ._base import Config, ConfigError class TracerConfig(Config): + section = "tracing" + def read_config(self, config, **kwargs): opentracing_config = config.get("opentracing") if opentracing_config is None: diff --git 
a/synapse/config/user_directory.py b/synapse/config/user_directory.py index f6313e17d4..c8d19c5d6b 100644 --- a/synapse/config/user_directory.py +++ b/synapse/config/user_directory.py @@ -21,6 +21,8 @@ class UserDirectoryConfig(Config): Configuration for the behaviour of the /user_directory API """ + section = "userdirectory" + def read_config(self, config, **kwargs): self.user_directory_search_enabled = True self.user_directory_search_all_users = False diff --git a/synapse/config/voip.py b/synapse/config/voip.py index 2ca0e1cf70..a68a3068aa 100644 --- a/synapse/config/voip.py +++ b/synapse/config/voip.py @@ -16,6 +16,8 @@ from ._base import Config class VoipConfig(Config): + section = "voip" + def read_config(self, config, **kwargs): self.turn_uris = config.get("turn_uris", []) self.turn_shared_secret = config.get("turn_shared_secret") diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 1ec4998625..fef72ed974 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -21,6 +21,8 @@ class WorkerConfig(Config): They have their own pid_file and listener configuration. They use the replication_url to talk to the main synapse process.""" + section = "worker" + def read_config(self, config, **kwargs): self.worker_app = config.get("worker_app") diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py index b02780772a..1be6ff563b 100644 --- a/tests/config/test_tls.py +++ b/tests/config/test_tls.py @@ -21,17 +21,24 @@ import yaml from OpenSSL import SSL +from synapse.config._base import Config, RootConfig from synapse.config.tls import ConfigError, TlsConfig from synapse.crypto.context_factory import ClientTLSOptionsFactory from tests.unittest import TestCase -class TestConfig(TlsConfig): +class FakeServer(Config): + section = "server" + def has_tls_listener(self): return False +class TestConfig(RootConfig): + config_classes = [FakeServer, TlsConfig] + + class TLSConfigTests(TestCase): def test_warn_self_signed(self): """ @@ -202,13 +209,13 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg= conf = TestConfig() conf.read_config( yaml.safe_load( - TestConfig().generate_config_section( + TestConfig().generate_config( "/config_dir_path", "my_super_secure_server", "/data_dir_path", - "/tls_cert_path", - "tls_private_key", - None, # This is the acme_domain + tls_certificate_path="/tls_cert_path", + tls_private_key_path="tls_private_key", + acme_domain=None, # This is the acme_domain ) ), "/config_dir_path", @@ -223,13 +230,13 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg= conf = TestConfig() conf.read_config( yaml.safe_load( - TestConfig().generate_config_section( + TestConfig().generate_config( "/config_dir_path", "my_super_secure_server", "/data_dir_path", - "/tls_cert_path", - "tls_private_key", - "my_supe_secure_server", # This is the acme_domain + tls_certificate_path="/tls_cert_path", + tls_private_key_path="tls_private_key", + acme_domain="my_supe_secure_server", # This is the acme_domain ) ), "/config_dir_path", diff --git a/tox.ini b/tox.ini index 1bce10a4ce..367cc2ccf2 100644 --- a/tox.ini +++ b/tox.ini @@ -163,10 +163,9 @@ deps = {[base]deps} mypy mypy-zope - typeshed env = MYPYPATH = stubs/ extras = all -commands = mypy --show-traceback \ +commands = mypy --show-traceback --check-untyped-defs --show-error-codes --follow-imports=normal \ synapse/logging/ \ synapse/config/ -- cgit 1.4.1 From da815c1f695ceca56643d7814c96f7a3cfa3c70a Mon Sep 17 00:00:00 2001 From: Andrew Morgan 
<1342360+anoadragon453@users.noreply.github.com> Date: Thu, 10 Oct 2019 10:06:45 +0100 Subject: Move tag/push rules room upgrade checking ealier (#6155) It turns out that _local_membership_update doesn't run when you join a new, remote room. It only runs if you're joining a room that your server already knows about. This would explain #4703 and #5295 and why the transfer would work in testing and some rooms, but not others. This would especially hit single-user homeservers. The check has been moved to right after the room has been joined, and works much more reliably. (Though it may still be a bit awkward of a place). --- changelog.d/6155.bugfix | 1 + synapse/handlers/room_member.py | 62 +++++++++++++++++++++++++++++------------ 2 files changed, 45 insertions(+), 18 deletions(-) create mode 100644 changelog.d/6155.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6155.bugfix b/changelog.d/6155.bugfix new file mode 100644 index 0000000000..e32c0dce09 --- /dev/null +++ b/changelog.d/6155.bugfix @@ -0,0 +1 @@ +Fix transferring notifications and tags when joining an upgraded room that is new to your server. \ No newline at end of file diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 95a244d86c..380e2fad5e 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -203,23 +203,11 @@ class RoomMemberHandler(object): prev_member_event = yield self.store.get_event(prev_member_event_id) newly_joined = prev_member_event.membership != Membership.JOIN if newly_joined: - yield self._user_joined_room(target, room_id) - - # Copy over direct message status and room tags if this is a join - # on an upgraded room - - # Check if this is an upgraded room - predecessor = yield self.store.get_room_predecessor(room_id) - - if predecessor: - # It is an upgraded room. Copy over old tags - self.copy_room_tags_and_direct_to_room( - predecessor["room_id"], room_id, user_id - ) - # Copy over push rules - yield self.store.copy_push_rules_from_room_to_room_for_user( - predecessor["room_id"], room_id, user_id + # Copy over user state if we're joining an upgraded room + yield self.copy_user_state_if_room_upgrade( + room_id, requester.user.to_string() ) + yield self._user_joined_room(target, room_id) elif event.membership == Membership.LEAVE: if prev_member_event_id: prev_member_event = yield self.store.get_event(prev_member_event_id) @@ -463,10 +451,16 @@ class RoomMemberHandler(object): if requester.is_guest: content["kind"] = "guest" - ret = yield self._remote_join( + remote_join_response = yield self._remote_join( requester, remote_room_hosts, room_id, target, content ) - return ret + + # Copy over user state if this is a join on an remote upgraded room + yield self.copy_user_state_if_room_upgrade( + room_id, requester.user.to_string() + ) + + return remote_join_response elif effective_membership_state == Membership.LEAVE: if not is_host_in_room: @@ -503,6 +497,38 @@ class RoomMemberHandler(object): ) return res + @defer.inlineCallbacks + def copy_user_state_if_room_upgrade(self, new_room_id, user_id): + """Copy user-specific information when they join a new room if that new room is the + result of a room upgrade + + Args: + new_room_id (str): The ID of the room the user is joining + user_id (str): The ID of the user + + Returns: + Deferred + """ + # Check if the new room is an upgraded room + predecessor = yield self.store.get_room_predecessor(new_room_id) + if not predecessor: + return + + logger.debug( + "Found predecessor for %s: %s. 
Copying over room tags and push " "rules", + new_room_id, + predecessor, + ) + + # It is an upgraded room. Copy over old tags + yield self.copy_room_tags_and_direct_to_room( + predecessor["room_id"], new_room_id, user_id + ) + # Copy over push rules + yield self.store.copy_push_rules_from_room_to_room_for_user( + predecessor["room_id"], new_room_id, user_id + ) + @defer.inlineCallbacks def send_membership_event(self, requester, event, context, ratelimit=True): """ -- cgit 1.4.1 From f4571a0497791285ff044d293a6287ed90575b71 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 10 Oct 2019 11:17:00 +0100 Subject: Newsfile --- changelog.d/6127.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6127.misc (limited to 'changelog.d') diff --git a/changelog.d/6127.misc b/changelog.d/6127.misc new file mode 100644 index 0000000000..7bfbcfc252 --- /dev/null +++ b/changelog.d/6127.misc @@ -0,0 +1 @@ +Add env var to turn on tracking of log context changes. -- cgit 1.4.1 From 562b4e51dd0e7d4a6f776502b9ac357ed3428445 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 10 Oct 2019 11:28:23 +0100 Subject: Rewrite the user_filter migration again (#6184) you can't plausibly ALTER TABLE in sqlite, so we create the new table with the right schema to start with. --- changelog.d/6184.misc | 1 + .../schema/delta/56/unique_user_filter_index.py | 58 ++++++++++++---------- 2 files changed, 33 insertions(+), 26 deletions(-) create mode 100644 changelog.d/6184.misc (limited to 'changelog.d') diff --git a/changelog.d/6184.misc b/changelog.d/6184.misc new file mode 100644 index 0000000000..30b3e56082 --- /dev/null +++ b/changelog.d/6184.misc @@ -0,0 +1 @@ +Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this. 
\ No newline at end of file diff --git a/synapse/storage/schema/delta/56/unique_user_filter_index.py b/synapse/storage/schema/delta/56/unique_user_filter_index.py index 60031f23ca..1de8b54961 100644 --- a/synapse/storage/schema/delta/56/unique_user_filter_index.py +++ b/synapse/storage/schema/delta/56/unique_user_filter_index.py @@ -5,42 +5,48 @@ from synapse.storage.engines import PostgresEngine logger = logging.getLogger(__name__) +""" +This migration updates the user_filters table as follows: + + - drops any (user_id, filter_id) duplicates + - makes the columns NON-NULLable + - turns the index into a UNIQUE index +""" + + def run_upgrade(cur, database_engine, *args, **kwargs): + pass + + +def run_create(cur, database_engine, *args, **kwargs): if isinstance(database_engine, PostgresEngine): select_clause = """ - CREATE TEMPORARY TABLE user_filters_migration AS SELECT DISTINCT ON (user_id, filter_id) user_id, filter_id, filter_json - FROM user_filters; + FROM user_filters """ else: select_clause = """ - CREATE TEMPORARY TABLE user_filters_migration AS - SELECT * FROM user_filters GROUP BY user_id, filter_id; + SELECT * FROM user_filters GROUP BY user_id, filter_id """ - sql = ( - """ - BEGIN; - %s - DROP INDEX user_filters_by_user_id_filter_id; - DELETE FROM user_filters; - ALTER TABLE user_filters - ALTER COLUMN user_id SET NOT NULL, - ALTER COLUMN filter_id SET NOT NULL, - ALTER COLUMN filter_json SET NOT NULL; - INSERT INTO user_filters(user_id, filter_id, filter_json) - SELECT * FROM user_filters_migration; - DROP TABLE user_filters_migration; - CREATE UNIQUE INDEX user_filters_by_user_id_filter_id_unique - ON user_filters(user_id, filter_id); - END; - """ - % select_clause + sql = """ + DROP TABLE IF EXISTS user_filters_migration; + DROP INDEX IF EXISTS user_filters_unique; + CREATE TABLE user_filters_migration ( + user_id TEXT NOT NULL, + filter_id BIGINT NOT NULL, + filter_json BYTEA NOT NULL + ); + INSERT INTO user_filters_migration (user_id, filter_id, filter_json) + %s; + CREATE UNIQUE INDEX user_filters_unique ON user_filters_migration + (user_id, filter_id); + DROP TABLE user_filters; + ALTER TABLE user_filters_migration RENAME TO user_filters; + """ % ( + select_clause, ) + if isinstance(database_engine, PostgresEngine): cur.execute(sql) else: cur.executescript(sql) - - -def run_create(cur, database_engine, *args, **kwargs): - pass -- cgit 1.4.1 From a139420a3cfda6a4a4ee4750611b31dd71fc33f3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 10 Oct 2019 11:29:01 +0100 Subject: Fix races in room stats (and other) updates. (#6187) Hopefully this will fix the occasional failures we were seeing in the room directory. The problem was that events are not necessarily persisted (and `current_state_delta_stream` updated) in the same order as their stream_id. So for instance current_state_delta 9 might be persisted *before* current_state_delta 8. Then, when the room stats saw stream_id 9, it assumed it had done everything up to 9, and never came back to do stream_id 8. We can solve this easily by only processing up to the stream_id where we know all events have been persisted. 
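As a rough sketch of the pattern this change adopts (illustrative stand-in
names only, not the exact Synapse API): each consumer reads deltas only up to
a stream position that is known to be fully persisted, and advances its own
position to that bound rather than to the last row it happened to see.

    # Illustrative sketch of the "clip to the known-persisted position" pattern.
    # FakeStore is a hypothetical stand-in for the real datastore.
    class FakeStore:
        def __init__(self, deltas, persisted_up_to):
            self._deltas = deltas  # list of dicts, each with a "stream_id" key
            self._persisted_up_to = persisted_up_to

        def get_room_max_stream_ordering(self):
            # Everything at or below this position is known to be persisted.
            return self._persisted_up_to

        def get_current_state_deltas(self, prev_stream_id, max_stream_id):
            rows = [
                d
                for d in self._deltas
                if prev_stream_id < d["stream_id"] <= max_stream_id
            ]
            return max_stream_id, rows

    def catch_up(store, pos):
        max_pos = store.get_room_max_stream_ordering()
        if pos == max_pos:
            return pos  # already up to date
        new_pos, deltas = store.get_current_state_deltas(pos, max_pos)
        for delta in deltas:
            print("handling delta", delta["stream_id"])
        # Advancing to new_pos (rather than to deltas[-1]["stream_id"]) is what
        # stops rows that were persisted out of order from being skipped.
        return new_pos

    # Row 8 was persisted after row 9; both are still handled, because the
    # consumer never reads past the persisted-up-to bound.
    store = FakeStore([{"stream_id": 9}, {"stream_id": 8}], persisted_up_to=9)
    assert catch_up(store, pos=7) == 9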
--- changelog.d/6187.bugfix | 1 + synapse/handlers/presence.py | 16 ++++++++++++---- synapse/handlers/stats.py | 12 +++++++----- synapse/handlers/user_directory.py | 17 ++++++++++++----- synapse/storage/state_deltas.py | 38 +++++++++++++++++++++++++++++--------- tests/handlers/test_typing.py | 2 +- tests/rest/admin/test_admin.py | 2 +- 7 files changed, 63 insertions(+), 25 deletions(-) create mode 100644 changelog.d/6187.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6187.bugfix b/changelog.d/6187.bugfix new file mode 100644 index 0000000000..6142c5b98d --- /dev/null +++ b/changelog.d/6187.bugfix @@ -0,0 +1 @@ +Fix occasional missed updates in the room and user directories. \ No newline at end of file diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 053cf66b28..2a5f1a007d 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -803,17 +803,25 @@ class PresenceHandler(object): # Loop round handling deltas until we're up to date while True: with Measure(self.clock, "presence_delta"): - deltas = yield self.store.get_current_state_deltas(self._event_pos) - if not deltas: + room_max_stream_ordering = self.store.get_room_max_stream_ordering() + if self._event_pos == room_max_stream_ordering: return + logger.debug( + "Processing presence stats %s->%s", + self._event_pos, + room_max_stream_ordering, + ) + max_pos, deltas = yield self.store.get_current_state_deltas( + self._event_pos, room_max_stream_ordering + ) yield self._handle_state_delta(deltas) - self._event_pos = deltas[-1]["stream_id"] + self._event_pos = max_pos # Expose current event processing position to prometheus synapse.metrics.event_processing_positions.labels("presence").set( - self._event_pos + max_pos ) @defer.inlineCallbacks diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index c62b113115..466daf9202 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -87,21 +87,23 @@ class StatsHandler(StateDeltasHandler): # Be sure to read the max stream_ordering *before* checking if there are any outstanding # deltas, since there is otherwise a chance that we could miss updates which arrive # after we check the deltas. - room_max_stream_ordering = yield self.store.get_room_max_stream_ordering() + room_max_stream_ordering = self.store.get_room_max_stream_ordering() if self.pos == room_max_stream_ordering: break - deltas = yield self.store.get_current_state_deltas(self.pos) + logger.debug( + "Processing room stats %s->%s", self.pos, room_max_stream_ordering + ) + max_pos, deltas = yield self.store.get_current_state_deltas( + self.pos, room_max_stream_ordering + ) if deltas: logger.debug("Handling %d state deltas", len(deltas)) room_deltas, user_deltas = yield self._handle_deltas(deltas) - - max_pos = deltas[-1]["stream_id"] else: room_deltas = {} user_deltas = {} - max_pos = room_max_stream_ordering # Then count deltas for total_events and total_event_bytes. 
room_count, user_count = yield self.store.get_changes_room_total_events_and_bytes( diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index e53669e40d..624f05ab5b 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -138,21 +138,28 @@ class UserDirectoryHandler(StateDeltasHandler): # Loop round handling deltas until we're up to date while True: with Measure(self.clock, "user_dir_delta"): - deltas = yield self.store.get_current_state_deltas(self.pos) - if not deltas: + room_max_stream_ordering = self.store.get_room_max_stream_ordering() + if self.pos == room_max_stream_ordering: return + logger.debug( + "Processing user stats %s->%s", self.pos, room_max_stream_ordering + ) + max_pos, deltas = yield self.store.get_current_state_deltas( + self.pos, room_max_stream_ordering + ) + logger.info("Handling %d state deltas", len(deltas)) yield self._handle_deltas(deltas) - self.pos = deltas[-1]["stream_id"] + self.pos = max_pos # Expose current event processing position to prometheus synapse.metrics.event_processing_positions.labels("user_dir").set( - self.pos + max_pos ) - yield self.store.update_user_directory_stream_pos(self.pos) + yield self.store.update_user_directory_stream_pos(max_pos) @defer.inlineCallbacks def _handle_deltas(self, deltas): diff --git a/synapse/storage/state_deltas.py b/synapse/storage/state_deltas.py index 5fdb442104..28f33ec18f 100644 --- a/synapse/storage/state_deltas.py +++ b/synapse/storage/state_deltas.py @@ -21,7 +21,7 @@ logger = logging.getLogger(__name__) class StateDeltasStore(SQLBaseStore): - def get_current_state_deltas(self, prev_stream_id): + def get_current_state_deltas(self, prev_stream_id: int, max_stream_id: int): """Fetch a list of room state changes since the given stream id Each entry in the result contains the following fields: @@ -36,15 +36,27 @@ class StateDeltasStore(SQLBaseStore): Args: prev_stream_id (int): point to get changes since (exclusive) + max_stream_id (int): the point that we know has been correctly persisted + - ie, an upper limit to return changes from. Returns: - Deferred[list[dict]]: results + Deferred[tuple[int, list[dict]]: A tuple consisting of: + - the stream id which these results go up to + - list of current_state_delta_stream rows. If it is empty, we are + up to date. """ prev_stream_id = int(prev_stream_id) + + # check we're not going backwards + assert prev_stream_id <= max_stream_id + if not self._curr_state_delta_stream_cache.has_any_entity_changed( prev_stream_id ): - return [] + # if the CSDs haven't changed between prev_stream_id and now, we + # know for certain that they haven't changed between prev_stream_id and + # max_stream_id. + return max_stream_id, [] def get_current_state_deltas_txn(txn): # First we calculate the max stream id that will give us less than @@ -54,21 +66,29 @@ class StateDeltasStore(SQLBaseStore): sql = """ SELECT stream_id, count(*) FROM current_state_delta_stream - WHERE stream_id > ? + WHERE stream_id > ? AND stream_id <= ? GROUP BY stream_id ORDER BY stream_id ASC LIMIT 100 """ - txn.execute(sql, (prev_stream_id,)) + txn.execute(sql, (prev_stream_id, max_stream_id)) total = 0 - max_stream_id = prev_stream_id - for max_stream_id, count in txn: + + for stream_id, count in txn: total += count if total > 100: # We arbitarily limit to 100 entries to ensure we don't # select toooo many. 
+ logger.debug( + "Clipping current_state_delta_stream rows to stream_id %i", + stream_id, + ) + clipped_stream_id = stream_id break + else: + # if there's no problem, we may as well go right up to the max_stream_id + clipped_stream_id = max_stream_id # Now actually get the deltas sql = """ @@ -77,8 +97,8 @@ class StateDeltasStore(SQLBaseStore): WHERE ? < stream_id AND stream_id <= ? ORDER BY stream_id ASC """ - txn.execute(sql, (prev_stream_id, max_stream_id)) - return self.cursor_to_dict(txn) + txn.execute(sql, (prev_stream_id, clipped_stream_id)) + return clipped_stream_id, self.cursor_to_dict(txn) return self.runInteraction( "get_current_state_deltas", get_current_state_deltas_txn diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 1f2ef5d01f..67f1013051 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -139,7 +139,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): defer.succeed(1) ) - self.datastore.get_current_state_deltas.return_value = None + self.datastore.get_current_state_deltas.return_value = (0, None) self.datastore.get_to_device_stream_token = lambda: 0 self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0) diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 5877bb2133..d3a4f717f7 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -62,7 +62,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): self.device_handler.check_device_registered = Mock(return_value="FAKE") self.datastore = Mock(return_value=Mock()) - self.datastore.get_current_state_deltas = Mock(return_value=[]) + self.datastore.get_current_state_deltas = Mock(return_value=(0, [])) self.secrets = Mock() -- cgit 1.4.1 From 0aee4900131bf97dde0f0ff5d1f7133147ff5bc7 Mon Sep 17 00:00:00 2001 From: James Date: Thu, 10 Oct 2019 21:59:06 +1100 Subject: Add snapcraft packaging information (#6084) --- .gitignore | 1 + changelog.d/6084.misc | 1 + snap/snapcraft.yaml | 22 ++++++++++++++++++++++ 3 files changed, 24 insertions(+) create mode 100644 changelog.d/6084.misc create mode 100644 snap/snapcraft.yaml (limited to 'changelog.d') diff --git a/.gitignore b/.gitignore index 747b8714d7..af36c00cfa 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ *.egg-info *.lock *.pyc +*.snap *.tac _trial_temp/ _trial_temp*/ diff --git a/changelog.d/6084.misc b/changelog.d/6084.misc new file mode 100644 index 0000000000..3c33701651 --- /dev/null +++ b/changelog.d/6084.misc @@ -0,0 +1 @@ +Add snapcraft packaging information. Contributed by @devec0. diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml new file mode 100644 index 0000000000..1f7df71db2 --- /dev/null +++ b/snap/snapcraft.yaml @@ -0,0 +1,22 @@ +name: matrix-synapse +base: core18 +version: git +summary: Reference Matrix homeserver +description: | + Synapse is the reference Matrix homeserver. + Matrix is a federated and decentralised instant messaging and VoIP system. + +grade: stable +confinement: strict + +apps: + matrix-synapse: + command: synctl --no-daemonize start $SNAP_COMMON/homeserver.yaml + stop-command: synctl -c $SNAP_COMMON stop + plugs: [network-bind, network] + daemon: simple +parts: + matrix-synapse: + source: . 
+    plugin: python
+    python-version: python3
-- 
cgit 1.4.1


From 2efd050c9db2e96fd96535dc9b1c6f54acbd163d Mon Sep 17 00:00:00 2001
From: krombel
Date: Thu, 10 Oct 2019 13:59:55 +0200
Subject: send 404 as HTTP status when filter ID is unknown to the server
 (#2380)

This fixes the inconsistent use of 400 vs. 404 as the HTTP status code when
the requested filter ID is not known to the server. Clients such as
matrix-js-sdk expect a 404 in order to detect this situation, so returning
400 led to unwanted behaviour.
---
 changelog.d/2380.bugfix                   |  1 +
 synapse/rest/client/v2_alpha/filter.py    | 12 +++++----
 synapse/rest/client/v2_alpha/sync.py      | 41 ++++++++++++++++++-------------
 tests/rest/client/v2_alpha/test_filter.py |  2 +-
 4 files changed, 33 insertions(+), 23 deletions(-)
 create mode 100644 changelog.d/2380.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/2380.bugfix b/changelog.d/2380.bugfix
new file mode 100644
index 0000000000..eae3206031
--- /dev/null
+++ b/changelog.d/2380.bugfix
@@ -0,0 +1 @@
+Return an HTTP 404 instead of 400 when requesting a filter by ID that is unknown to the server. Thanks to @krombel for contributing this!
diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py
index c6ddf24c8d..17a8bc7366 100644
--- a/synapse/rest/client/v2_alpha/filter.py
+++ b/synapse/rest/client/v2_alpha/filter.py
@@ -17,7 +17,7 @@ import logging

 from twisted.internet import defer

-from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
+from synapse.api.errors import AuthError, NotFoundError, StoreError, SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.types import UserID

@@ -52,13 +52,15 @@ class GetFilterRestServlet(RestServlet):
             raise SynapseError(400, "Invalid filter_id")

         try:
-            filter = yield self.filtering.get_user_filter(
+            filter_collection = yield self.filtering.get_user_filter(
                 user_localpart=target_user.localpart, filter_id=filter_id
             )
+        except StoreError as e:
+            if e.code != 404:
+                raise
+            raise NotFoundError("No such filter")

-            return 200, filter.get_filter_json()
-        except (KeyError, StoreError):
-            raise SynapseError(400, "No such filter", errcode=Codes.NOT_FOUND)
+        return 200, filter_collection.get_filter_json()


 class CreateFilterRestServlet(RestServlet):
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index c98c5a3802..a883c8adda 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -21,7 +21,7 @@ from canonicaljson import json
 from twisted.internet import defer

 from synapse.api.constants import PresenceState
-from synapse.api.errors import SynapseError
+from synapse.api.errors import Codes, StoreError, SynapseError
 from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection
 from synapse.events.utils import (
     format_event_for_client_v2_without_room_id,
@@ -119,25 +119,32 @@ class SyncRestServlet(RestServlet):

         request_key = (user, timeout, since, filter_id, full_state, device_id)

-        if filter_id:
-            if filter_id.startswith("{"):
-                try:
-                    filter_object = json.loads(filter_id)
-                    set_timeline_upper_limit(
-                        filter_object, self.hs.config.filter_timeline_limit
-                    )
-                except Exception:
-                    raise SynapseError(400, "Invalid filter JSON")
-                self.filtering.check_valid_filter(filter_object)
-                filter = FilterCollection(filter_object)
-            else:
-                filter = yield self.filtering.get_user_filter(user.localpart, filter_id)
+        if filter_id is None:
+            filter_collection = DEFAULT_FILTER_COLLECTION
+        elif filter_id.startswith("{"):
+            try:
+
filter_object = json.loads(filter_id) + set_timeline_upper_limit( + filter_object, self.hs.config.filter_timeline_limit + ) + except Exception: + raise SynapseError(400, "Invalid filter JSON") + self.filtering.check_valid_filter(filter_object) + filter_collection = FilterCollection(filter_object) else: - filter = DEFAULT_FILTER_COLLECTION + try: + filter_collection = yield self.filtering.get_user_filter( + user.localpart, filter_id + ) + except StoreError as err: + if err.code != 404: + raise + # fix up the description and errcode to be more useful + raise SynapseError(400, "No such filter", errcode=Codes.INVALID_PARAM) sync_config = SyncConfig( user=user, - filter_collection=filter, + filter_collection=filter_collection, is_guest=requester.is_guest, request_key=request_key, device_id=device_id, @@ -171,7 +178,7 @@ class SyncRestServlet(RestServlet): time_now = self.clock.time_msec() response_content = yield self.encode_response( - time_now, sync_result, requester.access_token_id, filter + time_now, sync_result, requester.access_token_id, filter_collection ) return 200, response_content diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py index f42a8efbf4..e0e9e94fbf 100644 --- a/tests/rest/client/v2_alpha/test_filter.py +++ b/tests/rest/client/v2_alpha/test_filter.py @@ -92,7 +92,7 @@ class FilterTestCase(unittest.HomeserverTestCase): ) self.render(request) - self.assertEqual(channel.result["code"], b"400") + self.assertEqual(channel.result["code"], b"404") self.assertEquals(channel.json_body["errcode"], Codes.NOT_FOUND) # Currently invalid params do not have an appropriate errcode -- cgit 1.4.1 From 9a84d74417a1c9fbcd6c57e7ef23e5590e04ef49 Mon Sep 17 00:00:00 2001 From: Michael Telatynski <7t3chguy@gmail.com> Date: Thu, 10 Oct 2019 13:03:44 +0100 Subject: before fulfilling a group invite,check if user is already joined/invited (#3436) Fixes vector-im/riot-web#5645 --- changelog.d/3436.bugfix | 1 + synapse/groups/groups_server.py | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) create mode 100644 changelog.d/3436.bugfix (limited to 'changelog.d') diff --git a/changelog.d/3436.bugfix b/changelog.d/3436.bugfix new file mode 100644 index 0000000000..15714a11e0 --- /dev/null +++ b/changelog.d/3436.bugfix @@ -0,0 +1 @@ +Fix a problem where users could be invited twice to the same group. diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index d50e691436..8f10b6adbb 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd +# Copyright 2019 Michael Telatynski <7t3chguy@gmail.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -20,16 +21,16 @@ from six import string_types from twisted.internet import defer -from synapse.api.errors import SynapseError +from synapse.api.errors import Codes, SynapseError from synapse.types import GroupID, RoomID, UserID, get_domain_from_id from synapse.util.async_helpers import concurrently_execute logger = logging.getLogger(__name__) -# TODO: Allow users to "knock" or simpkly join depending on rules +# TODO: Allow users to "knock" or simply join depending on rules # TODO: Federation admin APIs -# TODO: is_priveged flag to users and is_public to users and rooms +# TODO: is_privileged flag to users and is_public to users and rooms # TODO: Audit log for admins (profile updates, membership changes, users who tried # to join but were rejected, etc) # TODO: Flairs @@ -590,7 +591,18 @@ class GroupsServerHandler(object): ) # TODO: Check if user knocked - # TODO: Check if user is already invited + + invited_users = yield self.store.get_invited_users_in_group(group_id) + if user_id in invited_users: + raise SynapseError( + 400, "User already invited to group", errcode=Codes.BAD_STATE + ) + + user_results = yield self.store.get_users_in_group( + group_id, include_private=True + ) + if user_id in [user_result["user_id"] for user_result in user_results]: + raise SynapseError(400, "User already in group") content = { "profile": {"name": group["name"], "avatar_url": group["avatar_url"]}, -- cgit 1.4.1 From b5b03b7079a9baa34a25915d6a569e383e8307c3 Mon Sep 17 00:00:00 2001 From: werner291 Date: Thu, 10 Oct 2019 14:05:48 +0200 Subject: Add domain validation when creating room with list of invitees (#6121) --- changelog.d/4088.bugfix | 1 + synapse/handlers/room.py | 4 +++- tests/rest/client/v1/test_rooms.py | 9 +++++++++ 3 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 changelog.d/4088.bugfix (limited to 'changelog.d') diff --git a/changelog.d/4088.bugfix b/changelog.d/4088.bugfix new file mode 100644 index 0000000000..61722b6224 --- /dev/null +++ b/changelog.d/4088.bugfix @@ -0,0 +1 @@ +Added domain validation when including a list of invitees upon room creation. 
\ No newline at end of file diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 970be3c846..2816bd8f87 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -28,6 +28,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.http.endpoint import parse_and_validate_server_name from synapse.storage.state import StateFilter from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID from synapse.util import stringutils @@ -554,7 +555,8 @@ class RoomCreationHandler(BaseHandler): invite_list = config.get("invite", []) for i in invite_list: try: - UserID.from_string(i) + uid = UserID.from_string(i) + parse_and_validate_server_name(uid.domain) except Exception: raise SynapseError(400, "Invalid user_id: %s" % (i,)) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index fe741637f5..2f2ca74611 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -484,6 +484,15 @@ class RoomsCreateTestCase(RoomBase): self.render(request) self.assertEquals(400, channel.code) + def test_post_room_invitees_invalid_mxid(self): + # POST with invalid invitee, see https://github.com/matrix-org/synapse/issues/4088 + # Note the trailing space in the MXID here! + request, channel = self.make_request( + "POST", "/createRoom", b'{"invite":["@alice:example.com "]}' + ) + self.render(request) + self.assertEquals(400, channel.code) + class RoomTopicTestCase(RoomBase): """ Tests /rooms/$room_id/topic REST events. """ -- cgit 1.4.1 From 203ccdac5fa50888df3261c419c6b9fd670b21e5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Oct 2019 19:09:54 +0100 Subject: Newsfile --- changelog.d/6156.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6156.misc (limited to 'changelog.d') diff --git a/changelog.d/6156.misc b/changelog.d/6156.misc new file mode 100644 index 0000000000..49525e9416 --- /dev/null +++ b/changelog.d/6156.misc @@ -0,0 +1 @@ +Use Postgres ANY for selecting many values. -- cgit 1.4.1 From 430dc2c67b20bf4abff74f861d8dce78f880ec73 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 10 Oct 2019 14:05:30 +0100 Subject: Fix python packaging ... after it got borked by #6081 --- MANIFEST.in | 4 +--- changelog.d/6191.misc | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) create mode 100644 changelog.d/6191.misc (limited to 'changelog.d') diff --git a/MANIFEST.in b/MANIFEST.in index 9c2902b8d2..b22be58f3d 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -47,7 +47,5 @@ prune debian prune demo/etc prune docker prune mypy.ini +prune snap prune stubs - -exclude jenkins* -recursive-exclude jenkins *.sh diff --git a/changelog.d/6191.misc b/changelog.d/6191.misc new file mode 100644 index 0000000000..3c33701651 --- /dev/null +++ b/changelog.d/6191.misc @@ -0,0 +1 @@ +Add snapcraft packaging information. Contributed by @devec0. 
-- cgit 1.4.1 From 2208891ace3b1d9db148d606fca2b3f784a4257a Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Thu, 10 Oct 2019 19:22:10 -0400 Subject: add changelog --- changelog.d/6193.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6193.misc (limited to 'changelog.d') diff --git a/changelog.d/6193.misc b/changelog.d/6193.misc new file mode 100644 index 0000000000..8e3707f8fd --- /dev/null +++ b/changelog.d/6193.misc @@ -0,0 +1 @@ +Make storage layer responsible for adding device names to key, rather than the handler. -- cgit 1.4.1 From a0d0ba7862e38588aa0d0ac29a720fdf06f1ab8d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 11 Oct 2019 09:38:26 +0100 Subject: Fix MAU reaping where reserved users are specified. (#6168) --- changelog.d/6168.bugfix | 1 + synapse/app/homeserver.py | 6 +- synapse/storage/monthly_active_users.py | 101 ++++++++++++++++++----------- tests/storage/test_monthly_active_users.py | 58 ++++++++++++++--- 4 files changed, 115 insertions(+), 51 deletions(-) create mode 100644 changelog.d/6168.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6168.bugfix b/changelog.d/6168.bugfix new file mode 100644 index 0000000000..39e8e9d019 --- /dev/null +++ b/changelog.d/6168.bugfix @@ -0,0 +1 @@ +Fix monthly active user reaping where reserved users are specified. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 774326dff9..eb54f56853 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -605,13 +605,13 @@ def run(hs): @defer.inlineCallbacks def generate_monthly_active_users(): current_mau_count = 0 - reserved_count = 0 + reserved_users = () store = hs.get_datastore() if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: current_mau_count = yield store.get_monthly_active_count() - reserved_count = yield store.get_registered_reserved_users_count() + reserved_users = yield store.get_registered_reserved_users() current_mau_gauge.set(float(current_mau_count)) - registered_reserved_users_mau_gauge.set(float(reserved_count)) + registered_reserved_users_mau_gauge.set(float(len(reserved_users))) max_mau_gauge.set(float(hs.config.max_mau_value)) def start_generate_monthly_active_users(): diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 752e9788a2..3803604be7 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -32,7 +32,6 @@ class MonthlyActiveUsersStore(SQLBaseStore): super(MonthlyActiveUsersStore, self).__init__(None, hs) self._clock = hs.get_clock() self.hs = hs - self.reserved_users = () # Do not add more reserved users than the total allowable number self._new_transaction( dbconn, @@ -51,7 +50,6 @@ class MonthlyActiveUsersStore(SQLBaseStore): txn (cursor): threepids (list[dict]): List of threepid dicts to reserve """ - reserved_user_list = [] for tp in threepids: user_id = self.get_user_id_by_threepid_txn(txn, tp["medium"], tp["address"]) @@ -60,10 +58,8 @@ class MonthlyActiveUsersStore(SQLBaseStore): is_support = self.is_support_user_txn(txn, user_id) if not is_support: self.upsert_monthly_active_user_txn(txn, user_id) - reserved_user_list.append(user_id) else: logger.warning("mau limit reserved threepid %s not found in db" % tp) - self.reserved_users = tuple(reserved_user_list) @defer.inlineCallbacks def reap_monthly_active_users(self): @@ -74,8 +70,11 @@ class MonthlyActiveUsersStore(SQLBaseStore): Deferred[] """ - def _reap_users(txn): - # Purge stale users + def _reap_users(txn, 
reserved_users): + """ + Args: + reserved_users (tuple): reserved users to preserve + """ thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) query_args = [thirty_days_ago] @@ -83,20 +82,19 @@ class MonthlyActiveUsersStore(SQLBaseStore): # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres # when len(reserved_users) == 0. Works fine on sqlite. - if len(self.reserved_users) > 0: + if len(reserved_users) > 0: # questionmarks is a hack to overcome sqlite not supporting # tuples in 'WHERE IN %s' - questionmarks = "?" * len(self.reserved_users) + question_marks = ",".join("?" * len(reserved_users)) - query_args.extend(self.reserved_users) - sql = base_sql + """ AND user_id NOT IN ({})""".format( - ",".join(questionmarks) - ) + query_args.extend(reserved_users) + sql = base_sql + " AND user_id NOT IN ({})".format(question_marks) else: sql = base_sql txn.execute(sql, query_args) + max_mau_value = self.hs.config.max_mau_value if self.hs.config.limit_usage_by_mau: # If MAU user count still exceeds the MAU threshold, then delete on # a least recently active basis. @@ -106,31 +104,52 @@ class MonthlyActiveUsersStore(SQLBaseStore): # While Postgres does not require 'LIMIT', but also does not support # negative LIMIT values. So there is no way to write it that both can # support - safe_guard = self.hs.config.max_mau_value - len(self.reserved_users) - # Must be greater than zero for postgres - safe_guard = safe_guard if safe_guard > 0 else 0 - query_args = [safe_guard] - - base_sql = """ - DELETE FROM monthly_active_users - WHERE user_id NOT IN ( - SELECT user_id FROM monthly_active_users - ORDER BY timestamp DESC - LIMIT ? + if len(reserved_users) == 0: + sql = """ + DELETE FROM monthly_active_users + WHERE user_id NOT IN ( + SELECT user_id FROM monthly_active_users + ORDER BY timestamp DESC + LIMIT ? ) - """ + """ + txn.execute(sql, (max_mau_value,)) # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres # when len(reserved_users) == 0. Works fine on sqlite. - if len(self.reserved_users) > 0: - query_args.extend(self.reserved_users) - sql = base_sql + """ AND user_id NOT IN ({})""".format( - ",".join(questionmarks) - ) else: - sql = base_sql - txn.execute(sql, query_args) + # Must be >= 0 for postgres + num_of_non_reserved_users_to_remove = max( + max_mau_value - len(reserved_users), 0 + ) + + # It is important to filter reserved users twice to guard + # against the case where the reserved user is present in the + # SELECT, meaning that a legitmate mau is deleted. + sql = """ + DELETE FROM monthly_active_users + WHERE user_id NOT IN ( + SELECT user_id FROM monthly_active_users + WHERE user_id NOT IN ({}) + ORDER BY timestamp DESC + LIMIT ? 
+ ) + AND user_id NOT IN ({}) + """.format( + question_marks, question_marks + ) + + query_args = [ + *reserved_users, + num_of_non_reserved_users_to_remove, + *reserved_users, + ] - yield self.runInteraction("reap_monthly_active_users", _reap_users) + txn.execute(sql, query_args) + + reserved_users = yield self.get_registered_reserved_users() + yield self.runInteraction( + "reap_monthly_active_users", _reap_users, reserved_users + ) # It seems poor to invalidate the whole cache, Postgres supports # 'Returning' which would allow me to invalidate only the # specific users, but sqlite has no way to do this and instead @@ -159,21 +178,25 @@ class MonthlyActiveUsersStore(SQLBaseStore): return self.runInteraction("count_users", _count_users) @defer.inlineCallbacks - def get_registered_reserved_users_count(self): - """Of the reserved threepids defined in config, how many are associated + def get_registered_reserved_users(self): + """Of the reserved threepids defined in config, which are associated with registered users? Returns: - Defered[int]: Number of real reserved users + Defered[list]: Real reserved users """ - count = 0 - for tp in self.hs.config.mau_limits_reserved_threepids: + users = [] + + for tp in self.hs.config.mau_limits_reserved_threepids[ + : self.hs.config.max_mau_value + ]: user_id = yield self.hs.get_datastore().get_user_id_by_threepid( tp["medium"], tp["address"] ) if user_id: - count = count + 1 - return count + users.append(user_id) + + return users @defer.inlineCallbacks def upsert_monthly_active_user(self, user_id): diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 1494650d10..90a63dc477 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -50,6 +50,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): {"medium": "email", "address": user2_email}, {"medium": "email", "address": user3_email}, ] + self.hs.config.mau_limits_reserved_threepids = threepids # -1 because user3 is a support user and does not count user_num = len(threepids) - 1 @@ -84,6 +85,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.hs.config.max_mau_value = 0 self.reactor.advance(FORTY_DAYS) + self.hs.config.max_mau_value = 5 self.store.reap_monthly_active_users() self.pump() @@ -147,9 +149,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.store.reap_monthly_active_users() self.pump() count = self.store.get_monthly_active_count() - self.assertEquals( - self.get_success(count), initial_users - self.hs.config.max_mau_value - ) + self.assertEquals(self.get_success(count), self.hs.config.max_mau_value) self.reactor.advance(FORTY_DAYS) self.store.reap_monthly_active_users() @@ -158,6 +158,44 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): count = self.store.get_monthly_active_count() self.assertEquals(self.get_success(count), 0) + def test_reap_monthly_active_users_reserved_users(self): + """ Tests that reaping correctly handles reaping where reserved users are + present""" + + self.hs.config.max_mau_value = 5 + initial_users = 5 + reserved_user_number = initial_users - 1 + threepids = [] + for i in range(initial_users): + user = "@user%d:server" % i + email = "user%d@example.com" % i + self.get_success(self.store.upsert_monthly_active_user(user)) + threepids.append({"medium": "email", "address": email}) + # Need to ensure that the most recent entries in the + # monthly_active_users table are reserved + now = 
int(self.hs.get_clock().time_msec()) + if i != 0: + self.get_success( + self.store.register_user(user_id=user, password_hash=None) + ) + self.get_success( + self.store.user_add_threepid(user, "email", email, now, now) + ) + + self.hs.config.mau_limits_reserved_threepids = threepids + self.store.runInteraction( + "initialise", self.store._initialise_reserved_users, threepids + ) + count = self.store.get_monthly_active_count() + self.assertTrue(self.get_success(count), initial_users) + + users = self.store.get_registered_reserved_users() + self.assertEquals(len(self.get_success(users)), reserved_user_number) + + self.get_success(self.store.reap_monthly_active_users()) + count = self.store.get_monthly_active_count() + self.assertEquals(self.get_success(count), self.hs.config.max_mau_value) + def test_populate_monthly_users_is_guest(self): # Test that guest users are not added to mau list user_id = "@user_id:host" @@ -192,12 +230,13 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): def test_get_reserved_real_user_account(self): # Test no reserved users, or reserved threepids - count = self.store.get_registered_reserved_users_count() - self.assertEquals(self.get_success(count), 0) + users = self.get_success(self.store.get_registered_reserved_users()) + self.assertEquals(len(users), 0) # Test reserved users but no registered users user1 = "@user1:example.com" user2 = "@user2:example.com" + user1_email = "user1@example.com" user2_email = "user2@example.com" threepids = [ @@ -210,8 +249,8 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): ) self.pump() - count = self.store.get_registered_reserved_users_count() - self.assertEquals(self.get_success(count), 0) + users = self.get_success(self.store.get_registered_reserved_users()) + self.assertEquals(len(users), 0) # Test reserved registed users self.store.register_user(user_id=user1, password_hash=None) @@ -221,8 +260,9 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): now = int(self.hs.get_clock().time_msec()) self.store.user_add_threepid(user1, "email", user1_email, now, now) self.store.user_add_threepid(user2, "email", user2_email, now, now) - count = self.store.get_registered_reserved_users_count() - self.assertEquals(self.get_success(count), len(threepids)) + + users = self.get_success(self.store.get_registered_reserved_users()) + self.assertEquals(len(users), len(threepids)) def test_support_user_not_add_to_mau_limits(self): support_user_id = "@support:test" -- cgit 1.4.1 From de3a1764266536fdc4bf87b01ed873632213eb12 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 11 Oct 2019 11:24:08 +0100 Subject: Newsfile --- changelog.d/6195.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6195.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6195.bugfix b/changelog.d/6195.bugfix new file mode 100644 index 0000000000..d22935dbcd --- /dev/null +++ b/changelog.d/6195.bugfix @@ -0,0 +1 @@ +Fix tracing of non-JSON APIs, /media, /key etc. -- cgit 1.4.1 From c3b0fbe9c3a71d000f2358122c45d33f4f9e55c1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 11 Oct 2019 12:24:52 +0100 Subject: Newsfile --- changelog.d/6196.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6196.misc (limited to 'changelog.d') diff --git a/changelog.d/6196.misc b/changelog.d/6196.misc new file mode 100644 index 0000000000..3897b1216f --- /dev/null +++ b/changelog.d/6196.misc @@ -0,0 +1 @@ +Port synapse.rest.admin module to use async/await. 
-- cgit 1.4.1 From be9b55e0d2b758bd7d9be4273253ea115c5362a3 Mon Sep 17 00:00:00 2001 From: Valérian Rousset Date: Fri, 11 Oct 2019 13:33:12 +0200 Subject: cas: support setting display name (#6114) Now, the CAS server can return an attribute stating what's the desired displayname, instead of using the username directly. --- changelog.d/6114.feature | 1 + docs/sample_config.yaml | 1 + synapse/config/cas.py | 3 +++ synapse/rest/client/v1/login.py | 4 +++- 4 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6114.feature (limited to 'changelog.d') diff --git a/changelog.d/6114.feature b/changelog.d/6114.feature new file mode 100644 index 0000000000..a34ab12148 --- /dev/null +++ b/changelog.d/6114.feature @@ -0,0 +1 @@ +CAS login now provides a default display name for users if a `displayname_attribute` is set in the configuration file. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 43893399ad..8226978ba6 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1220,6 +1220,7 @@ saml2_config: # enabled: true # server_url: "https://cas-server.com" # service_url: "https://homeserver.domain.com:8448" +# #displayname_attribute: name # #required_attributes: # # name: value diff --git a/synapse/config/cas.py b/synapse/config/cas.py index b916c3aa66..4526c1a67b 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -30,11 +30,13 @@ class CasConfig(Config): self.cas_enabled = cas_config.get("enabled", True) self.cas_server_url = cas_config["server_url"] self.cas_service_url = cas_config["service_url"] + self.cas_displayname_attribute = cas_config.get("displayname_attribute") self.cas_required_attributes = cas_config.get("required_attributes", {}) else: self.cas_enabled = False self.cas_server_url = None self.cas_service_url = None + self.cas_displayname_attribute = None self.cas_required_attributes = {} def generate_config_section(self, config_dir_path, server_name, **kwargs): @@ -45,6 +47,7 @@ class CasConfig(Config): # enabled: true # server_url: "https://cas-server.com" # service_url: "https://homeserver.domain.com:8448" + # #displayname_attribute: name # #required_attributes: # # name: value """ diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 9cddbc752a..8414af08cb 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -377,6 +377,7 @@ class CasTicketServlet(RestServlet): super(CasTicketServlet, self).__init__() self.cas_server_url = hs.config.cas_server_url self.cas_service_url = hs.config.cas_service_url + self.cas_displayname_attribute = hs.config.cas_displayname_attribute self.cas_required_attributes = hs.config.cas_required_attributes self._sso_auth_handler = SSOAuthHandler(hs) self._http_client = hs.get_simple_http_client() @@ -400,6 +401,7 @@ class CasTicketServlet(RestServlet): def handle_cas_response(self, request, cas_response_body, client_redirect_url): user, attributes = self.parse_cas_response(cas_response_body) + displayname = attributes.pop(self.cas_displayname_attribute, None) for required_attribute, required_value in self.cas_required_attributes.items(): # If required attribute was not in CAS Response - Forbidden @@ -414,7 +416,7 @@ class CasTicketServlet(RestServlet): raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED) return self._sso_auth_handler.on_successful_auth( - user, request, client_redirect_url + user, request, client_redirect_url, displayname ) def parse_cas_response(self, cas_response_body): -- cgit 1.4.1 From 
423f7ae3974e251fef110c90a620a51789459d68 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Thu, 17 Oct 2019 17:06:07 +0100
Subject: Fix up changelogs

---
 changelog.d/6186.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6186.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6186.misc b/changelog.d/6186.misc
new file mode 100644
index 0000000000..5e1314a0ac
--- /dev/null
+++ b/changelog.d/6186.misc
@@ -0,0 +1 @@
+Reject (accidental) attempts to insert bytes into postgres tables.
-- 
cgit 1.4.1


From 6fb0a3da07192382bd05e0309c74d0e91c3b1253 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Thu, 17 Oct 2019 18:03:28 +0100
Subject: Remove dead changelog file

This is part of 1.4.1
---
 changelog.d/6185.bugfix | 1 -
 1 file changed, 1 deletion(-)
 delete mode 100644 changelog.d/6185.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6185.bugfix b/changelog.d/6185.bugfix
deleted file mode 100644
index 9d1c669b88..0000000000
--- a/changelog.d/6185.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where redacted events were sometimes incorrectly censored in the database, breaking APIs that attempted to fetch such events.
-- 
cgit 1.4.1


From 1594de856c78d3e10b965b4c8ac121fb6a1083d1 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Thu, 17 Oct 2019 21:44:44 +0100
Subject: changelog

---
 changelog.d/6214.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6214.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6214.misc b/changelog.d/6214.misc
new file mode 100644
index 0000000000..c3fd04d0d8
--- /dev/null
+++ b/changelog.d/6214.misc
@@ -0,0 +1 @@
+Remove some unused event-auth code.
-- 
cgit 1.4.1


From 5859a5c569c03f3b7c578fe4dbf2274e37af03bb Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 18 Oct 2019 07:42:26 +0200
Subject: Fix presence timeouts when synchrotron restarts. (#6212)

* Fix presence timeouts when synchrotron restarts.

Handling timeouts would fail if there was an external process that had
timed out, e.g. a synchrotron restarting. This was due to a couple of
variable name typos.

Fixes #3715.
---
 changelog.d/6212.bugfix         |  1 +
 synapse/handlers/presence.py    | 13 +++++++++----
 tests/handlers/test_presence.py | 39 +++++++++++++++++++++++++++++++++++++++
 tox.ini                         |  2 +-
 4 files changed, 50 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/6212.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6212.bugfix b/changelog.d/6212.bugfix
new file mode 100644
index 0000000000..918755fee0
--- /dev/null
+++ b/changelog.d/6212.bugfix
@@ -0,0 +1 @@
+Fix bug where presence would not get timed out correctly if a synchrotron worker is used and restarted.
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 2a5f1a007d..eda15bc623 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -24,6 +24,7 @@ The methods that define policy are:

 import logging
 from contextlib import contextmanager
+from typing import Dict, Set

 from six import iteritems, itervalues

@@ -179,8 +180,9 @@ class PresenceHandler(object):
         # we assume that all the sync requests on that process have stopped.
         # Stored as a dict from process_id to set of user_id, and a dict of
         # process_id to millisecond timestamp last updated.
- self.external_process_to_current_syncs = {} - self.external_process_last_updated_ms = {} + self.external_process_to_current_syncs = {} # type: Dict[int, Set[str]] + self.external_process_last_updated_ms = {} # type: Dict[int, int] + self.external_sync_linearizer = Linearizer(name="external_sync_linearizer") # Start a LoopingCall in 30s that fires every 5s. @@ -349,10 +351,13 @@ class PresenceHandler(object): if now - last_update > EXTERNAL_PROCESS_EXPIRY ] for process_id in expired_process_ids: + # For each expired process drop tracking info and check the users + # that were syncing on that process to see if they need to be timed + # out. users_to_check.update( - self.external_process_last_updated_ms.pop(process_id, ()) + self.external_process_to_current_syncs.pop(process_id, ()) ) - self.external_process_last_update.pop(process_id) + self.external_process_last_updated_ms.pop(process_id) states = [ self.user_to_current_state.get(user_id, UserPresenceState.default(user_id)) diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index f70c6e7d65..d4293b4312 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -22,6 +22,7 @@ from synapse.api.constants import EventTypes, Membership, PresenceState from synapse.events import room_version_to_event_format from synapse.events.builder import EventBuilder from synapse.handlers.presence import ( + EXTERNAL_PROCESS_EXPIRY, FEDERATION_PING_INTERVAL, FEDERATION_TIMEOUT, IDLE_TIMER, @@ -413,6 +414,44 @@ class PresenceTimeoutTestCase(unittest.TestCase): self.assertEquals(state, new_state) +class PresenceHandlerTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor, clock, hs): + self.presence_handler = hs.get_presence_handler() + self.clock = hs.get_clock() + + def test_external_process_timeout(self): + """Test that if an external process doesn't update the records for a while + we time out their syncing users presence. + """ + process_id = 1 + user_id = "@test:server" + + # Notify handler that a user is now syncing. + self.get_success( + self.presence_handler.update_external_syncs_row( + process_id, user_id, True, self.clock.time_msec() + ) + ) + + # Check that if we wait a while without telling the handler the user has + # stopped syncing that their presence state doesn't get timed out. + self.reactor.advance(EXTERNAL_PROCESS_EXPIRY / 2) + + state = self.get_success( + self.presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, PresenceState.ONLINE) + + # Check that if the external process timeout fires, then the syncing + # user gets timed out + self.reactor.advance(EXTERNAL_PROCESS_EXPIRY) + + state = self.get_success( + self.presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, PresenceState.OFFLINE) + + class PresenceJoinTestCase(unittest.HomeserverTestCase): """Tests remote servers get told about presence of users in the room when they join and when new local users join. diff --git a/tox.ini b/tox.ini index 367cc2ccf2..7ba6f6339f 100644 --- a/tox.ini +++ b/tox.ini @@ -161,7 +161,7 @@ basepython = python3.7 skip_install = True deps = {[base]deps} - mypy + mypy==0.730 mypy-zope env = MYPYPATH = stubs/ -- cgit 1.4.1 From dc4bec885df275d2973f500c1488baa6d9e25197 Mon Sep 17 00:00:00 2001 From: Bart Noordervliet Date: Fri, 18 Oct 2019 11:13:59 +0200 Subject: Add missing BOOLEAN_COLUMNs to synapse_port_db (#6216) Small fix to synapse_port_db to be able to convert from database schema v56. 
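For context, a simplified sketch of why the port script needs this mapping
(hypothetical code, not the real synapse_port_db logic): SQLite stores
booleans as 0/1 integers, so the script must know which columns to coerce
into real booleans before inserting rows into Postgres BOOLEAN columns.

    # Hypothetical, simplified illustration of a BOOLEAN_COLUMNS-style mapping.
    BOOLEAN_COLUMNS = {
        "redactions": ["have_censored"],
        "room_stats_state": ["is_federatable"],
    }

    def convert_row(table, headers, row):
        # Coerce SQLite's 0/1 integers to real booleans for the named columns.
        bool_cols = set(BOOLEAN_COLUMNS.get(table, []))
        return [
            bool(value) if header in bool_cols else value
            for header, value in zip(headers, row)
        ]

    # Prints ['$abc', True]
    print(convert_row("redactions", ["event_id", "have_censored"], ["$abc", 1]))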
--- changelog.d/6216.bugfix | 1 + scripts/synapse_port_db | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/6216.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6216.bugfix b/changelog.d/6216.bugfix new file mode 100644 index 0000000000..5784e82d18 --- /dev/null +++ b/changelog.d/6216.bugfix @@ -0,0 +1 @@ +synapse_port_db: Add 2 additional BOOLEAN_COLUMNS to be able to convert from database schema v56. diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index b6ba19c776..3f942abdb6 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -55,6 +55,8 @@ BOOLEAN_COLUMNS = { "local_group_membership": ["is_publicised", "is_admin"], "e2e_room_keys": ["is_verified"], "account_validity": ["email_sent"], + "redactions": ["have_censored"], + "room_stats_state": ["is_federatable"], } -- cgit 1.4.1 From 770a6053a09812d23f6761442bddeaf6ef219057 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Fri, 18 Oct 2019 11:38:27 +0100 Subject: add note about database upgrade --- changelog.d/5759.misc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'changelog.d') diff --git a/changelog.d/5759.misc b/changelog.d/5759.misc index c0bc566c4c..dc7e2c01bf 100644 --- a/changelog.d/5759.misc +++ b/changelog.d/5759.misc @@ -1 +1,4 @@ -Allow devices to be marked as hidden, for use by features such as cross-signing. \ No newline at end of file +Allow devices to be marked as hidden, for use by features such as cross-signing. +This adds a new field with a default value to the devices field in the database, +and so the database upgrade may take a long time depending on how many devices +are in the database. -- cgit 1.4.1 From 560c1222672a241d89e74a1befe2a9f778732fdc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 18 Oct 2019 13:34:33 +0200 Subject: Fix logging config for the docker image (#6197) Turns out that loggers that are instantiated before the config is loaded get turned off. Also bring the logging config that is generated by --generate-config into line. Fixes #6194. --- changelog.d/6197.docker | 1 + docker/conf/log.config | 2 ++ synapse/config/logger.py | 5 ++--- 3 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelog.d/6197.docker (limited to 'changelog.d') diff --git a/changelog.d/6197.docker b/changelog.d/6197.docker new file mode 100644 index 0000000000..71fb9cbff5 --- /dev/null +++ b/changelog.d/6197.docker @@ -0,0 +1 @@ +Fix logging getting lost for the docker image. diff --git a/docker/conf/log.config b/docker/conf/log.config index db35e475a4..ed418a57cd 100644 --- a/docker/conf/log.config +++ b/docker/conf/log.config @@ -24,3 +24,5 @@ loggers: root: level: {{ SYNAPSE_LOG_LEVEL or "INFO" }} handlers: [console] + +disable_existing_loggers: false diff --git a/synapse/config/logger.py b/synapse/config/logger.py index d609ec111b..be92e33f93 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -68,9 +68,6 @@ handlers: filters: [context] loggers: - synapse: - level: INFO - synapse.storage.SQL: # beware: increasing this to DEBUG will make synapse log sensitive # information such as access tokens. 
@@ -79,6 +76,8 @@ loggers: root: level: INFO handlers: [file, console] + +disable_existing_loggers: false """ ) -- cgit 1.4.1 From 93eaeec75a2d3be89df0040b1374d339e92bb9b9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 18 Oct 2019 19:43:36 +0200 Subject: Remove Auth.check method (#6217) This method was somewhat redundant, and confusing. --- changelog.d/6217.misc | 1 + synapse/api/auth.py | 19 +------------------ synapse/handlers/federation.py | 7 ++++--- 3 files changed, 6 insertions(+), 21 deletions(-) create mode 100644 changelog.d/6217.misc (limited to 'changelog.d') diff --git a/changelog.d/6217.misc b/changelog.d/6217.misc new file mode 100644 index 0000000000..503352ee0b --- /dev/null +++ b/changelog.d/6217.misc @@ -0,0 +1 @@ +Remove Auth.check method. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index cb50579fd2..cd347fbe1b 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -84,27 +84,10 @@ class Auth(object): ) auth_events = yield self.store.get_events(auth_events_ids) auth_events = {(e.type, e.state_key): e for e in itervalues(auth_events)} - self.check( + event_auth.check( room_version, event, auth_events=auth_events, do_sig_check=do_sig_check ) - def check(self, room_version, event, auth_events, do_sig_check=True): - """ Checks if this event is correctly authed. - - Args: - room_version (str): version of the room - event: the event being checked. - auth_events (dict: event-key -> event): the existing room state. - - - Returns: - True if the auth checks pass. - """ - with Measure(self.clock, "auth.check"): - event_auth.check( - room_version, event, auth_events, do_sig_check=do_sig_check - ) - @defer.inlineCallbacks def check_joined_room(self, room_id, user_id, current_state=None): """Check if the user is currently joined in the room diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 57f661f16e..4b4c6c15f9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -30,6 +30,7 @@ from unpaddedbase64 import decode_base64 from twisted.internet import defer +from synapse import event_auth from synapse.api.constants import EventTypes, Membership, RejectedReason from synapse.api.errors import ( AuthError, @@ -1763,7 +1764,7 @@ class FederationHandler(BaseHandler): auth_for_e[(EventTypes.Create, "")] = create_event try: - self.auth.check(room_version, e, auth_events=auth_for_e) + event_auth.check(room_version, e, auth_events=auth_for_e) except SynapseError as err: # we may get SynapseErrors here as well as AuthErrors. For # instance, there are a couple of (ancient) events in some @@ -1919,7 +1920,7 @@ class FederationHandler(BaseHandler): } try: - self.auth.check(room_version, event, auth_events=current_auth_events) + event_auth.check(room_version, event, auth_events=current_auth_events) except AuthError as e: logger.warn("Soft-failing %r because %s", event, e) event.internal_metadata.soft_failed = True @@ -2018,7 +2019,7 @@ class FederationHandler(BaseHandler): ) try: - self.auth.check(room_version, event, auth_events=auth_events) + event_auth.check(room_version, event, auth_events=auth_events) except AuthError as e: logger.warn("Failed auth resolution for %r because %s", event, e) raise e -- cgit 1.4.1 From 6493ed572380828dfa9ed4c900deada30ceb0604 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Fri, 18 Oct 2019 18:45:36 +0100 Subject: Add changelog entry ... again? How did you make it disappear, git? 
--- changelog.d/5726.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5726.feature (limited to 'changelog.d') diff --git a/changelog.d/5726.feature b/changelog.d/5726.feature new file mode 100644 index 0000000000..d3c669aec0 --- /dev/null +++ b/changelog.d/5726.feature @@ -0,0 +1 @@ +Add ability to upload cross-signing signatures. -- cgit 1.4.1 From 82c8799ec7f6676555033f5d804cbed443a1ea3e Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Sat, 19 Oct 2019 09:06:15 +0100 Subject: Set room version default to 5 --- changelog.d/6220.feature | 1 + docs/sample_config.yaml | 2 +- synapse/config/server.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6220.feature (limited to 'changelog.d') diff --git a/changelog.d/6220.feature b/changelog.d/6220.feature new file mode 100644 index 0000000000..8343e9912b --- /dev/null +++ b/changelog.d/6220.feature @@ -0,0 +1 @@ +Increase default room version from 4 to 5, thereby enforcing server key validity period checks. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8226978ba6..af3ca0f722 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -72,7 +72,7 @@ pid_file: DATADIR/homeserver.pid # For example, for room version 1, default_room_version should be set # to "1". # -#default_room_version: "4" +#default_room_version: "5" # The GC threshold parameters to pass to `gc.set_threshold`, if defined # diff --git a/synapse/config/server.py b/synapse/config/server.py index afc4d6a4ab..26e6d84c09 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -41,7 +41,7 @@ logger = logging.Logger(__name__) # in the list. DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"] -DEFAULT_ROOM_VERSION = "4" +DEFAULT_ROOM_VERSION = "5" ROOM_COMPLEXITY_TOO_GREAT = ( "Your homeserver is unable to join rooms this large or complex. " -- cgit 1.4.1 From 22a9f75097bb51d17d3b1f824665b51607f2b95e Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Sat, 19 Oct 2019 19:42:10 +0200 Subject: Delete format_tap.py (#6219) * Delete format_tap.py This python implementation of a tap formatting library for buildkite has been replaced with a perl implementation as part of the matrix-org/sytest repo, which is specific to sytest's language, not that of any one homeserver's. --- .buildkite/format_tap.py | 48 ------------------------------------------------ changelog.d/6219.misc | 1 + 2 files changed, 1 insertion(+), 48 deletions(-) delete mode 100644 .buildkite/format_tap.py create mode 100644 changelog.d/6219.misc (limited to 'changelog.d') diff --git a/.buildkite/format_tap.py b/.buildkite/format_tap.py deleted file mode 100644 index b557a9c38e..0000000000 --- a/.buildkite/format_tap.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import sys -from tap.parser import Parser -from tap.line import Result, Unknown, Diagnostic - -out = ["### TAP Output for " + sys.argv[2]] - -p = Parser() - -in_error = False - -for line in p.parse_file(sys.argv[1]): - if isinstance(line, Result): - if in_error: - out.append("") - out.append("") - out.append("") - out.append("----") - out.append("") - in_error = False - - if not line.ok and not line.todo: - in_error = True - - out.append("FAILURE Test #%d: ``%s``" % (line.number, line.description)) - out.append("") - out.append("
<details><summary>Show log</summary>")
-
-    elif isinstance(line, Diagnostic) and in_error:
-        out.append(line.text)
-
-if out:
-    for line in out[:-3]:
-        print(line)
diff --git a/changelog.d/6219.misc b/changelog.d/6219.misc
new file mode 100644
index 0000000000..296406246d
--- /dev/null
+++ b/changelog.d/6219.misc
@@ -0,0 +1 @@
+Remove `format_tap.py` script in favour of a Perl reimplementation in SyTest's repo.
\ No newline at end of file
-- 
cgit 1.4.1
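
For reference, the deleted script's core loop is easy to reconstruct from the diff above: it walked TAP lines with the `tap.py` library and wrapped each failure's diagnostics in a collapsible HTML block for Buildkite. A minimal sketch of that pattern, assuming `tap.py` is installed and relying only on the `Parser`/`Result`/`Diagnostic` API the deleted code itself used:

```python
import sys

from tap.line import Diagnostic, Result
from tap.parser import Parser

out = []
in_error = False

for line in Parser().parse_file(sys.argv[1]):
    if isinstance(line, Result):
        if in_error:
            # Close the previous failure's collapsible log block.
            out.append("</pre></code></details>")
        in_error = not line.ok and not line.todo
        if in_error:
            out.append("FAILURE Test #%d: ``%s``" % (line.number, line.description))
            out.append("<details><summary>Show log</summary><code><pre>")
    elif isinstance(line, Diagnostic) and in_error:
        # Diagnostic lines following a failed result belong to its log.
        out.append(line.text)

print("\n".join(out))
```
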


From 3c304aaaebfd95f6cc17b1f0677183df9fe6b735 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 21 Oct 2019 16:10:37 +0100
Subject: Newsfile

---
 changelog.d/6231.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6231.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6231.misc b/changelog.d/6231.misc
new file mode 100644
index 0000000000..89b8297794
--- /dev/null
+++ b/changelog.d/6231.misc
@@ -0,0 +1 @@
+Refactor storage layer in preparation to support having multiple databases.
-- 
cgit 1.4.1


From b2945d26727520378f1f80bf96eed24638c360cf Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 22 Oct 2019 13:52:25 +0100
Subject: Fix demo script on ipv6-supported boxes (#6229)

The Synapse demo was a bit flaky in terms of supporting federation. It turns out that if your computer resolved `localhost` to `::1` instead of `127.0.0.1`, the built-in federation blacklist specified in `start.sh` would still block it, since it contained an entry for `::1/128`. Removing this entry means Synapse is no longer prevented from contacting `::1`, and federation works again on these boxes.
---
 changelog.d/6229.bugfix | 1 +
 demo/start.sh           | 3 +--
 2 files changed, 2 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6229.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6229.bugfix b/changelog.d/6229.bugfix
new file mode 100644
index 0000000000..bced3304d0
--- /dev/null
+++ b/changelog.d/6229.bugfix
@@ -0,0 +1 @@
+Prevent the demo Synapse servers from blacklisting `::1`.
\ No newline at end of file
diff --git a/demo/start.sh b/demo/start.sh
index eccaa2abeb..83396e5c33 100755
--- a/demo/start.sh
+++ b/demo/start.sh
@@ -77,14 +77,13 @@ for port in 8080 8081 8082; do
 
         # Reduce the blacklist
         blacklist=$(cat <<-BLACK
-		# Set the blacklist so that it doesn't include 127.0.0.1
+		# Set the blacklist so that it doesn't include 127.0.0.1, ::1
 		federation_ip_range_blacklist:
 		  - '10.0.0.0/8'
 		  - '172.16.0.0/12'
 		  - '192.168.0.0/16'
 		  - '100.64.0.0/10'
 		  - '169.254.0.0/16'
-		  - '::1/128'
 		  - 'fe80::/64'
 		  - 'fc00::/7'
 		BLACK
-- 
cgit 1.4.1
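
Whether a given box hits the failure mode described above depends on how its resolver answers for `localhost`. A small stdlib check (the port number here is arbitrary, used only to form the query):

```python
import socket

# Collect every address the local resolver returns for "localhost".
addrs = {info[4][0] for info in socket.getaddrinfo("localhost", 8080)}
print("localhost resolves to:", sorted(addrs))

if "::1" in addrs:
    # On such a box, the old '::1/128' blacklist entry would have
    # blocked the demo homeservers from federating with each other.
    print("IPv6 loopback is in play; the old blacklist entry would block it.")
```
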


From 056383953529132d8df3b352a05106c105bfb917 Mon Sep 17 00:00:00 2001
From: Hubert Chathi 
Date: Tue, 22 Oct 2019 21:51:01 -0400
Subject: add news file

---
 changelog.d/5727.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5727.feature

(limited to 'changelog.d')

diff --git a/changelog.d/5727.feature b/changelog.d/5727.feature
new file mode 100644
index 0000000000..819bebf2d7
--- /dev/null
+++ b/changelog.d/5727.feature
@@ -0,0 +1 @@
+Add federation support for cross-signing.
-- 
cgit 1.4.1


From 409c62b27bca5df1c1f147e85ac1432376054d1c Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 23 Oct 2019 13:22:54 +0100
Subject: Add config linting script that checks for bool casing (#6203)

Add a linting script that enforces that all boolean values in the default config are lowercase.

This has annoyed me for a while so I decided to fix it.
---
 changelog.d/6203.misc            |  1 +
 docs/sample_config.yaml          | 30 +++++++++++++++---------------
 scripts-dev/config-lint.sh       |  9 +++++++++
 scripts-dev/lint.sh              |  1 +
 synapse/config/appservice.py     |  2 +-
 synapse/config/consent_config.py |  4 ++--
 synapse/config/emailconfig.py    |  4 ++--
 synapse/config/metrics.py        |  2 +-
 synapse/config/registration.py   |  2 +-
 synapse/config/saml2_config.py   |  2 +-
 synapse/config/server.py         | 10 +++++-----
 synapse/config/tls.py            |  9 ++++++++-
 synapse/config/voip.py           |  2 +-
 tox.ini                          |  1 +
 14 files changed, 49 insertions(+), 30 deletions(-)
 create mode 100644 changelog.d/6203.misc
 create mode 100755 scripts-dev/config-lint.sh

(limited to 'changelog.d')

diff --git a/changelog.d/6203.misc b/changelog.d/6203.misc
new file mode 100644
index 0000000000..c1d8276d45
--- /dev/null
+++ b/changelog.d/6203.misc
@@ -0,0 +1 @@
+Enforce that all boolean configuration values are lowercase in CI.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 8226978ba6..b4dd146f06 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -86,7 +86,7 @@ pid_file: DATADIR/homeserver.pid
 # Whether room invites to users on this server should be blocked
 # (except those sent by local server admins). The default is False.
 #
-#block_non_admin_invites: True
+#block_non_admin_invites: true
 
 # Room searching
 #
@@ -239,7 +239,7 @@ listeners:
 
 # Global blocking
 #
-#hs_disabled: False
+#hs_disabled: false
 #hs_disabled_message: 'Human readable reason for why the HS is blocked'
 #hs_disabled_limit_type: 'error code(str), to help clients decode reason'
 
@@ -261,7 +261,7 @@ listeners:
 # sign up in a short space of time never to return after their initial
 # session.
 #
-#limit_usage_by_mau: False
+#limit_usage_by_mau: false
 #max_mau_value: 50
 #mau_trial_days: 2
 
@@ -269,7 +269,7 @@ listeners:
 # be populated, however no one will be limited. If limit_usage_by_mau
 # is true, this is implied to be true.
 #
-#mau_stats_only: False
+#mau_stats_only: false
 
 # Sometimes the server admin will want to ensure certain accounts are
 # never blocked by mau checking. These accounts are specified here.
@@ -294,7 +294,7 @@ listeners:
 #
 # Uncomment the below lines to enable:
 #limit_remote_rooms:
-#  enabled: True
+#  enabled: true
 #  complexity: 1.0
 #  complexity_error: "This room is too complex."
 
@@ -411,7 +411,7 @@ acme:
     # ACME support is disabled by default. Set this to `true` and uncomment
     # tls_certificate_path and tls_private_key_path above to enable it.
     #
-    enabled: False
+    enabled: false
 
     # Endpoint to use to request certificates. If you only want to test,
     # use Let's Encrypt's staging url:
@@ -786,7 +786,7 @@ uploads_path: "DATADIR/uploads"
 # connect to arbitrary endpoints without having first signed up for a
 # valid account (e.g. by passing a CAPTCHA).
 #
-#turn_allow_guests: True
+#turn_allow_guests: true
 
 
 ## Registration ##
@@ -829,7 +829,7 @@ uploads_path: "DATADIR/uploads"
 # where d is equal to 10% of the validity period.
 #
 #account_validity:
-#  enabled: True
+#  enabled: true
 #  period: 6w
 #  renew_at: 1w
 #  renew_email_subject: "Renew your %(app)s account"
@@ -971,7 +971,7 @@ account_threepid_delegates:
 
 # Enable collection and rendering of performance metrics
 #
-#enable_metrics: False
+#enable_metrics: false
 
 # Enable sentry integration
 # NOTE: While attempts are made to ensure that the logs don't contain
@@ -1023,7 +1023,7 @@ metrics_flags:
 # Uncomment to enable tracking of application service IP addresses. Implicitly
 # enables MAU tracking for application service users.
 #
-#track_appservice_user_ips: True
+#track_appservice_user_ips: true
 
 
 # a secret which is used to sign access tokens. If none is specified,
@@ -1149,7 +1149,7 @@ saml2_config:
   #      - url: https://our_idp/metadata.xml
   #
   #    # By default, the user has to go to our login page first. If you'd like
-  #    # to allow IdP-initiated login, set 'allow_unsolicited: True' in a
+  #    # to allow IdP-initiated login, set 'allow_unsolicited: true' in a
   #    # 'service.sp' section:
   #    #
   #    #service:
@@ -1263,13 +1263,13 @@ password_config:
 #   smtp_port: 25 # SSL: 465, STARTTLS: 587
 #   smtp_user: "exampleusername"
 #   smtp_pass: "examplepassword"
-#   require_transport_security: False
+#   require_transport_security: false
 #   notif_from: "Your Friendly %(app)s Home Server "
 #   app_name: Matrix
 #
 #   # Enable email notifications by default
 #   #
-#   notif_for_new_users: True
+#   notif_for_new_users: true
 #
 #   # Defining a custom URL for Riot is only needed if email notifications
 #   # should contain links to a self-hosted installation of Riot; when set
@@ -1447,11 +1447,11 @@ password_config:
 #    body: >-
 #      To continue using this homeserver you must review and agree to the
 #      terms and conditions at %(consent_uri)s
-#  send_server_notice_to_guests: True
+#  send_server_notice_to_guests: true
 #  block_events_error: >-
 #    To continue using this homeserver you must review and agree to the
 #    terms and conditions at %(consent_uri)s
-#  require_at_registration: False
+#  require_at_registration: false
 #  policy_name: Privacy Policy
 #
 
diff --git a/scripts-dev/config-lint.sh b/scripts-dev/config-lint.sh
new file mode 100755
index 0000000000..677a854c85
--- /dev/null
+++ b/scripts-dev/config-lint.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Find linting errors in Synapse's default config file.
+# Exits with 0 if there are no problems, or another code otherwise.
+
+# Fix non-lowercase true/false values
+sed -i -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
+
+# Check if anything changed
+git diff --exit-code docs/sample_config.yaml
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index ebb4d69f86..02a2ca39e5 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -10,3 +10,4 @@ set -e
 isort -y -rc synapse tests scripts-dev scripts
 flake8 synapse tests
 python3 -m black synapse tests scripts-dev scripts
+./scripts-dev/config-lint.sh
diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index 9b4682222d..e77d3387ff 100644
--- a/synapse/config/appservice.py
+++ b/synapse/config/appservice.py
@@ -48,7 +48,7 @@ class AppServiceConfig(Config):
         # Uncomment to enable tracking of application service IP addresses. Implicitly
         # enables MAU tracking for application service users.
         #
-        #track_appservice_user_ips: True
+        #track_appservice_user_ips: true
         """
 
 
diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py
index 62c4c44d60..aec9c4bbce 100644
--- a/synapse/config/consent_config.py
+++ b/synapse/config/consent_config.py
@@ -62,11 +62,11 @@ DEFAULT_CONFIG = """\
 #    body: >-
 #      To continue using this homeserver you must review and agree to the
 #      terms and conditions at %(consent_uri)s
-#  send_server_notice_to_guests: True
+#  send_server_notice_to_guests: true
 #  block_events_error: >-
 #    To continue using this homeserver you must review and agree to the
 #    terms and conditions at %(consent_uri)s
-#  require_at_registration: False
+#  require_at_registration: false
 #  policy_name: Privacy Policy
 #
 """
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 658897a77e..39e7a1dddb 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -304,13 +304,13 @@ class EmailConfig(Config):
         #   smtp_port: 25 # SSL: 465, STARTTLS: 587
         #   smtp_user: "exampleusername"
         #   smtp_pass: "examplepassword"
-        #   require_transport_security: False
+        #   require_transport_security: false
         #   notif_from: "Your Friendly %(app)s Home Server "
         #   app_name: Matrix
         #
         #   # Enable email notifications by default
         #   #
-        #   notif_for_new_users: True
+        #   notif_for_new_users: true
         #
         #   # Defining a custom URL for Riot is only needed if email notifications
         #   # should contain links to a self-hosted installation of Riot; when set
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index 282a43bddb..22538153e1 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -70,7 +70,7 @@ class MetricsConfig(Config):
 
         # Enable collection and rendering of performance metrics
         #
-        #enable_metrics: False
+        #enable_metrics: false
 
         # Enable sentry integration
         # NOTE: While attempts are made to ensure that the logs don't contain
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index b3e3e6dda2..ab41623b2b 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -180,7 +180,7 @@ class RegistrationConfig(Config):
         # where d is equal to 10%% of the validity period.
         #
         #account_validity:
-        #  enabled: True
+        #  enabled: true
         #  period: 6w
         #  renew_at: 1w
         #  renew_email_subject: "Renew your %%(app)s account"
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
index c407e13680..c5ea2d43a1 100644
--- a/synapse/config/saml2_config.py
+++ b/synapse/config/saml2_config.py
@@ -176,7 +176,7 @@ class SAML2Config(Config):
           #      - url: https://our_idp/metadata.xml
           #
           #    # By default, the user has to go to our login page first. If you'd like
-          #    # to allow IdP-initiated login, set 'allow_unsolicited: True' in a
+          #    # to allow IdP-initiated login, set 'allow_unsolicited: true' in a
           #    # 'service.sp' section:
           #    #
           #    #service:
diff --git a/synapse/config/server.py b/synapse/config/server.py
index afc4d6a4ab..c942841578 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -532,7 +532,7 @@ class ServerConfig(Config):
         # Whether room invites to users on this server should be blocked
         # (except those sent by local server admins). The default is False.
         #
-        #block_non_admin_invites: True
+        #block_non_admin_invites: true
 
         # Room searching
         #
@@ -673,7 +673,7 @@ class ServerConfig(Config):
 
         # Global blocking
         #
-        #hs_disabled: False
+        #hs_disabled: false
         #hs_disabled_message: 'Human readable reason for why the HS is blocked'
         #hs_disabled_limit_type: 'error code(str), to help clients decode reason'
 
@@ -695,7 +695,7 @@ class ServerConfig(Config):
         # sign up in a short space of time never to return after their initial
         # session.
         #
-        #limit_usage_by_mau: False
+        #limit_usage_by_mau: false
         #max_mau_value: 50
         #mau_trial_days: 2
 
@@ -703,7 +703,7 @@ class ServerConfig(Config):
         # be populated, however no one will be limited. If limit_usage_by_mau
         # is true, this is implied to be true.
         #
-        #mau_stats_only: False
+        #mau_stats_only: false
 
         # Sometimes the server admin will want to ensure certain accounts are
         # never blocked by mau checking. These accounts are specified here.
@@ -728,7 +728,7 @@ class ServerConfig(Config):
         #
         # Uncomment the below lines to enable:
         #limit_remote_rooms:
-        #  enabled: True
+        #  enabled: true
         #  complexity: 1.0
         #  complexity_error: "This room is too complex."
 
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index f06341eb67..2e9e478a2a 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -289,6 +289,9 @@ class TlsConfig(Config):
             "http://localhost:8009/.well-known/acme-challenge"
         )
 
+        # flake8 doesn't recognise that variables are used in the below string
+        _ = tls_enabled, proxypassline, acme_enabled, default_acme_account_file
+
         return (
             """\
         ## TLS ##
@@ -451,7 +454,11 @@ class TlsConfig(Config):
         #tls_fingerprints: [{"sha256": ""}]
 
         """
-            % locals()
+            # Lowercase the string representation of boolean values
+            % {
+                x[0]: str(x[1]).lower() if isinstance(x[1], bool) else x[1]
+                for x in locals().items()
+            }
         )
 
     def read_tls_certificate(self):
diff --git a/synapse/config/voip.py b/synapse/config/voip.py
index a68a3068aa..b313bff140 100644
--- a/synapse/config/voip.py
+++ b/synapse/config/voip.py
@@ -56,5 +56,5 @@ class VoipConfig(Config):
         # connect to arbitrary endpoints without having first signed up for a
         # valid account (e.g. by passing a CAPTCHA).
         #
-        #turn_allow_guests: True
+        #turn_allow_guests: true
         """
diff --git a/tox.ini b/tox.ini
index 7ba6f6339f..3cd2c5e633 100644
--- a/tox.ini
+++ b/tox.ini
@@ -118,6 +118,7 @@ deps =
 commands =
     python -m black --check --diff .
     /bin/sh -c "flake8 synapse tests scripts scripts-dev scripts/hash_password scripts/register_new_matrix_user scripts/synapse_port_db synctl {env:PEP8SUFFIX:}"
+    {toxinidir}/scripts-dev/config-lint.sh
 
 [testenv:check_isort]
 skip_install = True
-- 
cgit 1.4.1
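
The `TlsConfig` hunk above fixes the same problem at generation time: the sample section is rendered with `%`-interpolation over `locals()`, so Python `bool`s would otherwise appear as `True`/`False` in the YAML. A self-contained sketch of that lowercasing trick (the variable names here are illustrative, not Synapse's):

```python
def render_sample(tls_enabled, acme_enabled):
    template = "tls: %(tls_enabled)s\nacme:\n  enabled: %(acme_enabled)s"
    # Lowercase the string representation of boolean values before
    # interpolating, so the generated YAML passes the new lint check.
    return template % {
        k: str(v).lower() if isinstance(v, bool) else v
        for k, v in locals().items()
    }

print(render_sample(tls_enabled=True, acme_enabled=False))
# tls: true
# acme:
#   enabled: false
```
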


From c97ed64db3d99680819ec4dcd88ea76f3d0c7537 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 23 Oct 2019 15:31:59 +0100
Subject: Make synapse_port_db correctly create indexes (#6102)

Make `synapse_port_db` correctly create indexes in the PostgreSQL database, by having it run the background updates on the database before migrating the data.

To ensure we're migrating the right data, also block the port if the SQLite3 database still has pending or ongoing background updates.

Fixes #4877
---
 changelog.d/6102.bugfix |   1 +
 scripts/synapse_port_db | 182 ++++++++++++++++++++++++++++++++++--------------
 2 files changed, 131 insertions(+), 52 deletions(-)
 create mode 100644 changelog.d/6102.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6102.bugfix b/changelog.d/6102.bugfix
new file mode 100644
index 0000000000..cd288c2a44
--- /dev/null
+++ b/changelog.d/6102.bugfix
@@ -0,0 +1 @@
+Make the `synapse_port_db` script create the right indexes on a new PostgreSQL database.
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 3f942abdb6..5a34d6f2f5 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -2,6 +2,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,9 +30,23 @@ import yaml
 from twisted.enterprise import adbapi
 from twisted.internet import defer, reactor
 
-from synapse.storage._base import LoggingTransaction, SQLBaseStore
+from synapse.config.homeserver import HomeServerConfig
+from synapse.logging.context import PreserveLoggingContext
+from synapse.storage._base import LoggingTransaction
+from synapse.storage.client_ips import ClientIpBackgroundUpdateStore
+from synapse.storage.deviceinbox import DeviceInboxBackgroundUpdateStore
+from synapse.storage.devices import DeviceBackgroundUpdateStore
 from synapse.storage.engines import create_engine
+from synapse.storage.events_bg_updates import EventsBackgroundUpdatesStore
+from synapse.storage.media_repository import MediaRepositoryBackgroundUpdateStore
 from synapse.storage.prepare_database import prepare_database
+from synapse.storage.registration import RegistrationBackgroundUpdateStore
+from synapse.storage.roommember import RoomMemberBackgroundUpdateStore
+from synapse.storage.search import SearchBackgroundUpdateStore
+from synapse.storage.state import StateBackgroundUpdateStore
+from synapse.storage.stats import StatsStore
+from synapse.storage.user_directory import UserDirectoryBackgroundUpdateStore
+from synapse.util import Clock
 
 logger = logging.getLogger("synapse_port_db")
 
@@ -98,33 +113,24 @@ APPEND_ONLY_TABLES = [
 end_error_exec_info = None
 
 
-class Store(object):
-    """This object is used to pull out some of the convenience API from the
-    Storage layer.
-
-    *All* database interactions should go through this object.
-    """
-
-    def __init__(self, db_pool, engine):
-        self.db_pool = db_pool
-        self.database_engine = engine
-
-    _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
-    _simple_insert = SQLBaseStore.__dict__["_simple_insert"]
-
-    _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
-    _simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
-    _simple_select_one = SQLBaseStore.__dict__["_simple_select_one"]
-    _simple_select_one_txn = SQLBaseStore.__dict__["_simple_select_one_txn"]
-    _simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
-    _simple_select_one_onecol_txn = SQLBaseStore.__dict__[
-        "_simple_select_one_onecol_txn"
-    ]
-
-    _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
-    _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
-    _simple_update_txn = SQLBaseStore.__dict__["_simple_update_txn"]
+class Store(
+    ClientIpBackgroundUpdateStore,
+    DeviceInboxBackgroundUpdateStore,
+    DeviceBackgroundUpdateStore,
+    EventsBackgroundUpdatesStore,
+    MediaRepositoryBackgroundUpdateStore,
+    RegistrationBackgroundUpdateStore,
+    RoomMemberBackgroundUpdateStore,
+    SearchBackgroundUpdateStore,
+    StateBackgroundUpdateStore,
+    UserDirectoryBackgroundUpdateStore,
+    StatsStore,
+):
+    def __init__(self, db_conn, hs):
+        super().__init__(db_conn, hs)
+        self.db_pool = hs.get_db_pool()
 
+    @defer.inlineCallbacks
     def runInteraction(self, desc, func, *args, **kwargs):
         def r(conn):
             try:
@@ -150,7 +156,8 @@ class Store(object):
                 logger.debug("[TXN FAIL] {%s} %s", desc, e)
                 raise
 
-        return self.db_pool.runWithConnection(r)
+        with PreserveLoggingContext():
+            return (yield self.db_pool.runWithConnection(r))
 
     def execute(self, f, *args, **kwargs):
         return self.runInteraction(f.__name__, f, *args, **kwargs)
@@ -176,6 +183,25 @@ class Store(object):
             raise
 
 
+class MockHomeserver:
+    def __init__(self, config, database_engine, db_conn, db_pool):
+        self.database_engine = database_engine
+        self.db_conn = db_conn
+        self.db_pool = db_pool
+        self.clock = Clock(reactor)
+        self.config = config
+        self.hostname = config.server_name
+
+    def get_db_conn(self):
+        return self.db_conn
+
+    def get_db_pool(self):
+        return self.db_pool
+
+    def get_clock(self):
+        return self.clock
+
+
 class Porter(object):
     def __init__(self, **kwargs):
         self.__dict__.update(kwargs)
@@ -447,31 +473,75 @@ class Porter(object):
 
         db_conn.commit()
 
+        return db_conn
+
     @defer.inlineCallbacks
-    def run(self):
-        try:
-            sqlite_db_pool = adbapi.ConnectionPool(
-                self.sqlite_config["name"], **self.sqlite_config["args"]
-            )
+    def build_db_store(self, config):
+        """Builds and returns a database store using the provided configuration.
 
-            postgres_db_pool = adbapi.ConnectionPool(
-                self.postgres_config["name"], **self.postgres_config["args"]
-            )
+        Args:
+            config: The database configuration, i.e. a dict following the structure of
+                the "database" section of Synapse's configuration file.
+
+        Returns:
+            The built Store object.
+        """
+        engine = create_engine(config)
+
+        self.progress.set_state("Preparing %s" % config["name"])
+        conn = self.setup_db(config, engine)
+
+        db_pool = adbapi.ConnectionPool(
+            config["name"], **config["args"]
+        )
+
+        hs = MockHomeserver(self.hs_config, engine, conn, db_pool)
+
+        store = Store(conn, hs)
+
+        yield store.runInteraction(
+            "%s_engine.check_database" % config["name"],
+            engine.check_database,
+        )
 
-            sqlite_engine = create_engine(sqlite_config)
-            postgres_engine = create_engine(postgres_config)
+        return store
 
-            self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
-            self.postgres_store = Store(postgres_db_pool, postgres_engine)
+    @defer.inlineCallbacks
+    def run_background_updates_on_postgres(self):
+        # Manually apply all background updates on the PostgreSQL database.
+        postgres_ready = yield self.postgres_store.has_completed_background_updates()
+
+        if not postgres_ready:
+            # Only say that we're running background updates when there are background
+            # updates to run.
+            self.progress.set_state("Running background updates on PostgreSQL")
+
+        while not postgres_ready:
+            yield self.postgres_store.do_next_background_update(100)
+            postgres_ready = yield (
+                self.postgres_store.has_completed_background_updates()
+            )
 
-            yield self.postgres_store.execute(postgres_engine.check_database)
+    @defer.inlineCallbacks
+    def run(self):
+        try:
+            self.sqlite_store = yield self.build_db_store(self.sqlite_config)
+
+            # Check if all background updates are done, abort if not.
+            updates_complete = yield self.sqlite_store.has_completed_background_updates()
+            if not updates_complete:
+                sys.stderr.write(
+                    "Pending background updates exist in the SQLite3 database."
+                    " Please start Synapse again and wait until every update has finished"
+                    " before running this script.\n"
+                )
+                defer.returnValue(None)
 
-            # Step 1. Set up databases.
-            self.progress.set_state("Preparing SQLite3")
-            self.setup_db(sqlite_config, sqlite_engine)
+            self.postgres_store = yield self.build_db_store(
+                self.hs_config.database_config
+            )
 
-            self.progress.set_state("Preparing PostgreSQL")
-            self.setup_db(postgres_config, postgres_engine)
+            yield self.run_background_updates_on_postgres()
 
             self.progress.set_state("Creating port tables")
 
@@ -563,6 +633,8 @@ class Porter(object):
         def conv(j, col):
             if j in bool_cols:
                 return bool(col)
+            if isinstance(col, bytes):
+                return bytearray(col)
             elif isinstance(col, string_types) and "\0" in col:
                 logger.warn(
                     "DROPPING ROW: NUL value in table %s col %s: %r",
@@ -926,18 +998,24 @@ if __name__ == "__main__":
         },
     }
 
-    postgres_config = yaml.safe_load(args.postgres_config)
+    hs_config = yaml.safe_load(args.postgres_config)
 
-    if "database" in postgres_config:
-        postgres_config = postgres_config["database"]
+    if "database" not in hs_config:
+        sys.stderr.write("The configuration file must have a 'database' section.\n")
+        sys.exit(4)
+
+    postgres_config = hs_config["database"]
 
     if "name" not in postgres_config:
-        sys.stderr.write("Malformed database config: no 'name'")
+        sys.stderr.write("Malformed database config: no 'name'\n")
         sys.exit(2)
     if postgres_config["name"] != "psycopg2":
-        sys.stderr.write("Database must use 'psycopg2' connector.")
+        sys.stderr.write("Database must use the 'psycopg2' connector.\n")
         sys.exit(3)
 
+    config = HomeServerConfig()
+    config.parse_config_dict(hs_config, "", "")
+
     def start(stdscr=None):
         if stdscr:
             progress = CursesProgress(stdscr)
@@ -946,9 +1024,9 @@ if __name__ == "__main__":
 
         porter = Porter(
             sqlite_config=sqlite_config,
-            postgres_config=postgres_config,
             progress=progress,
             batch_size=args.batch_size,
+            hs_config=config,
         )
 
         reactor.callWhenRunning(porter.run)
-- 
cgit 1.4.1
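
The core of the fix is visible in `run_background_updates_on_postgres` above: rather than assuming `prepare_database` leaves a fully indexed schema, the porter drains the background-update queue itself before copying data. A condensed sketch of that loop, where `store` stands for any object exposing the two methods used in the patch:

```python
from twisted.internet import defer


@defer.inlineCallbacks
def drain_background_updates(store):
    # Keep processing chunks of background-update work (100 "items" per
    # iteration, mirroring do_next_background_update(100) in the patch)
    # until the store reports that every registered update has finished.
    done = yield store.has_completed_background_updates()
    while not done:
        yield store.do_next_background_update(100)
        done = yield store.has_completed_background_updates()
```
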


From 2f859e865a7112c1d2a329bae20cb47d89813334 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 23 Oct 2019 15:56:50 +0100
Subject: Changelog

---
 changelog.d/6243.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6243.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6243.bugfix b/changelog.d/6243.bugfix
new file mode 100644
index 0000000000..cd288c2a44
--- /dev/null
+++ b/changelog.d/6243.bugfix
@@ -0,0 +1 @@
+Make the `synapse_port_db` script create the right indexes on a new PostgreSQL database.
-- 
cgit 1.4.1


From 9fb96889a46aadb909086dfed1ca4577a051128d Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 23 Oct 2019 12:05:30 +0100
Subject: Newsfile

---
 changelog.d/6240.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6240.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6240.misc b/changelog.d/6240.misc
new file mode 100644
index 0000000000..0b3d7a14a1
--- /dev/null
+++ b/changelog.d/6240.misc
@@ -0,0 +1 @@
+Move `persist_events` out from main data store.
-- 
cgit 1.4.1


From 92e88a71d31e9a1424acd8a3a8cf9ee02b4af9ea Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 23 Oct 2019 16:49:05 +0100
Subject: Cleanup extra quotes from IDEs (#6236)

---
 changelog.d/6236.misc                 | 1 +
 contrib/experiments/test_messaging.py | 2 +-
 contrib/graph/graph2.py               | 4 ++--
 synapse/event_auth.py                 | 3 +--
 4 files changed, 5 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/6236.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6236.misc b/changelog.d/6236.misc
new file mode 100644
index 0000000000..686e7a8721
--- /dev/null
+++ b/changelog.d/6236.misc
@@ -0,0 +1 @@
+Remove some extra quotation marks across the codebase.
\ No newline at end of file
diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py
index 5ef140ae48..6b22400a60 100644
--- a/contrib/experiments/test_messaging.py
+++ b/contrib/experiments/test_messaging.py
@@ -339,7 +339,7 @@ def main(stdscr):
     root_logger = logging.getLogger()
 
     formatter = logging.Formatter(
-        "%(asctime)s - %(name)s - %(lineno)d - " "%(levelname)s - %(message)s"
+        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
     )
     if not os.path.exists("logs"):
         os.makedirs("logs")
diff --git a/contrib/graph/graph2.py b/contrib/graph/graph2.py
index 9db8725eee..4619f0e3c1 100644
--- a/contrib/graph/graph2.py
+++ b/contrib/graph/graph2.py
@@ -36,7 +36,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
     args = [room_id]
 
     if limit:
-        sql += " ORDER BY topological_ordering DESC, stream_ordering DESC " "LIMIT ?"
+        sql += " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
 
         args.append(limit)
 
@@ -53,7 +53,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
 
     for event in events:
         c = conn.execute(
-            "SELECT state_group FROM event_to_state_groups " "WHERE event_id = ?",
+            "SELECT state_group FROM event_to_state_groups WHERE event_id = ?",
             (event.event_id,),
         )
 
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 4e91df60e6..e7b722547b 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -493,8 +493,7 @@ def _check_power_levels(event, auth_events):
         new_level_too_big = new_level is not None and new_level > user_level
         if old_level_too_big or new_level_too_big:
             raise AuthError(
-                403,
-                "You don't have permission to add ops level greater " "than your own",
+                403, "You don't have permission to add ops level greater than your own"
             )
 
 
-- 
cgit 1.4.1
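
For context on why these changes are safe no-ops: the "extra quotation marks" are adjacent string literals, which Python concatenates at compile time. Automated line rewrapping tends to leave the split form behind even once the line fits, so removing the extra quotes changes nothing at runtime:

```python
# Adjacent string literals are joined by the compiler, so both spellings
# produce exactly the same SQL string.
a = "SELECT state_group FROM event_to_state_groups " "WHERE event_id = ?"
b = "SELECT state_group FROM event_to_state_groups WHERE event_id = ?"
assert a == b
```
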


From 2794b79052f96b8103ce2b710959853313a82e90 Mon Sep 17 00:00:00 2001
From: Neil Johnson 
Date: Thu, 24 Oct 2019 11:48:46 +0100
Subject: Option to suppress resource exceeded alerting (#6173)

The expected use case is to suppress MAU limiting on small instances.
---
 changelog.d/6173.feature                           |   1 +
 docs/sample_config.yaml                            |   8 +-
 synapse/api/auth.py                                |  12 ++-
 synapse/api/constants.py                           |   7 ++
 synapse/config/server.py                           |  10 +-
 .../resource_limits_server_notices.py              | 110 ++++++++++++++-------
 .../test_resource_limits_server_notices.py         |  59 ++++++++++-
 tests/utils.py                                     |   1 -
 8 files changed, 161 insertions(+), 47 deletions(-)
 create mode 100644 changelog.d/6173.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6173.feature b/changelog.d/6173.feature
new file mode 100644
index 0000000000..b1cabc322b
--- /dev/null
+++ b/changelog.d/6173.feature
@@ -0,0 +1 @@
+Add config option to suppress client side resource limit alerting.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index b4dd146f06..6c81c0db75 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -241,7 +241,6 @@ listeners:
 #
 #hs_disabled: false
 #hs_disabled_message: 'Human readable reason for why the HS is blocked'
-#hs_disabled_limit_type: 'error code(str), to help clients decode reason'
 
 # Monthly Active User Blocking
 #
@@ -261,9 +260,16 @@ listeners:
 # sign up in a short space of time never to return after their initial
 # session.
 #
+# 'mau_limit_alerting' is a means of limiting client side alerting
+# should the mau limit be reached. This is useful for small instances
+# where the admin has 5 mau seats (say) for 5 specific people and no
+# interest in increasing the mau limit further. Defaults to True, which
+# means that alerting is enabled.
+#
 #limit_usage_by_mau: false
 #max_mau_value: 50
 #mau_trial_days: 2
+#mau_limit_alerting: false
 
 # If enabled, the metrics for the number of monthly active users will
 # be populated, however no one will be limited. If limit_usage_by_mau
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index cd347fbe1b..53f3bb0fa8 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -25,7 +25,13 @@ from twisted.internet import defer
 import synapse.logging.opentracing as opentracing
 import synapse.types
 from synapse import event_auth
-from synapse.api.constants import EventTypes, JoinRules, Membership, UserTypes
+from synapse.api.constants import (
+    EventTypes,
+    JoinRules,
+    LimitBlockingTypes,
+    Membership,
+    UserTypes,
+)
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -726,7 +732,7 @@ class Auth(object):
                 self.hs.config.hs_disabled_message,
                 errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                 admin_contact=self.hs.config.admin_contact,
-                limit_type=self.hs.config.hs_disabled_limit_type,
+                limit_type=LimitBlockingTypes.HS_DISABLED,
             )
         if self.hs.config.limit_usage_by_mau is True:
             assert not (user_id and threepid)
@@ -759,5 +765,5 @@ class Auth(object):
                     "Monthly Active User Limit Exceeded",
                     admin_contact=self.hs.config.admin_contact,
                     errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
-                    limit_type="monthly_active_user",
+                    limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER,
                 )
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 60e99e4663..312196675e 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -131,3 +131,10 @@ class RelationTypes(object):
     ANNOTATION = "m.annotation"
     REPLACE = "m.replace"
     REFERENCE = "m.reference"
+
+
+class LimitBlockingTypes(object):
+    """Reasons that a server may be blocked"""
+
+    MONTHLY_ACTIVE_USER = "monthly_active_user"
+    HS_DISABLED = "hs_disabled"
diff --git a/synapse/config/server.py b/synapse/config/server.py
index c942841578..d556df308d 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -171,6 +171,7 @@ class ServerConfig(Config):
         )
 
         self.mau_trial_days = config.get("mau_trial_days", 0)
+        self.mau_limit_alerting = config.get("mau_limit_alerting", True)
 
         # How long to keep redacted events in the database in unredacted form
         # before redacting them.
@@ -192,7 +193,6 @@ class ServerConfig(Config):
         # Options to disable HS
         self.hs_disabled = config.get("hs_disabled", False)
         self.hs_disabled_message = config.get("hs_disabled_message", "")
-        self.hs_disabled_limit_type = config.get("hs_disabled_limit_type", "")
 
         # Admin uri to direct users at should their instance become blocked
         # due to resource constraints
@@ -675,7 +675,6 @@ class ServerConfig(Config):
         #
         #hs_disabled: false
         #hs_disabled_message: 'Human readable reason for why the HS is blocked'
-        #hs_disabled_limit_type: 'error code(str), to help clients decode reason'
 
         # Monthly Active User Blocking
         #
@@ -695,9 +694,16 @@ class ServerConfig(Config):
         # sign up in a short space of time never to return after their initial
         # session.
         #
+        # 'mau_limit_alerting' is a means of limiting client side alerting
+        # should the mau limit be reached. This is useful for small instances
+        # where the admin has 5 mau seats (say) for 5 specific people and no
+        # interest in increasing the mau limit further. Defaults to True, which
+        # means that alerting is enabled.
+        #
         #limit_usage_by_mau: false
         #max_mau_value: 50
         #mau_trial_days: 2
+        #mau_limit_alerting: false
 
         # If enabled, the metrics for the number of monthly active users will
         # be populated, however no one will be limited. If limit_usage_by_mau
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index 81c4aff496..c0e7f475c9 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -20,6 +20,7 @@ from twisted.internet import defer
 
 from synapse.api.constants import (
     EventTypes,
+    LimitBlockingTypes,
     ServerNoticeLimitReached,
     ServerNoticeMsgType,
 )
@@ -70,7 +71,7 @@ class ResourceLimitsServerNotices(object):
             return
 
         if not self._server_notices_manager.is_enabled():
-            # Don't try and send server notices unles they've been enabled
+            # Don't try and send server notices unless they've been enabled
             return
 
         timestamp = yield self._store.user_last_seen_monthly_active(user_id)
@@ -79,8 +80,6 @@ class ResourceLimitsServerNotices(object):
             # In practice, not sure we can ever get here
             return
 
-        # Determine current state of room
-
         room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id)
 
         if not room_id:
@@ -88,50 +87,85 @@ class ResourceLimitsServerNotices(object):
             return
 
         yield self._check_and_set_tags(user_id, room_id)
+
+        # Determine current state of room
         currently_blocked, ref_events = yield self._is_room_currently_blocked(room_id)
 
+        limit_msg = None
+        limit_type = None
         try:
-            # Normally should always pass in user_id if you have it, but in
-            # this case are checking what would happen to other users if they
-            # were to arrive.
-            try:
-                yield self._auth.check_auth_blocking()
-                is_auth_blocking = False
-            except ResourceLimitError as e:
-                is_auth_blocking = True
-                event_content = e.msg
-                event_limit_type = e.limit_type
-
-            if currently_blocked and not is_auth_blocking:
-                # Room is notifying of a block, when it ought not to be.
-                # Remove block notification
-                content = {"pinned": ref_events}
-                yield self._server_notices_manager.send_notice(
-                    user_id, content, EventTypes.Pinned, ""
-                )
+            # Normally should always pass in user_id to check_auth_blocking
+            # if you have it, but in this case are checking what would happen
+            # to other users if they were to arrive.
+            yield self._auth.check_auth_blocking()
+        except ResourceLimitError as e:
+            limit_msg = e.msg
+            limit_type = e.limit_type
 
-            elif not currently_blocked and is_auth_blocking:
+        try:
+            if (
+                limit_type == LimitBlockingTypes.MONTHLY_ACTIVE_USER
+                and not self._config.mau_limit_alerting
+            ):
+                # We have hit the MAU limit, but MAU alerting is disabled:
+                # reset room if necessary and return
+                if currently_blocked:
+                    self._remove_limit_block_notification(user_id, ref_events)
+                return
+
+            if currently_blocked and not limit_msg:
+                # Room is notifying of a block, when it ought not to be.
+                yield self._remove_limit_block_notification(user_id, ref_events)
+            elif not currently_blocked and limit_msg:
                 # Room is not notifying of a block, when it ought to be.
-                # Add block notification
-                content = {
-                    "body": event_content,
-                    "msgtype": ServerNoticeMsgType,
-                    "server_notice_type": ServerNoticeLimitReached,
-                    "admin_contact": self._config.admin_contact,
-                    "limit_type": event_limit_type,
-                }
-                event = yield self._server_notices_manager.send_notice(
-                    user_id, content, EventTypes.Message
+                yield self._apply_limit_block_notification(
+                    user_id, limit_msg, limit_type
                 )
-
-                content = {"pinned": [event.event_id]}
-                yield self._server_notices_manager.send_notice(
-                    user_id, content, EventTypes.Pinned, ""
-                )
-
         except SynapseError as e:
             logger.error("Error sending resource limits server notice: %s", e)
 
+    @defer.inlineCallbacks
+    def _remove_limit_block_notification(self, user_id, ref_events):
+        """Utility method to remove limit block notifications from the server
+        notices room.
+
+        Args:
+            user_id (str): user to notify
+            ref_events (list[str]): The event_ids of pinned events that are unrelated to
+            limit blocking and need to be preserved.
+        """
+        content = {"pinned": ref_events}
+        yield self._server_notices_manager.send_notice(
+            user_id, content, EventTypes.Pinned, ""
+        )
+
+    @defer.inlineCallbacks
+    def _apply_limit_block_notification(self, user_id, event_body, event_limit_type):
+        """Utility method to apply limit block notifications in the server
+        notices room.
+
+        Args:
+            user_id (str): user to notify
+            event_body(str): The human readable text that describes the block.
+            event_limit_type(str): Specifies the type of block e.g. monthly active user
+            limit has been exceeded.
+        """
+        content = {
+            "body": event_body,
+            "msgtype": ServerNoticeMsgType,
+            "server_notice_type": ServerNoticeLimitReached,
+            "admin_contact": self._config.admin_contact,
+            "limit_type": event_limit_type,
+        }
+        event = yield self._server_notices_manager.send_notice(
+            user_id, content, EventTypes.Message
+        )
+
+        content = {"pinned": [event.event_id]}
+        yield self._server_notices_manager.send_notice(
+            user_id, content, EventTypes.Pinned, ""
+        )
+
     @defer.inlineCallbacks
     def _check_and_set_tags(self, user_id, room_id):
         """
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index cdf89e3383..eb540e34f6 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -17,7 +17,7 @@ from mock import Mock
 
 from twisted.internet import defer
 
-from synapse.api.constants import EventTypes, ServerNoticeMsgType
+from synapse.api.constants import EventTypes, LimitBlockingTypes, ServerNoticeMsgType
 from synapse.api.errors import ResourceLimitError
 from synapse.server_notices.resource_limits_server_notices import (
     ResourceLimitsServerNotices,
@@ -133,7 +133,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase):
         self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
 
         # Would be better to check contents, but 2 calls == set blocking event
-        self.assertTrue(self._send_notice.call_count == 2)
+        self.assertEqual(self._send_notice.call_count, 2)
 
     def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self):
         """
@@ -158,6 +158,61 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase):
 
         self._send_notice.assert_not_called()
 
+    def test_maybe_send_server_notice_when_alerting_suppressed_room_unblocked(self):
+        """
+        Test that when the server is over the MAU limit and alerting is
+        suppressed, no alert message is sent into the room.
+        """
+        self.hs.config.mau_limit_alerting = False
+        self._rlsn._auth.check_auth_blocking = Mock(
+            side_effect=ResourceLimitError(
+                403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER
+            )
+        )
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
+
+        self.assertTrue(self._send_notice.call_count == 0)
+
+    def test_check_hs_disabled_unaffected_by_mau_alert_suppression(self):
+        """
+        Test that when a server is disabled, MAU limit alerting is ignored.
+        """
+        self.hs.config.mau_limit_alerting = False
+        self._rlsn._auth.check_auth_blocking = Mock(
+            side_effect=ResourceLimitError(
+                403, "foo", limit_type=LimitBlockingTypes.HS_DISABLED
+            )
+        )
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
+
+        # Would be better to check contents, but 2 calls == set blocking event
+        self.assertEqual(self._send_notice.call_count, 2)
+
+    def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self):
+        """
+        When the room is already in a blocked state, test that when alerting
+        is suppressed the room is returned to an unblocked state.
+        """
+        self.hs.config.mau_limit_alerting = False
+        self._rlsn._auth.check_auth_blocking = Mock(
+            side_effect=ResourceLimitError(
+                403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER
+            )
+        )
+        self._rlsn._server_notices_manager.__is_room_currently_blocked = Mock(
+            return_value=defer.succeed((True, []))
+        )
+
+        mock_event = Mock(
+            type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType}
+        )
+        self._rlsn._store.get_events = Mock(
+            return_value=defer.succeed({"123": mock_event})
+        )
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
+
+        self._send_notice.assert_called_once()
+
 
 class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase):
     def prepare(self, reactor, clock, hs):
diff --git a/tests/utils.py b/tests/utils.py
index 0a64f75d04..8cced4b7e8 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -137,7 +137,6 @@ def default_config(name, parse=False):
         "limit_usage_by_mau": False,
         "hs_disabled": False,
         "hs_disabled_message": "",
-        "hs_disabled_limit_type": "",
         "max_mau_value": 50,
         "mau_trial_days": 0,
         "mau_stats_only": False,
-- 
cgit 1.4.1
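
After this patch the notification logic boils down to a three-way decision. A distilled, synchronous sketch of that control flow; the two helpers are hypothetical stand-ins for the deferred-returning methods in the real handler, and `limit_type` is `None` when `check_auth_blocking` raised no error:

```python
def sync_room_notice_state(limit_type, currently_blocked, mau_limit_alerting):
    if limit_type == "monthly_active_user" and not mau_limit_alerting:
        # Over the MAU limit but alerting is suppressed: clear any stale
        # notice, then stay silent.
        if currently_blocked:
            remove_limit_block_notification()
        return
    if currently_blocked and limit_type is None:
        # The room advertises a block that no longer applies.
        remove_limit_block_notification()
    elif not currently_blocked and limit_type is not None:
        # A block applies but the room does not yet advertise it.
        apply_limit_block_notification(limit_type)


def remove_limit_block_notification():
    print("unpinning limit-block notice")


def apply_limit_block_notification(limit_type):
    print("pinning limit-block notice:", limit_type)


sync_room_notice_state("monthly_active_user", True, mau_limit_alerting=False)
```
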


From ef8d76be9983a79b1dabf15b7c5116d50c9b0d25 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Thu, 24 Oct 2019 12:40:13 +0100
Subject: Changelog

---
 changelog.d/6247.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6247.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6247.bugfix b/changelog.d/6247.bugfix
new file mode 100644
index 0000000000..3122ba0bde
--- /dev/null
+++ b/changelog.d/6247.bugfix
@@ -0,0 +1 @@
+Update list of boolean columns in `synapse_port_db`.
-- 
cgit 1.4.1


From 8831b04a53681c931197809d9010326bdd44ecad Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Thu, 24 Oct 2019 12:23:56 +0100
Subject: 1.5.0rc1

---
 CHANGES.md               | 79 ++++++++++++++++++++++++++++++++++++++++++++++++
 changelog.d/1172.misc    |  1 -
 changelog.d/2142.feature |  1 -
 changelog.d/2380.bugfix  |  1 -
 changelog.d/3436.bugfix  |  1 -
 changelog.d/4088.bugfix  |  1 -
 changelog.d/5726.feature |  1 -
 changelog.d/5759.misc    |  4 ---
 changelog.d/5769.feature |  1 -
 changelog.d/5978.misc    |  1 -
 changelog.d/6019.misc    |  1 -
 changelog.d/6077.misc    |  1 -
 changelog.d/6084.misc    |  1 -
 changelog.d/6101.misc    |  1 -
 changelog.d/6102.bugfix  |  1 -
 changelog.d/6108.misc    |  1 -
 changelog.d/6109.bugfix  |  1 -
 changelog.d/6114.feature |  1 -
 changelog.d/6115.misc    |  1 -
 changelog.d/6125.feature |  1 -
 changelog.d/6127.misc    |  1 -
 changelog.d/6137.misc    |  1 -
 changelog.d/6139.misc    |  1 -
 changelog.d/6144.bugfix  |  1 -
 changelog.d/6147.bugfix  |  1 -
 changelog.d/6148.misc    |  1 -
 changelog.d/6150.misc    |  1 -
 changelog.d/6152.misc    |  1 -
 changelog.d/6153.misc    |  1 -
 changelog.d/6154.misc    |  1 -
 changelog.d/6155.bugfix  |  1 -
 changelog.d/6156.misc    |  1 -
 changelog.d/6159.misc    |  1 -
 changelog.d/6160.misc    |  1 -
 changelog.d/6161.bugfix  |  1 -
 changelog.d/6167.misc    |  1 -
 changelog.d/6168.bugfix  |  1 -
 changelog.d/6170.bugfix  |  1 -
 changelog.d/6173.feature |  1 -
 changelog.d/6175.misc    |  1 -
 changelog.d/6178.bugfix  |  1 -
 changelog.d/6179.misc    |  1 -
 changelog.d/6184.misc    |  1 -
 changelog.d/6186.bugfix  |  1 -
 changelog.d/6186.misc    |  1 -
 changelog.d/6187.bugfix  |  1 -
 changelog.d/6189.misc    |  1 -
 changelog.d/6191.misc    |  1 -
 changelog.d/6193.misc    |  1 -
 changelog.d/6195.bugfix  |  1 -
 changelog.d/6196.misc    |  1 -
 changelog.d/6197.docker  |  1 -
 changelog.d/6203.misc    |  1 -
 changelog.d/6212.bugfix  |  1 -
 changelog.d/6214.misc    |  1 -
 changelog.d/6216.bugfix  |  1 -
 changelog.d/6217.misc    |  1 -
 changelog.d/6219.misc    |  1 -
 changelog.d/6229.bugfix  |  1 -
 changelog.d/6231.misc    |  1 -
 changelog.d/6236.misc    |  1 -
 changelog.d/6243.bugfix  |  1 -
 synapse/__init__.py      |  2 +-
 63 files changed, 80 insertions(+), 65 deletions(-)
 delete mode 100644 changelog.d/1172.misc
 delete mode 100644 changelog.d/2142.feature
 delete mode 100644 changelog.d/2380.bugfix
 delete mode 100644 changelog.d/3436.bugfix
 delete mode 100644 changelog.d/4088.bugfix
 delete mode 100644 changelog.d/5726.feature
 delete mode 100644 changelog.d/5759.misc
 delete mode 100644 changelog.d/5769.feature
 delete mode 100644 changelog.d/5978.misc
 delete mode 100644 changelog.d/6019.misc
 delete mode 100644 changelog.d/6077.misc
 delete mode 100644 changelog.d/6084.misc
 delete mode 100644 changelog.d/6101.misc
 delete mode 100644 changelog.d/6102.bugfix
 delete mode 100644 changelog.d/6108.misc
 delete mode 100644 changelog.d/6109.bugfix
 delete mode 100644 changelog.d/6114.feature
 delete mode 100644 changelog.d/6115.misc
 delete mode 100644 changelog.d/6125.feature
 delete mode 100644 changelog.d/6127.misc
 delete mode 100644 changelog.d/6137.misc
 delete mode 100644 changelog.d/6139.misc
 delete mode 100644 changelog.d/6144.bugfix
 delete mode 100644 changelog.d/6147.bugfix
 delete mode 100644 changelog.d/6148.misc
 delete mode 100644 changelog.d/6150.misc
 delete mode 100644 changelog.d/6152.misc
 delete mode 100644 changelog.d/6153.misc
 delete mode 100644 changelog.d/6154.misc
 delete mode 100644 changelog.d/6155.bugfix
 delete mode 100644 changelog.d/6156.misc
 delete mode 100644 changelog.d/6159.misc
 delete mode 100644 changelog.d/6160.misc
 delete mode 100644 changelog.d/6161.bugfix
 delete mode 100644 changelog.d/6167.misc
 delete mode 100644 changelog.d/6168.bugfix
 delete mode 100644 changelog.d/6170.bugfix
 delete mode 100644 changelog.d/6173.feature
 delete mode 100644 changelog.d/6175.misc
 delete mode 100644 changelog.d/6178.bugfix
 delete mode 100644 changelog.d/6179.misc
 delete mode 100644 changelog.d/6184.misc
 delete mode 100644 changelog.d/6186.bugfix
 delete mode 100644 changelog.d/6186.misc
 delete mode 100644 changelog.d/6187.bugfix
 delete mode 100644 changelog.d/6189.misc
 delete mode 100644 changelog.d/6191.misc
 delete mode 100644 changelog.d/6193.misc
 delete mode 100644 changelog.d/6195.bugfix
 delete mode 100644 changelog.d/6196.misc
 delete mode 100644 changelog.d/6197.docker
 delete mode 100644 changelog.d/6203.misc
 delete mode 100644 changelog.d/6212.bugfix
 delete mode 100644 changelog.d/6214.misc
 delete mode 100644 changelog.d/6216.bugfix
 delete mode 100644 changelog.d/6217.misc
 delete mode 100644 changelog.d/6219.misc
 delete mode 100644 changelog.d/6229.bugfix
 delete mode 100644 changelog.d/6231.misc
 delete mode 100644 changelog.d/6236.misc
 delete mode 100644 changelog.d/6243.bugfix

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index cd23b8112b..482d124833 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,82 @@
+Synapse 1.5.0rc1 (2019-10-24)
+=============================
+
+Features
+--------
+
+- Improve quality of thumbnails for 1-bit/8-bit color palette images. ([\#2142](https://github.com/matrix-org/synapse/issues/2142))
+- Add ability to upload cross-signing signatures. ([\#5726](https://github.com/matrix-org/synapse/issues/5726))
+- Allow uploading of cross-signing keys. ([\#5769](https://github.com/matrix-org/synapse/issues/5769))
+- CAS login now provides a default display name for users if a `displayname_attribute` is set in the configuration file. ([\#6114](https://github.com/matrix-org/synapse/issues/6114))
+- Reject all pending invites for a user during deactivation. ([\#6125](https://github.com/matrix-org/synapse/issues/6125))
+- Add config option to suppress client side resource limit alerting. ([\#6173](https://github.com/matrix-org/synapse/issues/6173))
+
+
+Bugfixes
+--------
+
+- Return an HTTP 404 instead of 400 when requesting a filter by ID that is unknown to the server. Thanks to @krombel for contributing this! ([\#2380](https://github.com/matrix-org/synapse/issues/2380))
+- Fix a problem where users could be invited twice to the same group. ([\#3436](https://github.com/matrix-org/synapse/issues/3436))
+- Added domain validation when including a list of invitees upon room creation. ([\#4088](https://github.com/matrix-org/synapse/issues/4088))
+- Make the `synapse_port_db` script create the right indexes on a new PostgreSQL database. ([\#6102](https://github.com/matrix-org/synapse/issues/6102), [\#6178](https://github.com/matrix-org/synapse/issues/6178), [\#6243](https://github.com/matrix-org/synapse/issues/6243))
+- Fix bug when uploading a large file: Synapse responds with `M_UNKNOWN` while it should be `M_TOO_LARGE` according to spec. Contributed by Anshul Angaria. ([\#6109](https://github.com/matrix-org/synapse/issues/6109))
+- Prevent user push rules being deleted from a room when it is upgraded. ([\#6144](https://github.com/matrix-org/synapse/issues/6144))
+- Don't 500 when trying to exchange a revoked 3PID invite. ([\#6147](https://github.com/matrix-org/synapse/issues/6147))
+- Fix transferring notifications and tags when joining an upgraded room that is new to your server. ([\#6155](https://github.com/matrix-org/synapse/issues/6155))
+- Fix bug where guest account registration can wedge after restart. ([\#6161](https://github.com/matrix-org/synapse/issues/6161))
+- Fix monthly active user reaping where reserved users are specified. ([\#6168](https://github.com/matrix-org/synapse/issues/6168))
+- Fix /federation/v1/state endpoint for recent room versions. ([\#6170](https://github.com/matrix-org/synapse/issues/6170))
+- Fix bug where we were updating censored events as bytes rather than text, occasionally causing invalid JSON to be inserted, breaking APIs that attempted to fetch such events. ([\#6186](https://github.com/matrix-org/synapse/issues/6186))
+- Fix occasional missed updates in the room and user directories. ([\#6187](https://github.com/matrix-org/synapse/issues/6187))
+- Fix tracing of non-JSON APIs, /media, /key etc. ([\#6195](https://github.com/matrix-org/synapse/issues/6195))
+- Fix bug where presence would not get timed out correctly if a synchrotron worker is used and restarted. ([\#6212](https://github.com/matrix-org/synapse/issues/6212))
+- `synapse_port_db`: Add two additional `BOOLEAN_COLUMNS` so it can convert from database schema v56. ([\#6216](https://github.com/matrix-org/synapse/issues/6216))
+- Prevent the demo Synapse instances from blacklisting `::1`. ([\#6229](https://github.com/matrix-org/synapse/issues/6229))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix logging getting lost for the docker image. ([\#6197](https://github.com/matrix-org/synapse/issues/6197))
+
+
+Internal Changes
+----------------
+
+- Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this. ([\#1172](https://github.com/matrix-org/synapse/issues/1172), [\#6175](https://github.com/matrix-org/synapse/issues/6175), [\#6184](https://github.com/matrix-org/synapse/issues/6184))
+- Allow devices to be marked as hidden, for use by features such as cross-signing.
+  This adds a new field with a default value to the devices table in the database,
+  and so the database upgrade may take a long time depending on how many devices
+  are in the database. ([\#5759](https://github.com/matrix-org/synapse/issues/5759))
+- Move lookup-related functions from RoomMemberHandler to IdentityHandler. ([\#5978](https://github.com/matrix-org/synapse/issues/5978))
+- Improve performance of the public room list directory. ([\#6019](https://github.com/matrix-org/synapse/issues/6019), [\#6152](https://github.com/matrix-org/synapse/issues/6152), [\#6153](https://github.com/matrix-org/synapse/issues/6153), [\#6154](https://github.com/matrix-org/synapse/issues/6154))
+- Edit header dicts docstrings in SimpleHttpClient to note that `str` or `bytes` can be passed as header keys. ([\#6077](https://github.com/matrix-org/synapse/issues/6077))
+- Add snapcraft packaging information. Contributed by @devec0. ([\#6084](https://github.com/matrix-org/synapse/issues/6084), [\#6191](https://github.com/matrix-org/synapse/issues/6191))
+- Kill off half-implemented password-reset via sms. ([\#6101](https://github.com/matrix-org/synapse/issues/6101))
+- Remove `get_user_by_req` opentracing span and add some tags. ([\#6108](https://github.com/matrix-org/synapse/issues/6108))
+- Drop some unused database tables. ([\#6115](https://github.com/matrix-org/synapse/issues/6115))
+- Add env var to turn on tracking of log context changes. ([\#6127](https://github.com/matrix-org/synapse/issues/6127))
+- Refactor configuration loading to allow better typechecking. ([\#6137](https://github.com/matrix-org/synapse/issues/6137))
+- Log responder when responding to media request. ([\#6139](https://github.com/matrix-org/synapse/issues/6139))
+- Improve performance of `find_next_generated_user_id` DB query. ([\#6148](https://github.com/matrix-org/synapse/issues/6148))
+- Expand type-checking on modules imported by synapse.config. ([\#6150](https://github.com/matrix-org/synapse/issues/6150))
+- Use Postgres ANY for selecting many values. ([\#6156](https://github.com/matrix-org/synapse/issues/6156))
+- Add more caching to `_get_joined_users_from_context` DB query. ([\#6159](https://github.com/matrix-org/synapse/issues/6159))
+- Add some metrics on the federation sender. ([\#6160](https://github.com/matrix-org/synapse/issues/6160))
+- Add some logging to the rooms stats updates, to try to track down a flaky test. ([\#6167](https://github.com/matrix-org/synapse/issues/6167))
+- Remove unused `timeout` parameter from `_get_public_room_list`. ([\#6179](https://github.com/matrix-org/synapse/issues/6179))
+- Reject (accidental) attempts to insert bytes into postgres tables. ([\#6186](https://github.com/matrix-org/synapse/issues/6186))
+- Make `version` optional in body of `PUT /room_keys/version/{version}`, since it's redundant. ([\#6189](https://github.com/matrix-org/synapse/issues/6189))
+- Make storage layer responsible for adding device names to key, rather than the handler. ([\#6193](https://github.com/matrix-org/synapse/issues/6193))
+- Port synapse.rest.admin module to use async/await. ([\#6196](https://github.com/matrix-org/synapse/issues/6196))
+- Enforce that all boolean configuration values are lowercase in CI. ([\#6203](https://github.com/matrix-org/synapse/issues/6203))
+- Remove some unused event-auth code. ([\#6214](https://github.com/matrix-org/synapse/issues/6214))
+- Remove Auth.check method. ([\#6217](https://github.com/matrix-org/synapse/issues/6217))
+- Remove `format_tap.py` script in favour of a Perl reimplementation in SyTest's repo. ([\#6219](https://github.com/matrix-org/synapse/issues/6219))
+- Refactor storage layer in preparation to support having multiple databases. ([\#6231](https://github.com/matrix-org/synapse/issues/6231))
+- Remove some extra quotation marks across the codebase. ([\#6236](https://github.com/matrix-org/synapse/issues/6236))
+
+
 Synapse 1.4.1 (2019-10-18)
 ==========================
 
diff --git a/changelog.d/1172.misc b/changelog.d/1172.misc
deleted file mode 100644
index 30b3e56082..0000000000
--- a/changelog.d/1172.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this.
\ No newline at end of file
diff --git a/changelog.d/2142.feature b/changelog.d/2142.feature
deleted file mode 100644
index e21e8325e1..0000000000
--- a/changelog.d/2142.feature
+++ /dev/null
@@ -1 +0,0 @@
-Improve quality of thumbnails for 1-bit/8-bit color palette images.
diff --git a/changelog.d/2380.bugfix b/changelog.d/2380.bugfix
deleted file mode 100644
index eae3206031..0000000000
--- a/changelog.d/2380.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Return an HTTP 404 instead of 400 when requesting a filter by ID that is unknown to the server. Thanks to @krombel for contributing this!
diff --git a/changelog.d/3436.bugfix b/changelog.d/3436.bugfix
deleted file mode 100644
index 15714a11e0..0000000000
--- a/changelog.d/3436.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a problem where users could be invited twice to the same group.
diff --git a/changelog.d/4088.bugfix b/changelog.d/4088.bugfix
deleted file mode 100644
index 61722b6224..0000000000
--- a/changelog.d/4088.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Add domain validation when including a list of invitees upon room creation.
\ No newline at end of file
diff --git a/changelog.d/5726.feature b/changelog.d/5726.feature
deleted file mode 100644
index d3c669aec0..0000000000
--- a/changelog.d/5726.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add ability to upload cross-signing signatures.
diff --git a/changelog.d/5759.misc b/changelog.d/5759.misc
deleted file mode 100644
index dc7e2c01bf..0000000000
--- a/changelog.d/5759.misc
+++ /dev/null
@@ -1,4 +0,0 @@
-Allow devices to be marked as hidden, for use by features such as cross-signing.
-This adds a new field with a default value to the devices table in the database,
-and so the database upgrade may take a long time depending on how many devices
-are in the database.
diff --git a/changelog.d/5769.feature b/changelog.d/5769.feature
deleted file mode 100644
index bf994ca327..0000000000
--- a/changelog.d/5769.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow uploading of cross-signing keys.
\ No newline at end of file
diff --git a/changelog.d/5978.misc b/changelog.d/5978.misc
deleted file mode 100644
index 6d2b69b11b..0000000000
--- a/changelog.d/5978.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move lookup-related functions from RoomMemberHandler to IdentityHandler.
\ No newline at end of file
diff --git a/changelog.d/6019.misc b/changelog.d/6019.misc
deleted file mode 100644
index dfee73c28f..0000000000
--- a/changelog.d/6019.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of the public room list directory.
diff --git a/changelog.d/6077.misc b/changelog.d/6077.misc
deleted file mode 100644
index 31ac5b97a4..0000000000
--- a/changelog.d/6077.misc
+++ /dev/null
@@ -1 +0,0 @@
-Edit header dicts docstrings in SimpleHttpClient to note that `str` or `bytes` can be passed as header keys.
diff --git a/changelog.d/6084.misc b/changelog.d/6084.misc
deleted file mode 100644
index 3c33701651..0000000000
--- a/changelog.d/6084.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add snapcraft packaging information. Contributed by @devec0.
diff --git a/changelog.d/6101.misc b/changelog.d/6101.misc
deleted file mode 100644
index 9743abb9e9..0000000000
--- a/changelog.d/6101.misc
+++ /dev/null
@@ -1 +0,0 @@
-Kill off half-implemented password-reset via sms.
diff --git a/changelog.d/6102.bugfix b/changelog.d/6102.bugfix
deleted file mode 100644
index cd288c2a44..0000000000
--- a/changelog.d/6102.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Make the `synapse_port_db` script create the right indexes on a new PostgreSQL database.
diff --git a/changelog.d/6108.misc b/changelog.d/6108.misc
deleted file mode 100644
index 6c3f9460e9..0000000000
--- a/changelog.d/6108.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove `get_user_by_req` opentracing span and add some tags.
diff --git a/changelog.d/6109.bugfix b/changelog.d/6109.bugfix
deleted file mode 100644
index da7ac1be4e..0000000000
--- a/changelog.d/6109.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug when uploading a large file: Synapse responds with `M_UNKNOWN` while it should be `M_TOO_LARGE` according to spec. Contributed by Anshul Angaria.
diff --git a/changelog.d/6114.feature b/changelog.d/6114.feature
deleted file mode 100644
index a34ab12148..0000000000
--- a/changelog.d/6114.feature
+++ /dev/null
@@ -1 +0,0 @@
-CAS login now provides a default display name for users if a `displayname_attribute` is set in the configuration file.
diff --git a/changelog.d/6115.misc b/changelog.d/6115.misc
deleted file mode 100644
index b19e395a99..0000000000
--- a/changelog.d/6115.misc
+++ /dev/null
@@ -1 +0,0 @@
-Drop some unused database tables.
diff --git a/changelog.d/6125.feature b/changelog.d/6125.feature
deleted file mode 100644
index cbe5f8d3c8..0000000000
--- a/changelog.d/6125.feature
+++ /dev/null
@@ -1 +0,0 @@
-Reject all pending invites for a user during deactivation.
diff --git a/changelog.d/6127.misc b/changelog.d/6127.misc
deleted file mode 100644
index 7bfbcfc252..0000000000
--- a/changelog.d/6127.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add env var to turn on tracking of log context changes.
diff --git a/changelog.d/6137.misc b/changelog.d/6137.misc
deleted file mode 100644
index 92a02e71c3..0000000000
--- a/changelog.d/6137.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor configuration loading to allow better typechecking.
diff --git a/changelog.d/6139.misc b/changelog.d/6139.misc
deleted file mode 100644
index d4b65e7af8..0000000000
--- a/changelog.d/6139.misc
+++ /dev/null
@@ -1 +0,0 @@
-Log responder when responding to media request.
diff --git a/changelog.d/6144.bugfix b/changelog.d/6144.bugfix
deleted file mode 100644
index eee63961e4..0000000000
--- a/changelog.d/6144.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent user push rules being deleted from a room when it is upgraded.
\ No newline at end of file
diff --git a/changelog.d/6147.bugfix b/changelog.d/6147.bugfix
deleted file mode 100644
index b0f936d280..0000000000
--- a/changelog.d/6147.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Don't 500 when trying to exchange a revoked 3PID invite.
diff --git a/changelog.d/6148.misc b/changelog.d/6148.misc
deleted file mode 100644
index 1d5213345c..0000000000
--- a/changelog.d/6148.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of `find_next_generated_user_id` DB query.
diff --git a/changelog.d/6150.misc b/changelog.d/6150.misc
deleted file mode 100644
index a373c091ab..0000000000
--- a/changelog.d/6150.misc
+++ /dev/null
@@ -1 +0,0 @@
-Expand type-checking on modules imported by synapse.config.
diff --git a/changelog.d/6152.misc b/changelog.d/6152.misc
deleted file mode 100644
index dfee73c28f..0000000000
--- a/changelog.d/6152.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of the public room list directory.
diff --git a/changelog.d/6153.misc b/changelog.d/6153.misc
deleted file mode 100644
index dfee73c28f..0000000000
--- a/changelog.d/6153.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of the public room list directory.
diff --git a/changelog.d/6154.misc b/changelog.d/6154.misc
deleted file mode 100644
index dfee73c28f..0000000000
--- a/changelog.d/6154.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of the public room list directory.
diff --git a/changelog.d/6155.bugfix b/changelog.d/6155.bugfix
deleted file mode 100644
index e32c0dce09..0000000000
--- a/changelog.d/6155.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix transferring notifications and tags when joining an upgraded room that is new to your server.
\ No newline at end of file
diff --git a/changelog.d/6156.misc b/changelog.d/6156.misc
deleted file mode 100644
index 49525e9416..0000000000
--- a/changelog.d/6156.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use Postgres ANY for selecting many values.
diff --git a/changelog.d/6159.misc b/changelog.d/6159.misc
deleted file mode 100644
index 06cc163f8b..0000000000
--- a/changelog.d/6159.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add more caching to `_get_joined_users_from_context` DB query.
diff --git a/changelog.d/6160.misc b/changelog.d/6160.misc
deleted file mode 100644
index 3d7cce00e1..0000000000
--- a/changelog.d/6160.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some metrics on the federation sender.
diff --git a/changelog.d/6161.bugfix b/changelog.d/6161.bugfix
deleted file mode 100644
index a0e2adb979..0000000000
--- a/changelog.d/6161.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where guest account registration can wedge after restart.
diff --git a/changelog.d/6167.misc b/changelog.d/6167.misc
deleted file mode 100644
index 32c96b3681..0000000000
--- a/changelog.d/6167.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some logging to the rooms stats updates, to try to track down a flaky test.
diff --git a/changelog.d/6168.bugfix b/changelog.d/6168.bugfix
deleted file mode 100644
index 39e8e9d019..0000000000
--- a/changelog.d/6168.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix monthly active user reaping where reserved users are specified.
diff --git a/changelog.d/6170.bugfix b/changelog.d/6170.bugfix
deleted file mode 100644
index 52f7ea233c..0000000000
--- a/changelog.d/6170.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix /federation/v1/state endpoint for recent room versions.
diff --git a/changelog.d/6173.feature b/changelog.d/6173.feature
deleted file mode 100644
index b1cabc322b..0000000000
--- a/changelog.d/6173.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add config option to suppress client side resource limit alerting.
diff --git a/changelog.d/6175.misc b/changelog.d/6175.misc
deleted file mode 100644
index 30b3e56082..0000000000
--- a/changelog.d/6175.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this.
\ No newline at end of file
diff --git a/changelog.d/6178.bugfix b/changelog.d/6178.bugfix
deleted file mode 100644
index cd288c2a44..0000000000
--- a/changelog.d/6178.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Make the `synapse_port_db` script create the right indexes on a new PostgreSQL database.
diff --git a/changelog.d/6179.misc b/changelog.d/6179.misc
deleted file mode 100644
index 01c4e71ea3..0000000000
--- a/changelog.d/6179.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove unused `timeout` parameter from `_get_public_room_list`.
\ No newline at end of file
diff --git a/changelog.d/6184.misc b/changelog.d/6184.misc
deleted file mode 100644
index 30b3e56082..0000000000
--- a/changelog.d/6184.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this.
\ No newline at end of file
diff --git a/changelog.d/6186.bugfix b/changelog.d/6186.bugfix
deleted file mode 100644
index 199ec69032..0000000000
--- a/changelog.d/6186.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where we were updating censored events as bytes rather than text, occasionally causing invalid JSON to be inserted, breaking APIs that attempted to fetch such events.
diff --git a/changelog.d/6186.misc b/changelog.d/6186.misc
deleted file mode 100644
index 5e1314a0ac..0000000000
--- a/changelog.d/6186.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reject (accidental) attempts to insert bytes into postgres tables.
diff --git a/changelog.d/6187.bugfix b/changelog.d/6187.bugfix
deleted file mode 100644
index 6142c5b98d..0000000000
--- a/changelog.d/6187.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix occasional missed updates in the room and user directories.
\ No newline at end of file
diff --git a/changelog.d/6189.misc b/changelog.d/6189.misc
deleted file mode 100644
index a66eb384e6..0000000000
--- a/changelog.d/6189.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make `version` optional in body of `PUT /room_keys/version/{version}`, since it's redundant.
diff --git a/changelog.d/6191.misc b/changelog.d/6191.misc
deleted file mode 100644
index 3c33701651..0000000000
--- a/changelog.d/6191.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add snapcraft packaging information. Contributed by @devec0.
diff --git a/changelog.d/6193.misc b/changelog.d/6193.misc
deleted file mode 100644
index 8e3707f8fd..0000000000
--- a/changelog.d/6193.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make storage layer responsible for adding device names to key, rather than the handler.
diff --git a/changelog.d/6195.bugfix b/changelog.d/6195.bugfix
deleted file mode 100644
index d22935dbcd..0000000000
--- a/changelog.d/6195.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix tracing of non-JSON APIs, /media, /key etc.
diff --git a/changelog.d/6196.misc b/changelog.d/6196.misc
deleted file mode 100644
index 3897b1216f..0000000000
--- a/changelog.d/6196.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port synapse.rest.admin module to use async/await.
diff --git a/changelog.d/6197.docker b/changelog.d/6197.docker
deleted file mode 100644
index 71fb9cbff5..0000000000
--- a/changelog.d/6197.docker
+++ /dev/null
@@ -1 +0,0 @@
-Fix logging getting lost for the docker image.
diff --git a/changelog.d/6203.misc b/changelog.d/6203.misc
deleted file mode 100644
index c1d8276d45..0000000000
--- a/changelog.d/6203.misc
+++ /dev/null
@@ -1 +0,0 @@
-Enforce that all boolean configuration values are lowercase in CI.
diff --git a/changelog.d/6212.bugfix b/changelog.d/6212.bugfix
deleted file mode 100644
index 918755fee0..0000000000
--- a/changelog.d/6212.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where presence would not get timed out correctly if a synchrotron worker is used and restarted.
diff --git a/changelog.d/6214.misc b/changelog.d/6214.misc
deleted file mode 100644
index c3fd04d0d8..0000000000
--- a/changelog.d/6214.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove some unused event-auth code.
diff --git a/changelog.d/6216.bugfix b/changelog.d/6216.bugfix
deleted file mode 100644
index 5784e82d18..0000000000
--- a/changelog.d/6216.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-`synapse_port_db`: Add two additional `BOOLEAN_COLUMNS` so it can convert from database schema v56.
diff --git a/changelog.d/6217.misc b/changelog.d/6217.misc
deleted file mode 100644
index 503352ee0b..0000000000
--- a/changelog.d/6217.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove Auth.check method.
diff --git a/changelog.d/6219.misc b/changelog.d/6219.misc
deleted file mode 100644
index 296406246d..0000000000
--- a/changelog.d/6219.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove `format_tap.py` script in favour of a Perl reimplementation in SyTest's repo.
\ No newline at end of file
diff --git a/changelog.d/6229.bugfix b/changelog.d/6229.bugfix
deleted file mode 100644
index bced3304d0..0000000000
--- a/changelog.d/6229.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent the demo Synapse instances from blacklisting `::1`.
\ No newline at end of file
diff --git a/changelog.d/6231.misc b/changelog.d/6231.misc
deleted file mode 100644
index 89b8297794..0000000000
--- a/changelog.d/6231.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor storage layer in preparation to support having multiple databases.
diff --git a/changelog.d/6236.misc b/changelog.d/6236.misc
deleted file mode 100644
index 686e7a8721..0000000000
--- a/changelog.d/6236.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove some extra quotation marks across the codebase.
\ No newline at end of file
diff --git a/changelog.d/6243.bugfix b/changelog.d/6243.bugfix
deleted file mode 100644
index cd288c2a44..0000000000
--- a/changelog.d/6243.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Make the `synapse_port_db` script create the right indexes on a new PostgreSQL database.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index ee3313a41c..bcc2f8c049 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.4.1"
+__version__ = "1.5.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 19be9b703a2e9b71626e4d6c7ef2c34f74285921 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 24 Oct 2019 16:46:39 +0100
Subject: Newsfile

---
 changelog.d/6248.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6248.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6248.misc b/changelog.d/6248.misc
new file mode 100644
index 0000000000..97176bcfc7
--- /dev/null
+++ b/changelog.d/6248.misc
@@ -0,0 +1 @@
+Move schema delta files to the correct data store.
-- 
cgit 1.4.1


From c3cd977fff85be2a2a5cbf604c7eae4f975e0218 Mon Sep 17 00:00:00 2001
From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com>
Date: Thu, 24 Oct 2019 17:58:50 +0100
Subject: Add changelog.d

---
 changelog.d/6250.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6250.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6250.misc b/changelog.d/6250.misc
new file mode 100644
index 0000000000..e0994eca00
--- /dev/null
+++ b/changelog.d/6250.misc
@@ -0,0 +1 @@
+Reduce verbosity of user/room stats
-- 
cgit 1.4.1


From 0d7e9523e5793ec9a6ee5ea03b64f7561860f868 Mon Sep 17 00:00:00 2001
From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com>
Date: Thu, 24 Oct 2019 18:37:55 +0100
Subject: Reduce impact of debug logging

---
 changelog.d/6251.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6251.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6251.misc b/changelog.d/6251.misc
new file mode 100644
index 0000000000..371c6983be
--- /dev/null
+++ b/changelog.d/6251.misc
@@ -0,0 +1 @@
+Reduce impact of debug logging.
-- 
cgit 1.4.1


From 47c02f82e35c8d99219ecb8182a54e7587f7c8ec Mon Sep 17 00:00:00 2001
From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com>
Date: Thu, 24 Oct 2019 18:39:15 +0100
Subject: Add missing '.'

---
 changelog.d/6250.misc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6250.misc b/changelog.d/6250.misc
index e0994eca00..12e3fe66b0 100644
--- a/changelog.d/6250.misc
+++ b/changelog.d/6250.misc
@@ -1 +1 @@
-Reduce verbosity of user/room stats
+Reduce verbosity of user/room stats.
-- 
cgit 1.4.1


From 608947eedfaf7106cce31d0fe0c1685f2663f250 Mon Sep 17 00:00:00 2001
From: Hubert Chathi 
Date: Thu, 24 Oct 2019 21:33:35 -0400
Subject: add changelog

---
 changelog.d/6253.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6253.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6253.bugfix b/changelog.d/6253.bugfix
new file mode 100644
index 0000000000..266fae381c
--- /dev/null
+++ b/changelog.d/6253.bugfix
@@ -0,0 +1 @@
+Delete keys from key backup when deleting backup versions.
-- 
cgit 1.4.1


From 0417ca1a640ac39848535ff27b96c70f137c3fd9 Mon Sep 17 00:00:00 2001
From: Hubert Chathi 
Date: Thu, 24 Oct 2019 22:49:55 -0400
Subject: add changelog

---
 changelog.d/6254.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6254.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6254.bugfix b/changelog.d/6254.bugfix
new file mode 100644
index 0000000000..3181484b88
--- /dev/null
+++ b/changelog.d/6254.bugfix
@@ -0,0 +1 @@
+Make notification of cross-signing signatures work with workers.
-- 
cgit 1.4.1


From a52b23d413b9e64c36b519c27b057a41de5d9dd4 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 25 Oct 2019 10:34:10 +0100
Subject: Newsfile

---
 changelog.d/6255.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6255.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6255.misc b/changelog.d/6255.misc
new file mode 100644
index 0000000000..45bc493648
--- /dev/null
+++ b/changelog.d/6255.misc
@@ -0,0 +1 @@
+Small performance improvement by removing repeated config lookups in room stats calculation.
-- 
cgit 1.4.1


From deaa9db5f2999af4355d35d0388b92aab6e82c01 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 25 Oct 2019 11:11:38 +0100
Subject: Newsfile

---
 changelog.d/6256.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6256.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6256.bugfix b/changelog.d/6256.bugfix
new file mode 100644
index 0000000000..4b619f8cf8
--- /dev/null
+++ b/changelog.d/6256.bugfix
@@ -0,0 +1 @@
+Fix /keys/query API on workers.
-- 
cgit 1.4.1


From 9aee28927b22a16ea0699c3f73fbc58121511630 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 28 Oct 2019 12:29:55 +0000
Subject: Convert EventContext to attrs (#6218)

* make EventContext use attrs
---
 changelog.d/6218.misc                     |   1 +
 synapse/events/snapshot.py                | 100 ++++++++++++------------------
 synapse/storage/data_stores/main/state.py |   7 ++-
 3 files changed, 46 insertions(+), 62 deletions(-)
 create mode 100644 changelog.d/6218.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6218.misc b/changelog.d/6218.misc
new file mode 100644
index 0000000000..49d10c36cf
--- /dev/null
+++ b/changelog.d/6218.misc
@@ -0,0 +1 @@
+Convert `EventContext` to an attrs class.
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index acbcbeeced..27cd8a63ff 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -12,9 +12,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from six import iteritems
 
+import attr
 from frozendict import frozendict
 
 from twisted.internet import defer
@@ -22,7 +22,8 @@ from twisted.internet import defer
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 
 
-class EventContext(object):
+@attr.s(slots=True)
+class EventContext:
     """
     Attributes:
         state_group (int|None): state group id, if the state has been stored
@@ -31,9 +32,6 @@ class EventContext(object):
         rejected (bool|str): A rejection reason if the event was rejected, else
             False
 
-        push_actions (list[(str, list[object])]): list of (user_id, actions)
-            tuples
-
         prev_group (int): Previously persisted state group. ``None`` for an
             outlier.
         delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
@@ -42,6 +40,8 @@ class EventContext(object):
         prev_state_events (?): XXX: is this ever set to anything other than
             the empty list?
 
+        app_service: FIXME
+
         _current_state_ids (dict[(str, str), str]|None):
             The current state map including the current event. None if outlier
             or we haven't fetched the state from DB yet.
@@ -67,49 +67,33 @@ class EventContext(object):
             Only set when state has not been fetched yet.
     """
 
-    __slots__ = [
-        "state_group",
-        "rejected",
-        "prev_group",
-        "delta_ids",
-        "prev_state_events",
-        "app_service",
-        "_current_state_ids",
-        "_prev_state_ids",
-        "_prev_state_id",
-        "_event_type",
-        "_event_state_key",
-        "_fetching_state_deferred",
-    ]
-
-    def __init__(self):
-        self.prev_state_events = []
-        self.rejected = False
-        self.app_service = None
+    state_group = attr.ib(default=None)
+    rejected = attr.ib(default=False)
+    prev_group = attr.ib(default=None)
+    delta_ids = attr.ib(default=None)
+    prev_state_events = attr.ib(default=attr.Factory(list))
+    app_service = attr.ib(default=None)
+
+    _current_state_ids = attr.ib(default=None)
+    _prev_state_ids = attr.ib(default=None)
+    _prev_state_id = attr.ib(default=None)
+
+    _event_type = attr.ib(default=None)
+    _event_state_key = attr.ib(default=None)
+    _fetching_state_deferred = attr.ib(default=None)
 
     @staticmethod
     def with_state(
         state_group, current_state_ids, prev_state_ids, prev_group=None, delta_ids=None
     ):
-        context = EventContext()
-
-        # The current state including the current event
-        context._current_state_ids = current_state_ids
-        # The current state excluding the current event
-        context._prev_state_ids = prev_state_ids
-        context.state_group = state_group
-
-        context._prev_state_id = None
-        context._event_type = None
-        context._event_state_key = None
-        context._fetching_state_deferred = defer.succeed(None)
-
-        # A previously persisted state group and a delta between that
-        # and this state.
-        context.prev_group = prev_group
-        context.delta_ids = delta_ids
-
-        return context
+        return EventContext(
+            current_state_ids=current_state_ids,
+            prev_state_ids=prev_state_ids,
+            state_group=state_group,
+            fetching_state_deferred=defer.succeed(None),
+            prev_group=prev_group,
+            delta_ids=delta_ids,
+        )
 
     @defer.inlineCallbacks
     def serialize(self, event, store):
@@ -157,24 +141,18 @@ class EventContext(object):
         Returns:
             EventContext
         """
-        context = EventContext()
-
-        # We use the state_group and prev_state_id stuff to pull the
-        # current_state_ids out of the DB and construct prev_state_ids.
-        context._prev_state_id = input["prev_state_id"]
-        context._event_type = input["event_type"]
-        context._event_state_key = input["event_state_key"]
-
-        context._current_state_ids = None
-        context._prev_state_ids = None
-        context._fetching_state_deferred = None
-
-        context.state_group = input["state_group"]
-        context.prev_group = input["prev_group"]
-        context.delta_ids = _decode_state_dict(input["delta_ids"])
-
-        context.rejected = input["rejected"]
-        context.prev_state_events = input["prev_state_events"]
+        context = EventContext(
+            # We use the state_group and prev_state_id stuff to pull the
+            # current_state_ids out of the DB and construct prev_state_ids.
+            prev_state_id=input["prev_state_id"],
+            event_type=input["event_type"],
+            event_state_key=input["event_state_key"],
+            state_group=input["state_group"],
+            prev_group=input["prev_group"],
+            delta_ids=_decode_state_dict(input["delta_ids"]),
+            rejected=input["rejected"],
+            prev_state_events=input["prev_state_events"],
+        )
 
         app_service_id = input["app_service_id"]
         if app_service_id:
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index d54442e5fa..9b2207075b 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -15,6 +15,7 @@
 
 import logging
 from collections import namedtuple
+from typing import Iterable, Tuple
 
 from six import iteritems, itervalues
 from six.moves import range
@@ -23,6 +24,8 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
 from synapse.api.errors import NotFoundError
+from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
@@ -1215,7 +1218,9 @@ class StateStore(StateGroupWorkerStore, StateBackgroundUpdateStore):
     def __init__(self, db_conn, hs):
         super(StateStore, self).__init__(db_conn, hs)
 
-    def _store_event_state_mappings_txn(self, txn, events_and_contexts):
+    def _store_event_state_mappings_txn(
+        self, txn, events_and_contexts: Iterable[Tuple[EventBase, EventContext]]
+    ):
         state_groups = {}
         for event, context in events_and_contexts:
             if event.internal_metadata.is_outlier():
-- 
cgit 1.4.1
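
The conversion above leans on two attrs behaviours: `@attr.s(slots=True)` generates both `__slots__` and `__init__` from the `attr.ib()` declarations, and attrs strips a leading underscore when naming the corresponding `__init__` argument, which is why `EventContext(current_state_ids=...)` populates `_current_state_ids`. A minimal sketch of the pattern, using a hypothetical `Snapshot` class rather than the Synapse code:

    import attr

    @attr.s(slots=True)
    class Snapshot:
        # Plain fields become keyword arguments of the generated __init__.
        state_group = attr.ib(default=None)
        rejected = attr.ib(default=False)
        prev_state_events = attr.ib(default=attr.Factory(list))

        # attrs drops the leading underscore from the __init__ argument
        # name, so this field is filled in via `current_state_ids=...`.
        _current_state_ids = attr.ib(default=None)

    snap = Snapshot(state_group=1, current_state_ids={("m.room.create", ""): "$event"})
    assert snap.rejected is False
    assert snap._current_state_ids == {("m.room.create", ""): "$event"}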


From 172f264ed38e8bef857552f93114b4ee113a880b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 28 Oct 2019 12:43:23 +0000
Subject: Improve signature checking on some federation APIs (#6262)

Make sure that we check that events sent over /send_join, /send_leave, and
/invite are correctly signed and come from the expected servers.
---
 changelog.d/6262.bugfix                 |  1 +
 synapse/federation/federation_base.py   |  7 ++-----
 synapse/federation/federation_server.py |  7 +++++++
 synapse/handlers/federation.py          | 20 ++++++++++++++++++--
 4 files changed, 28 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6262.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6262.bugfix b/changelog.d/6262.bugfix
new file mode 100644
index 0000000000..32687f0d2b
--- /dev/null
+++ b/changelog.d/6262.bugfix
@@ -0,0 +1 @@
+Improve signature checking on some federation APIs.
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 5a1e23a145..223aace0d9 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -278,9 +278,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
             pdu_to_check.sender_domain,
             e.getErrorMessage(),
         )
-        # XX not really sure if these are the right codes, but they are what
-        # we've done for ages
-        raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)
+        raise SynapseError(403, errmsg, Codes.FORBIDDEN)
 
     for p, d in zip(pdus_to_check_sender, more_deferreds):
         d.addErrback(sender_err, p)
@@ -314,8 +312,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
                 "event id %s: unable to verify signature for event id domain: %s"
                 % (pdu_to_check.pdu.event_id, e.getErrorMessage())
             )
-            # XX as above: not really sure if these are the right codes
-            raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)
+            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
 
         for p, d in zip(pdus_to_check_event_id, more_deferreds):
             d.addErrback(event_err, p)
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 21e52c9695..5fc7c1d67b 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -370,6 +370,7 @@ class FederationServer(FederationBase):
         pdu = event_from_pdu_json(content, format_ver)
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, pdu.room_id)
+        pdu = yield self._check_sigs_and_hash(room_version, pdu)
         ret_pdu = yield self.handler.on_invite_request(origin, pdu)
         time_now = self._clock.time_msec()
         return {"event": ret_pdu.get_pdu_json(time_now)}
@@ -386,6 +387,9 @@ class FederationServer(FederationBase):
         yield self.check_server_matches_acl(origin_host, pdu.room_id)
 
         logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
+
+        pdu = yield self._check_sigs_and_hash(room_version, pdu)
+
         res_pdus = yield self.handler.on_send_join_request(origin, pdu)
         time_now = self._clock.time_msec()
         return (
@@ -421,6 +425,9 @@ class FederationServer(FederationBase):
         yield self.check_server_matches_acl(origin_host, pdu.room_id)
 
         logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
+
+        pdu = yield self._check_sigs_and_hash(room_version, pdu)
+
         yield self.handler.on_send_leave_request(origin, pdu)
         return 200, {}
 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 4b4c6c15f9..488058fe68 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1222,7 +1222,6 @@ class FederationHandler(BaseHandler):
         Returns:
             Deferred[FrozenEvent]
         """
-
         if get_domain_from_id(user_id) != origin:
             logger.info(
                 "Got /make_join request for user %r from different origin %s, ignoring",
@@ -1280,11 +1279,20 @@ class FederationHandler(BaseHandler):
         event = pdu
 
         logger.debug(
-            "on_send_join_request: Got event: %s, signatures: %s",
+            "on_send_join_request from %s: Got event: %s, signatures: %s",
+            origin,
             event.event_id,
             event.signatures,
         )
 
+        if get_domain_from_id(event.sender) != origin:
+            logger.info(
+                "Got /send_join request for user %r from different origin %s",
+                event.sender,
+                origin,
+            )
+            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
         event.internal_metadata.outlier = False
         # Send this event on behalf of the origin server.
         #
@@ -1503,6 +1511,14 @@ class FederationHandler(BaseHandler):
             event.signatures,
         )
 
+        if get_domain_from_id(event.sender) != origin:
+            logger.info(
+                "Got /send_leave request for user %r from different origin %s",
+                event.sender,
+                origin,
+            )
+            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
         event.internal_metadata.outlier = False
 
         context = yield self._handle_new_event(origin, event)
-- 
cgit 1.4.1
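
The checks added to `on_send_join_request` and `on_send_leave_request` enforce one rule: the sender of an event received over federation must belong to the server that sent it. A rough standalone sketch of that rule, with `get_domain_from_id` reimplemented here as a stand-in for Synapse's helper of the same name, and a plain `PermissionError` standing in for `SynapseError(403, ...)`:

    def get_domain_from_id(user_id: str) -> str:
        # Matrix user IDs look like "@alice:example.com"; the domain is
        # everything after the first colon.
        return user_id.split(":", 1)[1]

    def check_sender_matches_origin(sender: str, origin: str) -> None:
        # Reject events whose sender does not belong to the origin server.
        if get_domain_from_id(sender) != origin:
            raise PermissionError("User not from origin")

    check_sender_matches_origin("@alice:example.com", "example.com")  # passes
    try:
        check_sender_matches_origin("@mallory:evil.example", "example.com")
    except PermissionError as e:
        print(e)  # User not from origin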


From c482d458221945d56dec1762c27205d229255eb3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Mon, 28 Oct 2019 12:48:18 +0000
Subject: 1.5.0rc2

---
 CHANGES.md              | 18 ++++++++++++++++++
 changelog.d/6247.bugfix |  1 -
 changelog.d/6248.misc   |  1 -
 changelog.d/6255.misc   |  1 -
 changelog.d/6256.bugfix |  1 -
 changelog.d/6262.bugfix |  1 -
 synapse/__init__.py     |  2 +-
 7 files changed, 19 insertions(+), 6 deletions(-)
 delete mode 100644 changelog.d/6247.bugfix
 delete mode 100644 changelog.d/6248.misc
 delete mode 100644 changelog.d/6255.misc
 delete mode 100644 changelog.d/6256.bugfix
 delete mode 100644 changelog.d/6262.bugfix

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index d438c5272a..c59b139eae 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,21 @@
+Synapse 1.5.0rc2 (2019-10-28)
+=============================
+
+Bugfixes
+--------
+
+- Update list of boolean columns in `synapse_port_db`. ([\#6247](https://github.com/matrix-org/synapse/issues/6247))
+- Fix /keys/query API on workers. ([\#6256](https://github.com/matrix-org/synapse/issues/6256))
+- Improve signature checking on some federation APIs. ([\#6262](https://github.com/matrix-org/synapse/issues/6262))
+
+
+Internal Changes
+----------------
+
+- Move schema delta files to the correct data store. ([\#6248](https://github.com/matrix-org/synapse/issues/6248))
+- Small performance improvement by removing repeated config lookups in room stats calculation. ([\#6255](https://github.com/matrix-org/synapse/issues/6255))
+
+
 Synapse 1.5.0rc1 (2019-10-24)
 =============================
 
diff --git a/changelog.d/6247.bugfix b/changelog.d/6247.bugfix
deleted file mode 100644
index 3122ba0bde..0000000000
--- a/changelog.d/6247.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Update list of boolean columns in `synapse_port_db`.
diff --git a/changelog.d/6248.misc b/changelog.d/6248.misc
deleted file mode 100644
index 97176bcfc7..0000000000
--- a/changelog.d/6248.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move schema delta files to the correct data store.
diff --git a/changelog.d/6255.misc b/changelog.d/6255.misc
deleted file mode 100644
index 45bc493648..0000000000
--- a/changelog.d/6255.misc
+++ /dev/null
@@ -1 +0,0 @@
-Small performance improvement by removing repeated config lookups in room stats calculation.
diff --git a/changelog.d/6256.bugfix b/changelog.d/6256.bugfix
deleted file mode 100644
index 4b619f8cf8..0000000000
--- a/changelog.d/6256.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix /keys/query API on workers.
diff --git a/changelog.d/6262.bugfix b/changelog.d/6262.bugfix
deleted file mode 100644
index 32687f0d2b..0000000000
--- a/changelog.d/6262.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Improve signature checking on some federation APIs.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index bcc2f8c049..d0f92ffbf3 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.5.0rc1"
+__version__ = "1.5.0rc2"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From a8aced58df9674123d491c121dbf4ff2562724cb Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 28 Oct 2019 13:36:52 +0000
Subject: Newsfile

---
 changelog.d/6263.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6263.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6263.misc b/changelog.d/6263.misc
new file mode 100644
index 0000000000..7b1bb4b679
--- /dev/null
+++ b/changelog.d/6263.misc
@@ -0,0 +1 @@
+Change cache descriptors to always return deferreds.
-- 
cgit 1.4.1


From 14504ad5736ae230d759d8fadccd8babb42fa548 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 28 Oct 2019 17:45:32 +0000
Subject: Add CI for synapse_port_db (#6140)

This adds:

* a test sqlite database
* a configuration file for the sqlite database
* a configuration file for a postgresql database (using the credentials in `.buildkite/docker-compose.pyXX.pgXX.yaml`)

as well as a new script named `.buildkite/scripts/test_synapse_port_db.sh` that:

1. installs Synapse
2. updates the test sqlite database to the latest schema and runs background updates on it
3. creates an empty postgresql database
4. run the `synapse_port_db` script to migrate the test sqlite database to the empty postgresql database (with coverage)

Step `2` is done via a new script located at `scripts-dev/update_database`.

The test sqlite database is extracted from a SyTest run, so that it can be treated as an actual homeserver's database with real data in it.
---
 .buildkite/postgres-config.yaml            |  19 +++++
 .buildkite/scripts/test_synapse_port_db.sh |  29 +++++++
 .buildkite/sqlite-config.yaml              |  16 ++++
 .buildkite/test_db.db                      | Bin 0 -> 18825216 bytes
 changelog.d/6140.misc                      |   1 +
 scripts-dev/update_database                | 125 +++++++++++++++++++++++++++++
 synapse/storage/background_updates.py      |   9 ++-
 7 files changed, 196 insertions(+), 3 deletions(-)
 create mode 100644 .buildkite/postgres-config.yaml
 create mode 100755 .buildkite/scripts/test_synapse_port_db.sh
 create mode 100644 .buildkite/sqlite-config.yaml
 create mode 100644 .buildkite/test_db.db
 create mode 100644 changelog.d/6140.misc
 create mode 100755 scripts-dev/update_database

(limited to 'changelog.d')

diff --git a/.buildkite/postgres-config.yaml b/.buildkite/postgres-config.yaml
new file mode 100644
index 0000000000..23db43fac9
--- /dev/null
+++ b/.buildkite/postgres-config.yaml
@@ -0,0 +1,19 @@
+# Configuration file used for testing the 'synapse_port_db' script.
+# Tells the script to connect to the PostgreSQL database that will be available in the
+# CI's Docker setup at the point where this file is used.
+server_name: "test"
+
+report_stats: false
+
+database:
+  name: "psycopg2"
+  args:
+    user: postgres
+    host: postgres
+    password: postgres
+    database: synapse
+
+# Suppress the key server warning.
+trusted_key_servers:
+  - server_name: "matrix.org"
+suppress_key_server_warning: true
diff --git a/.buildkite/scripts/test_synapse_port_db.sh b/.buildkite/scripts/test_synapse_port_db.sh
new file mode 100755
index 0000000000..7defd47bc6
--- /dev/null
+++ b/.buildkite/scripts/test_synapse_port_db.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
+# with additional dependencies needed for the test (such as coverage or the PostgreSQL
+# driver), updates the schema of the test SQLite database and runs background updates on it,
+# creates an empty test database in PostgreSQL, then runs the 'synapse_port_db' script to
+# test porting the SQLite database to the PostgreSQL database (with coverage).
+
+set -xe
+cd `dirname $0`/../..
+
+# Create a virtualenv and use it.
+virtualenv env
+source env/bin/activate
+
+# Install dependencies for this test.
+pip install psycopg2 coverage coverage-enable-subprocess
+
+# Install Synapse itself. This won't update any libraries.
+pip install -e .
+
+# Make sure the SQLite3 database is using the latest schema and has no pending background update.
+scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
+
+# Create the PostgreSQL database.
+PGPASSWORD=postgres createdb -h postgres -U postgres synapse
+
+# Run the script
+coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
diff --git a/.buildkite/sqlite-config.yaml b/.buildkite/sqlite-config.yaml
new file mode 100644
index 0000000000..56503cc4ce
--- /dev/null
+++ b/.buildkite/sqlite-config.yaml
@@ -0,0 +1,16 @@
+# Configuration file used for testing the 'synapse_port_db' script.
+# Tells the 'update_database' script to connect to the test SQLite database to upgrade its
+# schema and run background updates on it.
+server_name: "test"
+
+report_stats: false
+
+database:
+  name: "sqlite3"
+  args:
+    database: ".buildkite/test_db.db"
+
+# Suppress the key server warning.
+trusted_key_servers:
+  - server_name: "matrix.org"
+suppress_key_server_warning: true
diff --git a/.buildkite/test_db.db b/.buildkite/test_db.db
new file mode 100644
index 0000000000..f20567ba73
Binary files /dev/null and b/.buildkite/test_db.db differ
diff --git a/changelog.d/6140.misc b/changelog.d/6140.misc
new file mode 100644
index 0000000000..0feb97ec61
--- /dev/null
+++ b/changelog.d/6140.misc
@@ -0,0 +1 @@
+Add a CI job to test the `synapse_port_db` script.
\ No newline at end of file
diff --git a/scripts-dev/update_database b/scripts-dev/update_database
new file mode 100755
index 0000000000..10166583e1
--- /dev/null
+++ b/scripts-dev/update_database
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import logging
+import sys
+
+import yaml
+
+from twisted.internet import defer, reactor
+
+from synapse.config.homeserver import HomeServerConfig
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.storage import DataStore
+from synapse.storage.prepare_database import prepare_database
+
+logger = logging.getLogger("update_database")
+
+
+class MockHomeserver(HomeServer):
+    DATASTORE_CLASS = DataStore
+
+    def __init__(self, config, database_engine, db_conn, **kwargs):
+        super(MockHomeserver, self).__init__(
+            config.server_name,
+            reactor=reactor,
+            config=config,
+            database_engine=database_engine,
+            **kwargs
+        )
+
+        self.database_engine = database_engine
+        self.db_conn = db_conn
+
+    def get_db_conn(self):
+        return self.db_conn
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=(
+            "Updates a Synapse database to the latest schema and runs background updates"
+            " on it."
+        )
+    )
+    parser.add_argument("-v", action='store_true')
+    parser.add_argument(
+        "--database-config",
+        type=argparse.FileType('r'),
+        required=True,
+        help="A database config file for either a SQLite3 database or a PostgreSQL one.",
+    )
+
+    args = parser.parse_args()
+
+    logging_config = {
+        "level": logging.DEBUG if args.v else logging.INFO,
+        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
+    }
+
+    logging.basicConfig(**logging_config)
+
+    # Load, process and sanity-check the config.
+    hs_config = yaml.safe_load(args.database_config)
+
+    if "database" not in hs_config:
+        sys.stderr.write("The configuration file must have a 'database' section.\n")
+        sys.exit(4)
+
+    config = HomeServerConfig()
+    config.parse_config_dict(hs_config, "", "")
+
+    # Create the database engine and a connection to it.
+    database_engine = create_engine(config.database_config)
+    db_conn = database_engine.module.connect(
+        **{
+            k: v
+            for k, v in config.database_config.get("args", {}).items()
+            if not k.startswith("cp_")
+        }
+    )
+
+    # Update the database to the latest schema.
+    prepare_database(db_conn, database_engine, config=config)
+    db_conn.commit()
+
+    # Instantiate and initialise the homeserver object.
+    hs = MockHomeserver(
+        config,
+        database_engine,
+        db_conn,
+        db_config=config.database_config,
+    )
+    # setup instantiates the store within the homeserver object.
+    hs.setup()
+    store = hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def run_background_updates():
+        yield store.run_background_updates(sleep=False)
+        # Stop the reactor to exit the script once every background update is run.
+        reactor.stop()
+
+    # Apply all background updates on the database.
+    reactor.callWhenRunning(lambda: run_as_background_process(
+        "background_updates", run_background_updates
+    ))
+
+    reactor.run()
+
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 80b57a948c..37d469ffd7 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -94,13 +94,16 @@ class BackgroundUpdateStore(SQLBaseStore):
         self._all_done = False
 
     def start_doing_background_updates(self):
-        run_as_background_process("background_updates", self._run_background_updates)
+        run_as_background_process("background_updates", self.run_background_updates)
 
     @defer.inlineCallbacks
-    def _run_background_updates(self):
+    def run_background_updates(self, sleep=True):
         logger.info("Starting background schema updates")
         while True:
-            yield self.hs.get_clock().sleep(self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0)
+            if sleep:
+                yield self.hs.get_clock().sleep(
+                    self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0
+                )
 
             try:
                 result = yield self.do_next_background_update(
-- 
cgit 1.4.1
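
The `sleep` flag added to `run_background_updates` above is what lets `scripts-dev/update_database` drain every pending update back to back, where a live homeserver pauses between iterations to limit load. A toy model of that control flow (illustrative only; the real loop runs as a Twisted background process against the datastore):

    import time

    def run_background_updates(pending, sleep=True, interval_ms=1000):
        # sleep=True mimics the homeserver default: pause between
        # iterations. Scripts pass sleep=False to run every pending
        # update without waiting.
        while pending:
            if sleep:
                time.sleep(interval_ms / 1000.0)
            update = pending.pop(0)
            update()

    run_background_updates(
        [lambda: print("update 1 done"), lambda: print("update 2 done")],
        sleep=False,
    )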


From b7a0ea686ff6d2fb854603c5ebaad32daf1720c5 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 29 Oct 2019 10:25:25 +0000
Subject: Newsfile

---
 changelog.d/6286.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6286.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6286.bugfix b/changelog.d/6286.bugfix
new file mode 100644
index 0000000000..a4bebec1c7
--- /dev/null
+++ b/changelog.d/6286.bugfix
@@ -0,0 +1 @@
+Fix bug where room directory search was case sensitive.
-- 
cgit 1.4.1


From 1a7ed371490e8916ca6ce31949e3814360feacf2 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 29 Oct 2019 13:01:50 +0000
Subject: Newsfile

---
 changelog.d/6274.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6274.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6274.misc b/changelog.d/6274.misc
new file mode 100644
index 0000000000..eb4966124f
--- /dev/null
+++ b/changelog.d/6274.misc
@@ -0,0 +1 @@
+Port replication http server endpoints to async/await.
-- 
cgit 1.4.1


From 387324688ee8f4314e43d1a1804df11197be977c Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 29 Oct 2019 13:10:45 +0000
Subject: Newsfile

---
 changelog.d/6275.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6275.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6275.misc b/changelog.d/6275.misc
new file mode 100644
index 0000000000..f57e2c4adb
--- /dev/null
+++ b/changelog.d/6275.misc
@@ -0,0 +1 @@
+Port room rest handlers to async/await.
-- 
cgit 1.4.1


From 20ebd2497369b63cc4555bf5e4c52306196439a6 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 29 Oct 2019 14:04:02 +0000
Subject: Fix changelog name

---
 changelog.d/6268.bugfix | 1 +
 changelog.d/6286.bugfix | 1 -
 2 files changed, 1 insertion(+), 1 deletion(-)
 create mode 100644 changelog.d/6268.bugfix
 delete mode 100644 changelog.d/6286.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6268.bugfix b/changelog.d/6268.bugfix
new file mode 100644
index 0000000000..a4bebec1c7
--- /dev/null
+++ b/changelog.d/6268.bugfix
@@ -0,0 +1 @@
+Fix bug where room directory search was case sensitive.
diff --git a/changelog.d/6286.bugfix b/changelog.d/6286.bugfix
deleted file mode 100644
index a4bebec1c7..0000000000
--- a/changelog.d/6286.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where room directory search was case sensitive.
-- 
cgit 1.4.1


From fec7d88645191778db66d8873f9cdf0a0287bc53 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 29 Oct 2019 14:27:18 +0000
Subject: Newsfile

---
 changelog.d/6276.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6276.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6276.misc b/changelog.d/6276.misc
new file mode 100644
index 0000000000..5f5144a9ee
--- /dev/null
+++ b/changelog.d/6276.misc
@@ -0,0 +1 @@
+Port `federation_server.py` to async/await.
-- 
cgit 1.4.1


From 9ffcf0f7ba72f16e366f04db6384a9233b1808cb Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Tue, 29 Oct 2019 14:28:54 +0000
Subject: 1.5.0

---
 CHANGES.md              | 25 ++++++++++++++++++-------
 changelog.d/6268.bugfix |  1 -
 debian/changelog        |  6 ++++++
 synapse/__init__.py     |  2 +-
 4 files changed, 25 insertions(+), 9 deletions(-)
 delete mode 100644 changelog.d/6268.bugfix

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index c59b139eae..6faa4b8dce 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,17 @@
+Synapse 1.5.0 (2019-10-29)
+==========================
+
+Security updates
+----------------
+
+This release includes a security fix ([\#6262](https://github.com/matrix-org/synapse/issues/6262), below). Administrators are encouraged to upgrade as soon as possible.
+
+Bugfixes
+--------
+
+- Fix bug where room directory search was case sensitive. ([\#6268](https://github.com/matrix-org/synapse/issues/6268))
+
+
 Synapse 1.5.0rc2 (2019-10-28)
 =============================
 
@@ -19,13 +33,6 @@ Internal Changes
 Synapse 1.5.0rc1 (2019-10-24)
 ==========================
 
-This release includes a database migration step **which may take a long time to complete**:
-
-- Allow devices to be marked as hidden, for use by features such as cross-signing.
-  This adds a new field with a default value to the devices field in the database,
-  and so the database upgrade may take a long time depending on how many devices
-  are in the database. ([\#5759](https://github.com/matrix-org/synapse/issues/5759))
-
 Features
 --------
 
@@ -69,6 +76,10 @@ Internal Changes
 ----------------
 
 - Update `user_filters` table to have a unique index, and non-null columns. Thanks to @pik for contributing this. ([\#1172](https://github.com/matrix-org/synapse/issues/1172), [\#6175](https://github.com/matrix-org/synapse/issues/6175), [\#6184](https://github.com/matrix-org/synapse/issues/6184))
+- Allow devices to be marked as hidden, for use by features such as cross-signing.
+  This adds a new field with a default value to the devices field in the database,
+  and so the database upgrade may take a long time depending on how many devices
+  are in the database. ([\#5759](https://github.com/matrix-org/synapse/issues/5759))
 - Move lookup-related functions from RoomMemberHandler to IdentityHandler. ([\#5978](https://github.com/matrix-org/synapse/issues/5978))
 - Improve performance of the public room list directory. ([\#6019](https://github.com/matrix-org/synapse/issues/6019), [\#6152](https://github.com/matrix-org/synapse/issues/6152), [\#6153](https://github.com/matrix-org/synapse/issues/6153), [\#6154](https://github.com/matrix-org/synapse/issues/6154))
 - Edit header dicts docstrings in `SimpleHttpClient` to note that `str` or `bytes` can be passed as header keys. ([\#6077](https://github.com/matrix-org/synapse/issues/6077))
diff --git a/changelog.d/6268.bugfix b/changelog.d/6268.bugfix
deleted file mode 100644
index a4bebec1c7..0000000000
--- a/changelog.d/6268.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where room directory search was case sensitive.
diff --git a/debian/changelog b/debian/changelog
index 02f2b508c2..acda7e5c63 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.5.0) stable; urgency=medium
+
+  * New synapse release 1.5.0.
+
+ -- Synapse Packaging team   Tue, 29 Oct 2019 14:28:41 +0000
+
 matrix-synapse-py3 (1.4.1) stable; urgency=medium
 
   * New synapse release 1.4.1.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index d0f92ffbf3..8587ffa76f 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.5.0rc2"
+__version__ = "1.5.0"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 7dd7a385f9c1eca2369840240a49263668027cde Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 29 Oct 2019 15:09:48 +0000
Subject: Newsfile

---
 changelog.d/6280.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6280.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6280.misc b/changelog.d/6280.misc
new file mode 100644
index 0000000000..96a0eb21b2
--- /dev/null
+++ b/changelog.d/6280.misc
@@ -0,0 +1 @@
+Port receipt and read markers to async/await.
-- 
cgit 1.4.1


From d79151921ac6b1770533eef098f78db77ea6d528 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 29 Oct 2019 15:39:44 +0000
Subject: Fix CI for synapse_port_db (#6276)

* Don't use a virtualenv

* Generate the server's signing key to allow it to start

* Add signing key paths to CI configuration files

* Use a Python script to create the postgresql database

* Improve logging
---
 .buildkite/postgres-config.yaml            |  2 ++
 .buildkite/scripts/create_postgres_db.py   | 36 ++++++++++++++++++++++++++++++
 .buildkite/scripts/test_synapse_port_db.sh | 15 +++++++++----
 .buildkite/sqlite-config.yaml              |  2 ++
 changelog.d/6276.misc                      |  1 +
 5 files changed, 52 insertions(+), 4 deletions(-)
 create mode 100755 .buildkite/scripts/create_postgres_db.py
 create mode 100644 changelog.d/6276.misc

(limited to 'changelog.d')

diff --git a/.buildkite/postgres-config.yaml b/.buildkite/postgres-config.yaml
index 23db43fac9..a35fec394d 100644
--- a/.buildkite/postgres-config.yaml
+++ b/.buildkite/postgres-config.yaml
@@ -3,6 +3,8 @@
 # CI's Docker setup at the point where this file is considered.
 server_name: "test"
 
+signing_key_path: "/src/.buildkite/test.signing.key"
+
 report_stats: false
 
 database:
diff --git a/.buildkite/scripts/create_postgres_db.py b/.buildkite/scripts/create_postgres_db.py
new file mode 100755
index 0000000000..df6082b0ac
--- /dev/null
+++ b/.buildkite/scripts/create_postgres_db.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from synapse.storage.engines import create_engine
+
+logger = logging.getLogger("create_postgres_db")
+
+if __name__ == "__main__":
+    # Create a PostgresEngine.
+    db_engine = create_engine({"name": "psycopg2", "args": {}})
+
+    # Connect to postgres to create the base database.
+    # We use "postgres" as a database because it's bound to exist and the "synapse" one
+    # doesn't exist yet.
+    db_conn = db_engine.module.connect(
+        user="postgres", host="postgres", password="postgres", dbname="postgres"
+    )
+    db_conn.autocommit = True
+    cur = db_conn.cursor()
+    cur.execute("CREATE DATABASE synapse;")
+    cur.close()
+    db_conn.close()
diff --git a/.buildkite/scripts/test_synapse_port_db.sh b/.buildkite/scripts/test_synapse_port_db.sh
index 7defd47bc6..9ed2177635 100755
--- a/.buildkite/scripts/test_synapse_port_db.sh
+++ b/.buildkite/scripts/test_synapse_port_db.sh
@@ -9,9 +9,7 @@
 set -xe
 cd `dirname $0`/../..
 
-# Create a virtualenv and use it.
-virtualenv env
-source env/bin/activate
+echo "--- Install dependencies"
 
 # Install dependencies for this test.
 pip install psycopg2 coverage coverage-enable-subprocess
@@ -19,11 +17,20 @@ pip install psycopg2 coverage coverage-enable-subprocess
 # Install Synapse itself. This won't update any libraries.
 pip install -e .
 
+echo "--- Generate the signing key"
+
+# Generate the server's signing key.
+python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
+
+echo "--- Prepare the databases"
+
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
 scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
 
 # Create the PostgreSQL database.
-PGPASSWORD=postgres createdb -h postgres -U postgres synapse
+./.buildkite/scripts/create_postgres_db.py
+
+echo "+++ Run synapse_port_db"
 
 # Run the script
 coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
diff --git a/.buildkite/sqlite-config.yaml b/.buildkite/sqlite-config.yaml
index 56503cc4ce..635b921764 100644
--- a/.buildkite/sqlite-config.yaml
+++ b/.buildkite/sqlite-config.yaml
@@ -3,6 +3,8 @@
 # schema and run background updates on it.
 server_name: "test"
 
+signing_key_path: "/src/.buildkite/test.signing.key"
+
 report_stats: false
 
 database:
diff --git a/changelog.d/6276.misc b/changelog.d/6276.misc
new file mode 100644
index 0000000000..4a4428251e
--- /dev/null
+++ b/changelog.d/6276.misc
@@ -0,0 +1 @@
+Add a CI job to test the `synapse_port_db` script.
-- 
cgit 1.4.1


From b39ca49db167e814d7848c6a4872c8b65ec03ff1 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 30 Oct 2019 11:00:15 +0000
Subject: Handle FileNotFound error in checking git repository version (#6284)

---
 changelog.d/6284.bugfix       |  1 +
 synapse/util/versionstring.py | 10 ++++++----
 2 files changed, 7 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6284.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6284.bugfix b/changelog.d/6284.bugfix
new file mode 100644
index 0000000000..cf15053d2d
--- /dev/null
+++ b/changelog.d/6284.bugfix
@@ -0,0 +1 @@
+Prevent errors from appearing on Synapse startup if `git` is not installed.
\ No newline at end of file
diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py
index fa404b9d75..ab7d03af3a 100644
--- a/synapse/util/versionstring.py
+++ b/synapse/util/versionstring.py
@@ -42,6 +42,7 @@ def get_version_string(module):
     try:
         null = open(os.devnull, "w")
         cwd = os.path.dirname(os.path.abspath(module.__file__))
+
         try:
             git_branch = (
                 subprocess.check_output(
@@ -51,7 +52,8 @@ def get_version_string(module):
                 .decode("ascii")
             )
             git_branch = "b=" + git_branch
-        except subprocess.CalledProcessError:
+        except (subprocess.CalledProcessError, FileNotFoundError):
+            # FileNotFoundError can arise when git is not installed
             git_branch = ""
 
         try:
@@ -63,7 +65,7 @@ def get_version_string(module):
                 .decode("ascii")
             )
             git_tag = "t=" + git_tag
-        except subprocess.CalledProcessError:
+        except (subprocess.CalledProcessError, FileNotFoundError):
             git_tag = ""
 
         try:
@@ -74,7 +76,7 @@ def get_version_string(module):
                 .strip()
                 .decode("ascii")
             )
-        except subprocess.CalledProcessError:
+        except (subprocess.CalledProcessError, FileNotFoundError):
             git_commit = ""
 
         try:
@@ -89,7 +91,7 @@ def get_version_string(module):
             )
 
             git_dirty = "dirty" if is_dirty else ""
-        except subprocess.CalledProcessError:
+        except (subprocess.CalledProcessError, FileNotFoundError):
             git_dirty = ""
 
         if git_branch or git_tag or git_commit or git_dirty:
-- 
cgit 1.4.1
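
The distinction the diff above relies on: subprocess.check_output raises CalledProcessError when git runs but exits non-zero, and FileNotFoundError when the git binary itself is missing, so both must be caught to fall back to an empty string. A standalone sketch of one of the four call sites:

    import subprocess

    def get_git_branch():
        try:
            return (
                subprocess.check_output(
                    ["git", "rev-parse", "--abbrev-ref", "HEAD"]
                )
                .strip()
                .decode("ascii")
            )
        except (subprocess.CalledProcessError, FileNotFoundError):
            # CalledProcessError: git ran but exited non-zero (e.g. not a repo).
            # FileNotFoundError: git is not installed at all.
            return ""
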


From 9178ac1b6a79727aac3859a2ff2ac91a27da5bd4 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 30 Oct 2019 11:07:18 +0000
Subject: Remove redundant arguments to CI's flake8 (#6277)

---
 changelog.d/6277.misc | 1 +
 tox.ini               | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6277.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6277.misc b/changelog.d/6277.misc
new file mode 100644
index 0000000000..490713577f
--- /dev/null
+++ b/changelog.d/6277.misc
@@ -0,0 +1 @@
+Remove redundant CLI parameters on CI's `flake8` step.
\ No newline at end of file
diff --git a/tox.ini b/tox.ini
index e3a53f340a..b381fbe06d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -117,7 +117,7 @@ deps =
     black==19.3b0  # We pin so that our tests don't start failing on new releases of black.
 commands =
     python -m black --check --diff .
-    /bin/sh -c "flake8 synapse tests scripts scripts-dev scripts/hash_password scripts/register_new_matrix_user scripts/synapse_port_db synctl {env:PEP8SUFFIX:}"
+    /bin/sh -c "flake8 synapse tests scripts scripts-dev synctl {env:PEP8SUFFIX:}"
     {toxinidir}/scripts-dev/config-lint.sh
 
 [testenv:check_isort]
-- 
cgit 1.4.1


From 46c12918add132d8d0cbb808b499c815e2745f72 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 30 Oct 2019 11:07:42 +0000
Subject: Fix typo in domain name in account_threepid_delegates config option
 (#6273)

---
 changelog.d/6273.doc           | 1 +
 docs/sample_config.yaml        | 2 +-
 synapse/config/registration.py | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6273.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6273.doc b/changelog.d/6273.doc
new file mode 100644
index 0000000000..21a41d987d
--- /dev/null
+++ b/changelog.d/6273.doc
@@ -0,0 +1 @@
+Fix a small typo in `account_threepid_delegates` configuration option.
\ No newline at end of file
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 6c81c0db75..d2f4aff826 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -955,7 +955,7 @@ uploads_path: "DATADIR/uploads"
 # If a delegate is specified, the config option public_baseurl must also be filled out.
 #
 account_threepid_delegates:
-    #email: https://example.com     # Delegate email sending to example.org
+    #email: https://example.com     # Delegate email sending to example.com
     #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
 
 # Users who register on this homeserver will automatically be joined
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index ab41623b2b..1f6dac69da 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -300,7 +300,7 @@ class RegistrationConfig(Config):
         # If a delegate is specified, the config option public_baseurl must also be filled out.
         #
         account_threepid_delegates:
-            #email: https://example.com     # Delegate email sending to example.org
+            #email: https://example.com     # Delegate email sending to example.com
             #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
 
         # Users who register on this homeserver will automatically be joined
-- 
cgit 1.4.1


From 7955abeaac54e97332bd42186299e648bb3ace6c Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 30 Oct 2019 11:16:19 +0000
Subject: Fix small typo in comment (#6269)

---
 changelog.d/6269.misc                   | 1 +
 synapse/federation/federation_server.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6269.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6269.misc b/changelog.d/6269.misc
new file mode 100644
index 0000000000..9fd333cc89
--- /dev/null
+++ b/changelog.d/6269.misc
@@ -0,0 +1 @@
+Fix incorrect comment regarding the functionality of an `if` statement.
\ No newline at end of file
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 7c331753ad..d5a19764d2 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -145,7 +145,7 @@ class FederationServer(FederationBase):
 
         logger.debug("[%s] Transaction is new", transaction.transaction_id)
 
-        # Reject if PDU count > 50 and EDU count > 100
+        # Reject if PDU count > 50 or EDU count > 100
         if len(transaction.pdus) > 50 or (
             hasattr(transaction, "edus") and len(transaction.edus) > 100
         ):
-- 
cgit 1.4.1
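
The fix above corrects the comment, not the condition: a transaction is rejected when either limit is exceeded, which (by De Morgan) is the same as accepting it only when the PDU count is at most 50 and the EDU count at most 100. The check in isolation, as a hypothetical standalone predicate:

    def should_reject(pdu_count, edu_count):
        # Reject if PDU count > 50 or EDU count > 100; exceeding either
        # limit on its own is enough.
        return pdu_count > 50 or edu_count > 100

    # should_reject(51, 0)   -> True   (too many PDUs)
    # should_reject(0, 101)  -> True   (too many EDUs)
    # should_reject(50, 100) -> False  (both within limits)
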


From 2cab02f9d123924a6ccbf8e59b7e973f3c0a3d26 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 30 Oct 2019 11:17:14 +0000
Subject: Update CI to run isort on scripts and scripts-dev (#6270)

---
 changelog.d/6270.misc       | 1 +
 scripts-dev/update_database | 3 +--
 tox.ini                     | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6270.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6270.misc b/changelog.d/6270.misc
new file mode 100644
index 0000000000..d1c5811323
--- /dev/null
+++ b/changelog.d/6270.misc
@@ -0,0 +1 @@
+Update CI to run `isort` over the `scripts` and `scripts-dev` directories.
\ No newline at end of file
diff --git a/scripts-dev/update_database b/scripts-dev/update_database
index 10166583e1..27a1ad1e7e 100755
--- a/scripts-dev/update_database
+++ b/scripts-dev/update_database
@@ -25,8 +25,8 @@ from twisted.internet import defer, reactor
 from synapse.config.homeserver import HomeServerConfig
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.server import HomeServer
-from synapse.storage.engines import create_engine
 from synapse.storage import DataStore
+from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
 
 logger = logging.getLogger("update_database")
@@ -122,4 +122,3 @@ if __name__ == "__main__":
     ))
 
     reactor.run()
-
diff --git a/tox.ini b/tox.ini
index b381fbe06d..50b6afe611 100644
--- a/tox.ini
+++ b/tox.ini
@@ -123,7 +123,7 @@ commands =
 [testenv:check_isort]
 skip_install = True
 deps = isort
-commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests"
+commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests scripts-dev scripts"
 
 [testenv:check-newsfragment]
 skip_install = True
-- 
cgit 1.4.1


From a2276d4d3ca72896582ef24d01fdff6c01e38689 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 30 Oct 2019 11:28:48 +0000
Subject: Fix log line that was printing undefined value (#6278)

---
 changelog.d/6278.bugfix        | 1 +
 synapse/handlers/federation.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6278.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6278.bugfix b/changelog.d/6278.bugfix
new file mode 100644
index 0000000000..c107270461
--- /dev/null
+++ b/changelog.d/6278.bugfix
@@ -0,0 +1 @@
+Fix exception when remote servers attempt to join a room that they're not allowed to join.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 488058fe68..2da520e6e8 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1250,7 +1250,7 @@ class FederationHandler(BaseHandler):
                 builder=builder
             )
         except AuthError as e:
-            logger.warn("Failed to create join %r because %s", event, e)
+            logger.warn("Failed to create join to %s because %s", room_id, e)
             raise e
 
         event_allowed = yield self.third_party_event_rules.check_event_allowed(
-- 
cgit 1.4.1
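
The bug fixed above: in the except branch, `event` was never bound, because the call that would have produced it is exactly what raised, so the log line itself crashed. A minimal reproduction of the failure mode, under hypothetical names:

    import logging

    logger = logging.getLogger(__name__)

    def build_event():
        raise RuntimeError("not allowed to join")

    def create_join_buggy(room_id):
        try:
            event = build_event()  # raises before `event` is assigned
        except Exception as e:
            # BUG: `event` is unbound here, so logging it raises
            # UnboundLocalError instead of recording the failure.
            logger.warning("Failed to create join %r because %s", event, e)

    def create_join_fixed(room_id):
        try:
            event = build_event()
        except Exception as e:
            # Fix: log a value that is guaranteed to be bound in scope.
            logger.warning("Failed to create join to %s because %s", room_id, e)
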


From 1de28183cb92a2967d527c175123251514f58e69 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 30 Oct 2019 11:37:56 +0000
Subject: Newsfile

---
 changelog.d/6291.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6291.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6291.misc b/changelog.d/6291.misc
new file mode 100644
index 0000000000..7b1bb4b679
--- /dev/null
+++ b/changelog.d/6291.misc
@@ -0,0 +1 @@
+Change cache descriptors to always return deferreds.
-- 
cgit 1.4.1
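
The newsfile above is terse; a sketch of the idea, under the assumption that the goal is for a cached lookup to hand back a Deferred on both the hit and miss paths, so callers never branch on whether they received a plain value (a simplified stand-in, not Synapse's actual descriptor code):

    from twisted.internet import defer

    _cache = {}

    def cached_get(key, compute):
        if key in _cache:
            # Cache hit: wrap the stored value in an already-fired Deferred.
            return defer.succeed(_cache[key])

        # Cache miss: compute (possibly asynchronously) and store the result.
        d = defer.maybeDeferred(compute, key)

        def _store(result):
            _cache[key] = result
            return result

        d.addCallback(_store)
        return d
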


From 7e179599848a1005f753a1ab58953107fc2540df Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 30 Oct 2019 11:37:56 +0000
Subject: Update email section of INSTALL.md about account_threepid_delegates
 (#6272)

---
 INSTALL.md           | 16 +++++++++-------
 changelog.d/6272.doc |  1 +
 2 files changed, 10 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6272.doc

(limited to 'changelog.d')

diff --git a/INSTALL.md b/INSTALL.md
index 69e423923b..e7b429c05d 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -413,16 +413,18 @@ For a more detailed guide to configuring your server for federation, see
 
 ## Email
 
-It is desirable for Synapse to have the capability to send email. For example,
-this is required to support the 'password reset' feature.
+It is desirable for Synapse to have the capability to send email. This allows
+Synapse to send password reset emails, send verifications when an email address
+is added to a user's account, and send email notifications to users when they
+receive new messages.
 
 To configure an SMTP server for Synapse, modify the configuration section
-headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
-and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
-``smtp_pass``, and ``require_transport_security``.
+headed `email`, and be sure to have at least the `smtp_host`, `smtp_port`
+and `notif_from` fields filled out.  You may also need to set `smtp_user`,
+`smtp_pass`, and `require_transport_security`.
 
-If Synapse is not configured with an SMTP server, password reset via email will
- be disabled by default.
+If email is not configured, password reset, registration and notifications via
+email will be disabled.
 
 ## Registering a user
 
diff --git a/changelog.d/6272.doc b/changelog.d/6272.doc
new file mode 100644
index 0000000000..232180bcdc
--- /dev/null
+++ b/changelog.d/6272.doc
@@ -0,0 +1 @@
+Update `INSTALL.md` Email section to talk about `account_threepid_delegates`.
\ No newline at end of file
-- 
cgit 1.4.1


From 9677613e9cc52af4a31e5f706ad0bec14ca645de Mon Sep 17 00:00:00 2001
From: Yash Jipkate <34203227+YashJipkate@users.noreply.github.com>
Date: Wed, 30 Oct 2019 18:00:20 +0530
Subject: Modify doc to update Google ReCaptcha terms (#6257)

---
 changelog.d/6257.doc  | 1 +
 docs/CAPTCHA_SETUP.md | 6 +++---
 2 files changed, 4 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6257.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6257.doc b/changelog.d/6257.doc
new file mode 100644
index 0000000000..e985afde0e
--- /dev/null
+++ b/changelog.d/6257.doc
@@ -0,0 +1 @@
+Modify CAPTCHA_SETUP.md to update the terms `private key` and `public key` to `secret key` and `site key` respectively. Contributed by Yash Jipkate.
diff --git a/docs/CAPTCHA_SETUP.md b/docs/CAPTCHA_SETUP.md
index 5f9057530b..331e5d059a 100644
--- a/docs/CAPTCHA_SETUP.md
+++ b/docs/CAPTCHA_SETUP.md
@@ -4,7 +4,7 @@ The captcha mechanism used is Google's ReCaptcha. This requires API keys from Go
 
 ## Getting keys
 
-Requires a public/private key pair from:
+Requires a site/secret key pair from:
 
 
 
@@ -15,8 +15,8 @@ Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option
 The keys are a config option on the home server config. If they are not
 visible, you can generate them via `--generate-config`. Set the following value:
 
-    recaptcha_public_key: YOUR_PUBLIC_KEY
-    recaptcha_private_key: YOUR_PRIVATE_KEY
+    recaptcha_public_key: YOUR_SITE_KEY
+    recaptcha_private_key: YOUR_SECRET_KEY
 
 In addition, you MUST enable captchas via:
 
-- 
cgit 1.4.1


From d3f694d628a57a7676ffb10ffba1453131a74e12 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 30 Oct 2019 14:53:09 +0000
Subject: Newsfile

---
 changelog.d/6294.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6294.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6294.misc b/changelog.d/6294.misc
new file mode 100644
index 0000000000..a3e6b8296e
--- /dev/null
+++ b/changelog.d/6294.misc
@@ -0,0 +1 @@
+Split out state storage into separate data store.
-- 
cgit 1.4.1


From ecfba89a784db12b48074ade3a44092267ba9cf7 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 30 Oct 2019 15:14:29 +0000
Subject: Newsfile

---
 changelog.d/6295.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6295.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6295.misc b/changelog.d/6295.misc
new file mode 100644
index 0000000000..a3e6b8296e
--- /dev/null
+++ b/changelog.d/6295.misc
@@ -0,0 +1 @@
+Split out state storage into separate data store.
-- 
cgit 1.4.1


From 62588eae4a1a0a894f66709a38403a153d78687d Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 30 Oct 2019 17:54:40 +0000
Subject: Changelog

---
 changelog.d/6301.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6301.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6301.feature b/changelog.d/6301.feature
new file mode 100644
index 0000000000..b7ff3fad3b
--- /dev/null
+++ b/changelog.d/6301.feature
@@ -0,0 +1 @@
+Implement label-based filtering.
-- 
cgit 1.4.1


From 0467f335847dd096913dcf404ca839f61c38758f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 30 Oct 2019 18:05:00 +0000
Subject: fix delete_existing for _persist_events (#6300)

This is part of _retry_on_integrity_error, so it should only be applied to _persist_events_and_state_updates.
---
 changelog.d/6300.misc                      | 1 +
 synapse/storage/data_stores/main/events.py | 2 +-
 synapse/storage/persist_events.py          | 5 +----
 3 files changed, 3 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/6300.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6300.misc b/changelog.d/6300.misc
new file mode 100644
index 0000000000..0b3d7a14a1
--- /dev/null
+++ b/changelog.d/6300.misc
@@ -0,0 +1 @@
+Move `persist_events` out of the main data store.
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 7c3607f308..a4dab86a13 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -82,7 +82,7 @@ def _retry_on_integrity_error(func):
     @defer.inlineCallbacks
     def f(self, *args, **kwargs):
         try:
-            res = yield func(self, *args, **kwargs)
+            res = yield func(self, *args, delete_existing=False, **kwargs)
         except self.database_engine.module.IntegrityError:
             logger.exception("IntegrityError, retrying.")
             res = yield func(self, *args, delete_existing=True, **kwargs)
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index cf66225574..931dcb6558 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -260,9 +260,7 @@ class EventsPersistenceStorage(object):
         self._event_persist_queue.handle_queue(room_id, persisting_queue)
 
     @defer.inlineCallbacks
-    def _persist_events(
-        self, events_and_contexts, backfilled=False, delete_existing=False
-    ):
+    def _persist_events(self, events_and_contexts, backfilled=False):
         """Calculates the change to current state and forward extremities, and
         persists the given events and with those updates.
 
@@ -412,7 +410,6 @@ class EventsPersistenceStorage(object):
                 state_delta_for_room=state_delta_for_room,
                 new_forward_extremeties=new_forward_extremeties,
                 backfilled=backfilled,
-                delete_existing=delete_existing,
             )
 
     @defer.inlineCallbacks
-- 
cgit 1.4.1
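
The shape of the decorator the fix above tightens: the first attempt must run with delete_existing=False, and only the retry, after an IntegrityError, passes delete_existing=True, which is why the keyword belongs to the decorator rather than to _persist_events' callers. A simplified synchronous sketch (the Deferred plumbing from the diff is stripped out):

    class IntegrityError(Exception):
        pass

    def retry_on_integrity_error(func):
        def wrapper(*args, **kwargs):
            try:
                # First attempt: never delete existing rows.
                return func(*args, delete_existing=False, **kwargs)
            except IntegrityError:
                # Retry once, this time clearing out the conflicting rows.
                return func(*args, delete_existing=True, **kwargs)

        return wrapper

    @retry_on_integrity_error
    def persist_events(events, delete_existing=False):
        ...
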


From 54fef094b31e0401d6d35efdf7d5d6b0b9e5d51f Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 31 Oct 2019 10:23:24 +0000
Subject: Remove usage of deprecated logger.warn method from codebase (#6271)

Replace every instance of `logger.warn` with `logger.warning` as the former is deprecated.
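
Logger.warn is a deprecated alias of Logger.warning in Python 3, and calling it emits a DeprecationWarning (hidden by default). A quick way to observe the difference:

    import logging
    import warnings

    logging.basicConfig()
    logger = logging.getLogger("demo")

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        logger.warn("old spelling")      # deprecated alias
        logger.warning("new spelling")   # preferred method

    # Exactly one warning is recorded, and it comes from logger.warn:
    assert len(caught) == 1
    assert caught[0].category is DeprecationWarning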
---
 changelog.d/6271.misc                              |  1 +
 scripts/move_remote_media_to_new_store.py          |  2 +-
 scripts/synapse_port_db                            |  6 ++--
 synapse/api/auth.py                                |  2 +-
 synapse/app/__init__.py                            |  4 ++-
 synapse/app/appservice.py                          |  4 +--
 synapse/app/client_reader.py                       |  4 +--
 synapse/app/event_creator.py                       |  4 +--
 synapse/app/federation_reader.py                   |  4 +--
 synapse/app/federation_sender.py                   |  4 +--
 synapse/app/frontend_proxy.py                      |  4 +--
 synapse/app/homeserver.py                          |  6 ++--
 synapse/app/media_repository.py                    |  4 +--
 synapse/app/pusher.py                              |  4 +--
 synapse/app/synchrotron.py                         |  4 +--
 synapse/app/user_dir.py                            |  4 +--
 synapse/config/key.py                              |  4 +--
 synapse/config/logger.py                           |  2 +-
 synapse/event_auth.py                              |  2 +-
 synapse/federation/federation_base.py              |  6 ++--
 synapse/federation/federation_client.py            |  8 +++--
 synapse/federation/federation_server.py            | 20 ++++++------
 synapse/federation/sender/transaction_manager.py   |  4 +--
 synapse/federation/transport/server.py             |  8 +++--
 synapse/groups/attestations.py                     |  2 +-
 synapse/groups/groups_server.py                    |  2 +-
 synapse/handlers/auth.py                           |  6 ++--
 synapse/handlers/device.py                         |  4 +--
 synapse/handlers/devicemessage.py                  |  2 +-
 synapse/handlers/federation.py                     | 36 ++++++++++++----------
 synapse/handlers/groups_local.py                   |  2 +-
 synapse/handlers/identity.py                       |  6 ++--
 synapse/handlers/message.py                        |  2 +-
 synapse/handlers/profile.py                        |  2 +-
 synapse/handlers/room.py                           |  2 +-
 synapse/http/client.py                             |  4 +--
 synapse/http/federation/srv_resolver.py            |  2 +-
 synapse/http/matrixfederationclient.py             | 10 +++---
 synapse/http/request_metrics.py                    |  2 +-
 synapse/http/server.py                             |  2 +-
 synapse/http/servlet.py                            |  4 +--
 synapse/http/site.py                               |  4 +--
 synapse/logging/context.py                         |  2 +-
 synapse/push/httppusher.py                         |  4 +--
 synapse/push/push_rule_evaluator.py                |  4 +--
 synapse/replication/http/_base.py                  |  2 +-
 synapse/replication/http/membership.py             |  2 +-
 synapse/replication/tcp/client.py                  |  2 +-
 synapse/replication/tcp/protocol.py                |  2 +-
 synapse/rest/admin/__init__.py                     |  2 +-
 synapse/rest/client/v1/login.py                    |  2 +-
 synapse/rest/client/v2_alpha/account.py            | 14 ++++-----
 synapse/rest/client/v2_alpha/register.py           | 10 +++---
 synapse/rest/client/v2_alpha/sync.py               |  2 +-
 synapse/rest/media/v1/media_repository.py          | 12 +++++---
 synapse/rest/media/v1/preview_url_resource.py      | 16 +++++-----
 synapse/rest/media/v1/thumbnail_resource.py        |  4 +--
 .../resource_limits_server_notices.py              |  2 +-
 synapse/storage/_base.py                           |  6 ++--
 synapse/storage/data_stores/main/pusher.py         |  2 +-
 synapse/storage/data_stores/main/search.py         |  2 +-
 synapse/util/async_helpers.py                      |  2 +-
 synapse/util/caches/__init__.py                    |  2 +-
 synapse/util/metrics.py                            |  6 ++--
 synapse/util/rlimit.py                             |  2 +-
 65 files changed, 164 insertions(+), 149 deletions(-)
 create mode 100644 changelog.d/6271.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6271.misc b/changelog.d/6271.misc
new file mode 100644
index 0000000000..2369760272
--- /dev/null
+++ b/changelog.d/6271.misc
@@ -0,0 +1 @@
+Replace every instance of the `logger.warn` method with `logger.warning`, as the former is deprecated.
\ No newline at end of file
diff --git a/scripts/move_remote_media_to_new_store.py b/scripts/move_remote_media_to_new_store.py
index 12747c6024..b5b63933ab 100755
--- a/scripts/move_remote_media_to_new_store.py
+++ b/scripts/move_remote_media_to_new_store.py
@@ -72,7 +72,7 @@ def move_media(origin_server, file_id, src_paths, dest_paths):
     # check that the original exists
     original_file = src_paths.remote_media_filepath(origin_server, file_id)
     if not os.path.exists(original_file):
-        logger.warn(
+        logger.warning(
             "Original for %s/%s (%s) does not exist",
             origin_server,
             file_id,
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 54faed1e83..0d3321682c 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -157,7 +157,7 @@ class Store(
                         )
                     except self.database_engine.module.DatabaseError as e:
                         if self.database_engine.is_deadlock(e):
-                            logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
+                            logger.warning("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
                             if i < N:
                                 i += 1
                                 conn.rollback()
@@ -432,7 +432,7 @@ class Porter(object):
                     for row in rows:
                         d = dict(zip(headers, row))
                         if "\0" in d['value']:
-                            logger.warn('dropping search row %s', d)
+                            logger.warning('dropping search row %s', d)
                         else:
                             rows_dict.append(d)
 
@@ -647,7 +647,7 @@ class Porter(object):
             if isinstance(col, bytes):
                 return bytearray(col)
             elif isinstance(col, string_types) and "\0" in col:
-                logger.warn(
+                logger.warning(
                     "DROPPING ROW: NUL value in table %s col %s: %r",
                     table,
                     headers[j],
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 53f3bb0fa8..5d0b7d2801 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -497,7 +497,7 @@ class Auth(object):
         token = self.get_access_token_from_request(request)
         service = self.store.get_app_service_by_token(token)
         if not service:
-            logger.warn("Unrecognised appservice access token.")
+            logger.warning("Unrecognised appservice access token.")
             raise InvalidClientTokenError()
         request.authenticated_entity = service.sender
         return defer.succeed(service)
diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py
index d877c77834..a01bac2997 100644
--- a/synapse/app/__init__.py
+++ b/synapse/app/__init__.py
@@ -44,6 +44,8 @@ def check_bind_error(e, address, bind_addresses):
         bind_addresses (list): Addresses on which the service listens.
     """
     if address == "0.0.0.0" and "::" in bind_addresses:
-        logger.warn("Failed to listen on 0.0.0.0, continuing because listening on [::]")
+        logger.warning(
+            "Failed to listen on 0.0.0.0, continuing because listening on [::]"
+        )
     else:
         raise e
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index 767b87d2db..02b900f382 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -94,7 +94,7 @@ class AppserviceServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -103,7 +103,7 @@ class AppserviceServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
 
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index dbcc414c42..dadb487d5f 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -153,7 +153,7 @@ class ClientReaderServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -162,7 +162,7 @@ class ClientReaderServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
 
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index f20d810ece..d110599a35 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -147,7 +147,7 @@ class EventCreatorServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -156,7 +156,7 @@ class EventCreatorServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
 
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 1ef027a88c..418c086254 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -132,7 +132,7 @@ class FederationReaderServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -141,7 +141,7 @@ class FederationReaderServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
 
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index 04fbb407af..139221ad34 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -123,7 +123,7 @@ class FederationSenderServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -132,7 +132,7 @@ class FederationSenderServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
 
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index 9504bfbc70..e647459d0e 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -204,7 +204,7 @@ class FrontendProxyServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -213,7 +213,7 @@ class FrontendProxyServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
 
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index eb54f56853..8997c1f9e7 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -282,7 +282,7 @@ class SynapseHomeServer(HomeServer):
                     reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -291,7 +291,7 @@ class SynapseHomeServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
     def run_startup_checks(self, db_conn, database_engine):
         all_users_native = are_all_users_on_domain(
@@ -569,7 +569,7 @@ def run(hs):
                 hs.config.report_stats_endpoint, stats
             )
         except Exception as e:
-            logger.warn("Error reporting stats: %s", e)
+            logger.warning("Error reporting stats: %s", e)
 
     def performance_stats_init():
         try:
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index 6bc7202f33..2c6dd3ef02 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -120,7 +120,7 @@ class MediaRepositoryServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -129,7 +129,7 @@ class MediaRepositoryServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
 
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index d84732ee3c..01a5ffc363 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -114,7 +114,7 @@ class PusherServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -123,7 +123,7 @@ class PusherServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
 
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 6a7e2fa707..b14da09f47 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -326,7 +326,7 @@ class SynchrotronServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -335,7 +335,7 @@ class SynchrotronServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
 
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index a5d6dc7915..6cb100319f 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -150,7 +150,7 @@ class UserDirectoryServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -159,7 +159,7 @@ class UserDirectoryServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
 
diff --git a/synapse/config/key.py b/synapse/config/key.py
index ec5d430afb..52ff1b2621 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -125,7 +125,7 @@ class KeyConfig(Config):
 
         # if neither trusted_key_servers nor perspectives are given, use the default.
         if "perspectives" not in config and "trusted_key_servers" not in config:
-            logger.warn(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
+            logger.warning(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
             key_servers = [{"server_name": "matrix.org"}]
         else:
             key_servers = config.get("trusted_key_servers", [])
@@ -156,7 +156,7 @@ class KeyConfig(Config):
         if not self.macaroon_secret_key:
             # Unfortunately, there are people out there that don't have this
             # set. Lets just be "nice" and derive one from their secret key.
-            logger.warn("Config is missing macaroon_secret_key")
+            logger.warning("Config is missing macaroon_secret_key")
             seed = bytes(self.signing_key[0])
             self.macaroon_secret_key = hashlib.sha256(seed).digest()
 
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index be92e33f93..2d2c1e54df 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -182,7 +182,7 @@ def _reload_stdlib_logging(*args, log_config=None):
     logger = logging.getLogger("")
 
     if not log_config:
-        logger.warn("Reloaded a blank config?")
+        logger.warning("Reloaded a blank config?")
 
     logging.config.dictConfig(log_config)
 
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index e7b722547b..ec3243b27b 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -77,7 +77,7 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
     if auth_events is None:
         # Oh, we don't know what the state of the room was, so we
         # are trusting that this is allowed (at least for now)
-        logger.warn("Trusting event: %s", event.event_id)
+        logger.warning("Trusting event: %s", event.event_id)
         return
 
     if event.type == EventTypes.Create:
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 223aace0d9..0e22183280 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -102,7 +102,7 @@ class FederationBase(object):
                     pass
 
             if not res:
-                logger.warn(
+                logger.warning(
                     "Failed to find copy of %s with valid signature", pdu.event_id
                 )
 
@@ -173,7 +173,7 @@ class FederationBase(object):
                     return redacted_event
 
                 if self.spam_checker.check_event_for_spam(pdu):
-                    logger.warn(
+                    logger.warning(
                         "Event contains spam, redacting %s: %s",
                         pdu.event_id,
                         pdu.get_pdu_json(),
@@ -185,7 +185,7 @@ class FederationBase(object):
         def errback(failure, pdu):
             failure.trap(SynapseError)
             with PreserveLoggingContext(ctx):
-                logger.warn(
+                logger.warning(
                     "Signature check failed for %s: %s",
                     pdu.event_id,
                     failure.getErrorMessage(),
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index f5c1632916..595706d01a 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -522,12 +522,12 @@ class FederationClient(FederationBase):
                 res = yield callback(destination)
                 return res
             except InvalidResponseError as e:
-                logger.warn("Failed to %s via %s: %s", description, destination, e)
+                logger.warning("Failed to %s via %s: %s", description, destination, e)
             except HttpResponseException as e:
                 if not 500 <= e.code < 600:
                     raise e.to_synapse_error()
                 else:
-                    logger.warn(
+                    logger.warning(
                         "Failed to %s via %s: %i %s",
                         description,
                         destination,
@@ -535,7 +535,9 @@ class FederationClient(FederationBase):
                         e.args[0],
                     )
             except Exception:
-                logger.warn("Failed to %s via %s", description, destination, exc_info=1)
+                logger.warning(
+                    "Failed to %s via %s", description, destination, exc_info=1
+                )
 
         raise SynapseError(502, "Failed to %s via any server" % (description,))
 
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index d5a19764d2..d942d77a72 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -220,7 +220,7 @@ class FederationServer(FederationBase):
             try:
                 await self.check_server_matches_acl(origin_host, room_id)
             except AuthError as e:
-                logger.warn("Ignoring PDUs for room %s from banned server", room_id)
+                logger.warning("Ignoring PDUs for room %s from banned server", room_id)
                 for pdu in pdus_by_room[room_id]:
                     event_id = pdu.event_id
                     pdu_results[event_id] = e.error_dict()
@@ -233,7 +233,7 @@ class FederationServer(FederationBase):
                         await self._handle_received_pdu(origin, pdu)
                         pdu_results[event_id] = {}
                     except FederationError as e:
-                        logger.warn("Error handling PDU %s: %s", event_id, e)
+                        logger.warning("Error handling PDU %s: %s", event_id, e)
                         pdu_results[event_id] = {"error": str(e)}
                     except Exception as e:
                         f = failure.Failure()
@@ -333,7 +333,9 @@ class FederationServer(FederationBase):
 
         room_version = await self.store.get_room_version(room_id)
         if room_version not in supported_versions:
-            logger.warn("Room version %s not in %s", room_version, supported_versions)
+            logger.warning(
+                "Room version %s not in %s", room_version, supported_versions
+            )
             raise IncompatibleRoomVersionError(room_version=room_version)
 
         pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
@@ -679,7 +681,7 @@ def server_matches_acl_event(server_name, acl_event):
     # server name is a literal IP
     allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
     if not isinstance(allow_ip_literals, bool):
-        logger.warn("Ignorning non-bool allow_ip_literals flag")
+        logger.warning("Ignorning non-bool allow_ip_literals flag")
         allow_ip_literals = True
     if not allow_ip_literals:
         # check for ipv6 literals. These start with '['.
@@ -693,7 +695,7 @@ def server_matches_acl_event(server_name, acl_event):
     # next,  check the deny list
     deny = acl_event.content.get("deny", [])
     if not isinstance(deny, (list, tuple)):
-        logger.warn("Ignorning non-list deny ACL %s", deny)
+        logger.warning("Ignorning non-list deny ACL %s", deny)
         deny = []
     for e in deny:
         if _acl_entry_matches(server_name, e):
@@ -703,7 +705,7 @@ def server_matches_acl_event(server_name, acl_event):
     # then the allow list.
     allow = acl_event.content.get("allow", [])
     if not isinstance(allow, (list, tuple)):
-        logger.warn("Ignorning non-list allow ACL %s", allow)
+        logger.warning("Ignorning non-list allow ACL %s", allow)
         allow = []
     for e in allow:
         if _acl_entry_matches(server_name, e):
@@ -717,7 +719,7 @@ def server_matches_acl_event(server_name, acl_event):
 
 def _acl_entry_matches(server_name, acl_entry):
     if not isinstance(acl_entry, six.string_types):
-        logger.warn(
+        logger.warning(
             "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
         )
         return False
@@ -772,7 +774,7 @@ class FederationHandlerRegistry(object):
     async def on_edu(self, edu_type, origin, content):
         handler = self.edu_handlers.get(edu_type)
         if not handler:
-            logger.warn("No handler registered for EDU type %s", edu_type)
+            logger.warning("No handler registered for EDU type %s", edu_type)
 
         with start_active_span_from_edu(content, "handle_edu"):
             try:
@@ -785,7 +787,7 @@ class FederationHandlerRegistry(object):
     def on_query(self, query_type, args):
         handler = self.query_handlers.get(query_type)
         if not handler:
-            logger.warn("No handler registered for query type %s", query_type)
+            logger.warning("No handler registered for query type %s", query_type)
             raise NotFoundError("No handler for Query type '%s'" % (query_type,))
 
         return handler(args)
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index 5b6c79c51a..67b3e1ab6e 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -146,7 +146,7 @@ class TransactionManager(object):
             if code == 200:
                 for e_id, r in response.get("pdus", {}).items():
                     if "error" in r:
-                        logger.warn(
+                        logger.warning(
                             "TX [%s] {%s} Remote returned error for %s: %s",
                             destination,
                             txn_id,
@@ -155,7 +155,7 @@ class TransactionManager(object):
                         )
             else:
                 for p in pdus:
-                    logger.warn(
+                    logger.warning(
                         "TX [%s] {%s} Failed to send event %s",
                         destination,
                         txn_id,
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 0f16f21c2d..d6c23f22bd 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -202,7 +202,7 @@ def _parse_auth_header(header_bytes):
         sig = strip_quotes(param_dict["sig"])
         return origin, key, sig
     except Exception as e:
-        logger.warn(
+        logger.warning(
             "Error parsing auth header '%s': %s",
             header_bytes.decode("ascii", "replace"),
             e,
@@ -287,10 +287,12 @@ class BaseFederationServlet(object):
             except NoAuthenticationError:
                 origin = None
                 if self.REQUIRE_AUTH:
-                    logger.warn("authenticate_request failed: missing authentication")
+                    logger.warning(
+                        "authenticate_request failed: missing authentication"
+                    )
                     raise
             except Exception as e:
-                logger.warn("authenticate_request failed: %s", e)
+                logger.warning("authenticate_request failed: %s", e)
                 raise
 
             request_tags = {
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index dfd7ae041b..d950a8b246 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -181,7 +181,7 @@ class GroupAttestionRenewer(object):
                 elif not self.is_mine_id(user_id):
                     destination = get_domain_from_id(user_id)
                 else:
-                    logger.warn(
+                    logger.warning(
                         "Incorrectly trying to do attestations for user: %r in %r",
                         user_id,
                         group_id,
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 8f10b6adbb..29e8ffc295 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -488,7 +488,7 @@ class GroupsServerHandler(object):
                 profile = yield self.profile_handler.get_profile_from_cache(user_id)
                 user_profile.update(profile)
             except Exception as e:
-                logger.warn("Error getting profile for %s: %s", user_id, e)
+                logger.warning("Error getting profile for %s: %s", user_id, e)
             user_profiles.append(user_profile)
 
         return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 333eb30625..7a0f54ca24 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -525,7 +525,7 @@ class AuthHandler(BaseHandler):
 
         result = None
         if not user_infos:
-            logger.warn("Attempted to login as %s but they do not exist", user_id)
+            logger.warning("Attempted to login as %s but they do not exist", user_id)
         elif len(user_infos) == 1:
             # a single match (possibly not exact)
             result = user_infos.popitem()
@@ -534,7 +534,7 @@ class AuthHandler(BaseHandler):
             result = (user_id, user_infos[user_id])
         else:
             # multiple matches, none of them exact
-            logger.warn(
+            logger.warning(
                 "Attempted to login as %s but it matches more than one user "
                 "inexactly: %r",
                 user_id,
@@ -728,7 +728,7 @@ class AuthHandler(BaseHandler):
 
         result = yield self.validate_hash(password, password_hash)
         if not result:
-            logger.warn("Failed password login for user %s", user_id)
+            logger.warning("Failed password login for user %s", user_id)
             return None
         return user_id
 
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 5f23ee4488..befef2cf3d 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -656,7 +656,7 @@ class DeviceListUpdater(object):
         except (NotRetryingDestination, RequestSendFailed, HttpResponseException):
             # TODO: Remember that we are now out of sync and try again
             # later
-            logger.warn("Failed to handle device list update for %s", user_id)
+            logger.warning("Failed to handle device list update for %s", user_id)
             # We abort on exceptions rather than accepting the update
             # as otherwise synapse will 'forget' that its device list
             # is out of date. If we bail then we will retry the resync
@@ -694,7 +694,7 @@ class DeviceListUpdater(object):
         # up on storing the total list of devices and only handle the
         # delta instead.
         if len(devices) > 1000:
-            logger.warn(
+            logger.warning(
                 "Ignoring device list snapshot for %s as it has >1K devs (%d)",
                 user_id,
                 len(devices),
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index 0043cbea17..73b9e120f5 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -52,7 +52,7 @@ class DeviceMessageHandler(object):
         local_messages = {}
         sender_user_id = content["sender"]
         if origin != get_domain_from_id(sender_user_id):
-            logger.warn(
+            logger.warning(
                 "Dropping device message from %r with spoofed sender %r",
                 origin,
                 sender_user_id,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 08276fdebf..f1547e3039 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -181,7 +181,7 @@ class FederationHandler(BaseHandler):
         try:
             self._sanity_check_event(pdu)
         except SynapseError as err:
-            logger.warn(
+            logger.warning(
                 "[%s %s] Received event failed sanity checks", room_id, event_id
             )
             raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)
@@ -302,7 +302,7 @@ class FederationHandler(BaseHandler):
                 # following.
 
                 if sent_to_us_directly:
-                    logger.warn(
+                    logger.warning(
                         "[%s %s] Rejecting: failed to fetch %d prev events: %s",
                         room_id,
                         event_id,
@@ -406,7 +406,7 @@ class FederationHandler(BaseHandler):
                     state = [event_map[e] for e in six.itervalues(state_map)]
                     auth_chain = list(auth_chains)
                 except Exception:
-                    logger.warn(
+                    logger.warning(
                         "[%s %s] Error attempting to resolve state at missing "
                         "prev_events",
                         room_id,
@@ -519,7 +519,9 @@ class FederationHandler(BaseHandler):
             # We failed to get the missing events, but since we need to handle
             # the case of `get_missing_events` not returning the necessary
             # events anyway, it is safe to simply log the error and continue.
-            logger.warn("[%s %s]: Failed to get prev_events: %s", room_id, event_id, e)
+            logger.warning(
+                "[%s %s]: Failed to get prev_events: %s", room_id, event_id, e
+            )
             return
 
         logger.info(
@@ -546,7 +548,7 @@ class FederationHandler(BaseHandler):
                     yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
                 except FederationError as e:
                     if e.code == 403:
-                        logger.warn(
+                        logger.warning(
                             "[%s %s] Received prev_event %s failed history check.",
                             room_id,
                             event_id,
@@ -1060,7 +1062,7 @@ class FederationHandler(BaseHandler):
             SynapseError if the event does not pass muster
         """
         if len(ev.prev_event_ids()) > 20:
-            logger.warn(
+            logger.warning(
                 "Rejecting event %s which has %i prev_events",
                 ev.event_id,
                 len(ev.prev_event_ids()),
@@ -1068,7 +1070,7 @@ class FederationHandler(BaseHandler):
             raise SynapseError(http_client.BAD_REQUEST, "Too many prev_events")
 
         if len(ev.auth_event_ids()) > 10:
-            logger.warn(
+            logger.warning(
                 "Rejecting event %s which has %i auth_events",
                 ev.event_id,
                 len(ev.auth_event_ids()),
@@ -1204,7 +1206,7 @@ class FederationHandler(BaseHandler):
                 with nested_logging_context(p.event_id):
                     yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
             except Exception as e:
-                logger.warn(
+                logger.warning(
                     "Error handling queued PDU %s from %s: %s", p.event_id, origin, e
                 )
 
@@ -1251,7 +1253,7 @@ class FederationHandler(BaseHandler):
                 builder=builder
             )
         except AuthError as e:
-            logger.warn("Failed to create join to %s because %s", room_id, e)
+            logger.warning("Failed to create join to %s because %s", room_id, e)
             raise e
 
         event_allowed = yield self.third_party_event_rules.check_event_allowed(
@@ -1495,7 +1497,7 @@ class FederationHandler(BaseHandler):
                 room_version, event, context, do_sig_check=False
             )
         except AuthError as e:
-            logger.warn("Failed to create new leave %r because %s", event, e)
+            logger.warning("Failed to create new leave %r because %s", event, e)
             raise e
 
         return event
@@ -1789,7 +1791,7 @@ class FederationHandler(BaseHandler):
                 # cause SynapseErrors in auth.check. We don't want to give up
                 # the attempt to federate altogether in such cases.
 
-                logger.warn("Rejecting %s because %s", e.event_id, err.msg)
+                logger.warning("Rejecting %s because %s", e.event_id, err.msg)
 
                 if e == event:
                     raise
@@ -1845,7 +1847,9 @@ class FederationHandler(BaseHandler):
         try:
             yield self.do_auth(origin, event, context, auth_events=auth_events)
         except AuthError as e:
-            logger.warn("[%s %s] Rejecting: %s", event.room_id, event.event_id, e.msg)
+            logger.warning(
+                "[%s %s] Rejecting: %s", event.room_id, event.event_id, e.msg
+            )
 
             context.rejected = RejectedReason.AUTH_ERROR
 
@@ -1939,7 +1943,7 @@ class FederationHandler(BaseHandler):
             try:
                 event_auth.check(room_version, event, auth_events=current_auth_events)
             except AuthError as e:
-                logger.warn("Soft-failing %r because %s", event, e)
+                logger.warning("Soft-failing %r because %s", event, e)
                 event.internal_metadata.soft_failed = True
 
     @defer.inlineCallbacks
@@ -2038,7 +2042,7 @@ class FederationHandler(BaseHandler):
         try:
             event_auth.check(room_version, event, auth_events=auth_events)
         except AuthError as e:
-            logger.warn("Failed auth resolution for %r because %s", event, e)
+            logger.warning("Failed auth resolution for %r because %s", event, e)
             raise e
 
     @defer.inlineCallbacks
@@ -2432,7 +2436,7 @@ class FederationHandler(BaseHandler):
             try:
                 yield self.auth.check_from_context(room_version, event, context)
             except AuthError as e:
-                logger.warn("Denying new third party invite %r because %s", event, e)
+                logger.warning("Denying new third party invite %r because %s", event, e)
                 raise e
 
             yield self._check_signature(event, context)
@@ -2488,7 +2492,7 @@ class FederationHandler(BaseHandler):
         try:
             yield self.auth.check_from_context(room_version, event, context)
         except AuthError as e:
-            logger.warn("Denying third party invite %r because %s", event, e)
+            logger.warning("Denying third party invite %r because %s", event, e)
             raise e
         yield self._check_signature(event, context)
 
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 46eb9ee88b..92fecbfc44 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -392,7 +392,7 @@ class GroupsLocalHandler(object):
         try:
             user_profile = yield self.profile_handler.get_profile(user_id)
         except Exception as e:
-            logger.warn("No profile for user %s: %s", user_id, e)
+            logger.warning("No profile for user %s: %s", user_id, e)
             user_profile = {}
 
         return {"state": "invite", "user_profile": user_profile}
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index ba99ddf76d..000fbf090f 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -272,7 +272,7 @@ class IdentityHandler(BaseHandler):
             changed = False
             if e.code in (400, 404, 501):
                 # The remote server probably doesn't support unbinding (yet)
-                logger.warn("Received %d response while unbinding threepid", e.code)
+                logger.warning("Received %d response while unbinding threepid", e.code)
             else:
                 logger.error("Failed to unbind threepid on identity server: %s", e)
                 raise SynapseError(500, "Failed to contact identity server")
@@ -403,7 +403,7 @@ class IdentityHandler(BaseHandler):
 
         if self.hs.config.using_identity_server_from_trusted_list:
             # Warn that a deprecated config option is in use
-            logger.warn(
+            logger.warning(
                 'The config option "trust_identity_server_for_password_resets" '
                 'has been replaced by "account_threepid_delegate". '
                 "Please consult the sample config at docs/sample_config.yaml for "
@@ -457,7 +457,7 @@ class IdentityHandler(BaseHandler):
 
         if self.hs.config.using_identity_server_from_trusted_list:
             # Warn that a deprecated config option is in use
-            logger.warn(
+            logger.warning(
                 'The config option "trust_identity_server_for_password_resets" '
                 'has been replaced by "account_threepid_delegate". '
                 "Please consult the sample config at docs/sample_config.yaml for "
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 7908a2d52c..5698e5fee0 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -688,7 +688,7 @@ class EventCreationHandler(object):
         try:
             yield self.auth.check_from_context(room_version, event, context)
         except AuthError as err:
-            logger.warn("Denying new event %r because %s", event, err)
+            logger.warning("Denying new event %r because %s", event, err)
             raise err
 
         # Ensure that we can round trip before trying to persist in db
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 8690f69d45..22e0a04da4 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -275,7 +275,7 @@ class BaseProfileHandler(BaseHandler):
                     ratelimit=False,  # Try to hide that these events aren't atomic.
                 )
             except Exception as e:
-                logger.warn(
+                logger.warning(
                     "Failed to update join event for room %s - %s", room_id, str(e)
                 )
 
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 2816bd8f87..445a08f445 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -922,7 +922,7 @@ class RoomEventSource(object):
 
         from_token = RoomStreamToken.parse(from_key)
         if from_token.topological:
-            logger.warn("Stream has topological part!!!! %r", from_key)
+            logger.warning("Stream has topological part!!!! %r", from_key)
             from_key = "s%s" % (from_token.stream,)
 
         app_service = self.store.get_app_service_by_user_id(user.to_string())
diff --git a/synapse/http/client.py b/synapse/http/client.py
index cdf828a4ff..2df5b383b5 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -535,7 +535,7 @@ class SimpleHttpClient(object):
             b"Content-Length" in resp_headers
             and int(resp_headers[b"Content-Length"][0]) > max_size
         ):
-            logger.warn("Requested URL is too large > %r bytes" % (self.max_size,))
+            logger.warning("Requested URL is too large > %r bytes" % (self.max_size,))
             raise SynapseError(
                 502,
                 "Requested file is too large > %r bytes" % (self.max_size,),
@@ -543,7 +543,7 @@ class SimpleHttpClient(object):
             )
 
         if response.code > 299:
-            logger.warn("Got %d when downloading %s" % (response.code, url))
+            logger.warning("Got %d when downloading %s" % (response.code, url))
             raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN)
 
         # TODO: if our Content-Type is HTML or something, just read the first
diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py
index 3fe4ffb9e5..021b233a7d 100644
--- a/synapse/http/federation/srv_resolver.py
+++ b/synapse/http/federation/srv_resolver.py
@@ -148,7 +148,7 @@ class SrvResolver(object):
             # Try something in the cache, else re-raise
             cache_entry = self._cache.get(service_name, None)
             if cache_entry:
-                logger.warn(
+                logger.warning(
                     "Failed to resolve %r, falling back to cache. %r", service_name, e
                 )
                 return list(cache_entry)
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 3f7c93ffcb..691380abda 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -149,7 +149,7 @@ def _handle_json_response(reactor, timeout_sec, request, response):
 
         body = yield make_deferred_yieldable(d)
     except Exception as e:
-        logger.warn(
+        logger.warning(
             "{%s} [%s] Error reading response: %s",
             request.txn_id,
             request.destination,
@@ -457,7 +457,7 @@ class MatrixFederationHttpClient(object):
                         except Exception as e:
                             # Eh, we're already going to raise an exception so lets
                             # ignore if this fails.
-                            logger.warn(
+                            logger.warning(
                                 "{%s} [%s] Failed to get error response: %s %s: %s",
                                 request.txn_id,
                                 request.destination,
@@ -478,7 +478,7 @@ class MatrixFederationHttpClient(object):
 
                     break
                 except RequestSendFailed as e:
-                    logger.warn(
+                    logger.warning(
                         "{%s} [%s] Request failed: %s %s: %s",
                         request.txn_id,
                         request.destination,
@@ -513,7 +513,7 @@ class MatrixFederationHttpClient(object):
                         raise
 
                 except Exception as e:
-                    logger.warn(
+                    logger.warning(
                         "{%s} [%s] Request failed: %s %s: %s",
                         request.txn_id,
                         request.destination,
@@ -889,7 +889,7 @@ class MatrixFederationHttpClient(object):
             d.addTimeout(self.default_timeout, self.reactor)
             length = yield make_deferred_yieldable(d)
         except Exception as e:
-            logger.warn(
+            logger.warning(
                 "{%s} [%s] Error reading response: %s",
                 request.txn_id,
                 request.destination,
diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py
index 46af27c8f6..58f9cc61c8 100644
--- a/synapse/http/request_metrics.py
+++ b/synapse/http/request_metrics.py
@@ -170,7 +170,7 @@ class RequestMetrics(object):
             tag = context.tag
 
             if context != self.start_context:
-                logger.warn(
+                logger.warning(
                     "Context have unexpectedly changed %r, %r",
                     context,
                     self.start_context,
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 2ccb210fd6..943d12c907 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -454,7 +454,7 @@ def respond_with_json(
     # the Deferred fires, but since the flag is RIGHT THERE it seems like
     # a waste.
     if request._disconnected:
-        logger.warn(
+        logger.warning(
             "Not sending response to request %s, already disconnected.", request
         )
         return
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 274c1a6a87..e9a5e46ced 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -219,13 +219,13 @@ def parse_json_value_from_request(request, allow_empty_body=False):
     try:
         content_unicode = content_bytes.decode("utf8")
     except UnicodeDecodeError:
-        logger.warn("Unable to decode UTF-8")
+        logger.warning("Unable to decode UTF-8")
         raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
 
     try:
         content = json.loads(content_unicode)
     except Exception as e:
-        logger.warn("Unable to parse JSON: %s", e)
+        logger.warning("Unable to parse JSON: %s", e)
         raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
 
     return content
diff --git a/synapse/http/site.py b/synapse/http/site.py
index df5274c177..ff8184a3d0 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -199,7 +199,7 @@ class SynapseRequest(Request):
         # It's useful to log it here so that we can get an idea of when
         # the client disconnects.
         with PreserveLoggingContext(self.logcontext):
-            logger.warn(
+            logger.warning(
                 "Error processing request %r: %s %s", self, reason.type, reason.value
             )
 
@@ -305,7 +305,7 @@ class SynapseRequest(Request):
         try:
             self.request_metrics.stop(self.finish_time, self.code, self.sentLength)
         except Exception as e:
-            logger.warn("Failed to stop metrics: %r", e)
+            logger.warning("Failed to stop metrics: %r", e)
 
 
 class XForwardedForRequest(SynapseRequest):
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 370000e377..2c1fb9ddac 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -294,7 +294,7 @@ class LoggingContext(object):
         """Enters this logging context into thread local storage"""
         old_context = self.set_current_context(self)
         if self.previous_context != old_context:
-            logger.warn(
+            logger.warning(
                 "Expected previous context %r, found %r",
                 self.previous_context,
                 old_context,
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 6299587808..23d3678420 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -246,7 +246,7 @@ class HttpPusher(object):
                     # we really only give up so that if the URL gets
                     # fixed, we don't suddenly deliver a load
                     # of old notifications.
-                    logger.warn(
+                    logger.warning(
                         "Giving up on a notification to user %s, " "pushkey %s",
                         self.user_id,
                         self.pushkey,
@@ -299,7 +299,7 @@ class HttpPusher(object):
                 if pk != self.pushkey:
                     # for sanity, we only remove the pushkey if it
                     # was the one we actually sent...
-                    logger.warn(
+                    logger.warning(
                         ("Ignoring rejected pushkey %s because we" " didn't send it"),
                         pk,
                     )
diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py
index 5ed9147de4..b1587183a8 100644
--- a/synapse/push/push_rule_evaluator.py
+++ b/synapse/push/push_rule_evaluator.py
@@ -117,7 +117,7 @@ class PushRuleEvaluatorForEvent(object):
                 pattern = UserID.from_string(user_id).localpart
 
         if not pattern:
-            logger.warn("event_match condition with no pattern")
+            logger.warning("event_match condition with no pattern")
             return False
 
         # XXX: optimisation: cache our pattern regexps
@@ -173,7 +173,7 @@ def _glob_matches(glob, value, word_boundary=False):
             regex_cache[(glob, word_boundary)] = r
         return r.search(value)
     except re.error:
-        logger.warn("Failed to parse glob to regex: %r", glob)
+        logger.warning("Failed to parse glob to regex: %r", glob)
         return False
 
 
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 9be37cd998..c8056b0c0c 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -180,7 +180,7 @@ class ReplicationEndpoint(object):
                         if e.code != 504 or not cls.RETRY_ON_TIMEOUT:
                             raise
 
-                    logger.warn("%s request timed out", cls.NAME)
+                    logger.warning("%s request timed out", cls.NAME)
 
                     # If we timed out we probably don't need to worry about backing
                     # off too much, but lets just wait a little anyway.
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py
index b5f5f13a62..cc1f249740 100644
--- a/synapse/replication/http/membership.py
+++ b/synapse/replication/http/membership.py
@@ -144,7 +144,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
             # The 'except' clause is very broad, but we need to
             # capture everything from DNS failures upwards
             #
-            logger.warn("Failed to reject invite: %s", e)
+            logger.warning("Failed to reject invite: %s", e)
 
             await self.store.locally_reject_invite(user_id, room_id)
             ret = {}
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index a44ceb00e7..563ce0fc53 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -168,7 +168,7 @@ class ReplicationClientHandler(object):
         if self.connection:
             self.connection.send_command(cmd)
         else:
-            logger.warn("Queuing command as not connected: %r", cmd.NAME)
+            logger.warning("Queuing command as not connected: %r", cmd.NAME)
             self.pending_commands.append(cmd)
 
     def send_federation_ack(self, token):
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 5ffdf2675d..b64f3f44b5 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -249,7 +249,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         return handler(cmd)
 
     def close(self):
-        logger.warn("[%s] Closing connection", self.id())
+        logger.warning("[%s] Closing connection", self.id())
         self.time_we_closed = self.clock.time_msec()
         self.transport.loseConnection()
         self.on_connection_closed()
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 939418ee2b..5c2a2eb593 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -286,7 +286,7 @@ class PurgeHistoryRestServlet(RestServlet):
                 room_id, stream_ordering
             )
             if not r:
-                logger.warn(
+                logger.warning(
                     "[purge] purging events not possible: No event found "
                     "(received_ts %i => stream_ordering %i)",
                     ts,
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 8414af08cb..39a5c5e9de 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -221,7 +221,7 @@ class LoginRestServlet(RestServlet):
                 medium, address
             )
             if not user_id:
-                logger.warn(
+                logger.warning(
                     "unknown 3pid identifier medium %s, address %r", medium, address
                 )
                 raise LoginError(403, "", errcode=Codes.FORBIDDEN)
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 80cf7126a0..332d7138b1 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -71,7 +71,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
     def on_POST(self, request):
         if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "User password resets have been disabled due to lack of email config"
                 )
             raise SynapseError(
@@ -162,7 +162,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
             )
         if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "Password reset emails have been disabled due to lack of an email config"
                 )
             raise SynapseError(
@@ -183,7 +183,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
             # Perform a 302 redirect if next_link is set
             if next_link:
                 if next_link.startswith("file:///"):
-                    logger.warn(
+                    logger.warning(
                         "Not redirecting to next_link as it is a local file: address"
                     )
                 else:
@@ -350,7 +350,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
     def on_POST(self, request):
         if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "Adding emails have been disabled due to lack of an email config"
                 )
             raise SynapseError(
@@ -441,7 +441,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
             raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE)
 
         if not self.hs.config.account_threepid_delegate_msisdn:
-            logger.warn(
+            logger.warning(
                 "No upstream msisdn account_threepid_delegate configured on the server to "
                 "handle this request"
             )
@@ -488,7 +488,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
     def on_GET(self, request):
         if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "Adding emails have been disabled due to lack of an email config"
                 )
             raise SynapseError(
@@ -515,7 +515,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
             # Perform a 302 redirect if next_link is set
             if next_link:
                 if next_link.startswith("file:///"):
-                    logger.warn(
+                    logger.warning(
                         "Not redirecting to next_link as it is a local file: address"
                     )
                 else:
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 4f24a124a6..6c7d25d411 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -106,7 +106,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
     def on_POST(self, request):
         if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.hs.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "Email registration has been disabled due to lack of email config"
                 )
             raise SynapseError(
@@ -207,7 +207,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
             )
 
         if not self.hs.config.account_threepid_delegate_msisdn:
-            logger.warn(
+            logger.warning(
                 "No upstream msisdn account_threepid_delegate configured on the server to "
                 "handle this request"
             )
@@ -266,7 +266,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
             )
         if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
             if self.config.local_threepid_handling_disabled_due_to_email_config:
-                logger.warn(
+                logger.warning(
                     "User registration via email has been disabled due to lack of email config"
                 )
             raise SynapseError(
@@ -287,7 +287,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
             # Perform a 302 redirect if next_link is set
             if next_link:
                 if next_link.startswith("file:///"):
-                    logger.warn(
+                    logger.warning(
                         "Not redirecting to next_link as it is a local file: address"
                     )
                 else:
@@ -480,7 +480,7 @@ class RegisterRestServlet(RestServlet):
             # a password to work around a client bug where it sent
             # the 'initial_device_display_name' param alone, wiping out
             # the original registration params
-            logger.warn("Ignoring initial_device_display_name without password")
+            logger.warning("Ignoring initial_device_display_name without password")
             del body["initial_device_display_name"]
 
         session_id = self.auth_handler.get_session_id(body)
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index 541a6b0e10..ccd8b17b23 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -394,7 +394,7 @@ class SyncRestServlet(RestServlet):
             # We've had bug reports that events were coming down under the
             # wrong room.
             if event.room_id != room.room_id:
-                logger.warn(
+                logger.warning(
                     "Event %r is under room %r instead of %r",
                     event.event_id,
                     room.room_id,
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index b972e152a9..bd9186fe50 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -363,7 +363,7 @@ class MediaRepository(object):
                     },
                 )
             except RequestSendFailed as e:
-                logger.warn(
+                logger.warning(
                     "Request failed fetching remote media %s/%s: %r",
                     server_name,
                     media_id,
@@ -372,7 +372,7 @@ class MediaRepository(object):
                 raise SynapseError(502, "Failed to fetch remote media")
 
             except HttpResponseException as e:
-                logger.warn(
+                logger.warning(
                     "HTTP error fetching remote media %s/%s: %s",
                     server_name,
                     media_id,
@@ -383,10 +383,12 @@ class MediaRepository(object):
                 raise SynapseError(502, "Failed to fetch remote media")
 
             except SynapseError:
-                logger.warn("Failed to fetch remote media %s/%s", server_name, media_id)
+                logger.warning(
+                    "Failed to fetch remote media %s/%s", server_name, media_id
+                )
                 raise
             except NotRetryingDestination:
-                logger.warn("Not retrying destination %r", server_name)
+                logger.warning("Not retrying destination %r", server_name)
                 raise SynapseError(502, "Failed to fetch remote media")
             except Exception:
                 logger.exception(
@@ -691,7 +693,7 @@ class MediaRepository(object):
                 try:
                     os.remove(full_path)
                 except OSError as e:
-                    logger.warn("Failed to remove file: %r", full_path)
+                    logger.warning("Failed to remove file: %r", full_path)
                     if e.errno == errno.ENOENT:
                         pass
                     else:
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 094ebad770..5a25b6b3fc 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -136,7 +136,7 @@ class PreviewUrlResource(DirectServeResource):
                         match = False
                         continue
             if match:
-                logger.warn("URL %s blocked by url_blacklist entry %s", url, entry)
+                logger.warning("URL %s blocked by url_blacklist entry %s", url, entry)
                 raise SynapseError(
                     403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN
                 )
@@ -208,7 +208,7 @@ class PreviewUrlResource(DirectServeResource):
                 og["og:image:width"] = dims["width"]
                 og["og:image:height"] = dims["height"]
             else:
-                logger.warn("Couldn't get dims for %s" % url)
+                logger.warning("Couldn't get dims for %s" % url)
 
             # define our OG response for this media
         elif _is_html(media_info["media_type"]):
@@ -256,7 +256,7 @@ class PreviewUrlResource(DirectServeResource):
                         og["og:image:width"] = dims["width"]
                         og["og:image:height"] = dims["height"]
                     else:
-                        logger.warn("Couldn't get dims for %s", og["og:image"])
+                        logger.warning("Couldn't get dims for %s", og["og:image"])
 
                     og["og:image"] = "mxc://%s/%s" % (
                         self.server_name,
@@ -267,7 +267,7 @@ class PreviewUrlResource(DirectServeResource):
                 else:
                     del og["og:image"]
         else:
-            logger.warn("Failed to find any OG data in %s", url)
+            logger.warning("Failed to find any OG data in %s", url)
             og = {}
 
         logger.debug("Calculated OG for %s as %s", url, og)
@@ -319,7 +319,7 @@ class PreviewUrlResource(DirectServeResource):
                 )
             except Exception as e:
                 # FIXME: pass through 404s and other error messages nicely
-                logger.warn("Error downloading %s: %r", url, e)
+                logger.warning("Error downloading %s: %r", url, e)
 
                 raise SynapseError(
                     500,
@@ -400,7 +400,7 @@ class PreviewUrlResource(DirectServeResource):
             except OSError as e:
                 # If the path doesn't exist, meh
                 if e.errno != errno.ENOENT:
-                    logger.warn("Failed to remove media: %r: %s", media_id, e)
+                    logger.warning("Failed to remove media: %r: %s", media_id, e)
                     continue
 
             removed_media.append(media_id)
@@ -432,7 +432,7 @@ class PreviewUrlResource(DirectServeResource):
             except OSError as e:
                 # If the path doesn't exist, meh
                 if e.errno != errno.ENOENT:
-                    logger.warn("Failed to remove media: %r: %s", media_id, e)
+                    logger.warning("Failed to remove media: %r: %s", media_id, e)
                     continue
 
             try:
@@ -448,7 +448,7 @@ class PreviewUrlResource(DirectServeResource):
             except OSError as e:
                 # If the path doesn't exist, meh
                 if e.errno != errno.ENOENT:
-                    logger.warn("Failed to remove media: %r: %s", media_id, e)
+                    logger.warning("Failed to remove media: %r: %s", media_id, e)
                     continue
 
             removed_media.append(media_id)
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index 08329884ac..931ce79be8 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -182,7 +182,7 @@ class ThumbnailResource(DirectServeResource):
         if file_path:
             yield respond_with_file(request, desired_type, file_path)
         else:
-            logger.warn("Failed to generate thumbnail")
+            logger.warning("Failed to generate thumbnail")
             respond_404(request)
 
     @defer.inlineCallbacks
@@ -245,7 +245,7 @@ class ThumbnailResource(DirectServeResource):
         if file_path:
             yield respond_with_file(request, desired_type, file_path)
         else:
-            logger.warn("Failed to generate thumbnail")
+            logger.warning("Failed to generate thumbnail")
             respond_404(request)
 
     @defer.inlineCallbacks
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index c0e7f475c9..9fae2e0afe 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -83,7 +83,7 @@ class ResourceLimitsServerNotices(object):
         room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id)
 
         if not room_id:
-            logger.warn("Failed to get server notices room")
+            logger.warning("Failed to get server notices room")
             return
 
         yield self._check_and_set_tags(user_id, room_id)
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index f5906fcd54..1a2b7ebe25 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -494,7 +494,7 @@ class SQLBaseStore(object):
         exception_callbacks = []
 
         if LoggingContext.current_context() == LoggingContext.sentinel:
-            logger.warn("Starting db txn '%s' from sentinel context", desc)
+            logger.warning("Starting db txn '%s' from sentinel context", desc)
 
         try:
             result = yield self.runWithConnection(
@@ -532,7 +532,7 @@ class SQLBaseStore(object):
         """
         parent_context = LoggingContext.current_context()
         if parent_context == LoggingContext.sentinel:
-            logger.warn(
+            logger.warning(
                 "Starting db connection from sentinel context: metrics will be lost"
             )
             parent_context = None
@@ -719,7 +719,7 @@ class SQLBaseStore(object):
                     raise
 
                 # presumably we raced with another transaction: let's retry.
-                logger.warn(
+                logger.warning(
                     "IntegrityError when upserting into %s; retrying: %s", table, e
                 )
 
diff --git a/synapse/storage/data_stores/main/pusher.py b/synapse/storage/data_stores/main/pusher.py
index f005c1ae0a..d76861cdc0 100644
--- a/synapse/storage/data_stores/main/pusher.py
+++ b/synapse/storage/data_stores/main/pusher.py
@@ -44,7 +44,7 @@ class PusherWorkerStore(SQLBaseStore):
 
                 r["data"] = json.loads(dataJson)
             except Exception as e:
-                logger.warn(
+                logger.warning(
                     "Invalid JSON in data for pusher %d: %s, %s",
                     r["id"],
                     dataJson,
diff --git a/synapse/storage/data_stores/main/search.py b/synapse/storage/data_stores/main/search.py
index 0e08497452..a59b8331e1 100644
--- a/synapse/storage/data_stores/main/search.py
+++ b/synapse/storage/data_stores/main/search.py
@@ -196,7 +196,7 @@ class SearchBackgroundUpdateStore(BackgroundUpdateStore):
                         " ON event_search USING GIN (vector)"
                     )
                 except psycopg2.ProgrammingError as e:
-                    logger.warn(
+                    logger.warning(
                         "Ignoring error %r when trying to switch from GIST to GIN", e
                     )
 
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index b60a604474..5c4de2e69f 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -309,7 +309,7 @@ class Linearizer(object):
                 )
 
             else:
-                logger.warn(
+                logger.warning(
                     "Unexpected exception waiting for linearizer lock %r for key %r",
                     self.name,
                     key,
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
index 43fd65d693..da5077b471 100644
--- a/synapse/util/caches/__init__.py
+++ b/synapse/util/caches/__init__.py
@@ -107,7 +107,7 @@ def register_cache(cache_type, cache_name, cache, collect_callback=None):
                 if collect_callback:
                     collect_callback()
             except Exception as e:
-                logger.warn("Error calculating metrics for %s: %s", cache_name, e)
+                logger.warning("Error calculating metrics for %s: %s", cache_name, e)
                 raise
 
             yield GaugeMetricFamily("__unused", "")
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 4b1bcdf23c..3286804322 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -119,7 +119,7 @@ class Measure(object):
         context = LoggingContext.current_context()
 
         if context != self.start_context:
-            logger.warn(
+            logger.warning(
                 "Context has unexpectedly changed from '%s' to '%s'. (%r)",
                 self.start_context,
                 context,
@@ -128,7 +128,7 @@ class Measure(object):
             return
 
         if not context:
-            logger.warn("Expected context. (%r)", self.name)
+            logger.warning("Expected context. (%r)", self.name)
             return
 
         current = context.get_resource_usage()
@@ -140,7 +140,7 @@ class Measure(object):
             block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec)
             block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec)
         except ValueError:
-            logger.warn(
+            logger.warning(
                 "Failed to save metrics! OLD: %r, NEW: %r", self.start_usage, current
             )
 
diff --git a/synapse/util/rlimit.py b/synapse/util/rlimit.py
index 6c0f2bb0cf..207cd17c2a 100644
--- a/synapse/util/rlimit.py
+++ b/synapse/util/rlimit.py
@@ -33,4 +33,4 @@ def change_resource_limit(soft_file_no):
             resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
         )
     except (ValueError, resource.error) as e:
-        logger.warn("Failed to set file or core limit: %s", e)
+        logger.warning("Failed to set file or core limit: %s", e)
-- 
cgit 1.4.1
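
A brief aside on the wholesale rename above, stated only in terms of the Python standard library: `logging.Logger.warn` is a deprecated alias for `logging.Logger.warning`, and calling it emits a DeprecationWarning on Python 3. A minimal sketch of the two spellings:

    import logging

    logger = logging.getLogger(__name__)

    logger.warning("disk is nearly full")  # supported spelling
    logger.warn("disk is nearly full")     # deprecated alias; emits DeprecationWarning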


From b2ff8c305f66b796ba06e9d481f156badf0f31d8 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 31 Oct 2019 11:32:53 +0000
Subject: Newsfile

---
 changelog.d/6307.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6307.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6307.bugfix b/changelog.d/6307.bugfix
new file mode 100644
index 0000000000..f2917c5053
--- /dev/null
+++ b/changelog.d/6307.bugfix
@@ -0,0 +1 @@
+Fix `/purge_room` admin API.
-- 
cgit 1.4.1


From 3a74c03ffb5532831c8412b52a2d682bdeb9f322 Mon Sep 17 00:00:00 2001
From: Travis Ralston 
Date: Thu, 31 Oct 2019 09:16:14 -0600
Subject: Expose some homeserver functionality to spam checkers (#6259)

* Offer the homeserver instance to the spam checker

* Newsfile

* Linting

* Expose a Spam Checker API instead of passing the homeserver object

* Alter changelog

* s/hs/api
---
 changelog.d/6259.misc                |  1 +
 synapse/events/spamcheck.py          | 14 +++++++++-
 synapse/spam_checker_api/__init__.py | 51 ++++++++++++++++++++++++++++++++++++
 3 files changed, 65 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6259.misc
 create mode 100644 synapse/spam_checker_api/__init__.py

(limited to 'changelog.d')

diff --git a/changelog.d/6259.misc b/changelog.d/6259.misc
new file mode 100644
index 0000000000..3ff81b1ac7
--- /dev/null
+++ b/changelog.d/6259.misc
@@ -0,0 +1 @@
+Expose some homeserver functionality to spam checkers.
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 129771f183..5a907718d6 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,6 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import inspect
+
+from synapse.spam_checker_api import SpamCheckerApi
+
 
 class SpamChecker(object):
     def __init__(self, hs):
@@ -26,7 +31,14 @@ class SpamChecker(object):
             pass
 
         if module is not None:
-            self.spam_checker = module(config=config)
+            # Older spam checkers don't accept the `api` argument, so we
+            # try and detect support.
+            spam_args = inspect.getfullargspec(module)
+            if "api" in spam_args.args:
+                api = SpamCheckerApi(hs)
+                self.spam_checker = module(config=config, api=api)
+            else:
+                self.spam_checker = module(config=config)
 
     def check_event_for_spam(self, event):
         """Checks if a given event is considered "spammy" by this server.
diff --git a/synapse/spam_checker_api/__init__.py b/synapse/spam_checker_api/__init__.py
new file mode 100644
index 0000000000..efcc10f808
--- /dev/null
+++ b/synapse/spam_checker_api/__init__.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from twisted.internet import defer
+
+from synapse.storage.state import StateFilter
+
+logger = logging.getLogger(__name__)
+
+
+class SpamCheckerApi(object):
+    """A proxy object that gets passed to spam checkers so they can get
+    access to rooms and other relevant information.
+    """
+
+    def __init__(self, hs):
+        self.hs = hs
+
+        self._store = hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def get_state_events_in_room(self, room_id, types):
+        """Gets state events for the given room.
+
+        Args:
+            room_id (string): The room ID to get state events in.
+            types (tuple): The event type and state key (using None
+                to represent 'any') of the room state to acquire.
+
+        Returns:
+            twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]:
+                The filtered state events in the room.
+        """
+        state_ids = yield self._store.get_filtered_current_state_ids(
+            room_id=room_id, state_filter=StateFilter.from_types(types)
+        )
+        state = yield self._store.get_events(state_ids.values())
+        return state.values()
-- 
cgit 1.4.1
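
To make the argument-detection logic above concrete, here is a minimal sketch of a third-party spam checker module that accepts the new `api` argument. The module name, config handling, and spam heuristic are hypothetical; only the `config`/`api` constructor arguments and the `check_event_for_spam` hook come from the patch above:

    # example_spam_checker.py -- a hypothetical module, not shipped with Synapse
    class ExampleSpamChecker(object):
        def __init__(self, config, api=None):
            # With this commit, Synapse passes a SpamCheckerApi as `api`;
            # older versions call module(config=config) only, which the
            # getfullargspec check above is there to detect.
            self.config = config
            self.api = api

        def check_event_for_spam(self, event):
            # Returning True tells Synapse to treat the event as spam.
            body = event.content.get("body", "")
            return "all your base" in body

With the `api` handle such a module could, for example, yield on `api.get_state_events_in_room(room_id, (("m.room.member", None),))` (a Deferred) to inspect room membership before classifying an event.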


From 020add50997f697c7847ac84b86b457ba2f3e32d Mon Sep 17 00:00:00 2001
From: Amber Brown 
Date: Fri, 1 Nov 2019 02:43:24 +1100
Subject: Update black to 19.10b0 (#6304)

* update version of black and also fix the mypy config being overridden
---
 changelog.d/6304.misc                              |  1 +
 contrib/experiments/test_messaging.py              |  4 +--
 mypy.ini                                           | 11 ++++---
 synapse/federation/sender/per_destination_queue.py | 11 ++++---
 synapse/handlers/account_data.py                   |  7 ++--
 synapse/handlers/appservice.py                     |  5 ++-
 synapse/handlers/e2e_keys.py                       | 37 ++++++++++++++--------
 synapse/handlers/federation.py                     |  9 +++---
 synapse/handlers/initial_sync.py                   |  4 +--
 synapse/handlers/message.py                        | 14 ++++----
 synapse/handlers/pagination.py                     | 13 ++++----
 synapse/handlers/register.py                       |  4 +--
 synapse/handlers/room.py                           | 29 +++++++++--------
 synapse/handlers/room_member.py                    | 35 ++++++++++----------
 synapse/handlers/search.py                         | 12 +++----
 synapse/handlers/stats.py                          |  5 ++-
 synapse/handlers/sync.py                           | 16 ++++++----
 synapse/logging/_structured.py                     |  2 +-
 synapse/push/bulk_push_rule_evaluator.py           |  7 ++--
 synapse/push/emailpusher.py                        | 14 ++++----
 synapse/push/httppusher.py                         | 14 ++++----
 synapse/push/pusherpool.py                         |  4 +--
 synapse/rest/client/v1/login.py                    | 13 ++++----
 synapse/rest/client/v2_alpha/account.py            |  4 +--
 synapse/rest/client/v2_alpha/register.py           |  4 +--
 synapse/rest/key/v2/remote_key_resource.py         |  2 +-
 synapse/server.pyi                                 | 16 +++++-----
 synapse/storage/data_stores/main/__init__.py       |  4 +--
 .../storage/data_stores/main/event_push_actions.py |  2 +-
 synapse/storage/data_stores/main/events.py         |  8 ++---
 .../storage/data_stores/main/events_bg_updates.py  |  2 +-
 synapse/storage/data_stores/main/group_server.py   |  4 +--
 .../data_stores/main/monthly_active_users.py       |  2 +-
 synapse/storage/data_stores/main/push_rule.py      |  2 +-
 synapse/storage/data_stores/main/registration.py   |  2 +-
 synapse/storage/data_stores/main/roommember.py     |  2 +-
 synapse/storage/data_stores/main/search.py         |  2 +-
 synapse/storage/data_stores/main/state.py          | 20 ++++++------
 synapse/storage/data_stores/main/stats.py          |  4 +--
 synapse/storage/util/id_generators.py              |  2 +-
 tox.ini                                            |  4 +--
 41 files changed, 191 insertions(+), 166 deletions(-)
 create mode 100644 changelog.d/6304.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6304.misc b/changelog.d/6304.misc
new file mode 100644
index 0000000000..20372b4f7c
--- /dev/null
+++ b/changelog.d/6304.misc
@@ -0,0 +1 @@
+Update the version of black used to 19.10b0.
diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py
index 6b22400a60..3bbbcfa1b4 100644
--- a/contrib/experiments/test_messaging.py
+++ b/contrib/experiments/test_messaging.py
@@ -78,7 +78,7 @@ class InputOutput(object):
             m = re.match("^join (\S+)$", line)
             if m:
                 # The `sender` wants to join a room.
-                room_name, = m.groups()
+                (room_name,) = m.groups()
                 self.print_line("%s joining %s" % (self.user, room_name))
                 self.server.join_room(room_name, self.user, self.user)
                 # self.print_line("OK.")
@@ -105,7 +105,7 @@ class InputOutput(object):
             m = re.match("^backfill (\S+)$", line)
             if m:
                 # we want to backfill a room
-                room_name, = m.groups()
+                (room_name,) = m.groups()
                 self.print_line("backfill %s" % room_name)
                 self.server.backfill(room_name)
                 return
diff --git a/mypy.ini b/mypy.ini
index ffadaddc0b..1d77c0ecc8 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -1,8 +1,11 @@
 [mypy]
-namespace_packages=True
-plugins=mypy_zope:plugin
-follow_imports=skip
-mypy_path=stubs
+namespace_packages = True
+plugins = mypy_zope:plugin
+follow_imports = normal
+check_untyped_defs = True
+show_error_codes = True
+show_traceback = True
+mypy_path = stubs
 
 [mypy-zope]
 ignore_missing_imports = True
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index cc75c39476..b754a09d7a 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -192,15 +192,16 @@ class PerDestinationQueue(object):
                 # We have to keep 2 free slots for presence and rr_edus
                 limit = MAX_EDUS_PER_TRANSACTION - 2
 
-                device_update_edus, dev_list_id = (
-                    yield self._get_device_update_edus(limit)
+                device_update_edus, dev_list_id = yield self._get_device_update_edus(
+                    limit
                 )
 
                 limit -= len(device_update_edus)
 
-                to_device_edus, device_stream_id = (
-                    yield self._get_to_device_message_edus(limit)
-                )
+                (
+                    to_device_edus,
+                    device_stream_id,
+                ) = yield self._get_to_device_message_edus(limit)
 
                 pending_edus = device_update_edus + to_device_edus
 
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 38bc67191c..2d7e6df6e4 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -38,9 +38,10 @@ class AccountDataEventSource(object):
                 {"type": "m.tag", "content": {"tags": room_tags}, "room_id": room_id}
             )
 
-        account_data, room_account_data = (
-            yield self.store.get_updated_account_data_for_user(user_id, last_stream_id)
-        )
+        (
+            account_data,
+            room_account_data,
+        ) = yield self.store.get_updated_account_data_for_user(user_id, last_stream_id)
 
         for account_data_type, content in account_data.items():
             results.append({"type": account_data_type, "content": content})
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 3e9b298154..fe62f78e67 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -73,7 +73,10 @@ class ApplicationServicesHandler(object):
             try:
                 limit = 100
                 while True:
-                    upper_bound, events = yield self.store.get_new_events_for_appservice(
+                    (
+                        upper_bound,
+                        events,
+                    ) = yield self.store.get_new_events_for_appservice(
                         self.current_max, limit
                     )
 
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 5ea54f60be..0449034a4e 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -119,9 +119,10 @@ class E2eKeysHandler(object):
                 else:
                     query_list.append((user_id, None))
 
-            user_ids_not_in_cache, remote_results = (
-                yield self.store.get_user_devices_from_cache(query_list)
-            )
+            (
+                user_ids_not_in_cache,
+                remote_results,
+            ) = yield self.store.get_user_devices_from_cache(query_list)
             for user_id, devices in iteritems(remote_results):
                 user_devices = results.setdefault(user_id, {})
                 for device_id, device in iteritems(devices):
@@ -688,17 +689,21 @@ class E2eKeysHandler(object):
 
         try:
             # get our self-signing key to verify the signatures
-            _, self_signing_key_id, self_signing_verify_key = yield self._get_e2e_cross_signing_verify_key(
-                user_id, "self_signing"
-            )
+            (
+                _,
+                self_signing_key_id,
+                self_signing_verify_key,
+            ) = yield self._get_e2e_cross_signing_verify_key(user_id, "self_signing")
 
             # get our master key, since we may have received a signature of it.
             # We need to fetch it here so that we know what its key ID is, so
             # that we can check if a signature that was sent is a signature of
             # the master key or of a device
-            master_key, _, master_verify_key = yield self._get_e2e_cross_signing_verify_key(
-                user_id, "master"
-            )
+            (
+                master_key,
+                _,
+                master_verify_key,
+            ) = yield self._get_e2e_cross_signing_verify_key(user_id, "master")
 
             # fetch our stored devices.  This is used to 1. verify
             # signatures on the master key, and 2. to compare with what
@@ -838,9 +843,11 @@ class E2eKeysHandler(object):
 
         try:
             # get our user-signing key to verify the signatures
-            user_signing_key, user_signing_key_id, user_signing_verify_key = yield self._get_e2e_cross_signing_verify_key(
-                user_id, "user_signing"
-            )
+            (
+                user_signing_key,
+                user_signing_key_id,
+                user_signing_verify_key,
+            ) = yield self._get_e2e_cross_signing_verify_key(user_id, "user_signing")
         except SynapseError as e:
             failure = _exception_to_failure(e)
             for user, devicemap in signatures.items():
@@ -859,7 +866,11 @@ class E2eKeysHandler(object):
             try:
                 # get the target user's master key, to make sure it matches
                 # what was sent
-                master_key, master_key_id, _ = yield self._get_e2e_cross_signing_verify_key(
+                (
+                    master_key,
+                    master_key_id,
+                    _,
+                ) = yield self._get_e2e_cross_signing_verify_key(
                     target_user, "master", user_id
                 )
 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index d2d9f8c26a..a932d3085f 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -352,10 +352,11 @@ class FederationHandler(BaseHandler):
                             # note that if any of the missing prevs share missing state or
                             # auth events, the requests to fetch those events are deduped
                             # by the get_pdu_cache in federation_client.
-                            remote_state, got_auth_chain = (
-                                yield self.federation_client.get_state_for_room(
-                                    origin, room_id, p
-                                )
+                            (
+                                remote_state,
+                                got_auth_chain,
+                            ) = yield self.federation_client.get_state_for_room(
+                                origin, room_id, p
                             )
 
                             # we want the state *after* p; get_state_for_room returns the
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 49c9e031f9..81dce96f4b 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -128,8 +128,8 @@ class InitialSyncHandler(BaseHandler):
 
         tags_by_room = yield self.store.get_tags_for_user(user_id)
 
-        account_data, account_data_by_room = (
-            yield self.store.get_account_data_for_user(user_id)
+        account_data, account_data_by_room = yield self.store.get_account_data_for_user(
+            user_id
         )
 
         public_room_ids = yield self.store.get_public_room_ids()
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 0d546d2487..d682dc2b7a 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -76,9 +76,10 @@ class MessageHandler(object):
         Raises:
             SynapseError if something went wrong.
         """
-        membership, membership_event_id = yield self.auth.check_in_room_or_world_readable(
-            room_id, user_id
-        )
+        (
+            membership,
+            membership_event_id,
+        ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
 
         if membership == Membership.JOIN:
             data = yield self.state.get_current_state(room_id, event_type, state_key)
@@ -153,9 +154,10 @@ class MessageHandler(object):
                     % (user_id, room_id, at_token),
                 )
         else:
-            membership, membership_event_id = (
-                yield self.auth.check_in_room_or_world_readable(room_id, user_id)
-            )
+            (
+                membership,
+                membership_event_id,
+            ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
 
             if membership == Membership.JOIN:
                 state_ids = yield self.store.get_filtered_current_state_ids(
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index b7185fe7a0..97f15a1c32 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -212,9 +212,10 @@ class PaginationHandler(object):
         source_config = pagin_config.get_source_config("room")
 
         with (yield self.pagination_lock.read(room_id)):
-            membership, member_event_id = yield self.auth.check_in_room_or_world_readable(
-                room_id, user_id
-            )
+            (
+                membership,
+                member_event_id,
+            ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
 
             if source_config.direction == "b":
                 # if we're going backwards, we might need to backfill. This
@@ -297,10 +298,8 @@ class PaginationHandler(object):
         }
 
         if state:
-            chunk["state"] = (
-                yield self._event_serializer.serialize_events(
-                    state, time_now, as_client_event=as_client_event
-                )
+            chunk["state"] = yield self._event_serializer.serialize_events(
+                state, time_now, as_client_event=as_client_event
             )
 
         return chunk
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 53410f120b..cff6b0d375 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -396,8 +396,8 @@ class RegistrationHandler(BaseHandler):
             room_id = room_identifier
         elif RoomAlias.is_valid(room_identifier):
             room_alias = RoomAlias.from_string(room_identifier)
-            room_id, remote_room_hosts = (
-                yield room_member_handler.lookup_room_alias(room_alias)
+            room_id, remote_room_hosts = yield room_member_handler.lookup_room_alias(
+                room_alias
             )
             room_id = room_id.to_string()
         else:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 650bd28abb..0182e5b432 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -147,21 +147,22 @@ class RoomCreationHandler(BaseHandler):
 
         # we create and auth the tombstone event before properly creating the new
         # room, to check our user has perms in the old room.
-        tombstone_event, tombstone_context = (
-            yield self.event_creation_handler.create_event(
-                requester,
-                {
-                    "type": EventTypes.Tombstone,
-                    "state_key": "",
-                    "room_id": old_room_id,
-                    "sender": user_id,
-                    "content": {
-                        "body": "This room has been replaced",
-                        "replacement_room": new_room_id,
-                    },
+        (
+            tombstone_event,
+            tombstone_context,
+        ) = yield self.event_creation_handler.create_event(
+            requester,
+            {
+                "type": EventTypes.Tombstone,
+                "state_key": "",
+                "room_id": old_room_id,
+                "sender": user_id,
+                "content": {
+                    "body": "This room has been replaced",
+                    "replacement_room": new_room_id,
                 },
-                token_id=requester.access_token_id,
-            )
+            },
+            token_id=requester.access_token_id,
         )
         old_room_version = yield self.store.get_room_version(old_room_id)
         yield self.auth.check_from_context(
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 380e2fad5e..9a940d2c05 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -759,22 +759,25 @@ class RoomMemberHandler(object):
         if room_avatar_event:
             room_avatar_url = room_avatar_event.content.get("url", "")
 
-        token, public_keys, fallback_public_key, display_name = (
-            yield self.identity_handler.ask_id_server_for_third_party_invite(
-                requester=requester,
-                id_server=id_server,
-                medium=medium,
-                address=address,
-                room_id=room_id,
-                inviter_user_id=user.to_string(),
-                room_alias=canonical_room_alias,
-                room_avatar_url=room_avatar_url,
-                room_join_rules=room_join_rules,
-                room_name=room_name,
-                inviter_display_name=inviter_display_name,
-                inviter_avatar_url=inviter_avatar_url,
-                id_access_token=id_access_token,
-            )
+        (
+            token,
+            public_keys,
+            fallback_public_key,
+            display_name,
+        ) = yield self.identity_handler.ask_id_server_for_third_party_invite(
+            requester=requester,
+            id_server=id_server,
+            medium=medium,
+            address=address,
+            room_id=room_id,
+            inviter_user_id=user.to_string(),
+            room_alias=canonical_room_alias,
+            room_avatar_url=room_avatar_url,
+            room_join_rules=room_join_rules,
+            room_name=room_name,
+            inviter_display_name=inviter_display_name,
+            inviter_avatar_url=inviter_avatar_url,
+            id_access_token=id_access_token,
         )
 
         yield self.event_creation_handler.create_and_send_nonmember_event(
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index f4d8a60774..56ed262a1f 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -396,15 +396,11 @@ class SearchHandler(BaseHandler):
         time_now = self.clock.time_msec()
 
         for context in contexts.values():
-            context["events_before"] = (
-                yield self._event_serializer.serialize_events(
-                    context["events_before"], time_now
-                )
+            context["events_before"] = yield self._event_serializer.serialize_events(
+                context["events_before"], time_now
             )
-            context["events_after"] = (
-                yield self._event_serializer.serialize_events(
-                    context["events_after"], time_now
-                )
+            context["events_after"] = yield self._event_serializer.serialize_events(
+                context["events_after"], time_now
             )
 
         state_results = {}
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 26bc276692..7f7d56390e 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -108,7 +108,10 @@ class StatsHandler(StateDeltasHandler):
                 user_deltas = {}
 
             # Then count deltas for total_events and total_event_bytes.
-            room_count, user_count = yield self.store.get_changes_room_total_events_and_bytes(
+            (
+                room_count,
+                user_count,
+            ) = yield self.store.get_changes_room_total_events_and_bytes(
                 self.pos, max_pos
             )
 
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 43a082dcda..b536d410e5 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1206,10 +1206,11 @@ class SyncHandler(object):
         since_token = sync_result_builder.since_token
 
         if since_token and not sync_result_builder.full_state:
-            account_data, account_data_by_room = (
-                yield self.store.get_updated_account_data_for_user(
-                    user_id, since_token.account_data_key
-                )
+            (
+                account_data,
+                account_data_by_room,
+            ) = yield self.store.get_updated_account_data_for_user(
+                user_id, since_token.account_data_key
             )
 
             push_rules_changed = yield self.store.have_push_rules_changed_for_user(
@@ -1221,9 +1222,10 @@ class SyncHandler(object):
                     sync_config.user
                 )
         else:
-            account_data, account_data_by_room = (
-                yield self.store.get_account_data_for_user(sync_config.user.to_string())
-            )
+            (
+                account_data,
+                account_data_by_room,
+            ) = yield self.store.get_account_data_for_user(sync_config.user.to_string())
 
             account_data["m.push_rules"] = yield self.push_rules_for_user(
                 sync_config.user
diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py
index 3220e985a9..334ddaf39a 100644
--- a/synapse/logging/_structured.py
+++ b/synapse/logging/_structured.py
@@ -185,7 +185,7 @@ DEFAULT_LOGGERS = {"synapse": {"level": "INFO"}}
 
 
 def parse_drain_configs(
-    drains: dict
+    drains: dict,
 ) -> typing.Generator[DrainConfiguration, None, None]:
     """
     Parse the drain configurations.
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 2bbdd11941..1ba7bcd4d8 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -149,9 +149,10 @@ class BulkPushRuleEvaluator(object):
 
         room_members = yield self.store.get_joined_users_from_context(event, context)
 
-        (power_levels, sender_power_level) = (
-            yield self._get_power_levels_and_sender_level(event, context)
-        )
+        (
+            power_levels,
+            sender_power_level,
+        ) = yield self._get_power_levels_and_sender_level(event, context)
 
         evaluator = PushRuleEvaluatorForEvent(
             event, len(room_members), sender_power_level, power_levels
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index 42e5b0c0a5..8c818a86bf 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -234,14 +234,12 @@ class EmailPusher(object):
             return
 
         self.last_stream_ordering = last_stream_ordering
-        pusher_still_exists = (
-            yield self.store.update_pusher_last_stream_ordering_and_success(
-                self.app_id,
-                self.email,
-                self.user_id,
-                last_stream_ordering,
-                self.clock.time_msec(),
-            )
+        pusher_still_exists = yield self.store.update_pusher_last_stream_ordering_and_success(
+            self.app_id,
+            self.email,
+            self.user_id,
+            last_stream_ordering,
+            self.clock.time_msec(),
         )
         if not pusher_still_exists:
             # The pusher has been deleted while we were processing, so
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 9a1bb64887..7dde2ad055 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -211,14 +211,12 @@ class HttpPusher(object):
                 http_push_processed_counter.inc()
                 self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
                 self.last_stream_ordering = push_action["stream_ordering"]
-                pusher_still_exists = (
-                    yield self.store.update_pusher_last_stream_ordering_and_success(
-                        self.app_id,
-                        self.pushkey,
-                        self.user_id,
-                        self.last_stream_ordering,
-                        self.clock.time_msec(),
-                    )
+                pusher_still_exists = yield self.store.update_pusher_last_stream_ordering_and_success(
+                    self.app_id,
+                    self.pushkey,
+                    self.user_id,
+                    self.last_stream_ordering,
+                    self.clock.time_msec(),
                 )
                 if not pusher_still_exists:
                     # The pusher has been deleted while we were processing, so
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 08e840fdc2..0f6992202d 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -103,9 +103,7 @@ class PusherPool:
         # create the pusher setting last_stream_ordering to the current maximum
         # stream ordering in event_push_actions, so it will process
         # pushes from this point onwards.
-        last_stream_ordering = (
-            yield self.store.get_latest_push_action_stream_ordering()
-        )
+        last_stream_ordering = yield self.store.get_latest_push_action_stream_ordering()
 
         yield self.store.add_pusher(
             user_id=user_id,
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 39a5c5e9de..00a7dd6d09 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -203,10 +203,11 @@ class LoginRestServlet(RestServlet):
                 address = address.lower()
 
             # Check for login providers that support 3pid login types
-            canonical_user_id, callback_3pid = (
-                yield self.auth_handler.check_password_provider_3pid(
-                    medium, address, login_submission["password"]
-                )
+            (
+                canonical_user_id,
+                callback_3pid,
+            ) = yield self.auth_handler.check_password_provider_3pid(
+                medium, address, login_submission["password"]
             )
             if canonical_user_id:
                 # Authentication through password provider and 3pid succeeded
@@ -280,8 +281,8 @@ class LoginRestServlet(RestServlet):
     def do_token_login(self, login_submission):
         token = login_submission["token"]
         auth_handler = self.auth_handler
-        user_id = (
-            yield auth_handler.validate_short_term_login_token_and_get_user_id(token)
+        user_id = yield auth_handler.validate_short_term_login_token_and_get_user_id(
+            token
         )
 
         result = yield self._register_device_with_callback(user_id, login_submission)
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 332d7138b1..f26eae794c 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -148,7 +148,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
         self.clock = hs.get_clock()
         self.store = hs.get_datastore()
         if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
-            self.failure_email_template, = load_jinja2_templates(
+            (self.failure_email_template,) = load_jinja2_templates(
                 self.config.email_template_dir,
                 [self.config.email_password_reset_template_failure_html],
             )
@@ -479,7 +479,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
         self.clock = hs.get_clock()
         self.store = hs.get_datastore()
         if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
-            self.failure_email_template, = load_jinja2_templates(
+            (self.failure_email_template,) = load_jinja2_templates(
                 self.config.email_template_dir,
                 [self.config.email_add_threepid_template_failure_html],
             )
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 6c7d25d411..91db923814 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -247,13 +247,13 @@ class RegistrationSubmitTokenServlet(RestServlet):
         self.store = hs.get_datastore()
 
         if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
-            self.failure_email_template, = load_jinja2_templates(
+            (self.failure_email_template,) = load_jinja2_templates(
                 self.config.email_template_dir,
                 [self.config.email_registration_template_failure_html],
             )
 
         if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
-            self.failure_email_template, = load_jinja2_templates(
+            (self.failure_email_template,) = load_jinja2_templates(
                 self.config.email_template_dir,
                 [self.config.email_registration_template_failure_html],
             )
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 55580bc59e..e7fc3f0431 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -102,7 +102,7 @@ class RemoteKey(DirectServeResource):
     @wrap_json_request_handler
     async def _async_render_GET(self, request):
         if len(request.postpath) == 1:
-            server, = request.postpath
+            (server,) = request.postpath
             query = {server.decode("ascii"): {}}
         elif len(request.postpath) == 2:
             server, key_id = request.postpath
diff --git a/synapse/server.pyi b/synapse/server.pyi
index 16f8f6b573..83d1f11283 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -39,7 +39,7 @@ class HomeServer(object):
     def get_state_resolution_handler(self) -> synapse.state.StateResolutionHandler:
         pass
     def get_deactivate_account_handler(
-        self
+        self,
     ) -> synapse.handlers.deactivate_account.DeactivateAccountHandler:
         pass
     def get_room_creation_handler(self) -> synapse.handlers.room.RoomCreationHandler:
@@ -47,32 +47,32 @@ class HomeServer(object):
     def get_room_member_handler(self) -> synapse.handlers.room_member.RoomMemberHandler:
         pass
     def get_event_creation_handler(
-        self
+        self,
     ) -> synapse.handlers.message.EventCreationHandler:
         pass
     def get_set_password_handler(
-        self
+        self,
     ) -> synapse.handlers.set_password.SetPasswordHandler:
         pass
     def get_federation_sender(self) -> synapse.federation.sender.FederationSender:
         pass
     def get_federation_transport_client(
-        self
+        self,
     ) -> synapse.federation.transport.client.TransportLayerClient:
         pass
     def get_media_repository_resource(
-        self
+        self,
     ) -> synapse.rest.media.v1.media_repository.MediaRepositoryResource:
         pass
     def get_media_repository(
-        self
+        self,
     ) -> synapse.rest.media.v1.media_repository.MediaRepository:
         pass
     def get_server_notices_manager(
-        self
+        self,
     ) -> synapse.server_notices.server_notices_manager.ServerNoticesManager:
         pass
     def get_server_notices_sender(
-        self
+        self,
     ) -> synapse.server_notices.server_notices_sender.ServerNoticesSender:
         pass
diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py
index b185ba0b3e..60ae01d972 100644
--- a/synapse/storage/data_stores/main/__init__.py
+++ b/synapse/storage/data_stores/main/__init__.py
@@ -317,7 +317,7 @@ class DataStore(
             ) u
         """
         txn.execute(sql, (time_from,))
-        count, = txn.fetchone()
+        (count,) = txn.fetchone()
         return count
 
     def count_r30_users(self):
@@ -396,7 +396,7 @@ class DataStore(
 
             txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))
 
-            count, = txn.fetchone()
+            (count,) = txn.fetchone()
             results["all"] = count
 
             return results
diff --git a/synapse/storage/data_stores/main/event_push_actions.py b/synapse/storage/data_stores/main/event_push_actions.py
index 22025effbc..04ce21ac66 100644
--- a/synapse/storage/data_stores/main/event_push_actions.py
+++ b/synapse/storage/data_stores/main/event_push_actions.py
@@ -863,7 +863,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
         )
         stream_row = txn.fetchone()
         if stream_row:
-            offset_stream_ordering, = stream_row
+            (offset_stream_ordering,) = stream_row
             rotate_to_stream_ordering = min(
                 self.stream_ordering_day_ago, offset_stream_ordering
             )
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 64a8a05279..aafc2007d3 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -1125,7 +1125,7 @@ class EventsStore(
                 AND stream_ordering > ?
             """
             txn.execute(sql, (self.stream_ordering_day_ago,))
-            count, = txn.fetchone()
+            (count,) = txn.fetchone()
             return count
 
         ret = yield self.runInteraction("count_messages", _count_messages)
@@ -1146,7 +1146,7 @@ class EventsStore(
             """
 
             txn.execute(sql, (like_clause, self.stream_ordering_day_ago))
-            count, = txn.fetchone()
+            (count,) = txn.fetchone()
             return count
 
         ret = yield self.runInteraction("count_daily_sent_messages", _count_messages)
@@ -1161,7 +1161,7 @@ class EventsStore(
                 AND stream_ordering > ?
             """
             txn.execute(sql, (self.stream_ordering_day_ago,))
-            count, = txn.fetchone()
+            (count,) = txn.fetchone()
             return count
 
         ret = yield self.runInteraction("count_daily_active_rooms", _count)
@@ -1646,7 +1646,7 @@ class EventsStore(
         """,
             (room_id,),
         )
-        min_depth, = txn.fetchone()
+        (min_depth,) = txn.fetchone()
 
         logger.info("[purge] updating room_depth to %d", min_depth)
 
diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py
index 31ea6f917f..51352b9966 100644
--- a/synapse/storage/data_stores/main/events_bg_updates.py
+++ b/synapse/storage/data_stores/main/events_bg_updates.py
@@ -438,7 +438,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
             if not rows:
                 return 0
 
-            upper_event_id, = rows[-1]
+            (upper_event_id,) = rows[-1]
 
             # Update the redactions with the received_ts.
             #
diff --git a/synapse/storage/data_stores/main/group_server.py b/synapse/storage/data_stores/main/group_server.py
index aeae5a2b28..b3a2771f1b 100644
--- a/synapse/storage/data_stores/main/group_server.py
+++ b/synapse/storage/data_stores/main/group_server.py
@@ -249,7 +249,7 @@ class GroupServerStore(SQLBaseStore):
                 WHERE group_id = ? AND category_id = ?
             """
             txn.execute(sql, (group_id, category_id))
-            order, = txn.fetchone()
+            (order,) = txn.fetchone()
 
         if existing:
             to_update = {}
@@ -509,7 +509,7 @@ class GroupServerStore(SQLBaseStore):
                 WHERE group_id = ? AND role_id = ?
             """
             txn.execute(sql, (group_id, role_id))
-            order, = txn.fetchone()
+            (order,) = txn.fetchone()
 
         if existing:
             to_update = {}
diff --git a/synapse/storage/data_stores/main/monthly_active_users.py b/synapse/storage/data_stores/main/monthly_active_users.py
index e6ee1e4aaa..b41c3d317a 100644
--- a/synapse/storage/data_stores/main/monthly_active_users.py
+++ b/synapse/storage/data_stores/main/monthly_active_users.py
@@ -171,7 +171,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
             sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users"
 
             txn.execute(sql)
-            count, = txn.fetchone()
+            (count,) = txn.fetchone()
             return count
 
         return self.runInteraction("count_users", _count_users)
diff --git a/synapse/storage/data_stores/main/push_rule.py b/synapse/storage/data_stores/main/push_rule.py
index cd95f1ce60..b520062d84 100644
--- a/synapse/storage/data_stores/main/push_rule.py
+++ b/synapse/storage/data_stores/main/push_rule.py
@@ -143,7 +143,7 @@ class PushRulesWorkerStore(
                     " WHERE user_id = ? AND ? < stream_id"
                 )
                 txn.execute(sql, (user_id, last_id))
-                count, = txn.fetchone()
+                (count,) = txn.fetchone()
                 return bool(count)
 
             return self.runInteraction(
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index 6c5b29288a..f70d41ecab 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -459,7 +459,7 @@ class RegistrationWorkerStore(SQLBaseStore):
                 WHERE appservice_id IS NULL
             """
             )
-            count, = txn.fetchone()
+            (count,) = txn.fetchone()
             return count
 
         ret = yield self.runInteraction("count_users", _count_users)
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index bc04bfd7d4..2af24a20b7 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -927,7 +927,7 @@ class RoomMemberBackgroundUpdateStore(BackgroundUpdateStore):
                 if not row or not row[0]:
                     return processed, True
 
-                next_room, = row
+                (next_room,) = row
 
                 sql = """
                     UPDATE current_state_events
diff --git a/synapse/storage/data_stores/main/search.py b/synapse/storage/data_stores/main/search.py
index a59b8331e1..d1d7c6863d 100644
--- a/synapse/storage/data_stores/main/search.py
+++ b/synapse/storage/data_stores/main/search.py
@@ -672,7 +672,7 @@ class SearchStore(SearchBackgroundUpdateStore):
                     )
                 )
                 txn.execute(query, (value, search_query))
-                headline, = txn.fetchall()[0]
+                (headline,) = txn.fetchall()[0]
 
                 # Now we need to pick the possible highlights out of the headline
                 # result.
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index 9b2207075b..3132848034 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -725,16 +725,18 @@ class StateGroupWorkerStore(
         member_filter, non_member_filter = state_filter.get_member_split()
 
         # Now we look them up in the member and non-member caches
-        non_member_state, incomplete_groups_nm, = (
-            yield self._get_state_for_groups_using_cache(
-                groups, self._state_group_cache, state_filter=non_member_filter
-            )
+        (
+            non_member_state,
+            incomplete_groups_nm,
+        ) = yield self._get_state_for_groups_using_cache(
+            groups, self._state_group_cache, state_filter=non_member_filter
         )
 
-        member_state, incomplete_groups_m, = (
-            yield self._get_state_for_groups_using_cache(
-                groups, self._state_group_members_cache, state_filter=member_filter
-            )
+        (
+            member_state,
+            incomplete_groups_m,
+        ) = yield self._get_state_for_groups_using_cache(
+            groups, self._state_group_members_cache, state_filter=member_filter
         )
 
         state = dict(non_member_state)
@@ -1076,7 +1078,7 @@ class StateBackgroundUpdateStore(
                     " WHERE id < ? AND room_id = ?",
                     (state_group, room_id),
                 )
-                prev_group, = txn.fetchone()
+                (prev_group,) = txn.fetchone()
                 new_last_state_group = state_group
 
                 if prev_group:
diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py
index 4d59b7833f..45b3de7d56 100644
--- a/synapse/storage/data_stores/main/stats.py
+++ b/synapse/storage/data_stores/main/stats.py
@@ -773,7 +773,7 @@ class StatsStore(StateDeltasStore):
                 (room_id,),
             )
 
-            current_state_events_count, = txn.fetchone()
+            (current_state_events_count,) = txn.fetchone()
 
             users_in_room = self.get_users_in_room_txn(txn, room_id)
 
@@ -863,7 +863,7 @@ class StatsStore(StateDeltasStore):
                 """,
                 (user_id,),
             )
-            count, = txn.fetchone()
+            (count,) = txn.fetchone()
             return count, pos
 
         joined_rooms, pos = yield self.runInteraction(
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index cbb0a4810a..9d851beaa5 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -46,7 +46,7 @@ def _load_current_id(db_conn, table, column, step=1):
         cur.execute("SELECT MAX(%s) FROM %s" % (column, table))
     else:
         cur.execute("SELECT MIN(%s) FROM %s" % (column, table))
-    val, = cur.fetchone()
+    (val,) = cur.fetchone()
     cur.close()
     current_id = int(val) if val else step
     return (max if step > 0 else min)(current_id, step)
diff --git a/tox.ini b/tox.ini
index 50b6afe611..afe9bc909b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -114,7 +114,7 @@ skip_install = True
 basepython = python3.6
 deps =
     flake8
-    black==19.3b0  # We pin so that our tests don't start failing on new releases of black.
+    black==19.10b0  # We pin so that our tests don't start failing on new releases of black.
 commands =
     python -m black --check --diff .
     /bin/sh -c "flake8 synapse tests scripts scripts-dev synctl {env:PEP8SUFFIX:}"
@@ -167,6 +167,6 @@ deps =
 env =
     MYPYPATH = stubs/
 extras = all
-commands = mypy --show-traceback --check-untyped-defs --show-error-codes --follow-imports=normal \
+commands = mypy \
             synapse/logging/ \
             synapse/config/
-- 
cgit 1.4.1
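
Most of the hunks in this patch are mechanical: black 19.10b0 parenthesises
single-element tuple unpacking targets, explodes long multi-target
assignments onto their own lines, and adds trailing commas to argument
lists it splits. A minimal before/after sketch (hypothetical snippet, not
taken from the Synapse tree):

    import re

    m = re.match(r"^join (\S+)$", "join #room")

    # black 19.3b0 left the bare form alone:
    #   room_name, = m.groups()
    # black 19.10b0 rewrites it with explicit parentheses:
    (room_name,) = m.groups()
    print(room_name)  # "#room"

The tox.ini change is the second half of the commit message: flags passed
on the mypy command line override mypy.ini, so they were moved into the
config file to stop them shadowing it.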


From 42e707c663505cecb63da579d5fa2c4d30811db7 Mon Sep 17 00:00:00 2001
From: Will Hunt 
Date: Thu, 31 Oct 2019 17:32:25 +0000
Subject: rstrip slashes from url on appservice (#6306)

---
 changelog.d/6306.bugfix        | 1 +
 synapse/appservice/__init__.py | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6306.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6306.bugfix b/changelog.d/6306.bugfix
new file mode 100644
index 0000000000..c7dcbcdce8
--- /dev/null
+++ b/changelog.d/6306.bugfix
@@ -0,0 +1 @@
+Appservice requests will no longer contain a double slash prefix when the appservice url provided ends in a slash.
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 33b3579425..aea3985a5f 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -94,7 +94,9 @@ class ApplicationService(object):
         ip_range_whitelist=None,
     ):
         self.token = token
-        self.url = url
+        self.url = (
+            url.rstrip("/") if isinstance(url, str) else None
+        )  # url must not end with a slash
         self.hs_token = hs_token
         self.sender = sender
         self.server_name = hostname
-- 
cgit 1.4.1
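
The fix is plain string handling: request paths are appended to the
configured appservice url, so a trailing slash in the registration file
produced requests with a double slash after the host. A rough illustration
of the failure mode and the new guard (hypothetical helper, not the actual
appservice client code):

    def txn_url(base_url, txn_id):
        # Sketch: a request path appended to the configured base URL.
        return "%s/transactions/%s" % (base_url, txn_id)

    url = "https://as.example.com/"
    print(txn_url(url, "1"))  # https://as.example.com//transactions/1

    url = url.rstrip("/") if isinstance(url, str) else None  # the new guard
    print(txn_url(url, "1"))  # https://as.example.com/transactions/1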


From 1f156398b9c0c46db7907d94089000c36ce8c072 Mon Sep 17 00:00:00 2001
From: Hubert Chathi 
Date: Thu, 31 Oct 2019 23:02:20 -0400
Subject: add changelog

---
 changelog.d/6313.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6313.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6313.bugfix b/changelog.d/6313.bugfix
new file mode 100644
index 0000000000..f4d4a97f00
--- /dev/null
+++ b/changelog.d/6313.bugfix
@@ -0,0 +1 @@
+Fix the `hidden` field in the `devices` table for SQLite versions prior to 3.23.0.
-- 
cgit 1.4.1
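
Context for this one-line changelog: SQLite only accepts TRUE and FALSE as
literals from version 3.23.0 onwards, so a column default written with a
boolean keyword fails on older installations, while the integer literals
0/1 work everywhere. An illustrative sketch of the portable form (not the
actual schema delta, which lives under synapse/storage/schema):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE devices (user_id TEXT, device_id TEXT)")

    # On SQLite < 3.23.0 the keyword form raises an OperationalError:
    #   ALTER TABLE devices ADD COLUMN hidden BOOLEAN DEFAULT FALSE
    # The integer literal is accepted by every SQLite version:
    conn.execute("ALTER TABLE devices ADD COLUMN hidden BOOLEAN DEFAULT 0")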


From ace947e8da30c37ead3357abe34adee8a1528296 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 1 Nov 2019 10:28:09 +0000
Subject: Depublish a room from the public rooms list when it is upgraded
 (#6232)

---
 changelog.d/6232.bugfix                 |  1 +
 synapse/federation/federation_client.py |  2 +-
 synapse/handlers/federation.py          | 30 +++++++++++-
 synapse/handlers/room.py                |  8 +++-
 synapse/handlers/room_member.py         | 81 ++++++++++++++++++++++-----------
 5 files changed, 92 insertions(+), 30 deletions(-)
 create mode 100644 changelog.d/6232.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6232.bugfix b/changelog.d/6232.bugfix
new file mode 100644
index 0000000000..12718ba934
--- /dev/null
+++ b/changelog.d/6232.bugfix
@@ -0,0 +1 @@
+Remove a room from a server's public rooms list on room upgrade.
\ No newline at end of file
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 595706d01a..545d719652 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -555,7 +555,7 @@ class FederationClient(FederationBase):
         Note that this does not append any events to any graphs.
 
         Args:
-            destinations (str): Candidate homeservers which are probably
+            destinations (Iterable[str]): Candidate homeservers which are probably
                 participating in the room.
             room_id (str): The room in which the event will happen.
             user_id (str): The user whose membership is being evented.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index a932d3085f..dab6be9573 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1106,7 +1106,7 @@ class FederationHandler(BaseHandler):
     @defer.inlineCallbacks
     def do_invite_join(self, target_hosts, room_id, joinee, content):
         """ Attempts to join the `joinee` to the room `room_id` via the
-        server `target_host`.
+        servers contained in `target_hosts`.
 
         This first triggers a /make_join/ request that returns a partial
         event that we can fill out and sign. This is then sent to the
@@ -1115,6 +1115,15 @@ class FederationHandler(BaseHandler):
 
         We suspend processing of any received events from this room until we
         have finished processing the join.
+
+        Args:
+            target_hosts (Iterable[str]): List of servers to attempt to join the room with.
+
+            room_id (str): The ID of the room to join.
+
+            joinee (str): The User ID of the joining user.
+
+            content (dict): The event content to use for the join event.
         """
         logger.debug("Joining %s to %s", joinee, room_id)
 
@@ -1174,6 +1183,22 @@ class FederationHandler(BaseHandler):
 
             yield self._persist_auth_tree(origin, auth_chain, state, event)
 
+            # Check whether this room is the result of an upgrade of a room we already know
+            # about. If so, migrate over user information
+            predecessor = yield self.store.get_room_predecessor(room_id)
+            if not predecessor:
+                return
+            old_room_id = predecessor["room_id"]
+            logger.debug(
+                "Found predecessor for %s during remote join: %s", room_id, old_room_id
+            )
+
+            # We retrieve the room member handler here so as not to cause a cyclic dependency
+            member_handler = self.hs.get_room_member_handler()
+            yield member_handler.transfer_room_state_on_room_upgrade(
+                old_room_id, room_id
+            )
+
             logger.debug("Finished joining %s to %s", joinee, room_id)
         finally:
             room_queue = self.room_queues[room_id]
@@ -2442,6 +2467,8 @@ class FederationHandler(BaseHandler):
                 raise e
 
             yield self._check_signature(event, context)
+
+            # We retrieve the room member handler here so as not to cause a cyclic dependency
             member_handler = self.hs.get_room_member_handler()
             yield member_handler.send_membership_event(None, event, context)
         else:
@@ -2502,6 +2529,7 @@ class FederationHandler(BaseHandler):
         # though the sender isn't a local user.
         event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)
 
+        # We retrieve the room member handler here so as not to cause a cyclic dependency
         member_handler = self.hs.get_room_member_handler()
         yield member_handler.send_membership_event(None, event, context)
 
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 0182e5b432..e92b2eafd5 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -129,6 +129,7 @@ class RoomCreationHandler(BaseHandler):
             old_room_id,
             new_version,  # args for _upgrade_room
         )
+
         return ret
 
     @defer.inlineCallbacks
@@ -189,7 +190,12 @@ class RoomCreationHandler(BaseHandler):
             requester, old_room_id, new_room_id, old_room_state
         )
 
-        # and finally, shut down the PLs in the old room, and update them in the new
+        # Copy over user push rules, tags and migrate room directory state
+        yield self.room_member_handler.transfer_room_state_on_room_upgrade(
+            old_room_id, new_room_id
+        )
+
+        # finally, shut down the PLs in the old room, and update them in the new
         # room.
         yield self._update_upgraded_room_pls(
             requester, old_room_id, new_room_id, old_room_state
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 9a940d2c05..06d09c2947 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -203,10 +203,6 @@ class RoomMemberHandler(object):
                 prev_member_event = yield self.store.get_event(prev_member_event_id)
                 newly_joined = prev_member_event.membership != Membership.JOIN
             if newly_joined:
-                # Copy over user state if we're joining an upgraded room
-                yield self.copy_user_state_if_room_upgrade(
-                    room_id, requester.user.to_string()
-                )
                 yield self._user_joined_room(target, room_id)
         elif event.membership == Membership.LEAVE:
             if prev_member_event_id:
@@ -455,11 +451,6 @@ class RoomMemberHandler(object):
                     requester, remote_room_hosts, room_id, target, content
                 )
 
-                # Copy over user state if this is a join on an remote upgraded room
-                yield self.copy_user_state_if_room_upgrade(
-                    room_id, requester.user.to_string()
-                )
-
                 return remote_join_response
 
         elif effective_membership_state == Membership.LEAVE:
@@ -498,36 +489,72 @@ class RoomMemberHandler(object):
         return res
 
     @defer.inlineCallbacks
-    def copy_user_state_if_room_upgrade(self, new_room_id, user_id):
-        """Copy user-specific information when they join a new room if that new room is the
+    def transfer_room_state_on_room_upgrade(self, old_room_id, room_id):
+        """Upon our server becoming aware of an upgraded room, either by upgrading a room
+        ourselves or joining one, we can transfer over information from the previous room.
+
+        Copies user state (tags/push rules) for every local user that was in the old room, as
+        well as migrating the room directory state.
+
+        Args:
+            old_room_id (str): The ID of the old room
+
+            room_id (str): The ID of the new room
+
+        Returns:
+            Deferred
+        """
+        # Find all local users that were in the old room and copy over each user's state
+        users = yield self.store.get_users_in_room(old_room_id)
+        yield self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
+
+        # Add new room to the room directory if the old room was there
+        # Remove old room from the room directory
+        old_room = yield self.store.get_room(old_room_id)
+        if old_room and old_room["is_public"]:
+            yield self.store.set_room_is_public(old_room_id, False)
+            yield self.store.set_room_is_public(room_id, True)
+
+    @defer.inlineCallbacks
+    def copy_user_state_on_room_upgrade(self, old_room_id, new_room_id, user_ids):
+        """Copy user-specific information when they join a new room when that new room is the
         result of a room upgrade
 
         Args:
-            new_room_id (str): The ID of the room the user is joining
-            user_id (str): The ID of the user
+            old_room_id (str): The ID of the upgraded room
+            new_room_id (str): The ID of the new room
+            user_ids (Iterable[str]): User IDs to copy state for
 
         Returns:
             Deferred
         """
-        # Check if the new room is an upgraded room
-        predecessor = yield self.store.get_room_predecessor(new_room_id)
-        if not predecessor:
-            return
 
         logger.debug(
-            "Found predecessor for %s: %s. Copying over room tags and push " "rules",
+            "Copying over room tags and push rules from %s to %s for users %s",
+            old_room_id,
             new_room_id,
-            predecessor,
+            user_ids,
         )
 
-        # It is an upgraded room. Copy over old tags
-        yield self.copy_room_tags_and_direct_to_room(
-            predecessor["room_id"], new_room_id, user_id
-        )
-        # Copy over push rules
-        yield self.store.copy_push_rules_from_room_to_room_for_user(
-            predecessor["room_id"], new_room_id, user_id
-        )
+        for user_id in user_ids:
+            try:
+                # It is an upgraded room. Copy over old tags
+                yield self.copy_room_tags_and_direct_to_room(
+                    old_room_id, new_room_id, user_id
+                )
+                # Copy over push rules
+                yield self.store.copy_push_rules_from_room_to_room_for_user(
+                    old_room_id, new_room_id, user_id
+                )
+            except Exception:
+                logger.exception(
+                    "Error copying tags and/or push rules from rooms %s to %s for user %s. "
+                    "Skipping...",
+                    old_room_id,
+                    new_room_id,
+                    user_id,
+                )
+                continue
 
     @defer.inlineCallbacks
     def send_membership_event(self, requester, event, context, ratelimit=True):
-- 
cgit 1.4.1
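
To summarise the flow this patch introduces: whenever the server learns of
an upgraded room, whether it performed the upgrade itself or joined the
replacement over federation, it copies per-user state for every local user
of the predecessor and then swaps the rooms' directory visibility. A
condensed sketch of that shape (async pseudocode mirroring the
inlineCallbacks version in room_member.py, with the store methods from the
diff above):

    async def transfer_room_state_on_room_upgrade(handler, store, old_room_id, room_id):
        # Copy tags and push rules for every local user of the old room.
        users = await store.get_users_in_room(old_room_id)
        for user_id in users:
            try:
                await handler.copy_room_tags_and_direct_to_room(
                    old_room_id, room_id, user_id
                )
                await store.copy_push_rules_from_room_to_room_for_user(
                    old_room_id, room_id, user_id
                )
            except Exception:
                continue  # skip a failing user rather than abort the upgrade

        # Depublish the old room and publish its replacement.
        old_room = await store.get_room(old_room_id)
        if old_room and old_room["is_public"]:
            await store.set_room_is_public(old_room_id, False)
            await store.set_room_is_public(room_id, True)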


From c6dbca2422bf77ccbf0b52d9245d28c258dac4f3 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Fri, 1 Nov 2019 10:30:51 +0000
Subject: Incorporate review

---
 changelog.d/6301.feature                   |  2 +-
 synapse/api/constants.py                   |  5 ++++-
 synapse/api/filtering.py                   |  6 ++++--
 synapse/storage/data_stores/main/events.py | 12 ++++++++++--
 tests/api/test_filtering.py                | 10 +++++-----
 tests/rest/client/v1/test_rooms.py         | 10 +++++-----
 tests/rest/client/v2_alpha/test_sync.py    | 10 +++++-----
 7 files changed, 34 insertions(+), 21 deletions(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6301.feature b/changelog.d/6301.feature
index b7ff3fad3b..78a187a1dc 100644
--- a/changelog.d/6301.feature
+++ b/changelog.d/6301.feature
@@ -1 +1 @@
-Implement label-based filtering.
+Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)).
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 999ec02fd9..cf4ce5f5a2 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -140,4 +140,7 @@ class LimitBlockingTypes(object):
     HS_DISABLED = "hs_disabled"
 
 
-LabelsField = "org.matrix.labels"
+class EventContentFields(object):
+    """Fields found in events' content, regardless of type."""
+    # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326
+    Labels = "org.matrix.labels"
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index bd91b9f018..30a7ee0a7a 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -20,7 +20,7 @@ from jsonschema import FormatChecker
 
 from twisted.internet import defer
 
-from synapse.api.constants import LabelsField
+from synapse.api.constants import EventContentFields
 from synapse.api.errors import SynapseError
 from synapse.storage.presence import UserPresenceState
 from synapse.types import RoomID, UserID
@@ -67,6 +67,8 @@ ROOM_EVENT_FILTER_SCHEMA = {
         "contains_url": {"type": "boolean"},
         "lazy_load_members": {"type": "boolean"},
         "include_redundant_members": {"type": "boolean"},
+        # Include or exclude events with the provided labels.
+        # cf https://github.com/matrix-org/matrix-doc/pull/2326
         "org.matrix.labels": {"type": "array", "items": {"type": "string"}},
         "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
     },
@@ -307,7 +309,7 @@ class Filter(object):
             content = event.get("content", {})
             # check if there is a string url field in the content for filtering purposes
             contains_url = isinstance(content.get("url"), text_type)
-            labels = content.get(LabelsField, [])
+            labels = content.get(EventContentFields.Labels, [])
 
         return self.check_fields(room_id, sender, ev_type, labels, contains_url)
 
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 2b900f1ce1..42ffa9066a 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -29,7 +29,7 @@ from prometheus_client import Counter, Histogram
 from twisted.internet import defer
 
 import synapse.metrics
-from synapse.api.constants import EventTypes, LabelsField
+from synapse.api.constants import EventTypes, EventContentFields
 from synapse.api.errors import SynapseError
 from synapse.events import EventBase  # noqa: F401
 from synapse.events.snapshot import EventContext  # noqa: F401
@@ -1491,7 +1491,7 @@ class EventsStore(
             self._handle_event_relations(txn, event)
 
             # Store the labels for this event.
-            labels = event.content.get(LabelsField)
+            labels = event.content.get(EventContentFields.Labels)
             if labels:
                 self.insert_labels_for_event_txn(txn, event.event_id, labels)
 
@@ -2483,6 +2483,14 @@ class EventsStore(
         )
 
     def insert_labels_for_event_txn(self, txn, event_id, labels):
+        """Store the mapping between an event's ID and its labels, with one row per
+        (event_id, label) tuple.
+
+        Args:
+            txn (LoggingTransaction): The transaction to execute.
+            event_id (str): The event's ID.
+            labels (list[str]): A list of text labels.
+        """
         return self._simple_insert_many_txn(
             txn=txn,
             table="event_labels",
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index e004ab1ee5..8ec48c4154 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -19,7 +19,7 @@ import jsonschema
 
 from twisted.internet import defer
 
-from synapse.api.constants import LabelsField
+from synapse.api.constants import EventContentFields
 from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
 from synapse.events import FrozenEvent
@@ -329,7 +329,7 @@ class FilteringTestCase(unittest.TestCase):
             sender="@foo:bar",
             type="m.room.message",
             room_id="!secretbase:unknown",
-            content={LabelsField: ["#fun"]},
+            content={EventContentFields.Labels: ["#fun"]},
         )
 
         self.assertTrue(Filter(definition).check(event))
@@ -338,7 +338,7 @@ class FilteringTestCase(unittest.TestCase):
             sender="@foo:bar",
             type="m.room.message",
             room_id="!secretbase:unknown",
-            content={LabelsField: ["#notfun"]},
+            content={EventContentFields.Labels: ["#notfun"]},
         )
 
         self.assertFalse(Filter(definition).check(event))
@@ -349,7 +349,7 @@ class FilteringTestCase(unittest.TestCase):
             sender="@foo:bar",
             type="m.room.message",
             room_id="!secretbase:unknown",
-            content={LabelsField: ["#fun"]},
+            content={EventContentFields.Labels: ["#fun"]},
         )
 
         self.assertFalse(Filter(definition).check(event))
@@ -358,7 +358,7 @@ class FilteringTestCase(unittest.TestCase):
             sender="@foo:bar",
             type="m.room.message",
             room_id="!secretbase:unknown",
-            content={LabelsField: ["#notfun"]},
+            content={EventContentFields.Labels: ["#notfun"]},
         )
 
         self.assertTrue(Filter(definition).check(event))
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index 188f47bd7d..0dc0faa0e5 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -24,7 +24,7 @@ from six.moves.urllib import parse as urlparse
 from twisted.internet import defer
 
 import synapse.rest.admin
-from synapse.api.constants import EventTypes, LabelsField, Membership
+from synapse.api.constants import EventContentFields, EventTypes, Membership
 from synapse.rest.client.v1 import login, profile, room
 
 from tests import unittest
@@ -860,7 +860,7 @@ class RoomMessageListTestCase(RoomBase):
             content={
                 "msgtype": "m.text",
                 "body": "with right label",
-                LabelsField: ["#fun"],
+                EventContentFields.Labels: ["#fun"],
             },
         )
 
@@ -876,7 +876,7 @@ class RoomMessageListTestCase(RoomBase):
             content={
                 "msgtype": "m.text",
                 "body": "with wrong label",
-                LabelsField: ["#work"],
+                EventContentFields.Labels: ["#work"],
             },
         )
 
@@ -886,7 +886,7 @@ class RoomMessageListTestCase(RoomBase):
             content={
                 "msgtype": "m.text",
                 "body": "with two wrong labels",
-                LabelsField: ["#work", "#notfun"],
+                EventContentFields.Labels: ["#work", "#notfun"],
             },
         )
 
@@ -896,7 +896,7 @@ class RoomMessageListTestCase(RoomBase):
             content={
                 "msgtype": "m.text",
                 "body": "with right label",
-                LabelsField: ["#fun"],
+                EventContentFields.Labels: ["#fun"],
             },
         )
 
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
index c5c199d412..c3c6f75ced 100644
--- a/tests/rest/client/v2_alpha/test_sync.py
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -17,7 +17,7 @@ import json
 from mock import Mock
 
 import synapse.rest.admin
-from synapse.api.constants import EventTypes, LabelsField
+from synapse.api.constants import EventContentFields, EventTypes
 from synapse.rest.client.v1 import login, room
 from synapse.rest.client.v2_alpha import sync
 
@@ -157,7 +157,7 @@ class SyncFilterTestCase(unittest.HomeserverTestCase):
             content={
                 "msgtype": "m.text",
                 "body": "with right label",
-                LabelsField: ["#fun"],
+                EventContentFields.Labels: ["#fun"],
             },
             tok=tok,
         )
@@ -175,7 +175,7 @@ class SyncFilterTestCase(unittest.HomeserverTestCase):
             content={
                 "msgtype": "m.text",
                 "body": "with wrong label",
-                LabelsField: ["#work"],
+                EventContentFields.Labels: ["#work"],
             },
             tok=tok,
         )
@@ -186,7 +186,7 @@ class SyncFilterTestCase(unittest.HomeserverTestCase):
             content={
                 "msgtype": "m.text",
                 "body": "with two wrong labels",
-                LabelsField: ["#work", "#notfun"],
+                EventContentFields.Labels: ["#work", "#notfun"],
             },
             tok=tok,
         )
@@ -197,7 +197,7 @@ class SyncFilterTestCase(unittest.HomeserverTestCase):
             content={
                 "msgtype": "m.text",
                 "body": "with right label",
-                LabelsField: ["#fun"],
+                EventContentFields.Labels: ["#fun"],
             },
             tok=tok,
         )
-- 
cgit 1.4.1

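For context, a room event filter exercising the new (unstable) MSC2326 fields looks roughly like the sketch below. The field names come straight from the `ROOM_EVENT_FILTER_SCHEMA` change above; the event type and label values are illustrative.

```python
# A hypothetical room event filter using the unstable MSC2326 fields added to
# ROOM_EVENT_FILTER_SCHEMA above. An event passes only if its content's
# "org.matrix.labels" matches the allow list and avoids the deny list.
room_event_filter = {
    "types": ["m.room.message"],
    "org.matrix.labels": ["#fun"],       # only include events labelled "#fun"
    "org.matrix.not_labels": ["#work"],  # drop events labelled "#work"
}
```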

From befd58f47bab1b8337032d27a995e08c7dd93a83 Mon Sep 17 00:00:00 2001
From: Neil Pilgrim 
Date: Fri, 1 Nov 2019 03:52:20 -0700
Subject: Document lint.sh & allow it to be applied to specified files only (#6312)

---
 CONTRIBUTING.rst      |  8 ++++++++
 changelog.d/6312.misc |  1 +
 scripts-dev/lint.sh   | 14 +++++++++++---
 3 files changed, 20 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6312.misc

(limited to 'changelog.d')

diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index a71a4a696b..2fb3a95949 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -58,6 +58,14 @@ All Matrix projects have a well-defined code-style - and sometimes we've even
 got as far as documenting it... For instance, synapse's code style doc lives
 at https://github.com/matrix-org/synapse/tree/master/docs/code_style.md.
 
+To facilitate meeting these criteria you can run ``scripts-dev/lint.sh``
+locally. Since this runs the tools listed in the above document, you'll need
+python 3.6 and each of those tools installed. **Note that the script does not just
+test/check, but also reformats code, so you may wish to ensure any new code is
+committed first**. By default this script checks all files and can take some
+time; if you alter only certain files, you might wish to specify paths as
+arguments to reduce the run-time.
+
 Please ensure your changes match the cosmetic style of the existing project,
 and **never** mix cosmetic and functional changes in the same commit, as it
 makes it horribly hard to review otherwise.
diff --git a/changelog.d/6312.misc b/changelog.d/6312.misc
new file mode 100644
index 0000000000..55e3e1654d
--- /dev/null
+++ b/changelog.d/6312.misc
@@ -0,0 +1 @@
+Document the use of `lint.sh` for code style enforcement & extend it to run on specified paths only.
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index 02a2ca39e5..34c4854e11 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -7,7 +7,15 @@
 
 set -e
 
-isort -y -rc synapse tests scripts-dev scripts
-flake8 synapse tests
-python3 -m black synapse tests scripts-dev scripts
+if [ $# -ge 1 ]
+then
+  files=$*
+else
+  files="synapse tests scripts-dev scripts"
+fi
+
+echo "Linting these locations: $files"
+isort -y -rc $files
+flake8 $files
+python3 -m black $files
 ./scripts-dev/config-lint.sh
-- 
cgit 1.4.1


From fe1f2b452073e5939cddd23acc6f2d226673a03f Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 1 Nov 2019 12:03:44 +0000
Subject: Remove last usages of deprecated logging.warn method (#6314)

---
 changelog.d/6314.misc         | 1 +
 synapse/config/logger.py      | 4 ++--
 synapse/handlers/directory.py | 2 +-
 3 files changed, 4 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6314.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6314.misc b/changelog.d/6314.misc
new file mode 100644
index 0000000000..2369760272
--- /dev/null
+++ b/changelog.d/6314.misc
@@ -0,0 +1 @@
+Replace every instance of the deprecated `logger.warn` method with `logger.warning`.
\ No newline at end of file
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 2d2c1e54df..75bb904718 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -234,8 +234,8 @@ def setup_logging(
 
     # make sure that the first thing we log is a thing we can grep backwards
     # for
-    logging.warn("***** STARTING SERVER *****")
-    logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse))
+    logging.warning("***** STARTING SERVER *****")
+    logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
     logging.info("Server hostname: %s", config.server_name)
 
     return logger
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 526379c6f7..c4632f8984 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -250,7 +250,7 @@ class DirectoryHandler(BaseHandler):
                     ignore_backoff=True,
                 )
             except CodeMessageException as e:
-                logging.warn("Error retrieving alias")
+                logging.warning("Error retrieving alias")
                 if e.code == 404:
                     result = None
                 else:
-- 
cgit 1.4.1

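The rename is not purely cosmetic: `logging.warn` is a deprecated alias that emits a `DeprecationWarning` on every call (newer Pythons have removed the alias entirely). A minimal sketch demonstrating the difference:

```python
# Minimal sketch: logging.warn is a deprecated alias for logging.warning and
# emits a DeprecationWarning when called, on Pythons that still provide it.
import logging
import warnings

logging.basicConfig(level=logging.WARNING)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logging.warn("old spelling")  # deprecated alias
assert any(issubclass(w.category, DeprecationWarning) for w in caught)

logging.warning("preferred spelling")  # no DeprecationWarning
```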

From 1cb84c6486a5131dd284f341bb657434becda255 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 1 Nov 2019 14:07:44 +0000
Subject: Support for routing outbound HTTP requests via a proxy (#6239)

The `http_proxy` and `HTTPS_PROXY` env vars can be set to a `host[:port]` value which should point to a proxy.

The address of the proxy should be excluded from IP blacklists such as the `url_preview_ip_range_blacklist`.

The proxy will then be used for
 * push
 * url previews
 * phone-home stats
 * recaptcha validation
 * CAS auth validation

It will *not* be used for:
 * Application Services
 * Identity servers
 * Outbound federation
 * In worker configurations, connections from workers to masters

Fixes #4198.
---
 changelog.d/6238.feature                           |   1 +
 synapse/app/homeserver.py                          |   2 +-
 synapse/handlers/ui_auth/checkers.py               |   2 +-
 synapse/http/client.py                             |  17 +-
 synapse/http/connectproxyclient.py                 | 195 ++++++++++++
 synapse/http/proxyagent.py                         | 195 ++++++++++++
 synapse/push/httppusher.py                         |   2 +-
 synapse/rest/client/v1/login.py                    |   2 +-
 synapse/rest/media/v1/preview_url_resource.py      |   2 +
 synapse/server.py                                  |   9 +
 synapse/server.pyi                                 |   9 +
 tests/http/__init__.py                             |  17 ++
 .../federation/test_matrix_federation_agent.py     |  11 +-
 tests/http/test_proxyagent.py                      | 334 +++++++++++++++++++++
 tests/push/test_http.py                            |   2 +-
 tests/server.py                                    |  24 +-
 16 files changed, 812 insertions(+), 12 deletions(-)
 create mode 100644 changelog.d/6238.feature
 create mode 100644 synapse/http/connectproxyclient.py
 create mode 100644 synapse/http/proxyagent.py
 create mode 100644 tests/http/test_proxyagent.py

(limited to 'changelog.d')

diff --git a/changelog.d/6238.feature b/changelog.d/6238.feature
new file mode 100644
index 0000000000..d225ac33b6
--- /dev/null
+++ b/changelog.d/6238.feature
@@ -0,0 +1 @@
+Add support for outbound http proxying via http_proxy/HTTPS_PROXY env vars.
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 8997c1f9e7..8d28076d92 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -565,7 +565,7 @@ def run(hs):
             "Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)
         )
         try:
-            yield hs.get_simple_http_client().put_json(
+            yield hs.get_proxied_http_client().put_json(
                 hs.config.report_stats_endpoint, stats
             )
         except Exception as e:
diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py
index 29aa1e5aaf..8363d887a9 100644
--- a/synapse/handlers/ui_auth/checkers.py
+++ b/synapse/handlers/ui_auth/checkers.py
@@ -81,7 +81,7 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
     def __init__(self, hs):
         super().__init__(hs)
         self._enabled = bool(hs.config.recaptcha_private_key)
-        self._http_client = hs.get_simple_http_client()
+        self._http_client = hs.get_proxied_http_client()
         self._url = hs.config.recaptcha_siteverify_api
         self._secret = hs.config.recaptcha_private_key
 
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 2df5b383b5..d4c285445e 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -45,6 +45,7 @@ from synapse.http import (
     cancelled_to_request_timed_out_error,
     redact_uri,
 )
+from synapse.http.proxyagent import ProxyAgent
 from synapse.logging.context import make_deferred_yieldable
 from synapse.logging.opentracing import set_tag, start_active_span, tags
 from synapse.util.async_helpers import timeout_deferred
@@ -183,7 +184,15 @@ class SimpleHttpClient(object):
     using HTTP in Matrix
     """
 
-    def __init__(self, hs, treq_args={}, ip_whitelist=None, ip_blacklist=None):
+    def __init__(
+        self,
+        hs,
+        treq_args={},
+        ip_whitelist=None,
+        ip_blacklist=None,
+        http_proxy=None,
+        https_proxy=None,
+    ):
         """
         Args:
             hs (synapse.server.HomeServer)
@@ -192,6 +201,8 @@ class SimpleHttpClient(object):
                 we may not request.
             ip_whitelist (netaddr.IPSet): The whitelisted IP addresses, that we can
                request if it were otherwise caught in a blacklist.
+            http_proxy (bytes): proxy server to use for http connections. host[:port]
+            https_proxy (bytes): proxy server to use for https connections. host[:port]
         """
         self.hs = hs
 
@@ -236,11 +247,13 @@ class SimpleHttpClient(object):
         # The default context factory in Twisted 14.0.0 (which we require) is
         # BrowserLikePolicyForHTTPS which will do regular cert validation
         # 'like a browser'
-        self.agent = Agent(
+        self.agent = ProxyAgent(
             self.reactor,
             connectTimeout=15,
             contextFactory=self.hs.get_http_client_context_factory(),
             pool=pool,
+            http_proxy=http_proxy,
+            https_proxy=https_proxy,
         )
 
         if self._ip_blacklist:
diff --git a/synapse/http/connectproxyclient.py b/synapse/http/connectproxyclient.py
new file mode 100644
index 0000000000..be7b2ceb8e
--- /dev/null
+++ b/synapse/http/connectproxyclient.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from zope.interface import implementer
+
+from twisted.internet import defer, protocol
+from twisted.internet.error import ConnectError
+from twisted.internet.interfaces import IStreamClientEndpoint
+from twisted.internet.protocol import connectionDone
+from twisted.web import http
+
+logger = logging.getLogger(__name__)
+
+
+class ProxyConnectError(ConnectError):
+    pass
+
+
+@implementer(IStreamClientEndpoint)
+class HTTPConnectProxyEndpoint(object):
+    """An Endpoint implementation which will send a CONNECT request to an http proxy
+
+    Wraps an existing HostnameEndpoint for the proxy.
+
+    When we get the connect() request from the connection pool (via the TLS wrapper),
+    we'll first connect to the proxy endpoint with a ProtocolFactory which will make the
+    CONNECT request. Once that completes, we invoke the protocolFactory which was passed
+    in.
+
+    Args:
+        reactor: the Twisted reactor to use for the connection
+        proxy_endpoint (IStreamClientEndpoint): the endpoint to use to connect to the
+            proxy
+        host (bytes): hostname that we want to CONNECT to
+        port (int): port that we want to connect to
+    """
+
+    def __init__(self, reactor, proxy_endpoint, host, port):
+        self._reactor = reactor
+        self._proxy_endpoint = proxy_endpoint
+        self._host = host
+        self._port = port
+
+    def __repr__(self):
+        return "" % (self._proxy_endpoint,)
+
+    def connect(self, protocolFactory):
+        f = HTTPProxiedClientFactory(self._host, self._port, protocolFactory)
+        d = self._proxy_endpoint.connect(f)
+        # once the tcp socket connects successfully, we need to wait for the
+        # CONNECT to complete.
+        d.addCallback(lambda conn: f.on_connection)
+        return d
+
+
+class HTTPProxiedClientFactory(protocol.ClientFactory):
+    """ClientFactory wrapper that triggers an HTTP proxy CONNECT on connect.
+
+    Once the CONNECT completes, invokes the original ClientFactory to build the
+    HTTP Protocol object and run the rest of the connection.
+
+    Args:
+        dst_host (bytes): hostname that we want to CONNECT to
+        dst_port (int): port that we want to connect to
+        wrapped_factory (protocol.ClientFactory): The original Factory
+    """
+
+    def __init__(self, dst_host, dst_port, wrapped_factory):
+        self.dst_host = dst_host
+        self.dst_port = dst_port
+        self.wrapped_factory = wrapped_factory
+        self.on_connection = defer.Deferred()
+
+    def startedConnecting(self, connector):
+        return self.wrapped_factory.startedConnecting(connector)
+
+    def buildProtocol(self, addr):
+        wrapped_protocol = self.wrapped_factory.buildProtocol(addr)
+
+        return HTTPConnectProtocol(
+            self.dst_host, self.dst_port, wrapped_protocol, self.on_connection
+        )
+
+    def clientConnectionFailed(self, connector, reason):
+        logger.debug("Connection to proxy failed: %s", reason)
+        if not self.on_connection.called:
+            self.on_connection.errback(reason)
+        return self.wrapped_factory.clientConnectionFailed(connector, reason)
+
+    def clientConnectionLost(self, connector, reason):
+        logger.debug("Connection to proxy lost: %s", reason)
+        if not self.on_connection.called:
+            self.on_connection.errback(reason)
+        return self.wrapped_factory.clientConnectionLost(connector, reason)
+
+
+class HTTPConnectProtocol(protocol.Protocol):
+    """Protocol that wraps an existing Protocol to do a CONNECT handshake at connect
+
+    Args:
+        host (bytes): The original HTTP(s) hostname or IPv4 or IPv6 address literal
+            to put in the CONNECT request
+
+        port (int): The original HTTP(s) port to put in the CONNECT request
+
+        wrapped_protocol (interfaces.IProtocol): the original protocol (probably
+            HTTPChannel or TLSMemoryBIOProtocol, but could be anything really)
+
+        connected_deferred (Deferred): a Deferred which will be callbacked with
+            wrapped_protocol when the CONNECT completes
+    """
+
+    def __init__(self, host, port, wrapped_protocol, connected_deferred):
+        self.host = host
+        self.port = port
+        self.wrapped_protocol = wrapped_protocol
+        self.connected_deferred = connected_deferred
+        self.http_setup_client = HTTPConnectSetupClient(self.host, self.port)
+        self.http_setup_client.on_connected.addCallback(self.proxyConnected)
+
+    def connectionMade(self):
+        self.http_setup_client.makeConnection(self.transport)
+
+    def connectionLost(self, reason=connectionDone):
+        if self.wrapped_protocol.connected:
+            self.wrapped_protocol.connectionLost(reason)
+
+        self.http_setup_client.connectionLost(reason)
+
+        if not self.connected_deferred.called:
+            self.connected_deferred.errback(reason)
+
+    def proxyConnected(self, _):
+        self.wrapped_protocol.makeConnection(self.transport)
+
+        self.connected_deferred.callback(self.wrapped_protocol)
+
+        # Get any pending data from the http buf and forward it to the original protocol
+        buf = self.http_setup_client.clearLineBuffer()
+        if buf:
+            self.wrapped_protocol.dataReceived(buf)
+
+    def dataReceived(self, data):
+        # if we've set up the HTTP protocol, we can send the data there
+        if self.wrapped_protocol.connected:
+            return self.wrapped_protocol.dataReceived(data)
+
+        # otherwise, we must still be setting up the connection: send the data to the
+        # setup client
+        return self.http_setup_client.dataReceived(data)
+
+
+class HTTPConnectSetupClient(http.HTTPClient):
+    """HTTPClient protocol to send a CONNECT message for proxies and read the response.
+
+    Args:
+        host (bytes): The hostname to send in the CONNECT message
+        port (int): The port to send in the CONNECT message
+    """
+
+    def __init__(self, host, port):
+        self.host = host
+        self.port = port
+        self.on_connected = defer.Deferred()
+
+    def connectionMade(self):
+        logger.debug("Connected to proxy, sending CONNECT")
+        self.sendCommand(b"CONNECT", b"%s:%d" % (self.host, self.port))
+        self.endHeaders()
+
+    def handleStatus(self, version, status, message):
+        logger.debug("Got Status: %s %s %s", status, message, version)
+        if status != b"200":
+            raise ProxyConnectError("Unexpected status on CONNECT: %s" % status)
+
+    def handleEndHeaders(self):
+        logger.debug("End Headers")
+        self.on_connected.callback(None)
+
+    def handleResponse(self, body):
+        pass
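
To make the layering concrete: the tunnel endpoint wraps a plain endpoint for the proxy, and TLS to the destination is then layered on top. A minimal sketch follows; the hostnames and ports are illustrative, and `ProxyAgent.request()` in the next file performs the same composition for real.

```python
# Minimal sketch of composing the CONNECT tunnel; hostnames are illustrative.
from twisted.internet import reactor
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.web.client import BrowserLikePolicyForHTTPS

from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint

# 1. a plain TCP endpoint for the proxy itself
proxy_endpoint = HostnameEndpoint(reactor, b"proxy.example.com", 8888)

# 2. an endpoint whose connect() first issues "CONNECT test.com:443"
tunnel = HTTPConnectProxyEndpoint(reactor, proxy_endpoint, b"test.com", 443)

# 3. TLS to the *destination* host, layered over the established tunnel
tls_creator = BrowserLikePolicyForHTTPS().creatorForNetloc(b"test.com", 443)
endpoint = wrapClientTLS(tls_creator, tunnel)
```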
diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
new file mode 100644
index 0000000000..332da02a8d
--- /dev/null
+++ b/synapse/http/proxyagent.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+import re
+
+from zope.interface import implementer
+
+from twisted.internet import defer
+from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
+from twisted.python.failure import Failure
+from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase
+from twisted.web.error import SchemeNotSupported
+from twisted.web.iweb import IAgent
+
+from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
+
+logger = logging.getLogger(__name__)
+
+_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")
+
+
+@implementer(IAgent)
+class ProxyAgent(_AgentBase):
+    """An Agent implementation which will use an HTTP proxy if one was requested
+
+    Args:
+        reactor: twisted reactor to place outgoing
+            connections.
+
+        contextFactory (IPolicyForHTTPS): A factory for TLS contexts, to control the
+            verification parameters of OpenSSL.  The default is to use a
+            `BrowserLikePolicyForHTTPS`, so unless you have special
+            requirements you can leave this as-is.
+
+        connectTimeout (float): The amount of time that this Agent will wait
+            for the peer to accept a connection.
+
+        bindAddress (bytes): The local address for client sockets to bind to.
+
+        pool (HTTPConnectionPool|None): connection pool to be used. If None, a
+            non-persistent pool instance will be created.
+    """
+
+    def __init__(
+        self,
+        reactor,
+        contextFactory=BrowserLikePolicyForHTTPS(),
+        connectTimeout=None,
+        bindAddress=None,
+        pool=None,
+        http_proxy=None,
+        https_proxy=None,
+    ):
+        _AgentBase.__init__(self, reactor, pool)
+
+        self._endpoint_kwargs = {}
+        if connectTimeout is not None:
+            self._endpoint_kwargs["timeout"] = connectTimeout
+        if bindAddress is not None:
+            self._endpoint_kwargs["bindAddress"] = bindAddress
+
+        self.http_proxy_endpoint = _http_proxy_endpoint(
+            http_proxy, reactor, **self._endpoint_kwargs
+        )
+
+        self.https_proxy_endpoint = _http_proxy_endpoint(
+            https_proxy, reactor, **self._endpoint_kwargs
+        )
+
+        self._policy_for_https = contextFactory
+        self._reactor = reactor
+
+    def request(self, method, uri, headers=None, bodyProducer=None):
+        """
+        Issue a request to the server indicated by the given uri.
+
+        Supports `http` and `https` schemes.
+
+        An existing connection from the connection pool may be used or a new one may be
+        created.
+
+        See also: twisted.web.iweb.IAgent.request
+
+        Args:
+            method (bytes): The request method to use, such as `GET`, `POST`, etc
+
+            uri (bytes): The location of the resource to request.
+
+            headers (Headers|None): Extra headers to send with the request
+
+            bodyProducer (IBodyProducer|None): An object which can generate bytes to
+                make up the body of this request (for example, the properly encoded
+                contents of a file for a file upload). Or, None if the request is to
+                have no body.
+
+        Returns:
+            Deferred[IResponse]: completes when the header of the response has
+                 been received (regardless of the response status code).
+        """
+        uri = uri.strip()
+        if not _VALID_URI.match(uri):
+            raise ValueError("Invalid URI {!r}".format(uri))
+
+        parsed_uri = URI.fromBytes(uri)
+        pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
+        request_path = parsed_uri.originForm
+
+        if parsed_uri.scheme == b"http" and self.http_proxy_endpoint:
+            # Cache *all* connections under the same key, since we are only
+            # connecting to a single destination, the proxy:
+            pool_key = ("http-proxy", self.http_proxy_endpoint)
+            endpoint = self.http_proxy_endpoint
+            request_path = uri
+        elif parsed_uri.scheme == b"https" and self.https_proxy_endpoint:
+            endpoint = HTTPConnectProxyEndpoint(
+                self._reactor,
+                self.https_proxy_endpoint,
+                parsed_uri.host,
+                parsed_uri.port,
+            )
+        else:
+            # not using a proxy
+            endpoint = HostnameEndpoint(
+                self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
+            )
+
+        logger.debug("Requesting %s via %s", uri, endpoint)
+
+        if parsed_uri.scheme == b"https":
+            tls_connection_creator = self._policy_for_https.creatorForNetloc(
+                parsed_uri.host, parsed_uri.port
+            )
+            endpoint = wrapClientTLS(tls_connection_creator, endpoint)
+        elif parsed_uri.scheme == b"http":
+            pass
+        else:
+            return defer.fail(
+                Failure(
+                    SchemeNotSupported("Unsupported scheme: %r" % (parsed_uri.scheme,))
+                )
+            )
+
+        return self._requestWithEndpoint(
+            pool_key, endpoint, method, parsed_uri, headers, bodyProducer, request_path
+        )
+
+
+def _http_proxy_endpoint(proxy, reactor, **kwargs):
+    """Parses an http proxy setting and returns an endpoint for the proxy
+
+    Args:
+        proxy (bytes|None):  the proxy setting
+        reactor: reactor to be used to connect to the proxy
+        kwargs: other args to be passed to HostnameEndpoint
+
+    Returns:
+        interfaces.IStreamClientEndpoint|None: endpoint to use to connect to the proxy,
+            or None
+    """
+    if proxy is None:
+        return None
+
+    # currently we only support hostname:port. Some apps also support
+    # protocol://[:port], which allows a way of requiring a TLS connection to the
+    # proxy.
+
+    host, port = parse_host_port(proxy, default_port=1080)
+    return HostnameEndpoint(reactor, host, port, **kwargs)
+
+
+def parse_host_port(hostport, default_port=None):
+    # could have sworn we had one of these somewhere else...
+    if b":" in hostport:
+        host, port = hostport.rsplit(b":", 1)
+        try:
+            port = int(port)
+            return host, port
+        except ValueError:
+            # the thing after the : wasn't a valid port; presumably this is an
+            # IPv6 address.
+            pass
+
+    return hostport, default_port
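
For reference, the parsing behaviour this gives (both cases are exercised by the tests further down):

```python
# Expected behaviour of parse_host_port, matching the tests below:
parse_host_port(b"proxy.com:8888")                # -> (b"proxy.com", 8888)
parse_host_port(b"proxy.com", default_port=1080)  # -> (b"proxy.com", 1080)
```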
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 7dde2ad055..e994037be6 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -103,7 +103,7 @@ class HttpPusher(object):
         if "url" not in self.data:
             raise PusherConfigException("'url' required in data for HTTP pusher")
         self.url = self.data["url"]
-        self.http_client = hs.get_simple_http_client()
+        self.http_client = hs.get_proxied_http_client()
         self.data_minus_url = {}
         self.data_minus_url.update(self.data)
         del self.data_minus_url["url"]
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 00a7dd6d09..24a0ce74f2 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -381,7 +381,7 @@ class CasTicketServlet(RestServlet):
         self.cas_displayname_attribute = hs.config.cas_displayname_attribute
         self.cas_required_attributes = hs.config.cas_required_attributes
         self._sso_auth_handler = SSOAuthHandler(hs)
-        self._http_client = hs.get_simple_http_client()
+        self._http_client = hs.get_proxied_http_client()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 5a25b6b3fc..531d923f76 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -74,6 +74,8 @@ class PreviewUrlResource(DirectServeResource):
             treq_args={"browser_like_redirects": True},
             ip_whitelist=hs.config.url_preview_ip_range_whitelist,
             ip_blacklist=hs.config.url_preview_ip_range_blacklist,
+            http_proxy=os.getenv("http_proxy"),
+            https_proxy=os.getenv("HTTPS_PROXY"),
         )
         self.media_repo = media_repo
         self.primary_base_path = media_repo.primary_base_path
diff --git a/synapse/server.py b/synapse/server.py
index 0b81af646c..f8aeebcff8 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -23,6 +23,7 @@
 # Imports required for the default HomeServer() implementation
 import abc
 import logging
+import os
 
 from twisted.enterprise import adbapi
 from twisted.mail.smtp import sendmail
@@ -168,6 +169,7 @@ class HomeServer(object):
         "filtering",
         "http_client_context_factory",
         "simple_http_client",
+        "proxied_http_client",
         "media_repository",
         "media_repository_resource",
         "federation_transport_client",
@@ -311,6 +313,13 @@ class HomeServer(object):
     def build_simple_http_client(self):
         return SimpleHttpClient(self)
 
+    def build_proxied_http_client(self):
+        return SimpleHttpClient(
+            self,
+            http_proxy=os.getenv("http_proxy"),
+            https_proxy=os.getenv("HTTPS_PROXY"),
+        )
+
     def build_room_creation_handler(self):
         return RoomCreationHandler(self)
 
diff --git a/synapse/server.pyi b/synapse/server.pyi
index 83d1f11283..b5e0b57095 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -12,6 +12,7 @@ import synapse.handlers.message
 import synapse.handlers.room
 import synapse.handlers.room_member
 import synapse.handlers.set_password
+import synapse.http.client
 import synapse.rest.media.v1.media_repository
 import synapse.server_notices.server_notices_manager
 import synapse.server_notices.server_notices_sender
@@ -38,6 +39,14 @@ class HomeServer(object):
         pass
     def get_state_resolution_handler(self) -> synapse.state.StateResolutionHandler:
         pass
+    def get_simple_http_client(self) -> synapse.http.client.SimpleHttpClient:
+        """Fetch an HTTP client implementation which doesn't do any blacklisting
+        or support any HTTP_PROXY settings"""
+        pass
+    def get_proxied_http_client(self) -> synapse.http.client.SimpleHttpClient:
+        """Fetch an HTTP client implementation which doesn't do any blacklisting
+        but does support HTTP_PROXY settings"""
+        pass
     def get_deactivate_account_handler(
         self,
     ) -> synapse.handlers.deactivate_account.DeactivateAccountHandler:
diff --git a/tests/http/__init__.py b/tests/http/__init__.py
index 2d5dba6464..2096ba3c91 100644
--- a/tests/http/__init__.py
+++ b/tests/http/__init__.py
@@ -20,6 +20,23 @@ from zope.interface import implementer
 from OpenSSL import SSL
 from OpenSSL.SSL import Connection
 from twisted.internet.interfaces import IOpenSSLServerConnectionCreator
+from twisted.internet.ssl import Certificate, trustRootFromCertificates
+from twisted.web.client import BrowserLikePolicyForHTTPS  # noqa: F401
+from twisted.web.iweb import IPolicyForHTTPS  # noqa: F401
+
+
+def get_test_https_policy():
+    """Get a test IPolicyForHTTPS which trusts the test CA cert
+
+    Returns:
+        IPolicyForHTTPS
+    """
+    ca_file = get_test_ca_cert_file()
+    with open(ca_file) as stream:
+        content = stream.read()
+    cert = Certificate.loadPEM(content)
+    trust_root = trustRootFromCertificates([cert])
+    return BrowserLikePolicyForHTTPS(trustRoot=trust_root)
 
 
 def get_test_ca_cert_file():
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
index 71d7025264..cfcd98ff7d 100644
--- a/tests/http/federation/test_matrix_federation_agent.py
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -124,19 +124,24 @@ class MatrixFederationAgentTests(unittest.TestCase):
             FakeTransport(client_protocol, self.reactor, server_tls_protocol)
         )
 
+        # grab a hold of the TLS connection, in case it gets torn down
+        server_tls_connection = server_tls_protocol._tlsConnection
+
+        # fish the test server back out of the server-side TLS protocol.
+        http_protocol = server_tls_protocol.wrappedProtocol
+
         # give the reactor a pump to get the TLS juices flowing.
         self.reactor.pump((0.1,))
 
         # check the SNI
-        server_name = server_tls_protocol._tlsConnection.get_servername()
+        server_name = server_tls_connection.get_servername()
         self.assertEqual(
             server_name,
             expected_sni,
             "Expected SNI %s but got %s" % (expected_sni, server_name),
         )
 
-        # fish the test server back out of the server-side TLS protocol.
-        return server_tls_protocol.wrappedProtocol
+        return http_protocol
 
     @defer.inlineCallbacks
     def _make_get_request(self, uri):
diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py
new file mode 100644
index 0000000000..22abf76515
--- /dev/null
+++ b/tests/http/test_proxyagent.py
@@ -0,0 +1,334 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+import treq
+
+from twisted.internet import interfaces  # noqa: F401
+from twisted.internet.protocol import Factory
+from twisted.protocols.tls import TLSMemoryBIOFactory
+from twisted.web.http import HTTPChannel
+
+from synapse.http.proxyagent import ProxyAgent
+
+from tests.http import TestServerTLSConnectionFactory, get_test_https_policy
+from tests.server import FakeTransport, ThreadedMemoryReactorClock
+from tests.unittest import TestCase
+
+logger = logging.getLogger(__name__)
+
+HTTPFactory = Factory.forProtocol(HTTPChannel)
+
+
+class MatrixFederationAgentTests(TestCase):
+    def setUp(self):
+        self.reactor = ThreadedMemoryReactorClock()
+
+    def _make_connection(
+        self, client_factory, server_factory, ssl=False, expected_sni=None
+    ):
+        """Builds a test server, and completes the outgoing client connection
+
+        Args:
+            client_factory (interfaces.IProtocolFactory): the factory that the
+                application is trying to use to make the outbound connection. We will
+                invoke it to build the client Protocol
+
+            server_factory (interfaces.IProtocolFactory): a factory to build the
+                server-side protocol
+
+            ssl (bool): If true, we will expect an ssl connection and wrap
+                server_factory with a TLSMemoryBIOFactory
+
+            expected_sni (bytes|None): the expected SNI value
+
+        Returns:
+            IProtocol: the server Protocol returned by server_factory
+        """
+        if ssl:
+            server_factory = _wrap_server_factory_for_tls(server_factory)
+
+        server_protocol = server_factory.buildProtocol(None)
+
+        # now, tell the client protocol factory to build the client protocol,
+        # and wire the output of said protocol up to the server via
+        # a FakeTransport.
+        #
+        # Normally this would be done by the TCP socket code in Twisted, but we are
+        # stubbing that out here.
+        client_protocol = client_factory.buildProtocol(None)
+        client_protocol.makeConnection(
+            FakeTransport(server_protocol, self.reactor, client_protocol)
+        )
+
+        # tell the server protocol to send its stuff back to the client, too
+        server_protocol.makeConnection(
+            FakeTransport(client_protocol, self.reactor, server_protocol)
+        )
+
+        if ssl:
+            http_protocol = server_protocol.wrappedProtocol
+            tls_connection = server_protocol._tlsConnection
+        else:
+            http_protocol = server_protocol
+            tls_connection = None
+
+        # give the reactor a pump to get the TLS juices flowing (if needed)
+        self.reactor.advance(0)
+
+        if expected_sni is not None:
+            server_name = tls_connection.get_servername()
+            self.assertEqual(
+                server_name,
+                expected_sni,
+                "Expected SNI %s but got %s" % (expected_sni, server_name),
+            )
+
+        return http_protocol
+
+    def test_http_request(self):
+        agent = ProxyAgent(self.reactor)
+
+        self.reactor.lookups["test.com"] = "1.2.3.4"
+        d = agent.request(b"GET", b"http://test.com")
+
+        # there should be a pending TCP connection
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, "1.2.3.4")
+        self.assertEqual(port, 80)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory, _get_test_protocol_factory()
+        )
+
+        # the FakeTransport is async, so we need to pump the reactor
+        self.reactor.advance(0)
+
+        # now there should be a pending request
+        self.assertEqual(len(http_server.requests), 1)
+
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b"GET")
+        self.assertEqual(request.path, b"/")
+        self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
+        request.write(b"result")
+        request.finish()
+
+        self.reactor.advance(0)
+
+        resp = self.successResultOf(d)
+        body = self.successResultOf(treq.content(resp))
+        self.assertEqual(body, b"result")
+
+    def test_https_request(self):
+        agent = ProxyAgent(self.reactor, contextFactory=get_test_https_policy())
+
+        self.reactor.lookups["test.com"] = "1.2.3.4"
+        d = agent.request(b"GET", b"https://test.com/abc")
+
+        # there should be a pending TCP connection
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, "1.2.3.4")
+        self.assertEqual(port, 443)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory,
+            _get_test_protocol_factory(),
+            ssl=True,
+            expected_sni=b"test.com",
+        )
+
+        # the FakeTransport is async, so we need to pump the reactor
+        self.reactor.advance(0)
+
+        # now there should be a pending request
+        self.assertEqual(len(http_server.requests), 1)
+
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b"GET")
+        self.assertEqual(request.path, b"/abc")
+        self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
+        request.write(b"result")
+        request.finish()
+
+        self.reactor.advance(0)
+
+        resp = self.successResultOf(d)
+        body = self.successResultOf(treq.content(resp))
+        self.assertEqual(body, b"result")
+
+    def test_http_request_via_proxy(self):
+        agent = ProxyAgent(self.reactor, http_proxy=b"proxy.com:8888")
+
+        self.reactor.lookups["proxy.com"] = "1.2.3.5"
+        d = agent.request(b"GET", b"http://test.com")
+
+        # there should be a pending TCP connection
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, "1.2.3.5")
+        self.assertEqual(port, 8888)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(
+            client_factory, _get_test_protocol_factory()
+        )
+
+        # the FakeTransport is async, so we need to pump the reactor
+        self.reactor.advance(0)
+
+        # now there should be a pending request
+        self.assertEqual(len(http_server.requests), 1)
+
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b"GET")
+        self.assertEqual(request.path, b"http://test.com")
+        self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
+        request.write(b"result")
+        request.finish()
+
+        self.reactor.advance(0)
+
+        resp = self.successResultOf(d)
+        body = self.successResultOf(treq.content(resp))
+        self.assertEqual(body, b"result")
+
+    def test_https_request_via_proxy(self):
+        agent = ProxyAgent(
+            self.reactor,
+            contextFactory=get_test_https_policy(),
+            https_proxy=b"proxy.com",
+        )
+
+        self.reactor.lookups["proxy.com"] = "1.2.3.5"
+        d = agent.request(b"GET", b"https://test.com/abc")
+
+        # there should be a pending TCP connection
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, "1.2.3.5")
+        self.assertEqual(port, 1080)
+
+        # make a test HTTP server, and wire up the client
+        proxy_server = self._make_connection(
+            client_factory, _get_test_protocol_factory()
+        )
+
+        # fish the transports back out so that we can do the old switcheroo
+        s2c_transport = proxy_server.transport
+        client_protocol = s2c_transport.other
+        c2s_transport = client_protocol.transport
+
+        # the FakeTransport is async, so we need to pump the reactor
+        self.reactor.advance(0)
+
+        # now there should be a pending CONNECT request
+        self.assertEqual(len(proxy_server.requests), 1)
+
+        request = proxy_server.requests[0]
+        self.assertEqual(request.method, b"CONNECT")
+        self.assertEqual(request.path, b"test.com:443")
+
+        # tell the proxy server not to close the connection
+        proxy_server.persistent = True
+
+        # this just stops the http Request trying to do a chunked response
+        # request.setHeader(b"Content-Length", b"0")
+        request.finish()
+
+        # now we can replace the proxy channel with a new, SSL-wrapped HTTP channel
+        ssl_factory = _wrap_server_factory_for_tls(_get_test_protocol_factory())
+        ssl_protocol = ssl_factory.buildProtocol(None)
+        http_server = ssl_protocol.wrappedProtocol
+
+        ssl_protocol.makeConnection(
+            FakeTransport(client_protocol, self.reactor, ssl_protocol)
+        )
+        c2s_transport.other = ssl_protocol
+
+        self.reactor.advance(0)
+
+        server_name = ssl_protocol._tlsConnection.get_servername()
+        expected_sni = b"test.com"
+        self.assertEqual(
+            server_name,
+            expected_sni,
+            "Expected SNI %s but got %s" % (expected_sni, server_name),
+        )
+
+        # now there should be a pending request
+        self.assertEqual(len(http_server.requests), 1)
+
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b"GET")
+        self.assertEqual(request.path, b"/abc")
+        self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
+        request.write(b"result")
+        request.finish()
+
+        self.reactor.advance(0)
+
+        resp = self.successResultOf(d)
+        body = self.successResultOf(treq.content(resp))
+        self.assertEqual(body, b"result")
+
+
+def _wrap_server_factory_for_tls(factory, sanlist=None):
+    """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory
+
+    The resultant factory will create a TLS server which presents a certificate
+    signed by our test CA, valid for the domains in `sanlist`
+
+    Args:
+        factory (interfaces.IProtocolFactory): protocol factory to wrap
+        sanlist (iterable[bytes]): list of domains the cert should be valid for
+
+    Returns:
+        interfaces.IProtocolFactory
+    """
+    if sanlist is None:
+        sanlist = [b"DNS:test.com"]
+
+    connection_creator = TestServerTLSConnectionFactory(sanlist=sanlist)
+    return TLSMemoryBIOFactory(
+        connection_creator, isClient=False, wrappedFactory=factory
+    )
+
+
+def _get_test_protocol_factory():
+    """Get a protocol Factory which will build an HTTPChannel
+
+    Returns:
+        interfaces.IProtocolFactory
+    """
+    server_factory = Factory.forProtocol(HTTPChannel)
+
+    # Request.finish expects the factory to have a 'log' method.
+    server_factory.log = _log_request
+
+    return server_factory
+
+
+def _log_request(request):
+    """Implements Factory.log, which is expected by Request.finish"""
+    logger.info("Completed request %s", request)
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index 8ce6bb62da..af2327fb66 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -50,7 +50,7 @@ class HTTPPusherTests(HomeserverTestCase):
         config = self.default_config()
         config["start_pushers"] = True
 
-        hs = self.setup_test_homeserver(config=config, simple_http_client=m)
+        hs = self.setup_test_homeserver(config=config, proxied_http_client=m)
 
         return hs
 
diff --git a/tests/server.py b/tests/server.py
index 469efb4edb..f878aeaada 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -395,11 +395,24 @@ class FakeTransport(object):
             self.disconnecting = True
             if self._protocol:
                 self._protocol.connectionLost(reason)
-            self.disconnected = True
+
+            # if we still have data to write, delay until that is done
+            if self.buffer:
+                logger.info(
+                    "FakeTransport: Delaying disconnect until buffer is flushed"
+                )
+            else:
+                self.disconnected = True
 
     def abortConnection(self):
         logger.info("FakeTransport: abortConnection()")
-        self.loseConnection()
+
+        if not self.disconnecting:
+            self.disconnecting = True
+            if self._protocol:
+                self._protocol.connectionLost(None)
+
+        self.disconnected = True
 
     def pauseProducing(self):
         if not self.producer:
@@ -430,6 +443,9 @@ class FakeTransport(object):
             self._reactor.callLater(0.0, _produce)
 
     def write(self, byt):
+        if self.disconnecting:
+            raise Exception("Writing to disconnecting FakeTransport")
+
         self.buffer = self.buffer + byt
 
         # always actually do the write asynchronously. Some protocols (notably the
@@ -474,6 +490,10 @@ class FakeTransport(object):
         if self.buffer and self.autoflush:
             self._reactor.callLater(0.0, self.flush)
 
+        if not self.buffer and self.disconnecting:
+            logger.info("FakeTransport: Buffer now empty, completing disconnect")
+            self.disconnected = True
+
 
 def connect_client(reactor: IReactorTCP, client_id: int) -> AccumulatingProtocol:
     """
-- 
cgit 1.4.1

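Putting the pieces together, driving the new agent directly looks roughly like the sketch below, mirroring `test_http_request_via_proxy` above; the proxy address is an assumption.

```python
# Rough sketch of using ProxyAgent directly, mirroring
# test_http_request_via_proxy above. The proxy address is illustrative.
from twisted.internet import reactor

from synapse.http.proxyagent import ProxyAgent

agent = ProxyAgent(reactor, http_proxy=b"proxy.example.com:8888")

# Plain-http requests are sent to the proxy with an absolute-form URI;
# https requests get a CONNECT tunnel first (see HTTPConnectProxyEndpoint).
d = agent.request(b"GET", b"http://test.com")
d.addCallback(lambda response: print("status:", response.code))
```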

From 67a65918ad7723400f25339ef3f4447e7b3dc1b6 Mon Sep 17 00:00:00 2001
From: Jason Robinson 
Date: Fri, 1 Nov 2019 16:45:09 +0200
Subject: Add contributor docs for using the provided linters script (#6164)

* Add lint dependencies black, flake8 and isort

These are required when running the `lint.sh` dev scripts.

Signed-off-by: Jason Robinson 

* Add contributor docs for using the provided linters script

Also add a note to the pull request template, to avoid build failures due
to people not knowing that the linters need to be run.

Signed-off-by: Jason Robinson 

* Fix mention of linter errors correction

Co-Authored-By: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>

* Add mention for installing linter dependencies

Co-Authored-By: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>

* Remove linters from python dependencies as per PR review

Signed-off-by: Jason Robinson 
---
 .github/PULL_REQUEST_TEMPLATE.md |  1 +
 CONTRIBUTING.rst                 | 11 +++++++++++
 changelog.d/6164.doc             |  1 +
 3 files changed, 13 insertions(+)
 create mode 100644 changelog.d/6164.doc

(limited to 'changelog.d')

diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 1ead0d0030..8939fda67d 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -5,3 +5,4 @@
 * [ ] Pull request is based on the develop branch
 * [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#changelog)
 * [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#sign-off)
+* [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#code-style))
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 2fb3a95949..df81f6e54f 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -70,6 +70,17 @@ Please ensure your changes match the cosmetic style of the existing project,
 and **never** mix cosmetic and functional changes in the same commit, as it
 makes it horribly hard to review otherwise.
 
+Before doing a commit, ensure the changes you've made don't produce
+linting errors. You can do this by running the linters as follows. Be sure to
+commit any files that were corrected.
+
+::
+    # Install the dependencies
+    pip install -U black flake8 isort
+    
+    # Run the linter script
+    ./scripts-dev/lint.sh
+
 Changelog
 ~~~~~~~~~
 
diff --git a/changelog.d/6164.doc b/changelog.d/6164.doc
new file mode 100644
index 0000000000..f9395b02b3
--- /dev/null
+++ b/changelog.d/6164.doc
@@ -0,0 +1 @@
+Contributor documentation now mentions the script to run linters.
-- 
cgit 1.4.1


From c6516adbe03a0acdd614ba6eb9d6f447dd4259e9 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 1 Nov 2019 16:19:09 +0000
Subject: Factor out an _AsyncEventContextImpl (#6298)

The intention here is to make it clearer which fields we can expect to be
populated when: notably, that the _event_type etc aren't used for the
synchronous impl of EventContext.
---
 changelog.d/6298.misc          |   1 +
 synapse/events/snapshot.py     | 107 ++++++++++++++++-------------------------
 synapse/handlers/federation.py |  38 +++++++--------
 tests/test_federation.py       |   4 +-
 4 files changed, 65 insertions(+), 85 deletions(-)
 create mode 100644 changelog.d/6298.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6298.misc b/changelog.d/6298.misc
new file mode 100644
index 0000000000..d4190730b2
--- /dev/null
+++ b/changelog.d/6298.misc
@@ -0,0 +1 @@
+Refactor EventContext for clarity.
\ No newline at end of file
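The pattern being moved into `_AsyncEventContextImpl` — memoise a single fetch Deferred so that concurrent callers share one database round-trip — is easy to miss in the diff. A stripped-down sketch of it (not Synapse's actual API; `store.fetch_state` is a stand-in name):

```python
# Stripped-down sketch (not Synapse's API) of the memoised-Deferred pattern:
# the first caller kicks off the fetch; later callers yield on the same
# Deferred instead of re-fetching from the database.
from twisted.internet import defer


class LazyStateHolder(object):
    def __init__(self):
        self._fetching = None  # Deferred; set once the fetch has started
        self._state = None

    @defer.inlineCallbacks
    def get_state(self, store):
        if self._fetching is None:
            self._fetching = self._fill_out_state(store)
        yield self._fetching
        return self._state

    @defer.inlineCallbacks
    def _fill_out_state(self, store):
        # `store.fetch_state` stands in for the real DB lookup
        self._state = yield store.fetch_state()
```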
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 27cd8a63ff..a269de5482 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -37,9 +37,6 @@ class EventContext:
         delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
             (type, state_key) -> event_id. ``None`` for an outlier.
 
-        prev_state_events (?): XXX: is this ever set to anything other than
-            the empty list?
-
         app_service: FIXME
 
         _current_state_ids (dict[(str, str), str]|None):
@@ -51,36 +48,16 @@ class EventContext:
             The current state map excluding the current event. None if outlier
             or we haven't fetched the state from DB yet.
             (type, state_key) -> event_id
-
-        _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
-            been calculated. None if we haven't started calculating yet
-
-        _event_type (str): The type of the event the context is associated with.
-            Only set when state has not been fetched yet.
-
-        _event_state_key (str|None): The state_key of the event the context is
-            associated with. Only set when state has not been fetched yet.
-
-        _prev_state_id (str|None): If the event associated with the context is
-            a state event, then `_prev_state_id` is the event_id of the state
-            that was replaced.
-            Only set when state has not been fetched yet.
     """
 
     state_group = attr.ib(default=None)
     rejected = attr.ib(default=False)
     prev_group = attr.ib(default=None)
     delta_ids = attr.ib(default=None)
-    prev_state_events = attr.ib(default=attr.Factory(list))
     app_service = attr.ib(default=None)
 
-    _current_state_ids = attr.ib(default=None)
     _prev_state_ids = attr.ib(default=None)
-    _prev_state_id = attr.ib(default=None)
-
-    _event_type = attr.ib(default=None)
-    _event_state_key = attr.ib(default=None)
-    _fetching_state_deferred = attr.ib(default=None)
+    _current_state_ids = attr.ib(default=None)
 
     @staticmethod
     def with_state(
@@ -90,7 +67,6 @@ class EventContext:
             current_state_ids=current_state_ids,
             prev_state_ids=prev_state_ids,
             state_group=state_group,
-            fetching_state_deferred=defer.succeed(None),
             prev_group=prev_group,
             delta_ids=delta_ids,
         )
@@ -125,7 +101,6 @@ class EventContext:
             "rejected": self.rejected,
             "prev_group": self.prev_group,
             "delta_ids": _encode_state_dict(self.delta_ids),
-            "prev_state_events": self.prev_state_events,
             "app_service_id": self.app_service.id if self.app_service else None,
         }
 
@@ -141,7 +116,7 @@ class EventContext:
         Returns:
             EventContext
         """
-        context = EventContext(
+        context = _AsyncEventContextImpl(
             # We use the state_group and prev_state_id stuff to pull the
             # current_state_ids out of the DB and construct prev_state_ids.
             prev_state_id=input["prev_state_id"],
@@ -151,7 +126,6 @@ class EventContext:
             prev_group=input["prev_group"],
             delta_ids=_decode_state_dict(input["delta_ids"]),
             rejected=input["rejected"],
-            prev_state_events=input["prev_state_events"],
         )
 
         app_service_id = input["app_service_id"]
@@ -170,14 +144,7 @@ class EventContext:
                 Maps a (type, state_key) to the event ID of the state event matching
                 this tuple.
         """
-
-        if not self._fetching_state_deferred:
-            self._fetching_state_deferred = run_in_background(
-                self._fill_out_state, store
-            )
-
-        yield make_deferred_yieldable(self._fetching_state_deferred)
-
+        yield self._ensure_fetched(store)
         return self._current_state_ids
 
     @defer.inlineCallbacks
@@ -190,14 +157,7 @@ class EventContext:
                 Maps a (type, state_key) to the event ID of the state event matching
                 this tuple.
         """
-
-        if not self._fetching_state_deferred:
-            self._fetching_state_deferred = run_in_background(
-                self._fill_out_state, store
-            )
-
-        yield make_deferred_yieldable(self._fetching_state_deferred)
-
+        yield self._ensure_fetched(store)
         return self._prev_state_ids
 
     def get_cached_current_state_ids(self):
@@ -211,6 +171,44 @@ class EventContext:
 
         return self._current_state_ids
 
+    def _ensure_fetched(self, store):
+        return defer.succeed(None)
+
+
+@attr.s(slots=True)
+class _AsyncEventContextImpl(EventContext):
+    """
+    An implementation of EventContext which fetches _current_state_ids and
+    _prev_state_ids from the database on demand.
+
+    Attributes:
+
+        _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
+            been calculated. None if we haven't started calculating yet
+
+        _event_type (str): The type of the event the context is associated with.
+
+        _event_state_key (str): The state_key of the event the context is
+            associated with.
+
+        _prev_state_id (str|None): If the event associated with the context is
+            a state event, then `_prev_state_id` is the event_id of the state
+            that was replaced.
+    """
+
+    _prev_state_id = attr.ib(default=None)
+    _event_type = attr.ib(default=None)
+    _event_state_key = attr.ib(default=None)
+    _fetching_state_deferred = attr.ib(default=None)
+
+    def _ensure_fetched(self, store):
+        if not self._fetching_state_deferred:
+            self._fetching_state_deferred = run_in_background(
+                self._fill_out_state, store
+            )
+
+        return make_deferred_yieldable(self._fetching_state_deferred)
+
     @defer.inlineCallbacks
     def _fill_out_state(self, store):
         """Called to populate the _current_state_ids and _prev_state_ids
@@ -228,27 +226,6 @@ class EventContext:
         else:
             self._prev_state_ids = self._current_state_ids
 
-    @defer.inlineCallbacks
-    def update_state(
-        self, state_group, prev_state_ids, current_state_ids, prev_group, delta_ids
-    ):
-        """Replace the state in the context
-        """
-
-        # We need to make sure we wait for any ongoing fetching of state
-        # to complete so that the updated state doesn't get clobbered
-        if self._fetching_state_deferred:
-            yield make_deferred_yieldable(self._fetching_state_deferred)
-
-        self.state_group = state_group
-        self._prev_state_ids = prev_state_ids
-        self.prev_group = prev_group
-        self._current_state_ids = current_state_ids
-        self.delta_ids = delta_ids
-
-        # We need to ensure that that we've marked as having fetched the state
-        self._fetching_state_deferred = defer.succeed(None)
-
 
 def _encode_state_dict(state_dict):
     """Since dicts of (type, state_key) -> event_id cannot be serialized in
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index dab6be9573..8cafcfdab0 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -45,6 +45,7 @@ from synapse.api.errors import (
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
 from synapse.crypto.event_signing import compute_event_signature
 from synapse.event_auth import auth_types_for_event
+from synapse.events.snapshot import EventContext
 from synapse.events.validator import EventValidator
 from synapse.logging.context import (
     make_deferred_yieldable,
@@ -1871,14 +1872,7 @@ class FederationHandler(BaseHandler):
                 if c and c.type == EventTypes.Create:
                     auth_events[(c.type, c.state_key)] = c
 
-        try:
-            yield self.do_auth(origin, event, context, auth_events=auth_events)
-        except AuthError as e:
-            logger.warning(
-                "[%s %s] Rejecting: %s", event.room_id, event.event_id, e.msg
-            )
-
-            context.rejected = RejectedReason.AUTH_ERROR
+        context = yield self.do_auth(origin, event, context, auth_events=auth_events)
 
         if not context.rejected:
             yield self._check_for_soft_fail(event, state, backfilled)
@@ -2047,12 +2041,12 @@ class FederationHandler(BaseHandler):
 
                 Also NB that this function adds entries to it.
         Returns:
-            defer.Deferred[None]
+            defer.Deferred[EventContext]: updated context object
         """
         room_version = yield self.store.get_room_version(event.room_id)
 
         try:
-            yield self._update_auth_events_and_context_for_auth(
+            context = yield self._update_auth_events_and_context_for_auth(
                 origin, event, context, auth_events
             )
         except Exception:
@@ -2070,7 +2064,9 @@ class FederationHandler(BaseHandler):
             event_auth.check(room_version, event, auth_events=auth_events)
         except AuthError as e:
             logger.warning("Failed auth resolution for %r because %s", event, e)
-            raise e
+            context.rejected = RejectedReason.AUTH_ERROR
+
+        return context
 
     @defer.inlineCallbacks
     def _update_auth_events_and_context_for_auth(
@@ -2094,7 +2090,7 @@ class FederationHandler(BaseHandler):
             auth_events (dict[(str, str)->synapse.events.EventBase]):
 
         Returns:
-            defer.Deferred[None]
+            defer.Deferred[EventContext]: updated context
         """
         event_auth_events = set(event.auth_event_ids())
 
@@ -2133,7 +2129,7 @@ class FederationHandler(BaseHandler):
                     # The other side isn't around or doesn't implement the
                     # endpoint, so lets just bail out.
                     logger.info("Failed to get event auth from remote: %s", e)
-                    return
+                    return context
 
                 seen_remotes = yield self.store.have_seen_events(
                     [e.event_id for e in remote_auth_chain]
@@ -2174,7 +2170,7 @@ class FederationHandler(BaseHandler):
 
         if event.internal_metadata.is_outlier():
             logger.info("Skipping auth_event fetch for outlier")
-            return
+            return context
 
         # FIXME: Assumes we have and stored all the state for all the
         # prev_events
@@ -2183,7 +2179,7 @@ class FederationHandler(BaseHandler):
         )
 
         if not different_auth:
-            return
+            return context
 
         logger.info(
             "auth_events refers to events which are not in our calculated auth "
@@ -2230,10 +2226,12 @@ class FederationHandler(BaseHandler):
 
             auth_events.update(new_state)
 
-            yield self._update_context_for_auth_events(
+            context = yield self._update_context_for_auth_events(
                 event, context, auth_events, event_key
             )
 
+        return context
+
     @defer.inlineCallbacks
     def _update_context_for_auth_events(self, event, context, auth_events, event_key):
         """Update the state_ids in an event context after auth event resolution,
@@ -2242,14 +2240,16 @@ class FederationHandler(BaseHandler):
         Args:
             event (Event): The event we're handling the context for
 
-            context (synapse.events.snapshot.EventContext): event context
-                to be updated
+            context (synapse.events.snapshot.EventContext): initial event context
 
             auth_events (dict[(str, str)->str]): Events to update in the event
                 context.
 
             event_key ((str, str)): (type, state_key) for the current event.
                 this will not be included in the current_state in the context.
+
+        Returns:
+            Deferred[EventContext]: new event context
         """
         state_updates = {
             k: a.event_id for k, a in iteritems(auth_events) if k != event_key
@@ -2274,7 +2274,7 @@ class FederationHandler(BaseHandler):
             current_state_ids=current_state_ids,
         )
 
-        yield context.update_state(
+        return EventContext.with_state(
             state_group=state_group,
             current_state_ids=current_state_ids,
             prev_state_ids=prev_state_ids,
diff --git a/tests/test_federation.py b/tests/test_federation.py
index d1acb16f30..7d82b58466 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -59,7 +59,9 @@ class MessageAcceptTests(unittest.TestCase):
         )
 
         self.handler = self.homeserver.get_handlers().federation_handler
-        self.handler.do_auth = lambda *a, **b: succeed(True)
+        self.handler.do_auth = lambda origin, event, context, auth_events: succeed(
+            context
+        )
         self.client = self.homeserver.get_federation_client()
         self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed(
             pdus
-- 
cgit 1.4.1
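
The shape of this refactor is a synchronous base class whose `_ensure_fetched`
is a no-op, plus an async subclass that memoizes a single fill-out operation so
concurrent readers share one database fetch. A stripped-down sketch of the same
pattern in plain asyncio (names and the fake store are illustrative, not
Synapse's actual API):

```python
import asyncio


class Context:
    """Synchronous flavour: state is supplied up front."""

    def __init__(self, state=None):
        self._state = state

    async def _ensure_fetched(self, store):
        return None  # nothing to do; state is already populated

    async def get_state(self, store):
        await self._ensure_fetched(store)
        return self._state


class AsyncContext(Context):
    """Lazy flavour: fetch state from the store on first use only."""

    def __init__(self):
        super().__init__()
        self._fetching = None  # memoized task, so we fetch at most once

    async def _ensure_fetched(self, store):
        if self._fetching is None:
            self._fetching = asyncio.ensure_future(self._fill_out(store))
        await self._fetching

    async def _fill_out(self, store):
        self._state = await store.load_state()


class FakeStore:
    async def load_state(self):
        await asyncio.sleep(0)  # pretend to hit the database
        return {("m.room.create", ""): "$create_event_id"}


async def demo():
    ctx = AsyncContext()
    store = FakeStore()
    # Concurrent readers share the single memoized fetch.
    a, b = await asyncio.gather(ctx.get_state(store), ctx.get_state(store))
    assert a is b


asyncio.run(demo())
```

The memoized `_fetching` task plays the role `_fetching_state_deferred` plays
in the real `_AsyncEventContextImpl`.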


From 416c7baee64e0d7145c2447c51326af1c901a176 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Thu, 31 Oct 2019 15:00:40 +0000
Subject: Changelog

---
 changelog.d/6310.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6310.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6310.feature b/changelog.d/6310.feature
new file mode 100644
index 0000000000..b7ff3fad3b
--- /dev/null
+++ b/changelog.d/6310.feature
@@ -0,0 +1 @@
+Implement label-based filtering.
-- 
cgit 1.4.1


From cc6243b4c08bfae77c9ff29d23c40568ab284924 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 4 Nov 2019 12:40:18 +0000
Subject: document the REPLICATE command a bit better (#6305)

since I found myself wondering how it works
---
 changelog.d/6305.misc                      |  1 +
 docs/tcp_replication.md                    | 15 +++++-
 synapse/replication/slave/storage/_base.py | 10 +++-
 synapse/replication/tcp/client.py          | 20 +++++---
 synapse/replication/tcp/protocol.py        | 74 +++++++++++++++++++++++++++++-
 5 files changed, 110 insertions(+), 10 deletions(-)
 create mode 100644 changelog.d/6305.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6305.misc b/changelog.d/6305.misc
new file mode 100644
index 0000000000..f047fc3062
--- /dev/null
+++ b/changelog.d/6305.misc
@@ -0,0 +1 @@
+Add some documentation about worker replication.
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
index e099d8a87b..ba9e874d07 100644
--- a/docs/tcp_replication.md
+++ b/docs/tcp_replication.md
@@ -199,7 +199,20 @@ client (C):
 
 #### REPLICATE (C)
 
-   Asks the server to replicate a given stream
+Asks the server to replicate a given stream. The syntax is:
+
+```
+    REPLICATE <stream_name> <token>
+```
+
+Where `<token>` may be either:
+ * a numeric stream_id to stream updates since (exclusive)
+ * `NOW` to stream all subsequent updates.
+
+The `<stream_name>` is the name of a replication stream to subscribe
+to (see [here](../synapse/replication/tcp/streams/_base.py) for a list
+of streams). It can also be `ALL` to subscribe to all known streams,
+in which case the `<token>` must be set to `NOW`.
 
 #### USER_SYNC (C)
 
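For concreteness, a hedged sketch of the line format the REPLICATE section
above describes (it follows the documented grammar only; the real lines are
built by the command classes in `synapse/replication/tcp/commands.py`):

```python
def replicate_line(stream_name, token):
    """Build a REPLICATE line as a client would send it (illustrative)."""
    return "REPLICATE %s %s" % (stream_name, token)


# Stream "events" updates after position 53 (exclusive):
assert replicate_line("events", 53) == "REPLICATE events 53"
# Subscribe to every known stream, new updates only:
assert replicate_line("ALL", "NOW") == "REPLICATE ALL NOW"
```
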
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index 182cb2a1d8..456bc005a0 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Dict
 
 import six
 
@@ -44,7 +45,14 @@ class BaseSlavedStore(SQLBaseStore):
 
         self.hs = hs
 
-    def stream_positions(self):
+    def stream_positions(self) -> Dict[str, int]:
+        """
+        Get the current positions of all the streams this store wants to subscribe to
+
+        Returns:
+            map from stream name to the most recent update we have for
+            that stream (ie, the point we want to start replicating from)
+        """
         pos = {}
         if self._cache_id_gen:
             pos["caches"] = self._cache_id_gen.get_current_token()
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 563ce0fc53..fead78388c 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -16,10 +16,17 @@
 """
 
 import logging
+from typing import Dict
 
 from twisted.internet import defer
 from twisted.internet.protocol import ReconnectingClientFactory
 
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.tcp.protocol import (
+    AbstractReplicationClientHandler,
+    ClientReplicationStreamProtocol,
+)
+
 from .commands import (
     FederationAckCommand,
     InvalidateCacheCommand,
@@ -27,7 +34,6 @@ from .commands import (
     UserIpCommand,
     UserSyncCommand,
 )
-from .protocol import ClientReplicationStreamProtocol
 
 logger = logging.getLogger(__name__)
 
@@ -42,7 +48,7 @@ class ReplicationClientFactory(ReconnectingClientFactory):
 
     maxDelay = 30  # Try at least once every N seconds
 
-    def __init__(self, hs, client_name, handler):
+    def __init__(self, hs, client_name, handler: AbstractReplicationClientHandler):
         self.client_name = client_name
         self.handler = handler
         self.server_name = hs.config.server_name
@@ -68,13 +74,13 @@ class ReplicationClientFactory(ReconnectingClientFactory):
         ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
 
 
-class ReplicationClientHandler(object):
+class ReplicationClientHandler(AbstractReplicationClientHandler):
     """A base handler that can be passed to the ReplicationClientFactory.
 
     By default proxies incoming replication data to the SlaveStore.
     """
 
-    def __init__(self, store):
+    def __init__(self, store: BaseSlavedStore):
         self.store = store
 
         # The current connection. None if we are currently (re)connecting
@@ -138,11 +144,13 @@ class ReplicationClientHandler(object):
         if d:
             d.callback(data)
 
-    def get_streams_to_replicate(self):
+    def get_streams_to_replicate(self) -> Dict[str, int]:
         """Called when a new connection has been established and we need to
         subscribe to streams.
 
-        Returns a dictionary of stream name to token.
+        Returns:
+            map from stream name to the most recent update we have for
+            that stream (ie, the point we want to start replicating from)
         """
         args = self.store.stream_positions()
         user_account_data = args.pop("user_account_data", None)
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index b64f3f44b5..afaf002fe6 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -48,7 +48,7 @@ indicate which side is sending, these are *not* included on the wire::
     > ERROR server stopping
     * connection closed by server *
 """
-
+import abc
 import fcntl
 import logging
 import struct
@@ -65,6 +65,7 @@ from twisted.python.failure import Failure
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import Clock
 from synapse.util.stringutils import random_string
 
 from .commands import (
@@ -558,11 +559,80 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
         self.streamer.lost_connection(self)
 
 
+class AbstractReplicationClientHandler(metaclass=abc.ABCMeta):
+    """
+    The interface for the handler that should be passed to
+    ClientReplicationStreamProtocol
+    """
+
+    @abc.abstractmethod
+    def on_rdata(self, stream_name, token, rows):
+        """Called to handle a batch of replication data with a given stream token.
+
+        Args:
+            stream_name (str): name of the replication stream for this batch of rows
+            token (int): stream token for this batch of rows
+            rows (list): a list of Stream.ROW_TYPE objects as returned by
+                Stream.parse_row.
+
+        Returns:
+            Deferred|None
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def on_position(self, stream_name, token):
+        """Called when we get new position data."""
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def on_sync(self, data):
+        """Called when we get a new SYNC command."""
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def get_streams_to_replicate(self):
+        """Called when a new connection has been established and we need to
+        subscribe to streams.
+
+        Returns:
+            map from stream name to the most recent update we have for
+            that stream (ie, the point we want to start replicating from)
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def get_currently_syncing_users(self):
+        """Get the list of currently syncing users (if any). This is called
+        when a connection has been established and we need to send the
+        currently syncing users."""
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def update_connection(self, connection):
+        """Called when a connection has been established (or lost with None).
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def finished_connecting(self):
+        """Called when we have successfully subscribed and caught up to all
+        streams we're interested in.
+        """
+        raise NotImplementedError()
+
+
 class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
     VALID_INBOUND_COMMANDS = VALID_SERVER_COMMANDS
     VALID_OUTBOUND_COMMANDS = VALID_CLIENT_COMMANDS
 
-    def __init__(self, client_name, server_name, clock, handler):
+    def __init__(
+        self,
+        client_name: str,
+        server_name: str,
+        clock: Clock,
+        handler: AbstractReplicationClientHandler,
+    ):
         BaseReplicationStreamProtocol.__init__(self, clock)
 
         self.client_name = client_name
-- 
cgit 1.4.1
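
To make the new interface concrete, a minimal handler could look like the
following. It follows a single stream and ignores the rest; the method names
come from `AbstractReplicationClientHandler` above, while the class itself and
its behaviour are purely illustrative:

```python
from synapse.replication.tcp.protocol import AbstractReplicationClientHandler


class SingleStreamHandler(AbstractReplicationClientHandler):
    """Toy handler that follows just the "events" stream."""

    def __init__(self):
        self.position = 0  # last token we have processed
        self.connection = None

    def on_rdata(self, stream_name, token, rows):
        if stream_name == "events":
            self.position = token  # rows would be processed here

    def on_position(self, stream_name, token):
        self.position = token

    def on_sync(self, data):
        pass

    def get_streams_to_replicate(self):
        # Resume from the last token we processed (exclusive).
        return {"events": self.position}

    def get_currently_syncing_users(self):
        return []

    def update_connection(self, connection):
        self.connection = connection

    def finished_connecting(self):
        pass
```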


From 09957ce0e4dcfd84c2de4039653059faae03065b Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 4 Nov 2019 17:09:22 +0000
Subject: Implement per-room message retention policies

---
 changelog.d/5815.feature                           |   1 +
 docs/sample_config.yaml                            |  63 ++++
 synapse/api/constants.py                           |   2 +
 synapse/config/server.py                           | 172 +++++++++++
 synapse/events/validator.py                        | 100 ++++++-
 synapse/handlers/federation.py                     |   2 +-
 synapse/handlers/message.py                        |   4 +-
 synapse/handlers/pagination.py                     | 111 +++++++
 synapse/storage/data_stores/main/events.py         |   3 +
 synapse/storage/data_stores/main/room.py           | 252 ++++++++++++++++
 .../main/schema/delta/56/room_retention.sql        |  33 +++
 synapse/visibility.py                              |  17 ++
 tests/rest/client/test_retention.py                | 320 +++++++++++++++++++++
 13 files changed, 1074 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/5815.feature
 create mode 100644 synapse/storage/data_stores/main/schema/delta/56/room_retention.sql
 create mode 100644 tests/rest/client/test_retention.py

(limited to 'changelog.d')

diff --git a/changelog.d/5815.feature b/changelog.d/5815.feature
new file mode 100644
index 0000000000..ca4df4e7f6
--- /dev/null
+++ b/changelog.d/5815.feature
@@ -0,0 +1 @@
+Implement per-room message retention policies.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index d2f4aff826..87fba27d13 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -328,6 +328,69 @@ listeners:
 #
 #user_ips_max_age: 14d
 
+# Message retention policy at the server level.
+#
+# Room admins and mods can define a retention period for their rooms using the
+# 'm.room.retention' state event, and server admins can cap this period by setting
+# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
+#
+# If this feature is enabled, Synapse will regularly look for and purge events
+# which are older than the room's maximum retention period. Synapse will also
+# filter events received over federation so that events that should have been
+# purged are ignored and not stored again.
+#
+retention:
+  # The message retention policies feature is disabled by default. Uncomment the
+  # following line to enable it.
+  #
+  #enabled: true
+
+  # Default retention policy. If set, Synapse will apply it to rooms that lack the
+  # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
+  # matter much because Synapse doesn't take it into account yet.
+  #
+  #default_policy:
+  #  min_lifetime: 1d
+  #  max_lifetime: 1y
+
+  # Retention policy limits. If set, a user won't be able to send a
+  # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
+  # that's not within this range. This is especially useful in closed federations,
+  # in which server admins can make sure every federating server applies the same
+  # rules.
+  #
+  #allowed_lifetime_min: 1d
+  #allowed_lifetime_max: 1y
+
+  # Server admins can define the settings of the background jobs purging the
+  # events whose lifetime has expired under the 'purge_jobs' section.
+  #
+  # If no configuration is provided, a single job will be set up to delete expired
+  # events in every room daily.
+  #
+  # Each job's configuration defines which range of message lifetimes the job
+  # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
+  # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
+  # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
+  # lower than or equal to 3 days. Both the minimum and the maximum value of a
+  # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
+  # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
+  # whose 'max_lifetime' is lower than or equal to three days.
+  #
+  # The rationale for this per-job configuration is that some rooms might have a
+  # retention policy with a low 'max_lifetime', where history needs to be purged
+  # of outdated messages on a very frequent basis (e.g. every 5min), without that
+  # purge being performed by a job that iterates over every room it knows of,
+  # which would be quite heavy on the server.
+  #
+  #purge_jobs:
+  #  - shortest_max_lifetime: 1d
+  #    longest_max_lifetime: 3d
+  #    interval: 5m
+  #  - shortest_max_lifetime: 3d
+  #    longest_max_lifetime: 1y
+  #    interval: 24h
+
 
 ## TLS ##
 
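The range rules in the 'purge_jobs' comments above reduce to a simple
predicate: a job handles a room when shortest_max_lifetime < max_lifetime
<= longest_max_lifetime, with a missing bound meaning "unbounded on that
side". A small sketch of that predicate (illustrative, not Synapse code):

```python
DAY = 24 * 60 * 60 * 1000  # durations are handled internally in milliseconds


def job_handles(max_lifetime, shortest=None, longest=None):
    """True if a purge job with the given bounds covers this room."""
    if shortest is not None and max_lifetime <= shortest:
        return False  # lower bound is exclusive
    if longest is not None and max_lifetime > longest:
        return False  # upper bound is inclusive
    return True


assert job_handles(3 * DAY, shortest=2 * DAY, longest=3 * DAY)  # in (2d, 3d]
assert not job_handles(2 * DAY, shortest=2 * DAY, longest=3 * DAY)
assert job_handles(2 * DAY, longest=3 * DAY)  # no lower bound set
```
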
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 49c4b85054..e3f086f1c3 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -94,6 +94,8 @@ class EventTypes(object):
     ServerACL = "m.room.server_acl"
     Pinned = "m.room.pinned_events"
 
+    Retention = "m.room.retention"
+
 
 class RejectedReason(object):
     AUTH_ERROR = "auth_error"
diff --git a/synapse/config/server.py b/synapse/config/server.py
index d556df308d..aa93a416f1 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -246,6 +246,115 @@ class ServerConfig(Config):
         # events with profile information that differ from the target's global profile.
         self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
 
+        retention_config = config.get("retention")
+        if retention_config is None:
+            retention_config = {}
+
+        self.retention_enabled = retention_config.get("enabled", False)
+
+        retention_default_policy = retention_config.get("default_policy")
+
+        if retention_default_policy is not None:
+            self.retention_default_min_lifetime = retention_default_policy.get(
+                "min_lifetime"
+            )
+            if self.retention_default_min_lifetime is not None:
+                self.retention_default_min_lifetime = self.parse_duration(
+                    self.retention_default_min_lifetime
+                )
+
+            self.retention_default_max_lifetime = retention_default_policy.get(
+                "max_lifetime"
+            )
+            if self.retention_default_max_lifetime is not None:
+                self.retention_default_max_lifetime = self.parse_duration(
+                    self.retention_default_max_lifetime
+                )
+
+            if (
+                self.retention_default_min_lifetime is not None
+                and self.retention_default_max_lifetime is not None
+                and (
+                    self.retention_default_min_lifetime
+                    > self.retention_default_max_lifetime
+                )
+            ):
+                raise ConfigError(
+                    "The default retention policy's 'min_lifetime' can not be greater"
+                    " than its 'max_lifetime'"
+                )
+        else:
+            self.retention_default_min_lifetime = None
+            self.retention_default_max_lifetime = None
+
+        self.retention_allowed_lifetime_min = retention_config.get("allowed_lifetime_min")
+        if self.retention_allowed_lifetime_min is not None:
+            self.retention_allowed_lifetime_min = self.parse_duration(
+                self.retention_allowed_lifetime_min
+            )
+
+        self.retention_allowed_lifetime_max = retention_config.get("allowed_lifetime_max")
+        if self.retention_allowed_lifetime_max is not None:
+            self.retention_allowed_lifetime_max = self.parse_duration(
+                self.retention_allowed_lifetime_max
+            )
+
+        if (
+            self.retention_allowed_lifetime_min is not None
+            and self.retention_allowed_lifetime_max is not None
+            and self.retention_allowed_lifetime_min > self.retention_allowed_lifetime_max
+        ):
+            raise ConfigError(
+                "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
+                " greater than 'allowed_lifetime_max'"
+            )
+
+        self.retention_purge_jobs = []
+        for purge_job_config in retention_config.get("purge_jobs", []):
+            interval_config = purge_job_config.get("interval")
+
+            if interval_config is None:
+                raise ConfigError(
+                    "A retention policy's purge jobs configuration must have the"
+                    " 'interval' key set."
+                )
+
+            interval = self.parse_duration(interval_config)
+
+            shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
+
+            if shortest_max_lifetime is not None:
+                shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
+
+            longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
+
+            if longest_max_lifetime is not None:
+                longest_max_lifetime = self.parse_duration(longest_max_lifetime)
+
+            if (
+                shortest_max_lifetime is not None
+                and longest_max_lifetime is not None
+                and shortest_max_lifetime > longest_max_lifetime
+            ):
+                raise ConfigError(
+                    "A retention policy's purge jobs configuration's"
+                    " 'shortest_max_lifetime' value can not be greater than its"
+                    " 'longest_max_lifetime' value."
+                )
+
+            self.retention_purge_jobs.append({
+                "interval": interval,
+                "shortest_max_lifetime": shortest_max_lifetime,
+                "longest_max_lifetime": longest_max_lifetime,
+            })
+
+        if not self.retention_purge_jobs:
+            self.retention_purge_jobs = [{
+                "interval": self.parse_duration("1d"),
+                "shortest_max_lifetime": None,
+                "longest_max_lifetime": None,
+            }]
+
         self.listeners = []  # type: List[dict]
         for listener in config.get("listeners", []):
             if not isinstance(listener.get("port", None), int):
@@ -761,6 +870,69 @@ class ServerConfig(Config):
         # Defaults to `28d`. Set to `null` to disable clearing out of old rows.
         #
         #user_ips_max_age: 14d
+        
+        # Message retention policy at the server level.
+        #
+        # Room admins and mods can define a retention period for their rooms using the
+        # 'm.room.retention' state event, and server admins can cap this period by setting
+        # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
+        #
+        # If this feature is enabled, Synapse will regularly look for and purge events
+        # which are older than the room's maximum retention period. Synapse will also
+        # filter events received over federation so that events that should have been
+        # purged are ignored and not stored again.
+        #
+        retention:
+          # The message retention policies feature is disabled by default. Uncomment the
+          # following line to enable it.
+          #
+          #enabled: true
+
+          # Default retention policy. If set, Synapse will apply it to rooms that lack the
+          # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
+          # matter much because Synapse doesn't take it into account yet.
+          #
+          #default_policy:
+          #  min_lifetime: 1d
+          #  max_lifetime: 1y
+
+          # Retention policy limits. If set, a user won't be able to send a
+          # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
+          # that's not within this range. This is especially useful in closed federations,
+          # in which server admins can make sure every federating server applies the same
+          # rules.
+          #
+          #allowed_lifetime_min: 1d
+          #allowed_lifetime_max: 1y
+
+          # Server admins can define the settings of the background jobs purging the
+          # events whose lifetime has expired under the 'purge_jobs' section.
+          #
+          # If no configuration is provided, a single job will be set up to delete expired
+          # events in every room daily.
+          #
+          # Each job's configuration defines which range of message lifetimes the job
+          # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
+          # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
+          # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
+          # lower than or equal to 3 days. Both the minimum and the maximum value of a
+          # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
+          # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
+          # whose 'max_lifetime' is lower than or equal to three days.
+          #
+          # The rationale for this per-job configuration is that some rooms might have a
+          # retention policy with a low 'max_lifetime', where history needs to be purged
+          # of outdated messages on a very frequent basis (e.g. every 5min), without that
+          # purge being performed by a job that iterates over every room it knows of,
+          # which would be quite heavy on the server.
+          #
+          #purge_jobs:
+          #  - shortest_max_lifetime: 1d
+          #    longest_max_lifetime: 3d
+          #    interval: 5m
+          #  - shortest_max_lifetime: 3d
+          #    longest_max_lifetime: 1y
+          #    interval: 24h
         """
             % locals()
         )
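
The duration strings used throughout this config ('5m', '1d', '1y') are
converted to milliseconds by `Config.parse_duration`. A simplified
re-implementation of the suffix handling, offered as an assumption about its
behaviour rather than a verbatim copy:

```python
def parse_duration(value):
    """Parse "10s"/"5m"/"1h"/"1d"/"1w"/"1y" style strings into milliseconds."""
    if isinstance(value, int):
        return value  # assumed to already be in milliseconds
    second = 1000
    sizes = {
        "s": second,
        "m": 60 * second,
        "h": 60 * 60 * second,
        "d": 24 * 60 * 60 * second,
        "w": 7 * 24 * 60 * 60 * second,
        "y": 365 * 24 * 60 * 60 * second,
    }
    size = 1
    if value[-1] in sizes:
        size = sizes[value[-1]]
        value = value[:-1]
    return int(value) * size


assert parse_duration("5m") == 5 * 60 * 1000
assert parse_duration("1d") == 24 * 60 * 60 * 1000
```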
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index 272426e105..9b90c9ce04 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from six import string_types
+from six import integer_types, string_types
 
 from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
 from synapse.api.errors import Codes, SynapseError
@@ -22,11 +22,12 @@ from synapse.types import EventID, RoomID, UserID
 
 
 class EventValidator(object):
-    def validate_new(self, event):
+    def validate_new(self, event, config):
         """Validates the event has roughly the right format
 
         Args:
-            event (FrozenEvent)
+            event (FrozenEvent): The event to validate.
+            config (Config): The homeserver's configuration.
         """
         self.validate_builder(event)
 
@@ -67,6 +68,99 @@ class EventValidator(object):
                             Codes.INVALID_PARAM,
                         )
 
+        if event.type == EventTypes.Retention:
+            self._validate_retention(event, config)
+
+    def _validate_retention(self, event, config):
+        """Checks that an event that defines the retention policy for a room respects the
+        boundaries imposed by the server's administrator.
+
+        Args:
+            event (FrozenEvent): The event to validate.
+            config (Config): The homeserver's configuration.
+        """
+        min_lifetime = event.content.get("min_lifetime")
+        max_lifetime = event.content.get("max_lifetime")
+
+        if min_lifetime is not None:
+            if not isinstance(min_lifetime, integer_types):
+                raise SynapseError(
+                    code=400,
+                    msg="'min_lifetime' must be an integer",
+                    errcode=Codes.BAD_JSON,
+                )
+
+            if (
+                config.retention_allowed_lifetime_min is not None
+                and min_lifetime < config.retention_allowed_lifetime_min
+            ):
+                raise SynapseError(
+                    code=400,
+                    msg=(
+                        "'min_lifetime' can't be lower than the minimum allowed"
+                        " value enforced by the server's administrator"
+                    ),
+                    errcode=Codes.BAD_JSON,
+                )
+
+            if (
+                config.retention_allowed_lifetime_max is not None
+                and min_lifetime > config.retention_allowed_lifetime_max
+            ):
+                raise SynapseError(
+                    code=400,
+                    msg=(
+                        "'min_lifetime' can't be greater than the maximum allowed"
+                        " value enforced by the server's administrator"
+                    ),
+                    errcode=Codes.BAD_JSON,
+                )
+
+        if max_lifetime is not None:
+            if not isinstance(max_lifetime, integer_types):
+                raise SynapseError(
+                    code=400,
+                    msg="'max_lifetime' must be an integer",
+                    errcode=Codes.BAD_JSON,
+                )
+
+            if (
+                config.retention_allowed_lifetime_min is not None
+                and max_lifetime < config.retention_allowed_lifetime_min
+            ):
+                raise SynapseError(
+                    code=400,
+                    msg=(
+                        "'max_lifetime' can't be lower than the minimum allowed value"
+                        " enforced by the server's administrator"
+                    ),
+                    errcode=Codes.BAD_JSON,
+                )
+
+            if (
+                config.retention_allowed_lifetime_max is not None
+                and max_lifetime > config.retention_allowed_lifetime_max
+            ):
+                raise SynapseError(
+                    code=400,
+                    msg=(
+                        "'max_lifetime' can't be greater than the maximum allowed"
+                        " value enforced by the server's administrator"
+                    ),
+                    errcode=Codes.BAD_JSON,
+                )
+
+        if (
+            min_lifetime is not None
+            and max_lifetime is not None
+            and min_lifetime > max_lifetime
+        ):
+            raise SynapseError(
+                code=400,
+                msg="'min_lifetime' can't be greater than 'max_lifetime'",
+                errcode=Codes.BAD_JSON,
+            )
+
     def validate_builder(self, event):
         """Validates that the builder/event has roughly the right format. Only
         checks values that we expect a proto event to have, rather than all the
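
As a usage illustration, the checks in `_validate_retention` accept or reject
`m.room.retention` content along the following lines (the 1 day / 1 year
bounds here are hypothetical server settings, not defaults):

```python
DAY = 24 * 60 * 60 * 1000
YEAR = 365 * DAY


def within_bounds(content, allowed_min=DAY, allowed_max=YEAR):
    """Sketch of the per-field checks performed by _validate_retention."""
    for key in ("min_lifetime", "max_lifetime"):
        lifetime = content.get(key)
        if lifetime is None:
            continue  # missing fields are allowed
        if not isinstance(lifetime, int):
            return False
        if allowed_min is not None and lifetime < allowed_min:
            return False
        if allowed_max is not None and lifetime > allowed_max:
            return False
    min_lt = content.get("min_lifetime")
    max_lt = content.get("max_lifetime")
    if min_lt is not None and max_lt is not None and min_lt > max_lt:
        return False
    return True


assert within_bounds({"max_lifetime": 30 * DAY})
assert not within_bounds({"max_lifetime": 2 * YEAR})  # above allowed_lifetime_max
assert not within_bounds({"min_lifetime": 30 * DAY, "max_lifetime": 7 * DAY})
```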
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 8cafcfdab0..3994137d18 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -2454,7 +2454,7 @@ class FederationHandler(BaseHandler):
                 room_version, event_dict, event, context
             )
 
-            EventValidator().validate_new(event)
+            EventValidator().validate_new(event, self.config)
 
             # We need to tell the transaction queue to send this out, even
             # though the sender isn't a local user.
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index d682dc2b7a..155ed6e06a 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -417,7 +417,7 @@ class EventCreationHandler(object):
                     403, "You must be in the room to create an alias for it"
                 )
 
-        self.validator.validate_new(event)
+        self.validator.validate_new(event, self.config)
 
         return (event, context)
 
@@ -634,7 +634,7 @@ class EventCreationHandler(object):
         if requester:
             context.app_service = requester.app_service
 
-        self.validator.validate_new(event)
+        self.validator.validate_new(event, self.config)
 
         # If this event is an annotation then we check that the sender
         # can't annotate the same way twice (e.g. stops users from liking an
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 97f15a1c32..e1800177fa 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -15,12 +15,15 @@
 # limitations under the License.
 import logging
 
+from six import iteritems
+
 from twisted.internet import defer
 from twisted.python.failure import Failure
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.logging.context import run_in_background
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.state import StateFilter
 from synapse.types import RoomStreamToken
 from synapse.util.async_helpers import ReadWriteLock
@@ -80,6 +83,114 @@ class PaginationHandler(object):
         self._purges_by_id = {}
         self._event_serializer = hs.get_event_client_serializer()
 
+        self._retention_default_max_lifetime = hs.config.retention_default_max_lifetime
+
+        if hs.config.retention_enabled:
+            # Run the purge jobs described in the configuration file.
+            for job in hs.config.retention_purge_jobs:
+                self.clock.looping_call(
+                    run_as_background_process,
+                    job["interval"],
+                    "purge_history_for_rooms_in_range",
+                    self.purge_history_for_rooms_in_range,
+                    job["shortest_max_lifetime"],
+                    job["longest_max_lifetime"],
+                )
+
+    @defer.inlineCallbacks
+    def purge_history_for_rooms_in_range(self, min_ms, max_ms):
+        """Purge outdated events from rooms within the given retention range.
+
+        If a default retention policy is defined in the server's configuration and its
+        'max_lifetime' is within this range, also targets rooms which don't have a
+        retention policy.
+
+        Args:
+            min_ms (int|None): Duration in milliseconds that defines the lower limit of
+                the range to handle (exclusive). If None, it means that the range has no
+                lower limit.
+            max_ms (int|None): Duration in milliseconds that defines the upper limit of
+                the range to handle (inclusive). If None, it means that the range has no
+                upper limit.
+        """
+        # We want the storage layer to include rooms with no retention policy in its
+        # return value only if a default retention policy is defined in the server's
+        # configuration and that policy's 'max_lifetime' is either lower than (or equal
+        # to) max_ms, or higher than min_ms (or both).
+        if self._retention_default_max_lifetime is not None:
+            include_null = True
+
+            if min_ms is not None and min_ms >= self._retention_default_max_lifetime:
+                # The default max_lifetime is lower than (or equal to) min_ms.
+                include_null = False
+
+            if max_ms is not None and max_ms < self._retention_default_max_lifetime:
+                # The default max_lifetime is higher than max_ms.
+                include_null = False
+        else:
+            include_null = False
+
+        rooms = yield self.store.get_rooms_for_retention_period_in_range(
+            min_ms, max_ms, include_null
+        )
+
+        for room_id, retention_policy in iteritems(rooms):
+            if room_id in self._purges_in_progress_by_room:
+                logger.warning(
+                    "[purge] not purging room %s as there's an ongoing purge running"
+                    " for this room",
+                    room_id,
+                )
+                continue
+
+            max_lifetime = retention_policy["max_lifetime"]
+
+            if max_lifetime is None:
+                # If max_lifetime is None, it means that include_null equals True,
+                # therefore we can safely assume that there is a default policy defined
+                # in the server's configuration.
+                max_lifetime = self._retention_default_max_lifetime
+
+            # Figure out what token we should start purging at.
+            ts = self.clock.time_msec() - max_lifetime
+
+            stream_ordering = (
+                yield self.store.find_first_stream_ordering_after_ts(ts)
+            )
+
+            r = (
+                yield self.store.get_room_event_after_stream_ordering(
+                    room_id, stream_ordering,
+                )
+            )
+            if not r:
+                logger.warning(
+                    "[purge] purging events not possible: No event found "
+                    "(ts %i => stream_ordering %i)",
+                    ts, stream_ordering,
+                )
+                continue
+
+            (stream, topo, _event_id) = r
+            token = "t%d-%d" % (topo, stream)
+
+            purge_id = random_string(16)
+
+            self._purges_by_id[purge_id] = PurgeStatus()
+
+            logger.info(
+                "Starting purging events in room %s (purge_id %s)", room_id, purge_id
+            )
+
+            # We want to purge everything, including local events, and to run the purge in
+            # the background so that it's not blocking any other operation apart from
+            # other purges in the same room.
+            run_as_background_process(
+                "_purge_history",
+                self._purge_history,
+                purge_id, room_id, token, True,
+            )
+
     def start_purge_history(self, room_id, token, delete_local_events=False):
         """Start off a history purge on a room.
 
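A worked example of the include_null rule above, assuming a server default
policy whose 'max_lifetime' is one year (the values are hypothetical):

```python
DAY = 24 * 60 * 60 * 1000
YEAR = 365 * DAY
default_max_lifetime = YEAR  # assumed server-wide default policy


def include_null_rooms(min_ms, max_ms):
    """True if rooms without a retention policy fall in (min_ms, max_ms]."""
    if min_ms is not None and min_ms >= default_max_lifetime:
        return False  # the default lifetime is at or below the lower bound
    if max_ms is not None and max_ms < default_max_lifetime:
        return False  # the default lifetime is above the upper bound
    return True


assert include_null_rooms(3 * DAY, YEAR)  # the 1y default sits in (3d, 1y]
assert not include_null_rooms(None, 3 * DAY)  # the 1y default is out of range
```
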
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 301f8ea128..b332a42d82 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -929,6 +929,9 @@ class EventsStore(
             elif event.type == EventTypes.Redaction:
                 # Insert into the redactions table.
                 self._store_redaction(txn, event)
+            elif event.type == EventTypes.Retention:
+                # Update the room_retention table.
+                self._store_retention_policy_for_room_txn(txn, event)
 
             self._handle_event_relations(txn, event)
 
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index 67bb1b6f60..54a7d24c73 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -19,10 +19,13 @@ import logging
 import re
 from typing import Optional, Tuple
 
+from six import integer_types
+
 from canonicaljson import json
 
 from twisted.internet import defer
 
+from synapse.api.constants import EventTypes
 from synapse.api.errors import StoreError
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.data_stores.main.search import SearchStore
@@ -302,6 +305,85 @@ class RoomWorkerStore(SQLBaseStore):
 
 
 class RoomStore(RoomWorkerStore, SearchStore):
+    def __init__(self, db_conn, hs):
+        super(RoomStore, self).__init__(db_conn, hs)
+
+        self.config = hs.config
+
+        self.register_background_update_handler(
+            "insert_room_retention", self._background_insert_retention,
+        )
+
+    @defer.inlineCallbacks
+    def _background_insert_retention(self, progress, batch_size):
+        """Retrieves a list of all rooms within a range and inserts an entry for each of
+        them into the room_retention table.
+        Sets a policy attribute's column to NULL if it is missing from the retention
+        event in the room's state (or NULLs all of them if there's no retention
+        event in the room's state), so that we fall back to the server's retention
+        policy.
+        """
+
+        last_room = progress.get("room_id", "")
+
+        def _background_insert_retention_txn(txn):
+            txn.execute(
+                """
+                SELECT state.room_id, state.event_id, events.json
+                FROM current_state_events as state
+                LEFT JOIN event_json AS events ON (state.event_id = events.event_id)
+                WHERE state.room_id > ? AND state.type = '%s'
+                ORDER BY state.room_id ASC
+                LIMIT ?;
+                """ % EventTypes.Retention,
+                (last_room, batch_size)
+            )
+
+            rows = self.cursor_to_dict(txn)
+
+            if not rows:
+                return True
+
+            for row in rows:
+                if not row["json"]:
+                    retention_policy = {}
+                else:
+                    ev = json.loads(row["json"])
+                    retention_policy = ev["content"]
+
+                self._simple_insert_txn(
+                    txn=txn,
+                    table="room_retention",
+                    values={
+                        "room_id": row["room_id"],
+                        "event_id": row["event_id"],
+                        "min_lifetime": retention_policy.get("min_lifetime"),
+                        "max_lifetime": retention_policy.get("max_lifetime"),
+                    }
+                )
+
+            logger.info("Inserted %d rows into room_retention", len(rows))
+
+            self._background_update_progress_txn(
+                txn, "insert_room_retention", {
+                    "room_id": rows[-1]["room_id"],
+                }
+            )
+
+            if batch_size > len(rows):
+                return True
+            else:
+                return False
+
+        end = yield self.runInteraction(
+            "insert_room_retention",
+            _background_insert_retention_txn,
+        )
+
+        if end:
+            yield self._end_background_update("insert_room_retention")
+
+        defer.returnValue(batch_size)
+
     @defer.inlineCallbacks
     def store_room(self, room_id, room_creator_user_id, is_public):
         """Stores a room.
@@ -502,6 +584,37 @@ class RoomStore(RoomWorkerStore, SearchStore):
                 txn, event, "content.body", event.content["body"]
             )
 
+    def _store_retention_policy_for_room_txn(self, txn, event):
+        if (
+            hasattr(event, "content")
+            and ("min_lifetime" in event.content or "max_lifetime" in event.content)
+        ):
+            if (
+                ("min_lifetime" in event.content and not isinstance(
+                    event.content.get("min_lifetime"), integer_types
+                ))
+                or ("max_lifetime" in event.content and not isinstance(
+                    event.content.get("max_lifetime"), integer_types
+                ))
+            ):
+                # Ignore the event if one of the values isn't an integer.
+                return
+
+            self._simple_insert_txn(
+                txn=txn,
+                table="room_retention",
+                values={
+                    "room_id": event.room_id,
+                    "event_id": event.event_id,
+                    "min_lifetime": event.content.get("min_lifetime"),
+                    "max_lifetime": event.content.get("max_lifetime"),
+                },
+            )
+
+            self._invalidate_cache_and_stream(
+                txn, self.get_retention_policy_for_room, (event.room_id,)
+            )
+
     def add_event_report(
         self, room_id, event_id, user_id, reason, content, received_ts
     ):
@@ -683,3 +796,142 @@ class RoomStore(RoomWorkerStore, SearchStore):
                             remote_media_mxcs.append((hostname, media_id))
 
         return local_media_mxcs, remote_media_mxcs
+
+    @defer.inlineCallbacks
+    def get_rooms_for_retention_period_in_range(self, min_ms, max_ms, include_null=False):
+        """Retrieves all of the rooms within the given retention range.
+
+        Optionally includes the rooms which don't have a retention policy.
+
+        Args:
+            min_ms (int|None): Duration in milliseconds that defines the lower limit of
+                the range to handle (exclusive). If None, doesn't set a lower limit.
+            max_ms (int|None): Duration in milliseconds that defines the upper limit of
+                the range to handle (inclusive). If None, doesn't set an upper limit.
+            include_null (bool): Whether to include rooms whose retention policy is NULL
+                in the returned set.
+
+        Returns:
+            dict[str, dict]: The rooms within this range, along with their retention
+                policy. The key is "room_id", and maps to a dict describing the retention
+                policy associated with this room ID. The keys for this nested dict are
+                "min_lifetime" (int|None), and "max_lifetime" (int|None).
+        """
+
+        def get_rooms_for_retention_period_in_range_txn(txn):
+            range_conditions = []
+            args = []
+
+            if min_ms is not None:
+                range_conditions.append("max_lifetime > ?")
+                args.append(min_ms)
+
+            if max_ms is not None:
+                range_conditions.append("max_lifetime <= ?")
+                args.append(max_ms)
+
+            # Do a first query which will retrieve the rooms that have a retention policy
+            # in their current state.
+            sql = """
+                SELECT room_id, min_lifetime, max_lifetime FROM room_retention
+                INNER JOIN current_state_events USING (event_id, room_id)
+                """
+
+            if len(range_conditions):
+                sql += " WHERE (" + " AND ".join(range_conditions) + ")"
+
+                if include_null:
+                    sql += " OR max_lifetime IS NULL"
+
+            txn.execute(sql, args)
+
+            rows = self.cursor_to_dict(txn)
+            rooms_dict = {}
+
+            for row in rows:
+                rooms_dict[row["room_id"]] = {
+                    "min_lifetime": row["min_lifetime"],
+                    "max_lifetime": row["max_lifetime"],
+                }
+
+            if include_null:
+                # If required, do a second query that retrieves all of the rooms we know
+                # of so we can handle rooms with no retention policy.
+                sql = "SELECT DISTINCT room_id FROM current_state_events"
+
+                txn.execute(sql)
+
+                rows = self.cursor_to_dict(txn)
+
+                # If a room isn't already in the dict (i.e. it doesn't have a retention
+                # policy in its state), add it with a null policy.
+                for row in rows:
+                    if row["room_id"] not in rooms_dict:
+                        rooms_dict[row["room_id"]] = {
+                            "min_lifetime": None,
+                            "max_lifetime": None,
+                        }
+
+            return rooms_dict
+
+        rooms = yield self.runInteraction(
+            "get_rooms_for_retention_period_in_range",
+            get_rooms_for_retention_period_in_range_txn,
+        )
+
+        defer.returnValue(rooms)
+
+    @cachedInlineCallbacks()
+    def get_retention_policy_for_room(self, room_id):
+        """Get the retention policy for a given room.
+
+        If no retention policy has been found for this room, returns a policy defined
+        by the configured default policy (which has None as both the 'min_lifetime' and
+        the 'max_lifetime' if no default policy has been defined in the server's
+        configuration).
+
+        Args:
+            room_id (str): The ID of the room to get the retention policy of.
+
+        Returns:
+            dict[str, int|None]: "min_lifetime" and "max_lifetime" for this room.
+        """
+
+        def get_retention_policy_for_room_txn(txn):
+            txn.execute(
+                """
+                SELECT min_lifetime, max_lifetime FROM room_retention
+                INNER JOIN current_state_events USING (event_id, room_id)
+                WHERE room_id = ?;
+                """,
+                (room_id,)
+            )
+
+            return self.cursor_to_dict(txn)
+
+        ret = yield self.runInteraction(
+            "get_retention_policy_for_room",
+            get_retention_policy_for_room_txn,
+        )
+
+        # If we don't know this room ID, ret will be an empty list; in this case,
+        # return the default policy.
+        if not ret:
+            defer.returnValue({
+                "min_lifetime": self.config.retention_default_min_lifetime,
+                "max_lifetime": self.config.retention_default_max_lifetime,
+            })
+
+        row = ret[0]
+
+        # If one of the room's policy's attributes isn't defined, use the matching
+        # attribute from the default policy.
+        # The default values will be None if no default policy has been defined, or if one
+        # of the attributes is missing from the default policy.
+        if row["min_lifetime"] is None:
+            row["min_lifetime"] = self.config.retention_default_min_lifetime
+
+        if row["max_lifetime"] is None:
+            row["max_lifetime"] = self.config.retention_default_max_lifetime
+
+        defer.returnValue(row)
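
The per-attribute fallback above is the crux of this method, so here it is distilled into a standalone sketch (illustrative only; `default_min`/`default_max` stand in for the `retention_default_min_lifetime`/`retention_default_max_lifetime` config values):

```python
# Illustrative only: merge a room_retention row with the server's default
# policy, attribute by attribute, as the storage method above does.
def effective_policy(row, default_min=None, default_max=None):
    if row is None:
        # No retention state event in the room at all: fall back to the
        # configured default policy wholesale.
        return {"min_lifetime": default_min, "max_lifetime": default_max}
    return {
        "min_lifetime": default_min if row["min_lifetime"] is None else row["min_lifetime"],
        "max_lifetime": default_max if row["max_lifetime"] is None else row["max_lifetime"],
    }

one_day_ms = 24 * 3600 * 1000
print(effective_policy({"min_lifetime": None, "max_lifetime": one_day_ms},
                       default_min=one_day_ms))
# {'min_lifetime': 86400000, 'max_lifetime': 86400000}
```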
diff --git a/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql b/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql
new file mode 100644
index 0000000000..ee6cdf7a14
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql
@@ -0,0 +1,33 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Tracks the retention policy of a room.
+-- A NULL max_lifetime or min_lifetime means that the matching property is not defined in
+-- the room's retention policy state event.
+-- If a room doesn't have a retention policy state event in its state, both max_lifetime
+-- and min_lifetime are NULL.
+CREATE TABLE IF NOT EXISTS room_retention(
+    room_id TEXT,
+    event_id TEXT,
+    min_lifetime BIGINT,
+    max_lifetime BIGINT,
+
+    PRIMARY KEY(room_id, event_id)
+);
+
+CREATE INDEX room_retention_max_lifetime_idx on room_retention(max_lifetime);
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('insert_room_retention', '{}');
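
As a sanity check on the new table and the range query built in `get_rooms_for_retention_period_in_range`, the following self-contained sqlite3 session exercises a query of the same shape (the join against `current_state_events` is omitted for brevity, and the data values are made up):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE room_retention("
    " room_id TEXT, event_id TEXT, min_lifetime BIGINT, max_lifetime BIGINT,"
    " PRIMARY KEY(room_id, event_id))"
)
conn.executemany(
    "INSERT INTO room_retention VALUES (?, ?, ?, ?)",
    [
        ("!short:test", "$1", None, 3600000),    # one hour
        ("!long:test", "$2", None, 604800000),   # one week
        ("!none:test", "$3", None, None),        # no lifetimes set
    ],
)

# Same shape as the WHERE clause the store builds, with include_null=True.
rows = conn.execute(
    "SELECT room_id FROM room_retention"
    " WHERE (max_lifetime > ? AND max_lifetime <= ?) OR max_lifetime IS NULL",
    (0, 86400000),
).fetchall()
print([r[0] for r in rows])  # ['!short:test', '!none:test']
```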
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 8c843febd8..4498c156bc 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -86,6 +86,14 @@ def filter_events_for_client(
 
     erased_senders = yield storage.main.are_users_erased((e.sender for e in events))
 
+    room_ids = set(e.room_id for e in events)
+    retention_policies = {}
+
+    for room_id in room_ids:
+        retention_policies[room_id] = yield storage.main.get_retention_policy_for_room(
+            room_id
+        )
+
     def allowed(event):
         """
         Args:
@@ -103,6 +111,15 @@ def filter_events_for_client(
         if not event.is_state() and event.sender in ignore_list:
             return None
 
+        retention_policy = retention_policies[event.room_id]
+        max_lifetime = retention_policy.get("max_lifetime")
+
+        if max_lifetime is not None:
+            oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime
+
+            if event.origin_server_ts < oldest_allowed_ts:
+                return None
+
         if event.event_id in always_include_ids:
             return event
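
The retention check added to `allowed()` boils down to a single timestamp comparison. A minimal sketch, with times in milliseconds and `None` meaning the room has no `max_lifetime`:

```python
def is_outside_retention(origin_server_ts, max_lifetime, now_ms):
    """Sketch of the expiry test added to allowed() above."""
    if max_lifetime is None:
        return False  # no policy: the event never expires
    oldest_allowed_ts = now_ms - max_lifetime
    return origin_server_ts < oldest_allowed_ts

one_day_ms = 24 * 3600 * 1000
now = 10 * one_day_ms
print(is_outside_retention(7 * one_day_ms, one_day_ms, now))   # True: 3 days old
print(is_outside_retention(int(9.5 * one_day_ms), one_day_ms, now))  # False
```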
 
diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
new file mode 100644
index 0000000000..41ea9db689
--- /dev/null
+++ b/tests/rest/client/test_retention.py
@@ -0,0 +1,320 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from mock import Mock
+
+from synapse.api.constants import EventTypes
+from synapse.rest import admin
+from synapse.rest.client.v1 import login, room
+from synapse.visibility import filter_events_for_client
+
+from tests import unittest
+
+one_hour_ms = 3600000
+one_day_ms = one_hour_ms * 24
+
+
+class RetentionTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        config = self.default_config()
+        config["default_room_version"] = "1"
+        config["retention"] = {
+            "enabled": True,
+            "default_policy": {
+                "min_lifetime": one_day_ms,
+                "max_lifetime": one_day_ms * 3,
+            },
+            "allowed_lifetime_min": one_day_ms,
+            "allowed_lifetime_max": one_day_ms * 3,
+        }
+
+        self.hs = self.setup_test_homeserver(config=config)
+        return self.hs
+
+    def prepare(self, reactor, clock, homeserver):
+        self.user_id = self.register_user("user", "password")
+        self.token = self.login("user", "password")
+
+    def test_retention_state_event(self):
+        """Tests that the server configuration can limit the values a user can set to the
+        room's retention policy.
+        """
+        room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+        self.helper.send_state(
+            room_id=room_id,
+            event_type=EventTypes.Retention,
+            body={
+                "max_lifetime": one_day_ms * 4,
+            },
+            tok=self.token,
+            expect_code=400,
+        )
+
+        self.helper.send_state(
+            room_id=room_id,
+            event_type=EventTypes.Retention,
+            body={
+                "max_lifetime": one_hour_ms,
+            },
+            tok=self.token,
+            expect_code=400,
+        )
+
+    def test_retention_event_purged_with_state_event(self):
+        """Tests that expired events are correctly purged when the room's retention policy
+        is defined by a state event.
+        """
+        room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+        # Set the room's retention period to 2 days.
+        lifetime = one_day_ms * 2
+        self.helper.send_state(
+            room_id=room_id,
+            event_type=EventTypes.Retention,
+            body={
+                "max_lifetime": lifetime,
+            },
+            tok=self.token,
+        )
+
+        self._test_retention_event_purged(room_id, one_day_ms * 1.5)
+
+    def test_retention_event_purged_without_state_event(self):
+        """Tests that expired events are correctly purged when the room's retention policy
+        is defined by the server's configuration's default retention policy.
+        """
+        room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+        self._test_retention_event_purged(room_id, one_day_ms * 2)
+
+    def test_visibility(self):
+        """Tests that synapse.visibility.filter_events_for_client correctly filters out
+        outdated events
+        """
+        store = self.hs.get_datastore()
+        storage = self.hs.get_storage()
+        room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+        events = []
+
+        # Send a first event, which should be filtered out at the end of the test.
+        resp = self.helper.send(
+            room_id=room_id,
+            body="1",
+            tok=self.token,
+        )
+
+        # Get the event from the store so that we end up with a FrozenEvent that we can
+        # give to filter_events_for_client. We need to do this now because the event won't
+        # be in the database anymore after it has expired.
+        events.append(self.get_success(
+            store.get_event(
+                resp.get("event_id")
+            )
+        ))
+
+        # Advance the time by 2 days. We're using the default retention policy, therefore
+        # after this the first event will still be valid.
+        self.reactor.advance(one_day_ms * 2 / 1000)
+
+        # Send another event, which shouldn't get filtered out.
+        resp = self.helper.send(
+            room_id=room_id,
+            body="2",
+            tok=self.token,
+        )
+
+        valid_event_id = resp.get("event_id")
+
+        events.append(self.get_success(
+            store.get_event(
+                valid_event_id
+            )
+        ))
+
+        # Advance the time by another 2 days. After this, the first event should be
+        # outdated but not the second one.
+        self.reactor.advance(one_day_ms * 2 / 1000)
+
+        # Run filter_events_for_client with our list of FrozenEvents.
+        filtered_events = self.get_success(filter_events_for_client(
+            storage, self.user_id, events
+        ))
+
+        # We should only get one event back.
+        self.assertEqual(len(filtered_events), 1, filtered_events)
+        # That event should be the second, not outdated event.
+        self.assertEqual(filtered_events[0].event_id, valid_event_id, filtered_events)
+
+    def _test_retention_event_purged(self, room_id, increment):
+        # Send a first event to the room. This is the event we'll want to be purged at the
+        # end of the test.
+        resp = self.helper.send(
+            room_id=room_id,
+            body="1",
+            tok=self.token,
+        )
+
+        expired_event_id = resp.get("event_id")
+
+        # Check that we can retrieve the event.
+        expired_event = self.get_event(room_id, expired_event_id)
+        self.assertEqual(expired_event.get("content", {}).get("body"), "1", expired_event)
+
+        # Advance the time.
+        self.reactor.advance(increment / 1000)
+
+        # Send another event. We need this because the purge job won't purge the most
+        # recent event in the room.
+        resp = self.helper.send(
+            room_id=room_id,
+            body="2",
+            tok=self.token,
+        )
+
+        valid_event_id = resp.get("event_id")
+
+        # Advance the time again. Now our first event should have expired but our second
+        # one should still be kept.
+        self.reactor.advance(increment / 1000)
+
+        # Check that the event has been purged from the database.
+        self.get_event(room_id, expired_event_id, expected_code=404)
+
+        # Check that the event that hasn't been purged can still be retrieved.
+        valid_event = self.get_event(room_id, valid_event_id)
+        self.assertEqual(valid_event.get("content", {}).get("body"), "2", valid_event)
+
+    def get_event(self, room_id, event_id, expected_code=200):
+        url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id)
+
+        request, channel = self.make_request("GET", url, access_token=self.token)
+        self.render(request)
+
+        self.assertEqual(channel.code, expected_code, channel.result)
+
+        return channel.json_body
+
+
+class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        config = self.default_config()
+        config["default_room_version"] = "1"
+        config["retention"] = {
+            "enabled": True,
+        }
+
+        mock_federation_client = Mock(spec=["backfill"])
+
+        self.hs = self.setup_test_homeserver(
+            config=config,
+            federation_client=mock_federation_client,
+        )
+        return self.hs
+
+    def prepare(self, reactor, clock, homeserver):
+        self.user_id = self.register_user("user", "password")
+        self.token = self.login("user", "password")
+
+    def test_no_default_policy(self):
+        """Tests that an event doesn't get expired if there is neither a default retention
+        policy nor a policy specific to the room.
+        """
+        room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+        self._test_retention(room_id)
+
+    def test_state_policy(self):
+        """Tests that an event gets correctly expired if there is no default retention
+        policy but there's a policy specific to the room.
+        """
+        room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+        # Set the maximum lifetime to 35 days so that the first event gets expired but not
+        # the second one.
+        self.helper.send_state(
+            room_id=room_id,
+            event_type=EventTypes.Retention,
+            body={
+                "max_lifetime": one_day_ms * 35,
+            },
+            tok=self.token,
+        )
+
+        self._test_retention(room_id, expected_code_for_first_event=404)
+
+    def _test_retention(self, room_id, expected_code_for_first_event=200):
+        # Send a first event to the room. This is the event we'll want to be purged at the
+        # end of the test.
+        resp = self.helper.send(
+            room_id=room_id,
+            body="1",
+            tok=self.token,
+        )
+
+        first_event_id = resp.get("event_id")
+
+        # Check that we can retrieve the event.
+        expired_event = self.get_event(room_id, first_event_id)
+        self.assertEqual(expired_event.get("content", {}).get("body"), "1", expired_event)
+
+        # Advance the time by a month.
+        self.reactor.advance(one_day_ms * 30 / 1000)
+
+        # Send another event. We need this because the purge job won't purge the most
+        # recent event in the room.
+        resp = self.helper.send(
+            room_id=room_id,
+            body="2",
+            tok=self.token,
+        )
+
+        second_event_id = resp.get("event_id")
+
+        # Advance the time by another month.
+        self.reactor.advance(one_day_ms * 30 / 1000)
+
+        # Check if the event has been purged from the database.
+        first_event = self.get_event(
+            room_id, first_event_id, expected_code=expected_code_for_first_event
+        )
+
+        if expected_code_for_first_event == 200:
+            self.assertEqual(first_event.get("content", {}).get("body"), "1", first_event)
+
+        # Check that the event that hasn't been purged can still be retrieved.
+        second_event = self.get_event(room_id, second_event_id)
+        self.assertEqual(second_event.get("content", {}).get("body"), "2", second_event)
+
+    def get_event(self, room_id, event_id, expected_code=200):
+        url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id)
+
+        request, channel = self.make_request("GET", url, access_token=self.token)
+        self.render(request)
+
+        self.assertEqual(channel.code, expected_code, channel.result)
+
+        return channel.json_body
-- 
cgit 1.4.1


From 4e1c7b79fa3498c48106c17d0edbab2f7bcc0c38 Mon Sep 17 00:00:00 2001
From: Amber Brown 
Date: Tue, 5 Nov 2019 05:05:48 +1100
Subject: Remove the psutil dependency (#6318)

* remove psutil and replace with resource
---
 changelog.d/6318.misc          |   1 +
 synapse/app/homeserver.py      | 174 ++++++++++++++++++++++-------------------
 synapse/python_dependencies.py |   1 -
 synapse/server.py              |   2 +
 tests/test_phone_home.py       |  51 ++++++++++++
 5 files changed, 146 insertions(+), 83 deletions(-)
 create mode 100644 changelog.d/6318.misc
 create mode 100644 tests/test_phone_home.py

(limited to 'changelog.d')

diff --git a/changelog.d/6318.misc b/changelog.d/6318.misc
new file mode 100644
index 0000000000..63527ccef4
--- /dev/null
+++ b/changelog.d/6318.misc
@@ -0,0 +1 @@
+Remove the dependency on psutil and replace functionality with the stdlib `resource` module.
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 8d28076d92..00a7f8330e 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -19,12 +19,13 @@ from __future__ import print_function
 
 import gc
 import logging
+import math
 import os
+import resource
 import sys
 
 from six import iteritems
 
-import psutil
 from prometheus_client import Gauge
 
 from twisted.application import service
@@ -471,6 +472,87 @@ class SynapseService(service.Service):
         return self._port.stopListening()
 
 
+# Contains the list of processes we will be monitoring
+# currently either 0 or 1
+_stats_process = []
+
+
+@defer.inlineCallbacks
+def phone_stats_home(hs, stats, stats_process=_stats_process):
+    logger.info("Gathering stats for reporting")
+    now = int(hs.get_clock().time())
+    uptime = int(now - hs.start_time)
+    if uptime < 0:
+        uptime = 0
+
+    stats["homeserver"] = hs.config.server_name
+    stats["server_context"] = hs.config.server_context
+    stats["timestamp"] = now
+    stats["uptime_seconds"] = uptime
+    version = sys.version_info
+    stats["python_version"] = "{}.{}.{}".format(
+        version.major, version.minor, version.micro
+    )
+    stats["total_users"] = yield hs.get_datastore().count_all_users()
+
+    total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
+    stats["total_nonbridged_users"] = total_nonbridged_users
+
+    daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
+    for name, count in iteritems(daily_user_type_results):
+        stats["daily_user_type_" + name] = count
+
+    room_count = yield hs.get_datastore().get_room_count()
+    stats["total_room_count"] = room_count
+
+    stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
+    stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
+    stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
+    stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
+
+    r30_results = yield hs.get_datastore().count_r30_users()
+    for name, count in iteritems(r30_results):
+        stats["r30_users_" + name] = count
+
+    daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
+    stats["daily_sent_messages"] = daily_sent_messages
+    stats["cache_factor"] = CACHE_SIZE_FACTOR
+    stats["event_cache_size"] = hs.config.event_cache_size
+
+    #
+    # Performance statistics
+    #
+    old = stats_process[0]
+    new = (now, resource.getrusage(resource.RUSAGE_SELF))
+    stats_process[0] = new
+
+    # Get RSS in bytes
+    stats["memory_rss"] = new[1].ru_maxrss
+
+    # Get CPU time in % of a single core, not % of all cores
+    used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - (
+        old[1].ru_utime + old[1].ru_stime
+    )
+    if used_cpu_time == 0 or new[0] == old[0]:
+        stats["cpu_average"] = 0
+    else:
+        stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100)
+
+    #
+    # Database version
+    #
+
+    stats["database_engine"] = hs.get_datastore().database_engine_name
+    stats["database_server_version"] = hs.get_datastore().get_server_version()
+    logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
+    try:
+        yield hs.get_proxied_http_client().put_json(
+            hs.config.report_stats_endpoint, stats
+        )
+    except Exception as e:
+        logger.warning("Error reporting stats: %s", e)
+
+
 def run(hs):
     PROFILE_SYNAPSE = False
     if PROFILE_SYNAPSE:
@@ -497,91 +579,19 @@ def run(hs):
         reactor.run = profile(reactor.run)
 
     clock = hs.get_clock()
-    start_time = clock.time()
 
     stats = {}
 
-    # Contains the list of processes we will be monitoring
-    # currently either 0 or 1
-    stats_process = []
+    def performance_stats_init():
+        _stats_process.clear()
+        _stats_process.append(
+            (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF))
+        )
 
     def start_phone_stats_home():
-        return run_as_background_process("phone_stats_home", phone_stats_home)
-
-    @defer.inlineCallbacks
-    def phone_stats_home():
-        logger.info("Gathering stats for reporting")
-        now = int(hs.get_clock().time())
-        uptime = int(now - start_time)
-        if uptime < 0:
-            uptime = 0
-
-        stats["homeserver"] = hs.config.server_name
-        stats["server_context"] = hs.config.server_context
-        stats["timestamp"] = now
-        stats["uptime_seconds"] = uptime
-        version = sys.version_info
-        stats["python_version"] = "{}.{}.{}".format(
-            version.major, version.minor, version.micro
-        )
-        stats["total_users"] = yield hs.get_datastore().count_all_users()
-
-        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
-        stats["total_nonbridged_users"] = total_nonbridged_users
-
-        daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
-        for name, count in iteritems(daily_user_type_results):
-            stats["daily_user_type_" + name] = count
-
-        room_count = yield hs.get_datastore().get_room_count()
-        stats["total_room_count"] = room_count
-
-        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
-        stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
-        stats[
-            "daily_active_rooms"
-        ] = yield hs.get_datastore().count_daily_active_rooms()
-        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
-
-        r30_results = yield hs.get_datastore().count_r30_users()
-        for name, count in iteritems(r30_results):
-            stats["r30_users_" + name] = count
-
-        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
-        stats["daily_sent_messages"] = daily_sent_messages
-        stats["cache_factor"] = CACHE_SIZE_FACTOR
-        stats["event_cache_size"] = hs.config.event_cache_size
-
-        if len(stats_process) > 0:
-            stats["memory_rss"] = 0
-            stats["cpu_average"] = 0
-            for process in stats_process:
-                stats["memory_rss"] += process.memory_info().rss
-                stats["cpu_average"] += int(process.cpu_percent(interval=None))
-
-        stats["database_engine"] = hs.get_datastore().database_engine_name
-        stats["database_server_version"] = hs.get_datastore().get_server_version()
-        logger.info(
-            "Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)
+        return run_as_background_process(
+            "phone_stats_home", phone_stats_home, hs, stats
         )
-        try:
-            yield hs.get_proxied_http_client().put_json(
-                hs.config.report_stats_endpoint, stats
-            )
-        except Exception as e:
-            logger.warning("Error reporting stats: %s", e)
-
-    def performance_stats_init():
-        try:
-            process = psutil.Process()
-            # Ensure we can fetch both, and make the initial request for cpu_percent
-            # so the next request will use this as the initial point.
-            process.memory_info().rss
-            process.cpu_percent(interval=None)
-            logger.info("report_stats can use psutil")
-            stats_process.append(process)
-        except (AttributeError):
-            logger.warning("Unable to read memory/cpu stats. Disabling reporting.")
 
     def generate_user_daily_visit_stats():
         return run_as_background_process(
@@ -626,7 +636,7 @@ def run(hs):
 
     if hs.config.report_stats:
         logger.info("Scheduling stats reporting for 3 hour intervals")
         clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000)
 
         # We need to defer this init for the cases that we daemonize
         # otherwise the process ID we get is that of the non-daemon process
@@ -634,7 +644,7 @@ def run(hs):
 
         # We wait 5 minutes to send the first set of stats as the server can
         # be quite busy the first few minutes
         clock.call_later(5 * 60, start_phone_stats_home)
 
     _base.start_reactor(
         "synapse-homeserver",
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index aa7da1c543..5871feaafd 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -61,7 +61,6 @@ REQUIREMENTS = [
     "bcrypt>=3.1.0",
     "pillow>=4.3.0",
     "sortedcontainers>=1.4.4",
-    "psutil>=2.0.0",
     "pymacaroons>=0.13.0",
     "msgpack>=0.5.2",
     "phonenumbers>=8.2.0",
diff --git a/synapse/server.py b/synapse/server.py
index f8aeebcff8..90c3b072e8 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -221,6 +221,7 @@ class HomeServer(object):
         self.hostname = hostname
         self._building = {}
         self._listening_services = []
+        self.start_time = None
 
         self.clock = Clock(reactor)
         self.distributor = Distributor()
@@ -240,6 +241,7 @@ class HomeServer(object):
             datastore = self.DATASTORE_CLASS(conn, self)
             self.datastores = DataStores(datastore, conn, self)
             conn.commit()
+        self.start_time = int(self.get_clock().time())
         logger.info("Finished setting up.")
 
     def setup_master(self):
diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py
new file mode 100644
index 0000000000..7657bddea5
--- /dev/null
+++ b/tests/test_phone_home.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import resource
+
+import mock
+
+from synapse.app.homeserver import phone_stats_home
+
+from tests.unittest import HomeserverTestCase
+
+
+class PhoneHomeStatsTestCase(HomeserverTestCase):
+    def test_performance_frozen_clock(self):
+        """
+        If time doesn't move, don't error out.
+        """
+        past_stats = [
+            (self.hs.get_clock().time(), resource.getrusage(resource.RUSAGE_SELF))
+        ]
+        stats = {}
+        self.get_success(phone_stats_home(self.hs, stats, past_stats))
+        self.assertEqual(stats["cpu_average"], 0)
+
+    def test_performance_100(self):
+        """
+        1 second of usage over 1 second is 100% CPU usage.
+        """
+        real_res = resource.getrusage(resource.RUSAGE_SELF)
+        old_resource = mock.Mock(spec=real_res)
+        old_resource.ru_utime = real_res.ru_utime - 1
+        old_resource.ru_stime = real_res.ru_stime
+        old_resource.ru_maxrss = real_res.ru_maxrss
+
+        past_stats = [(self.hs.get_clock().time(), old_resource)]
+        stats = {}
+        self.reactor.advance(1)
+        self.get_success(phone_stats_home(self.hs, stats, past_stats))
+        self.assertApproximates(stats["cpu_average"], 100, tolerance=2.5)
-- 
cgit 1.4.1
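
The `cpu_average` figure introduced above is derived from two `(wall_time, rusage)` snapshots. A runnable sketch of just that calculation (Unix-only, since the stdlib `resource` module is unavailable on Windows):

```python
import math
import resource
import time

def cpu_average(old, new):
    """Percent of one core used between two (wall_time, rusage) snapshots,
    mirroring the calculation in phone_stats_home above."""
    used = (new[1].ru_utime + new[1].ru_stime) - (old[1].ru_utime + old[1].ru_stime)
    if used == 0 or new[0] == old[0]:
        return 0
    return math.floor(used / (new[0] - old[0]) * 100)

old = (time.time(), resource.getrusage(resource.RUSAGE_SELF))
sum(i * i for i in range(2000000))  # burn a little CPU
new = (time.time(), resource.getrusage(resource.RUSAGE_SELF))
print(cpu_average(old, new))  # roughly 100 if the loop dominated the interval
```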


From 0287d033eec86fb7f6bb84f929e756c99caf2113 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Mon, 4 Nov 2019 18:08:50 +0000
Subject: Transfer upgraded rooms on groups

---
 changelog.d/6235.bugfix                          |  1 +
 synapse/handlers/room_member.py                  |  9 +++++++++
 synapse/storage/data_stores/main/group_server.py | 15 +++++++++++++++
 3 files changed, 25 insertions(+)
 create mode 100644 changelog.d/6235.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6235.bugfix b/changelog.d/6235.bugfix
new file mode 100644
index 0000000000..12718ba934
--- /dev/null
+++ b/changelog.d/6235.bugfix
@@ -0,0 +1 @@
+Remove a room from a server's public rooms list on room upgrade.
\ No newline at end of file
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 06d09c2947..01c65ee222 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -514,6 +514,15 @@ class RoomMemberHandler(object):
         if old_room and old_room["is_public"]:
             yield self.store.set_room_is_public(old_room_id, False)
             yield self.store.set_room_is_public(room_id, True)
+
+        # Check if any groups we own contain the predecessor room
+        local_group_ids = yield self.store.get_local_groups_for_room(old_room_id)
+        for group_id in local_group_ids:
+            # Add the new room to those groups, preserving the old room's visibility
+            yield self.store.add_room_to_group(
+                group_id, room_id, bool(old_room and old_room["is_public"])
+            )
+
+            # Remove the old room from those groups
+            yield self.store.remove_room_from_group(group_id, old_room_id)
 
     @defer.inlineCallbacks
     def copy_user_state_on_room_upgrade(self, old_room_id, new_room_id, user_ids):
diff --git a/synapse/storage/data_stores/main/group_server.py b/synapse/storage/data_stores/main/group_server.py
index b3a2771f1b..13ad71a49c 100644
--- a/synapse/storage/data_stores/main/group_server.py
+++ b/synapse/storage/data_stores/main/group_server.py
@@ -552,6 +552,21 @@ class GroupServerStore(SQLBaseStore):
             keyvalues={"group_id": group_id, "role_id": role_id, "user_id": user_id},
             desc="remove_user_from_summary",
         )
+
+    def get_local_groups_for_room(self, room_id):
+        """Get all of the local groups that contain a given room
+        Args:
+            room_id (str): The ID of a room
+        Returns:
+            Deferred[list[str]]: A twisted.Deferred containing a list of group ids
+                containing this room
+        """
+        return self._simple_select_onecol(
+            table="group_rooms",
+            keyvalues={"room_id": room_id},
+            retcol="group_id",
+            desc="get_local_groups_for_room",
+        )
 
     def get_users_for_summary_by_role(self, group_id, include_private=False):
         """Get the users and roles that should be included in a summary request
-- 
cgit 1.4.1
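
The group handling added in this commit reduces to: find the local groups containing the predecessor room, add the upgraded room, and drop the old one. A self-contained sketch, using asyncio coroutines in place of the patch's Twisted generators, with `FakeGroupStore` standing in for the real store methods:

```python
import asyncio

class FakeGroupStore:
    """Stand-in for the three store methods the handler uses above."""

    def __init__(self):
        self.group_rooms = {("+community:test", "!old:test")}

    async def get_local_groups_for_room(self, room_id):
        return [g for (g, r) in self.group_rooms if r == room_id]

    async def add_room_to_group(self, group_id, room_id, is_public):
        self.group_rooms.add((group_id, room_id))

    async def remove_room_from_group(self, group_id, room_id):
        self.group_rooms.discard((group_id, room_id))

async def transfer_groups_on_upgrade(store, old_room_id, new_room_id, is_public):
    for group_id in await store.get_local_groups_for_room(old_room_id):
        await store.add_room_to_group(group_id, new_room_id, is_public)
        await store.remove_room_from_group(group_id, old_room_id)

store = FakeGroupStore()
asyncio.run(transfer_groups_on_upgrade(store, "!old:test", "!new:test", True))
print(store.group_rooms)  # {('+community:test', '!new:test')}
```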


From 408600282774391fade9e4a6606f4967184865c0 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 5 Nov 2019 13:23:25 +0000
Subject: Improve documentation for EventContext fields (#6319)

---
 changelog.d/6319.misc      |  1 +
 synapse/events/snapshot.py | 91 +++++++++++++++++++++++++++++++++-------------
 synapse/state/__init__.py  |  3 ++
 3 files changed, 69 insertions(+), 26 deletions(-)
 create mode 100644 changelog.d/6319.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6319.misc b/changelog.d/6319.misc
new file mode 100644
index 0000000000..9711ef21ed
--- /dev/null
+++ b/changelog.d/6319.misc
@@ -0,0 +1 @@
+Improve documentation for EventContext fields.
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index a269de5482..5f07f6fe4b 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -12,6 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Dict, Optional, Tuple, Union
+
 from six import iteritems
 
 import attr
@@ -19,45 +21,82 @@ from frozendict import frozendict
 
 from twisted.internet import defer
 
+from synapse.appservice import ApplicationService
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 
 
 @attr.s(slots=True)
 class EventContext:
     """
+    Holds information relevant to persisting an event
+
     Attributes:
-        state_group (int|None): state group id, if the state has been stored
-            as a state group. This is usually only None if e.g. the event is
-            an outlier.
-        rejected (bool|str): A rejection reason if the event was rejected, else
-            False
-
-        prev_group (int): Previously persisted state group. ``None`` for an
-            outlier.
-        delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
-            (type, state_key) -> event_id. ``None`` for an outlier.
-
-        app_service: FIXME
-
-        _current_state_ids (dict[(str, str), str]|None):
-            The current state map including the current event. None if outlier
-            or we haven't fetched the state from DB yet.
+        rejected: A rejection reason if the event was rejected, else False
+
+        state_group: The ID of the state group for this event. Note that state events
+            are persisted with a state group which includes the new event, so this is
+            effectively the state *after* the event in question.
+
+            For a *rejected* state event, where the state of the rejected event is
+            ignored, this state_group should never make it into the
+            event_to_state_groups table. Indeed, inspecting this value for a rejected
+            state event is almost certainly incorrect.
+
+            For an outlier, where we don't have the state at the event, this will be
+            None.
+
+        prev_group: If it is known, ``state_group``'s prev_group. Note that this being
+            None does not necessarily mean that ``state_group`` does not have
+            a prev_group!
+
+            If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
+            will always also be ``None``.
+
+            Note that this is *not* (necessarily) the state group associated with
+            ``_prev_state_ids``.
+
+        delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group``
+            and ``state_group``.
+
+        app_service: If this event is being sent by a (local) application service, that
+            app service.
+
+        _current_state_ids: The room state map, including this event - ie, the state
+            in ``state_group``.
+
             (type, state_key) -> event_id
 
-        _prev_state_ids (dict[(str, str), str]|None):
-            The current state map excluding the current event. None if outlier
-            or we haven't fetched the state from DB yet.
+            FIXME: what is this for an outlier? it seems ill-defined. It seems like
+            it could be either {}, or the state we were given by the remote
+            server, depending on $THINGS
+
+            Note that this is a private attribute: it should be accessed via
+            ``get_current_state_ids``. _AsyncEventContext impl calculates this
+            on-demand: it will be None until that happens.
+
+        _prev_state_ids: The room state map, excluding this event. For a non-state
+            event, this will be the same as _current_state_ids.
+
+            Note that it is a completely different thing to prev_group!
+
             (type, state_key) -> event_id
+
+            FIXME: again, what is this for an outlier?
+
+            As with _current_state_ids, this is a private attribute. It should be
+            accessed via get_prev_state_ids.
     """
 
-    state_group = attr.ib(default=None)
-    rejected = attr.ib(default=False)
-    prev_group = attr.ib(default=None)
-    delta_ids = attr.ib(default=None)
-    app_service = attr.ib(default=None)
+    rejected = attr.ib(default=False, type=Union[bool, str])
+    state_group = attr.ib(default=None, type=Optional[int])
+    prev_group = attr.ib(default=None, type=Optional[int])
+    delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
+    app_service = attr.ib(default=None, type=Optional[ApplicationService])
 
-    _prev_state_ids = attr.ib(default=None)
-    _current_state_ids = attr.ib(default=None)
+    _current_state_ids = attr.ib(
+        default=None, type=Optional[Dict[Tuple[str, str], str]]
+    )
+    _prev_state_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
 
     @staticmethod
     def with_state(
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 4e91eb66fe..2c04ab1854 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -232,6 +232,9 @@ class StateHandler(object):
             # If this is an outlier, then we know it shouldn't have any current
             # state. Certainly store.get_current_state won't return any, and
             # persisting the event won't store the state group.
+
+            # FIXME: why do we populate current_state_ids? I thought the point was
+            # that we weren't supposed to have any state for outliers?
             if old_state:
                 prev_state_ids = {(s.type, s.state_key): s.event_id for s in old_state}
                 if event.is_state():
-- 
cgit 1.4.1
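
One relationship the new docstrings pin down is that `delta_ids` is the state delta from `prev_group` to `state_group`: given the earlier state map, the later one is an overlay. A toy illustration (event IDs made up):

```python
# State at prev_group: @alice is joined.
prev_state_ids = {("m.room.member", "@alice:test"): "$alice_join"}

# delta_ids: what changed between prev_group and state_group.
delta_ids = {("m.room.member", "@bob:test"): "$bob_join"}

# State at state_group = the prev_group state overlaid with the delta.
current_state_ids = dict(prev_state_ids)
current_state_ids.update(delta_ids)
print(current_state_ids)
# {('m.room.member', '@alice:test'): '$alice_join',
#  ('m.room.member', '@bob:test'): '$bob_join'}
```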


From d8d808db64c3464924016fab88879085d6c63880 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 5 Nov 2019 14:42:05 +0000
Subject: Changelog

---
 changelog.d/6329.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6329.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6329.feature b/changelog.d/6329.feature
new file mode 100644
index 0000000000..78a187a1dc
--- /dev/null
+++ b/changelog.d/6329.feature
@@ -0,0 +1 @@
+Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)).
-- 
cgit 1.4.1


From f5d8fdf0a71cadd4ded81e276cc57e9c0c195a2f Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 5 Nov 2019 14:44:25 +0000
Subject: Update changelog

---
 changelog.d/6310.feature | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6310.feature b/changelog.d/6310.feature
index b7ff3fad3b..78a187a1dc 100644
--- a/changelog.d/6310.feature
+++ b/changelog.d/6310.feature
@@ -1 +1 @@
-Implement label-based filtering.
+Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)).
-- 
cgit 1.4.1


From e9bfe719ba1928dc191cea93120c5c8a89584434 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Tue, 5 Nov 2019 15:45:17 +0000
Subject: Strip overlong OpenGraph data from url preview

... to stop people causing DoSes with malicious web pages
---
 changelog.d/6331.feature                      |  1 +
 synapse/rest/media/v1/preview_url_resource.py | 20 +++++++++++++++-
 tests/rest/media/v1/test_url_preview.py       | 34 +++++++++++++++++++++++++++
 3 files changed, 54 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6331.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6331.feature b/changelog.d/6331.feature
new file mode 100644
index 0000000000..eaf69ef3f6
--- /dev/null
+++ b/changelog.d/6331.feature
@@ -0,0 +1 @@
+Limit the length of data returned by url previews, to prevent DoS attacks.
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 0c68c3aad5..6d8c39a410 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -56,6 +56,9 @@ logger = logging.getLogger(__name__)
 _charset_match = re.compile(br"<\s*meta[^>]*charset\s*=\s*([a-z0-9-]+)", flags=re.I)
 _content_type_match = re.compile(r'.*; *charset="?(.*?)"?(;|$)', flags=re.I)
 
+OG_TAG_NAME_MAXLEN = 50
+OG_TAG_VALUE_MAXLEN = 1000
+
 
 class PreviewUrlResource(DirectServeResource):
     isLeaf = True
@@ -167,7 +170,7 @@ class PreviewUrlResource(DirectServeResource):
             ts (int):
 
         Returns:
-            Deferred[str]: json-encoded og data
+            Deferred[bytes]: json-encoded og data
         """
         # check the URL cache in the DB (which will also provide us with
         # historical previews, if we have any)
@@ -268,6 +271,17 @@ class PreviewUrlResource(DirectServeResource):
             logger.warn("Failed to find any OG data in %s", url)
             og = {}
 
+        # filter out any stupidly long values
+        keys_to_remove = []
+        for k, v in og.items():
+            if len(k) > OG_TAG_NAME_MAXLEN or len(v) > OG_TAG_VALUE_MAXLEN:
+                logger.warning(
+                    "Pruning overlong tag %s from OG data", k[:OG_TAG_NAME_MAXLEN]
+                )
+                keys_to_remove.append(k)
+        for k in keys_to_remove:
+            del og[k]
+
         logger.debug("Calculated OG for %s as %s" % (url, og))
 
         jsonog = json.dumps(og)
@@ -502,6 +516,10 @@ def _calc_og(tree, media_uri):
     og = {}
     for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
         if "content" in tag.attrib:
+            # if we've got more than 50 tags, someone is taking the piss
+            if len(og) >= 50:
+                logger.warning("skipping OG for page with too many og: tags")
+                return {}
             og[tag.attrib["property"]] = tag.attrib["content"]
 
     # TODO: grab article: meta tags too, e.g.:
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index 976652aee8..da19a8e86f 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -247,6 +247,40 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 200)
         self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430")
 
+    def test_overlong_title(self):
+        self.lookups["matrix.org"] = [(IPv4Address, "8.8.8.8")]
+
+        end_content = (
+            b"<html><head>"
+            b"<title>" + b"x" * 2000 + b"</title>"
+            b'<meta property="og:description" content="hi" />'
+            b"</head></html>"
+        )
+
+        request, channel = self.make_request(
+            "GET", "url_preview?url=http://matrix.org", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            (
+                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                b'Content-Type: text/html; charset="windows-1251"\r\n\r\n'
+            )
+            % (len(end_content),)
+            + end_content
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
+        res = channel.json_body
+        self.assertCountEqual(["og:description"], res.keys())
+
     def test_ipaddr(self):
         """
         IP addresses can be previewed directly.
-- 
cgit 1.4.1


From 248111bae8600a3e2d4da49c6e64b72c76219850 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 5 Nov 2019 15:54:23 +0000
Subject: Newsfile

---
 changelog.d/6332.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6332.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6332.bugfix b/changelog.d/6332.bugfix
new file mode 100644
index 0000000000..b14bd7e43c
--- /dev/null
+++ b/changelog.d/6332.bugfix
@@ -0,0 +1 @@
+Fix caching devices for remote users when using workers.
-- 
cgit 1.4.1


From 81d49cbb07a4dc5a673e31a8a626af6e8a18f801 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Tue, 5 Nov 2019 17:22:58 +0000
Subject: Fix exception when OpenGraph tag values are ints

---
 changelog.d/6334.feature                      | 1 +
 synapse/rest/media/v1/preview_url_resource.py | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6334.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6334.feature b/changelog.d/6334.feature
new file mode 100644
index 0000000000..eaf69ef3f6
--- /dev/null
+++ b/changelog.d/6334.feature
@@ -0,0 +1 @@
+Limit the length of data returned by url previews, to prevent DoS attacks.
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 4d4b3c1462..ec9c4619c9 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -274,7 +274,8 @@ class PreviewUrlResource(DirectServeResource):
         # filter out any stupidly long values
         keys_to_remove = []
         for k, v in og.items():
-            if len(k) > OG_TAG_NAME_MAXLEN or len(v) > OG_TAG_VALUE_MAXLEN:
+            # values can be numeric as well as strings, hence the cast to str
+            if len(k) > OG_TAG_NAME_MAXLEN or len(str(v)) > OG_TAG_VALUE_MAXLEN:
                 logger.warning(
                     "Pruning overlong tag %s from OG data", k[:OG_TAG_NAME_MAXLEN]
                 )
-- 
cgit 1.4.1
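
Taken together, the two OG-pruning commits above amount to the following pass (a standalone sketch; the constants match the patch, and the `str()` cast is the second commit's fix for numeric tag values):

```python
OG_TAG_NAME_MAXLEN = 50
OG_TAG_VALUE_MAXLEN = 1000

def prune_og(og):
    # Collect the offending keys first, then delete: mutating a dict
    # while iterating over it raises RuntimeError.
    keys_to_remove = [
        k
        for k, v in og.items()
        if len(k) > OG_TAG_NAME_MAXLEN or len(str(v)) > OG_TAG_VALUE_MAXLEN
    ]
    for k in keys_to_remove:
        del og[k]
    return og

og = {"og:title": "x" * 2000, "og:video:width": 640, "og:description": "hi"}
print(prune_og(og))  # {'og:video:width': 640, 'og:description': 'hi'}
```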


From b437eb48b6bde1cd908d472893c5638e021c6a8f Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 5 Nov 2019 17:45:29 +0000
Subject: Newsfile

---
 changelog.d/6336.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6336.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6336.misc b/changelog.d/6336.misc
new file mode 100644
index 0000000000..63527ccef4
--- /dev/null
+++ b/changelog.d/6336.misc
@@ -0,0 +1 @@
+Remove the dependency on psutil and replace functionality with the stdlib `resource` module.
-- 
cgit 1.4.1


From 0e3ab8afdc2b89ac2f47878112d93dd03d01f7ef Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 5 Nov 2019 22:13:37 +0000
Subject: Add some checks that we aren't using state from rejected events
 (#6330)

* Raise an exception if accessing state for rejected events

Add some sanity checks on accessing state_group etc for
rejected events.

* Skip calculating push actions for rejected events

It didn't actually cause any bugs, because rejected events get filtered out at
various later points, but there's not point in trying to calculate the push
actions for a rejected event.
---
 changelog.d/6330.misc          |  1 +
 synapse/events/snapshot.py     | 49 +++++++++++++++++++++++++++++++++++++-----
 synapse/handlers/federation.py |  6 +++++-
 3 files changed, 50 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/6330.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6330.misc b/changelog.d/6330.misc
new file mode 100644
index 0000000000..6239cba263
--- /dev/null
+++ b/changelog.d/6330.misc
@@ -0,0 +1 @@
+Add some checks that we aren't using state from rejected events.
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 5f07f6fe4b..0f3c5989cb 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -33,7 +33,7 @@ class EventContext:
     Attributes:
         rejected: A rejection reason if the event was rejected, else False
 
-        state_group: The ID of the state group for this event. Note that state events
+        _state_group: The ID of the state group for this event. Note that state events
             are persisted with a state group which includes the new event, so this is
             effectively the state *after* the event in question.
 
@@ -45,6 +45,9 @@ class EventContext:
             For an outlier, where we don't have the state at the event, this will be
             None.
 
+            Note that this is a private attribute: it should be accessed via
+            the ``state_group`` property.
+
         prev_group: If it is known, ``state_group``'s prev_group. Note that this being
             None does not necessarily mean that ``state_group`` does not have
             a prev_group!
@@ -88,7 +91,7 @@ class EventContext:
     """
 
     rejected = attr.ib(default=False, type=Union[bool, str])
-    state_group = attr.ib(default=None, type=Optional[int])
+    _state_group = attr.ib(default=None, type=Optional[int])
     prev_group = attr.ib(default=None, type=Optional[int])
     delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
     app_service = attr.ib(default=None, type=Optional[ApplicationService])
@@ -136,7 +139,7 @@ class EventContext:
             "prev_state_id": prev_state_id,
             "event_type": event.type,
             "event_state_key": event.state_key if event.is_state() else None,
-            "state_group": self.state_group,
+            "state_group": self._state_group,
             "rejected": self.rejected,
             "prev_group": self.prev_group,
             "delta_ids": _encode_state_dict(self.delta_ids),
@@ -173,22 +176,52 @@ class EventContext:
 
         return context
 
+    @property
+    def state_group(self) -> Optional[int]:
+        """The ID of the state group for this event.
+
+        Note that state events are persisted with a state group which includes the new
+        event, so this is effectively the state *after* the event in question.
+
+        For an outlier, where we don't have the state at the event, this will be None.
+
+        It is an error to access this for a rejected event, since rejected state should
+        not make it into the room state. Accessing this property will raise an exception
+        if ``rejected`` is set.
+        """
+        if self.rejected:
+            raise RuntimeError("Attempt to access state_group of rejected event")
+
+        return self._state_group
+
     @defer.inlineCallbacks
     def get_current_state_ids(self, store):
-        """Gets the current state IDs
+        """
+        Gets the room state map, including this event - ie, the state in ``state_group``
+
+        It is an error to access this for a rejected event, since rejected state should
+        not make it into the room state. This method will raise an exception if
+        ``rejected`` is set.
 
         Returns:
             Deferred[dict[(str, str), str]|None]: Returns None if state_group
                 is None, which happens when the associated event is an outlier.
+
                 Maps a (type, state_key) to the event ID of the state event matching
                 this tuple.
         """
+        if self.rejected:
+            raise RuntimeError("Attempt to access state_ids of rejected event")
+
         yield self._ensure_fetched(store)
         return self._current_state_ids
 
     @defer.inlineCallbacks
     def get_prev_state_ids(self, store):
-        """Gets the prev state IDs
+        """
+        Gets the room state map, excluding this event.
+
+        For a non-state event, this will be the same as get_current_state_ids().
 
         Returns:
             Deferred[dict[(str, str), str]|None]: Returns None if state_group
@@ -202,11 +235,17 @@ class EventContext:
     def get_cached_current_state_ids(self):
         """Gets the current state IDs if we have them already cached.
 
+        It is an error to access this for a rejected event, since rejected state should
+        not make it into the room state. This method will raise an exception if
+        ``rejected`` is set.
+
         Returns:
             dict[(str, str), str]|None: Returns None if we haven't cached the
             state or if state_group is None, which happens when the associated
             event is an outlier.
         """
+        if self.rejected:
+            raise RuntimeError("Attempt to access state_ids of rejected event")
 
         return self._current_state_ids
 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 8cafcfdab0..b7916de909 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1688,7 +1688,11 @@ class FederationHandler(BaseHandler):
         # hack around with a try/finally instead.
         success = False
         try:
-            if not event.internal_metadata.is_outlier() and not backfilled:
+            if (
+                not event.internal_metadata.is_outlier()
+                and not backfilled
+                and not context.rejected
+            ):
                 yield self.action_generator.handle_push_actions_for_event(
                     event, context
                 )
-- 
cgit 1.4.1
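
The guard added here follows a private-attribute-plus-property pattern that is easy to show in miniature. A minimal sketch (not the real `EventContext`, just the shape of the check):

```python
import attr

@attr.s(slots=True)
class Ctx:
    rejected = attr.ib(default=False)
    _state_group = attr.ib(default=None)

    @property
    def state_group(self):
        # Rejected state must never make it into the room state, so
        # reading a rejected event's state group is a logic error.
        if self.rejected:
            raise RuntimeError("Attempt to access state_group of rejected event")
        return self._state_group

print(Ctx(state_group=123).state_group)  # 123
try:
    Ctx(rejected="auth_error", state_group=123).state_group
except RuntimeError as e:
    print(e)  # Attempt to access state_group of rejected event
```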


From 807ec3bd99908d2991d2b3d0615b0862610c6dc3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 6 Nov 2019 10:01:39 +0000
Subject: Fix bug which caused rejected events to be stored with the wrong room
 state  (#6320)

Fixes a bug where rejected events were persisted with the wrong state group.

Also fixes an occasional internal-server-error when receiving events over
federation which are rejected and (possibly because they are
backwards-extremities) have no prev_group.

Fixes #6289.
---
 changelog.d/6320.bugfix                   |   1 +
 synapse/events/snapshot.py                |  25 ++++-
 synapse/handlers/federation.py            |   1 +
 synapse/state/__init__.py                 | 172 ++++++++++++++----------------
 synapse/storage/data_stores/main/state.py |   2 +-
 tests/handlers/test_federation.py         | 126 ++++++++++++++++++++++
 tests/test_state.py                       |  61 +++++++++--
 7 files changed, 285 insertions(+), 103 deletions(-)
 create mode 100644 changelog.d/6320.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6320.bugfix b/changelog.d/6320.bugfix
new file mode 100644
index 0000000000..2c3fad5655
--- /dev/null
+++ b/changelog.d/6320.bugfix
@@ -0,0 +1 @@
+Fix bug which caused rejected events to be persisted with the wrong room state.
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 0f3c5989cb..64e898f40c 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -48,10 +48,21 @@ class EventContext:
             Note that this is a private attribute: it should be accessed via
             the ``state_group`` property.
 
+        state_group_before_event: The ID of the state group representing the state
+            of the room before this event.
+
+            If this is a non-state event, this will be the same as ``state_group``. If
+            it's a state event, it will be the same as ``prev_group``.
+
+            If ``state_group`` is None (ie, the event is an outlier),
+            ``state_group_before_event`` will always also be ``None``.
+
         prev_group: If it is known, ``state_group``'s prev_group. Note that this being
             None does not necessarily mean that ``state_group`` does not have
             a prev_group!
 
+            If the event is a state event, this is normally the same as
+            ``state_group_before_event``.
+
             If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
             will always also be ``None``.
 
@@ -77,7 +88,8 @@ class EventContext:
             ``get_current_state_ids``. _AsyncEventContext impl calculates this
             on-demand: it will be None until that happens.
 
-        _prev_state_ids: The room state map, excluding this event. For a non-state
+        _prev_state_ids: The room state map, excluding this event - ie, the state
+            in ``state_group_before_event``. For a non-state
             event, this will be the same as _current_state_events.
 
             Note that it is a completely different thing to prev_group!
@@ -92,6 +104,7 @@ class EventContext:
 
     rejected = attr.ib(default=False, type=Union[bool, str])
     _state_group = attr.ib(default=None, type=Optional[int])
+    state_group_before_event = attr.ib(default=None, type=Optional[int])
     prev_group = attr.ib(default=None, type=Optional[int])
     delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
     app_service = attr.ib(default=None, type=Optional[ApplicationService])
@@ -103,12 +116,18 @@ class EventContext:
 
     @staticmethod
     def with_state(
-        state_group, current_state_ids, prev_state_ids, prev_group=None, delta_ids=None
+        state_group,
+        state_group_before_event,
+        current_state_ids,
+        prev_state_ids,
+        prev_group=None,
+        delta_ids=None,
     ):
         return EventContext(
             current_state_ids=current_state_ids,
             prev_state_ids=prev_state_ids,
             state_group=state_group,
+            state_group_before_event=state_group_before_event,
             prev_group=prev_group,
             delta_ids=delta_ids,
         )
@@ -140,6 +159,7 @@ class EventContext:
             "event_type": event.type,
             "event_state_key": event.state_key if event.is_state() else None,
             "state_group": self._state_group,
+            "state_group_before_event": self.state_group_before_event,
             "rejected": self.rejected,
             "prev_group": self.prev_group,
             "delta_ids": _encode_state_dict(self.delta_ids),
@@ -165,6 +185,7 @@ class EventContext:
             event_type=input["event_type"],
             event_state_key=input["event_state_key"],
             state_group=input["state_group"],
+            state_group_before_event=input["state_group_before_event"],
             prev_group=input["prev_group"],
             delta_ids=_decode_state_dict(input["delta_ids"]),
             rejected=input["rejected"],
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index b7916de909..05dd8d2671 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -2280,6 +2280,7 @@ class FederationHandler(BaseHandler):
 
         return EventContext.with_state(
             state_group=state_group,
+            state_group_before_event=context.state_group_before_event,
             current_state_ids=current_state_ids,
             prev_state_ids=prev_state_ids,
             prev_group=prev_group,
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 2c04ab1854..139beef8ed 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -16,6 +16,7 @@
 
 import logging
 from collections import namedtuple
+from typing import Iterable, Optional
 
 from six import iteritems, itervalues
 
@@ -27,6 +28,7 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions
+from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.logging.utils import log_function
 from synapse.state import v1, v2
@@ -212,15 +214,17 @@ class StateHandler(object):
         return joined_hosts
 
     @defer.inlineCallbacks
-    def compute_event_context(self, event, old_state=None):
+    def compute_event_context(
+        self, event: EventBase, old_state: Optional[Iterable[EventBase]] = None
+    ):
         """Build an EventContext structure for the event.
 
         This works out what the current state should be for the event, and
         generates a new state group if necessary.
 
         Args:
-            event (synapse.events.EventBase):
-            old_state (dict|None): The state at the event if it can't be
+            event:
+            old_state: The state at the event if it can't be
                 calculated from existing events. This is normally only specified
                 when receiving an event from federation where we don't have the
                 prev events for, e.g. when backfilling.
@@ -251,113 +255,103 @@ class StateHandler(object):
             # group for it.
             context = EventContext.with_state(
                 state_group=None,
+                state_group_before_event=None,
                 current_state_ids=current_state_ids,
                 prev_state_ids=prev_state_ids,
             )
 
             return context
 
+        #
+        # first of all, figure out the state before the event
+        #
+
         if old_state:
-            # We already have the state, so we don't need to calculate it.
-            # Let's just correctly fill out the context and create a
-            # new state group for it.
-
-            prev_state_ids = {(s.type, s.state_key): s.event_id for s in old_state}
-
-            if event.is_state():
-                key = (event.type, event.state_key)
-                if key in prev_state_ids:
-                    replaces = prev_state_ids[key]
-                    if replaces != event.event_id:  # Paranoia check
-                        event.unsigned["replaces_state"] = replaces
-                current_state_ids = dict(prev_state_ids)
-                current_state_ids[key] = event.event_id
-            else:
-                current_state_ids = prev_state_ids
+            # if we're given the state before the event, then we use that
+            state_ids_before_event = {
+                (s.type, s.state_key): s.event_id for s in old_state
+            }
+            state_group_before_event = None
+            state_group_before_event_prev_group = None
+            deltas_to_state_group_before_event = None
 
-            state_group = yield self.state_store.store_state_group(
-                event.event_id,
-                event.room_id,
-                prev_group=None,
-                delta_ids=None,
-                current_state_ids=current_state_ids,
-            )
+        else:
+            # otherwise, we'll need to resolve the state across the prev_events.
+            logger.debug("calling resolve_state_groups from compute_event_context")
 
-            context = EventContext.with_state(
-                state_group=state_group,
-                current_state_ids=current_state_ids,
-                prev_state_ids=prev_state_ids,
+            entry = yield self.resolve_state_groups_for_events(
+                event.room_id, event.prev_event_ids()
             )
 
-            return context
+            state_ids_before_event = entry.state
+            state_group_before_event = entry.state_group
+            state_group_before_event_prev_group = entry.prev_group
+            deltas_to_state_group_before_event = entry.delta_ids
 
-        logger.debug("calling resolve_state_groups from compute_event_context")
+        #
+        # make sure that we have a state group at that point. If it's not a state event,
+        # that will be the state group for the new event. If it *is* a state event,
+        # it might get rejected (in which case we'll need to persist it with the
+        # previous state group)
+        #
 
-        entry = yield self.resolve_state_groups_for_events(
-            event.room_id, event.prev_event_ids()
-        )
+        if not state_group_before_event:
+            state_group_before_event = yield self.state_store.store_state_group(
+                event.event_id,
+                event.room_id,
+                prev_group=state_group_before_event_prev_group,
+                delta_ids=deltas_to_state_group_before_event,
+                current_state_ids=state_ids_before_event,
+            )
 
-        prev_state_ids = entry.state
-        prev_group = None
-        delta_ids = None
+            # XXX: can we update the state cache entry for the new state group? or
+            # could we set a flag on resolve_state_groups_for_events to tell it to
+            # always make a state group?
+
+        #
+        # now if it's not a state event, we're done
+        #
+
+        if not event.is_state():
+            return EventContext.with_state(
+                state_group_before_event=state_group_before_event,
+                state_group=state_group_before_event,
+                current_state_ids=state_ids_before_event,
+                prev_state_ids=state_ids_before_event,
+                prev_group=state_group_before_event_prev_group,
+                delta_ids=deltas_to_state_group_before_event,
+            )
 
-        if event.is_state():
-            # If this is a state event then we need to create a new state
-            # group for the state after this event.
+        #
+        # otherwise, we'll need to create a new state group for after the event
+        #
 
-            key = (event.type, event.state_key)
-            if key in prev_state_ids:
-                replaces = prev_state_ids[key]
+        key = (event.type, event.state_key)
+        if key in state_ids_before_event:
+            replaces = state_ids_before_event[key]
+            if replaces != event.event_id:
                 event.unsigned["replaces_state"] = replaces
 
-            current_state_ids = dict(prev_state_ids)
-            current_state_ids[key] = event.event_id
-
-            if entry.state_group:
-                # If the state at the event has a state group assigned then
-                # we can use that as the prev group
-                prev_group = entry.state_group
-                delta_ids = {key: event.event_id}
-            elif entry.prev_group:
-                # If the state at the event only has a prev group, then we can
-                # use that as a prev group too.
-                prev_group = entry.prev_group
-                delta_ids = dict(entry.delta_ids)
-                delta_ids[key] = event.event_id
-
-            state_group = yield self.state_store.store_state_group(
-                event.event_id,
-                event.room_id,
-                prev_group=prev_group,
-                delta_ids=delta_ids,
-                current_state_ids=current_state_ids,
-            )
-        else:
-            current_state_ids = prev_state_ids
-            prev_group = entry.prev_group
-            delta_ids = entry.delta_ids
-
-            if entry.state_group is None:
-                entry.state_group = yield self.state_store.store_state_group(
-                    event.event_id,
-                    event.room_id,
-                    prev_group=entry.prev_group,
-                    delta_ids=entry.delta_ids,
-                    current_state_ids=current_state_ids,
-                )
-                entry.state_id = entry.state_group
-
-            state_group = entry.state_group
-
-        context = EventContext.with_state(
-            state_group=state_group,
-            current_state_ids=current_state_ids,
-            prev_state_ids=prev_state_ids,
-            prev_group=prev_group,
+        state_ids_after_event = dict(state_ids_before_event)
+        state_ids_after_event[key] = event.event_id
+        delta_ids = {key: event.event_id}
+
+        state_group_after_event = yield self.state_store.store_state_group(
+            event.event_id,
+            event.room_id,
+            prev_group=state_group_before_event,
             delta_ids=delta_ids,
+            current_state_ids=state_ids_after_event,
         )
 
-        return context
+        return EventContext.with_state(
+            state_group=state_group_after_event,
+            state_group_before_event=state_group_before_event,
+            current_state_ids=state_ids_after_event,
+            prev_state_ids=state_ids_before_event,
+            prev_group=state_group_before_event,
+            delta_ids=delta_ids,
+        )
 
     @measure_func()
     @defer.inlineCallbacks
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index 3132848034..9e1541988e 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -1231,7 +1231,7 @@ class StateStore(StateGroupWorkerStore, StateBackgroundUpdateStore):
             # if the event was rejected, just give it the same state as its
             # predecessor.
             if context.rejected:
-                state_groups[event.event_id] = context.prev_group
+                state_groups[event.event_id] = context.state_group_before_event
                 continue
 
             state_groups[event.event_id] = context.state_group
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index d56220f403..b4d92cf732 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -12,13 +12,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError, Codes
+from synapse.federation.federation_base import event_from_pdu_json
+from synapse.logging.context import LoggingContext, run_in_background
 from synapse.rest import admin
 from synapse.rest.client.v1 import login, room
 
 from tests import unittest
 
+logger = logging.getLogger(__name__)
+
 
 class FederationTestCase(unittest.HomeserverTestCase):
     servlets = [
@@ -79,3 +85,123 @@ class FederationTestCase(unittest.HomeserverTestCase):
         self.assertEqual(failure.code, 403, failure)
         self.assertEqual(failure.errcode, Codes.FORBIDDEN, failure)
         self.assertEqual(failure.msg, "You are not invited to this room.")
+
+    def test_rejected_message_event_state(self):
+        """
+        Check that we store the state group correctly for rejected non-state events.
+
+        Regression test for #6289.
+        """
+        OTHER_SERVER = "otherserver"
+        OTHER_USER = "@otheruser:" + OTHER_SERVER
+
+        # create the room
+        user_id = self.register_user("kermit", "test")
+        tok = self.login("kermit", "test")
+        room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+
+        # pretend that another server has joined
+        join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id)
+
+        # check the state group
+        sg = self.successResultOf(
+            self.store._get_state_group_for_event(join_event.event_id)
+        )
+
+        # build and send an event which will be rejected
+        ev = event_from_pdu_json(
+            {
+                "type": EventTypes.Message,
+                "content": {},
+                "room_id": room_id,
+                "sender": "@yetanotheruser:" + OTHER_SERVER,
+                "depth": join_event["depth"] + 1,
+                "prev_events": [join_event.event_id],
+                "auth_events": [],
+                "origin_server_ts": self.clock.time_msec(),
+            },
+            join_event.format_version,
+        )
+
+        with LoggingContext(request="send_rejected"):
+            d = run_in_background(self.handler.on_receive_pdu, OTHER_SERVER, ev)
+        self.get_success(d)
+
+        # that should have been rejected
+        e = self.get_success(self.store.get_event(ev.event_id, allow_rejected=True))
+        self.assertIsNotNone(e.rejected_reason)
+
+        # ... and the state group should be the same as before
+        sg2 = self.successResultOf(self.store._get_state_group_for_event(ev.event_id))
+
+        self.assertEqual(sg, sg2)
+
+    def test_rejected_state_event_state(self):
+        """
+        Check that we store the state group correctly for rejected state events.
+
+        Regression test for #6289.
+        """
+        OTHER_SERVER = "otherserver"
+        OTHER_USER = "@otheruser:" + OTHER_SERVER
+
+        # create the room
+        user_id = self.register_user("kermit", "test")
+        tok = self.login("kermit", "test")
+        room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+
+        # pretend that another server has joined
+        join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id)
+
+        # check the state group
+        sg = self.successResultOf(
+            self.store._get_state_group_for_event(join_event.event_id)
+        )
+
+        # build and send an event which will be rejected
+        ev = event_from_pdu_json(
+            {
+                "type": "org.matrix.test",
+                "state_key": "test_key",
+                "content": {},
+                "room_id": room_id,
+                "sender": "@yetanotheruser:" + OTHER_SERVER,
+                "depth": join_event["depth"] + 1,
+                "prev_events": [join_event.event_id],
+                "auth_events": [],
+                "origin_server_ts": self.clock.time_msec(),
+            },
+            join_event.format_version,
+        )
+
+        with LoggingContext(request="send_rejected"):
+            d = run_in_background(self.handler.on_receive_pdu, OTHER_SERVER, ev)
+        self.get_success(d)
+
+        # that should have been rejected
+        e = self.get_success(self.store.get_event(ev.event_id, allow_rejected=True))
+        self.assertIsNotNone(e.rejected_reason)
+
+        # ... and the state group should be the same as before
+        sg2 = self.successResultOf(self.store._get_state_group_for_event(ev.event_id))
+
+        self.assertEqual(sg, sg2)
+
+    def _build_and_send_join_event(self, other_server, other_user, room_id):
+        join_event = self.get_success(
+            self.handler.on_make_join_request(other_server, room_id, other_user)
+        )
+        # the auth code requires that a signature exists, but doesn't check that
+        # signature... go figure.
+        join_event.signatures[other_server] = {"x": "y"}
+        with LoggingContext(request="send_join"):
+            d = run_in_background(
+                self.handler.on_send_join_request, other_server, join_event
+            )
+        self.get_success(d)
+
+        # sanity-check: the room should show that the new user is a member
+        r = self.get_success(self.store.get_current_state_ids(room_id))
+        self.assertEqual(r[(EventTypes.Member, other_user)], join_event.event_id)
+
+        return join_event
diff --git a/tests/test_state.py b/tests/test_state.py
index 38246555bd..176535947a 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -21,6 +21,7 @@ from synapse.api.auth import Auth
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.room_versions import RoomVersions
 from synapse.events import FrozenEvent
+from synapse.events.snapshot import EventContext
 from synapse.state import StateHandler, StateResolutionHandler
 
 from tests import unittest
@@ -198,16 +199,22 @@ class StateTestCase(unittest.TestCase):
 
         self.store.register_events(graph.walk())
 
-        context_store = {}
+        context_store = {}  # type: dict[str, EventContext]
 
         for event in graph.walk():
             context = yield self.state.compute_event_context(event)
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
-        prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
+        ctx_c = context_store["C"]
+        ctx_d = context_store["D"]
+
+        prev_state_ids = yield ctx_d.get_prev_state_ids(self.store)
         self.assertEqual(2, len(prev_state_ids))
 
+        self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event)
+        self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group)
+
     @defer.inlineCallbacks
     def test_branch_basic_conflict(self):
         graph = Graph(
@@ -241,12 +248,19 @@ class StateTestCase(unittest.TestCase):
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
-        prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
+        # C ends up winning the resolution between B and C
+
+        ctx_c = context_store["C"]
+        ctx_d = context_store["D"]
 
+        prev_state_ids = yield ctx_d.get_prev_state_ids(self.store)
         self.assertSetEqual(
             {"START", "A", "C"}, {e_id for e_id in prev_state_ids.values()}
         )
 
+        self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event)
+        self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group)
+
     @defer.inlineCallbacks
     def test_branch_have_banned_conflict(self):
         graph = Graph(
@@ -292,11 +306,18 @@ class StateTestCase(unittest.TestCase):
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
-        prev_state_ids = yield context_store["E"].get_prev_state_ids(self.store)
+        # C ends up winning the resolution between C and D because bans win over other
+        # changes
+
+        ctx_c = context_store["C"]
+        ctx_e = context_store["E"]
 
+        prev_state_ids = yield ctx_e.get_prev_state_ids(self.store)
         self.assertSetEqual(
             {"START", "A", "B", "C"}, {e for e in prev_state_ids.values()}
         )
+        self.assertEqual(ctx_c.state_group, ctx_e.state_group_before_event)
+        self.assertEqual(ctx_e.state_group_before_event, ctx_e.state_group)
 
     @defer.inlineCallbacks
     def test_branch_have_perms_conflict(self):
@@ -360,12 +381,20 @@ class StateTestCase(unittest.TestCase):
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
-        prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
+        # B ends up winning the resolution between B and C because power levels
+        # win over other changes.
 
+        ctx_b = context_store["B"]
+        ctx_d = context_store["D"]
+
+        prev_state_ids = yield ctx_d.get_prev_state_ids(self.store)
         self.assertSetEqual(
             {"A1", "A2", "A3", "A5", "B"}, {e for e in prev_state_ids.values()}
         )
 
+        self.assertEqual(ctx_b.state_group, ctx_d.state_group_before_event)
+        self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group)
+
     def _add_depths(self, nodes, edges):
         def _get_depth(ev):
             node = nodes[ev]
@@ -390,13 +419,16 @@ class StateTestCase(unittest.TestCase):
 
         context = yield self.state.compute_event_context(event, old_state=old_state)
 
-        current_state_ids = yield context.get_current_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values())
 
-        self.assertEqual(
-            set(e.event_id for e in old_state), set(current_state_ids.values())
+        current_state_ids = yield context.get_current_state_ids(self.store)
+        self.assertCountEqual(
+            (e.event_id for e in old_state), current_state_ids.values()
         )
 
-        self.assertIsNotNone(context.state_group)
+        self.assertIsNotNone(context.state_group_before_event)
+        self.assertEqual(context.state_group_before_event, context.state_group)
 
     @defer.inlineCallbacks
     def test_annotate_with_old_state(self):
@@ -411,11 +443,18 @@ class StateTestCase(unittest.TestCase):
         context = yield self.state.compute_event_context(event, old_state=old_state)
 
         prev_state_ids = yield context.get_prev_state_ids(self.store)
+        self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values())
 
-        self.assertEqual(
-            set(e.event_id for e in old_state), set(prev_state_ids.values())
+        current_state_ids = yield context.get_current_state_ids(self.store)
+        self.assertCountEqual(
+            (e.event_id for e in old_state + [event]), current_state_ids.values()
         )
 
+        self.assertIsNotNone(context.state_group_before_event)
+        self.assertNotEqual(context.state_group_before_event, context.state_group)
+        self.assertEqual(context.state_group_before_event, context.prev_group)
+        self.assertEqual({("state", ""): event.event_id}, context.delta_ids)
+
     @defer.inlineCallbacks
     def test_trivial_annotate_message(self):
         prev_event_id = "prev_event_id"
-- 
cgit 1.4.1
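
A minimal sketch of the invariants the new ``state_group_before_event``
attribute is intended to satisfy, mirroring the assertions in the tests
above (``ctx`` is a hypothetical EventContext returned by
``compute_event_context`` for a non-outlier event):

    def check_event_context_invariants(ctx, event):
        if not event.is_state():
            # non-state events share the state group of the state before them
            assert ctx.state_group_before_event == ctx.state_group
        else:
            # state events get a fresh state group, whose prev_group is the
            # group before the event, with a one-entry delta
            assert ctx.state_group_before_event != ctx.state_group
            assert ctx.prev_group == ctx.state_group_before_event
            key = (event.type, event.state_key)
            assert ctx.delta_ids == {key: event.event_id}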


From 4fc53bf1fb97d52c19aa718e67f31d290218e3c1 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 5 Nov 2019 17:42:42 +0000
Subject: Newsfile

---
 changelog.d/6335.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6335.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6335.bugfix b/changelog.d/6335.bugfix
new file mode 100644
index 0000000000..a95f6b9eec
--- /dev/null
+++ b/changelog.d/6335.bugfix
@@ -0,0 +1 @@
+Fix bug where `rc_login` ratelimiting would prematurely kick in.
-- 
cgit 1.4.1


From d2f6a67cb4c8f1ea1a4ae563dd53139838b019c7 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Wed, 6 Nov 2019 12:03:12 +0000
Subject: Add changelog

---
 changelog.d/6338.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6338.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6338.bugfix b/changelog.d/6338.bugfix
new file mode 100644
index 0000000000..8e469f0fb6
--- /dev/null
+++ b/changelog.d/6338.bugfix
@@ -0,0 +1 @@
+Prevent the server taking a long time to start up when guest registration is enabled.
\ No newline at end of file
-- 
cgit 1.4.1


From affcc2cc3655531351048a4ad8ac67e22d1e398d Mon Sep 17 00:00:00 2001
From: V02460 
Date: Thu, 7 Nov 2019 10:43:51 +0100
Subject: Fix LruCache callback deduplication (#6213)

---
 changelog.d/6213.bugfix            |  1 +
 synapse/util/caches/descriptors.py | 48 +++++++++++++++++++++++++++++---------
 2 files changed, 38 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/6213.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6213.bugfix b/changelog.d/6213.bugfix
new file mode 100644
index 0000000000..072264fba3
--- /dev/null
+++ b/changelog.d/6213.bugfix
@@ -0,0 +1 @@
+Fix LruCache callback deduplication.
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 0e8da27f53..84f5ae22c3 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -17,8 +17,8 @@ import functools
 import inspect
 import logging
 import threading
-from collections import namedtuple
-from typing import Any, cast
+from typing import Any, Tuple, Union, cast
+from weakref import WeakValueDictionary
 
 from six import itervalues
 
@@ -38,6 +38,8 @@ from . import register_cache
 
 logger = logging.getLogger(__name__)
 
+CacheKey = Union[Tuple, Any]
+
 
 class _CachedFunction(Protocol):
     invalidate = None  # type: Any
@@ -430,7 +432,7 @@ class CacheDescriptor(_CacheDescriptorBase):
             # Add our own `cache_context` to argument list if the wrapped function
             # has asked for one
             if self.add_cache_context:
-                kwargs["cache_context"] = _CacheContext(cache, cache_key)
+                kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key)
 
             try:
                 cached_result_d = cache.get(cache_key, callback=invalidate_callback)
@@ -624,14 +626,38 @@ class CacheListDescriptor(_CacheDescriptorBase):
         return wrapped
 
 
-class _CacheContext(namedtuple("_CacheContext", ("cache", "key"))):
-    # We rely on _CacheContext implementing __eq__ and __hash__ sensibly,
-    # which namedtuple does for us (i.e. two _CacheContext are the same if
-    # their caches and keys match). This is important in particular to
-    # dedupe when we add callbacks to lru cache nodes, otherwise the number
-    # of callbacks would grow.
-    def invalidate(self):
-        self.cache.invalidate(self.key)
+class _CacheContext:
+    """Holds cache information from the cached function higher in the calling order.
+
+    Can be used to invalidate the higher level cache entry if something changes
+    on a lower level.
+    """
+
+    _cache_context_objects = (
+        WeakValueDictionary()
+    )  # type: WeakValueDictionary[Tuple[Cache, CacheKey], _CacheContext]
+
+    def __init__(self, cache, cache_key):  # type: (Cache, CacheKey) -> None
+        self._cache = cache
+        self._cache_key = cache_key
+
+    def invalidate(self):  # type: () -> None
+        """Invalidates the cache entry referred to by the context."""
+        self._cache.invalidate(self._cache_key)
+
+    @classmethod
+    def get_instance(cls, cache, cache_key):  # type: (Cache, CacheKey) -> _CacheContext
+        """Returns an instance constructed with the given arguments.
+
+        A new instance is only created if none already exists.
+        """
+
+        # We make sure there are no identical _CacheContext instances. This is
+        # important in particular to dedupe when we add callbacks to lru cache
+        # nodes, otherwise the number of callbacks would grow.
+        return cls._cache_context_objects.setdefault(
+            (cache, cache_key), cls(cache, cache_key)
+        )
 
 
 def cached(
-- 
cgit 1.4.1
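
The deduplication can be demonstrated in isolation. A minimal sketch, using
a plain string in place of Synapse's ``Cache`` (any hashable stand-in
works):

    from weakref import WeakValueDictionary

    class Ctx:
        _instances = WeakValueDictionary()

        def __init__(self, cache, key):
            self.cache, self.key = cache, key

        @classmethod
        def get_instance(cls, cache, key):
            # setdefault returns the live instance if one exists, so equal
            # (cache, key) pairs share a single object, and callbacks
            # registered against it dedupe instead of piling up
            return cls._instances.setdefault((cache, key), cls(cache, key))

    cache = "some-cache"
    a = Ctx.get_instance(cache, ("user", 1))
    b = Ctx.get_instance(cache, ("user", 1))
    assert a is b

Because the registry holds only weak references, a context disappears as
soon as nothing else references it, so the mapping cannot leak.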


From b03cddaeb99437311094fbe617ae2a6bde4c5615 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Thu, 7 Nov 2019 09:46:25 +0000
Subject: tweak changelog

---
 changelog.d/6213.bugfix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6213.bugfix b/changelog.d/6213.bugfix
index 072264fba3..2bb2d08851 100644
--- a/changelog.d/6213.bugfix
+++ b/changelog.d/6213.bugfix
@@ -1 +1 @@
-Fix LruCache callback deduplication.
+Fix LruCache callback deduplication for Python 3.8. Contributed by @V02460.
-- 
cgit 1.4.1


From 4f519d556e32ac29a960977df4ed14c42290af5e Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Thu, 7 Nov 2019 11:51:54 +0000
Subject: Changelog

---
 changelog.d/6340.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6340.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6340.feature b/changelog.d/6340.feature
new file mode 100644
index 0000000000..78a187a1dc
--- /dev/null
+++ b/changelog.d/6340.feature
@@ -0,0 +1 @@
+Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)).
-- 
cgit 1.4.1


From c5abb67e432b9279c838f7b9318144ca8f2b7c0d Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 7 Nov 2019 17:14:13 +0000
Subject: Python 3.8 for tox (#6341)

... and update INSTALL.md to include py3.8.

We'll also have to update the buildkite pipeline to run it.
---
 INSTALL.md            | 2 +-
 changelog.d/6341.misc | 1 +
 tox.ini               | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6341.misc

(limited to 'changelog.d')

diff --git a/INSTALL.md b/INSTALL.md
index e7b429c05d..29e0abafd3 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -36,7 +36,7 @@ that your email address is probably `user@example.com` rather than
 System requirements:
 
 - POSIX-compliant system (tested on Linux & OS X)
-- Python 3.5, 3.6, or 3.7
+- Python 3.5, 3.6, 3.7, or 3.8
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
 
 Synapse is written in Python but some of the libraries it uses are written in
diff --git a/changelog.d/6341.misc b/changelog.d/6341.misc
new file mode 100644
index 0000000000..359b9bf1d7
--- /dev/null
+++ b/changelog.d/6341.misc
@@ -0,0 +1 @@
+Add continuous integration for python 3.8.
\ No newline at end of file
diff --git a/tox.ini b/tox.ini
index afe9bc909b..62b350ea6a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = packaging, py35, py36, py37, check_codestyle, check_isort
+envlist = packaging, py35, py36, py37, py38, check_codestyle, check_isort
 
 [base]
 basepython = python3.7
-- 
cgit 1.4.1


From 318dd21b4767039014f73917a4dddb9fc885bc56 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Fri, 8 Nov 2019 15:45:08 +0000
Subject: Add changelog

---
 changelog.d/6333.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6333.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6333.bugfix b/changelog.d/6333.bugfix
new file mode 100644
index 0000000000..a25d6ef3cb
--- /dev/null
+++ b/changelog.d/6333.bugfix
@@ -0,0 +1 @@
+Prevent account data syncs getting lost across TCP replication.
\ No newline at end of file
-- 
cgit 1.4.1


From 20d687516fdc34a4be19dcbbadd8a9a9726203e4 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Fri, 8 Nov 2019 16:17:02 +0000
Subject: newsfile

---
 changelog.d/6343.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6343.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6343.misc b/changelog.d/6343.misc
new file mode 100644
index 0000000000..d9a44389b9
--- /dev/null
+++ b/changelog.d/6343.misc
@@ -0,0 +1 @@
+Refactor some code in the event authentication path for clarity.
-- 
cgit 1.4.1


From 21056ad12a9dbfadad085802c7a0096d3b071681 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 11 Nov 2019 16:53:29 +0000
Subject: Changelog

---
 changelog.d/6349.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6349.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6349.feature b/changelog.d/6349.feature
new file mode 100644
index 0000000000..56c4fbf78e
--- /dev/null
+++ b/changelog.d/6349.feature
@@ -0,0 +1 @@
+Implement v2 APIs for the `send_join` and `send_leave` federation endpoints (as described in [MSC1802](https://github.com/matrix-org/matrix-doc/pull/1802)).
-- 
cgit 1.4.1
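
For context, the change MSC1802 makes is to the response shape: the v1
endpoints wrap their responses in a redundant ``[200, body]`` array for
historical reasons, and the v2 endpoints drop that wrapper. Roughly
(response bodies abbreviated):

    # PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}
    v1_response = [200, {"state": [...], "auth_chain": [...]}]

    # PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}
    v2_response = {"state": [...], "auth_chain": [...]}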


From 85f172ef968a9726cd62bc61fe98e39cdf017e15 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Tue, 12 Nov 2019 13:13:19 +0000
Subject: Add changelog

---
 changelog.d/6357.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6357.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6357.misc b/changelog.d/6357.misc
new file mode 100644
index 0000000000..a68df0f384
--- /dev/null
+++ b/changelog.d/6357.misc
@@ -0,0 +1 @@
+Correct spacing/case of various instances of the word "homeserver".
\ No newline at end of file
-- 
cgit 1.4.1


From c350bc2f92d87e46a40f917f65c9e10e0f4999fc Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 13 Nov 2019 19:09:20 +0000
Subject: Blacklist PurgeRoomTestCase (#6361)

---
 changelog.d/6361.misc          | 1 +
 tests/rest/admin/test_admin.py | 2 ++
 2 files changed, 3 insertions(+)
 create mode 100644 changelog.d/6361.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6361.misc b/changelog.d/6361.misc
new file mode 100644
index 0000000000..324d74ebf9
--- /dev/null
+++ b/changelog.d/6361.misc
@@ -0,0 +1 @@
+Temporarily blacklist the failing unit test PurgeRoomTestCase.test_purge_room.
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index d9f1b95cb0..9575058252 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -641,3 +641,5 @@ class PurgeRoomTestCase(unittest.HomeserverTestCase):
             )
 
             self.assertEqual(count, 0, msg="Rows not purged in {}".format(table))
+
+    test_purge_room.skip = "Disabled because it's currently broken"
-- 
cgit 1.4.1
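
Setting a ``skip`` attribute on a test method is the standard Twisted trial
mechanism for skipping a test with a reason; a minimal sketch:

    from twisted.trial import unittest

    class ExampleTestCase(unittest.TestCase):
        def test_flaky_thing(self):
            self.fail("never reached while the skip attribute is set")

        test_flaky_thing.skip = "Disabled because it's currently broken"

trial reports such tests as skipped, with the given reason, rather than
running them, so the suite stays green while the breakage is investigated.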


From 745a48625d9760374a7d683441185fa8bd2a2aac Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 14 Nov 2019 12:02:05 +0000
Subject: Fix guest -> real account upgrade with account validity enabled
 (#6359)

---
 changelog.d/6359.bugfix  | 1 +
 synapse/storage/_base.py | 9 +++------
 2 files changed, 4 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/6359.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6359.bugfix b/changelog.d/6359.bugfix
new file mode 100644
index 0000000000..22bf5f642a
--- /dev/null
+++ b/changelog.d/6359.bugfix
@@ -0,0 +1 @@
+Fix bug where upgrading a guest account to a full user would fail when account validity is enabled.
\ No newline at end of file
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 1a2b7ebe25..ab596fa68d 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -361,14 +361,11 @@ class SQLBaseStore(object):
                 expiration_ts,
             )
 
-        self._simple_insert_txn(
+        self._simple_upsert_txn(
             txn,
             "account_validity",
-            values={
-                "user_id": user_id,
-                "expiration_ts_ms": expiration_ts,
-                "email_sent": False,
-            },
+            keyvalues={"user_id": user_id},
+            values={"expiration_ts_ms": expiration_ts, "email_sent": False},
         )
 
     def start_profiling(self):
-- 
cgit 1.4.1
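
Why the upsert matters here: a guest account already has an
account_validity row, so the plain INSERT hit the primary-key constraint
when the account was upgraded. A rough sqlite3 sketch of what
``_simple_upsert_txn`` achieves (native upsert needs SQLite >= 3.24;
Synapse falls back to other strategies on older engines):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    txn = conn.cursor()
    txn.execute(
        "CREATE TABLE account_validity ("
        " user_id TEXT PRIMARY KEY, expiration_ts_ms BIGINT, email_sent BOOLEAN)"
    )

    def set_expiration(txn, user_id, expiration_ts):
        # insert the row for brand-new users; update it for guests being
        # upgraded, who already have one
        txn.execute(
            "INSERT INTO account_validity (user_id, expiration_ts_ms, email_sent)"
            " VALUES (?, ?, ?)"
            " ON CONFLICT (user_id) DO UPDATE SET"
            " expiration_ts_ms = excluded.expiration_ts_ms,"
            " email_sent = excluded.email_sent",
            (user_id, expiration_ts, False),
        )

    set_expiration(txn, "@guest:example.com", 1000)  # guest registration
    set_expiration(txn, "@guest:example.com", 2000)  # upgrade: no IntegrityError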


From 53b6559a8906ba56f9f50469e0c2bec430614a4e Mon Sep 17 00:00:00 2001
From: James 
Date: Fri, 15 Nov 2019 05:42:46 +1100
Subject: Add optional python dependencies to snap packaging (#6317)

Signed-off-by: James Hebden 
---
 changelog.d/6317.misc |  1 +
 snap/snapcraft.yaml   | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+)
 create mode 100644 changelog.d/6317.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6317.misc b/changelog.d/6317.misc
new file mode 100644
index 0000000000..a67d13fa72
--- /dev/null
+++ b/changelog.d/6317.misc
@@ -0,0 +1 @@
+Add optional python dependencies and dependent binary libraries to snapcraft packaging.
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 1f7df71db2..9e644e8567 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -20,3 +20,23 @@ parts:
     source: .
     plugin: python
     python-version: python3
+    python-packages:
+      - '.[all]'
+    build-packages:
+      - libffi-dev
+      - libturbojpeg0-dev
+      - libssl-dev
+      - libxslt1-dev
+      - libpq-dev
+      - zlib1g-dev
+    stage-packages:
+      - libasn1-8-heimdal
+      - libgssapi3-heimdal
+      - libhcrypto4-heimdal
+      - libheimbase1-heimdal
+      - libheimntlm0-heimdal
+      - libhx509-5-heimdal
+      - libkrb5-26-heimdal
+      - libldap-2.4-2
+      - libpq5
+      - libsasl2-2
-- 
cgit 1.4.1


From 657d614f6a53f3dbfd2858bd85d0f81563db0041 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 15 Nov 2019 14:02:34 +0000
Subject: Replace UPDATE with UPSERT on device_max_stream_id table (#6363)

---
 changelog.d/6363.bugfix                         |  1 +
 synapse/storage/data_stores/main/deviceinbox.py | 17 +++++++++++++++--
 2 files changed, 16 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6363.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6363.bugfix b/changelog.d/6363.bugfix
new file mode 100644
index 0000000000..d023b49181
--- /dev/null
+++ b/changelog.d/6363.bugfix
@@ -0,0 +1 @@
+Fix `to_device` stream ID getting reset every time Synapse restarts, which had the potential to cause "unable to decrypt" errors.
\ No newline at end of file
diff --git a/synapse/storage/data_stores/main/deviceinbox.py b/synapse/storage/data_stores/main/deviceinbox.py
index f04aad0743..96cd0fb77a 100644
--- a/synapse/storage/data_stores/main/deviceinbox.py
+++ b/synapse/storage/data_stores/main/deviceinbox.py
@@ -358,8 +358,21 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
     def _add_messages_to_local_device_inbox_txn(
         self, txn, stream_id, messages_by_user_then_device
     ):
-        sql = "UPDATE device_max_stream_id" " SET stream_id = ?" " WHERE stream_id < ?"
-        txn.execute(sql, (stream_id, stream_id))
+        # Compatible method of performing an upsert
+        sql = "SELECT stream_id FROM device_max_stream_id"
+
+        txn.execute(sql)
+        rows = txn.fetchone()
+        if rows:
+            db_stream_id = rows[0]
+            if db_stream_id < stream_id:
+                # a row already exists: advance it only if the new
+                # stream_id is higher, and skip the write otherwise
+                txn.execute(
+                    "UPDATE device_max_stream_id SET stream_id = ?", (stream_id,)
+                )
+        else:
+            # no rows yet: perform an insert
+            txn.execute(
+                "INSERT INTO device_max_stream_id (stream_id) VALUES (?)",
+                (stream_id,),
+            )
 
         local_by_user_then_device = {}
         for user_id, messages_by_device in messages_by_user_then_device.items():
-- 
cgit 1.4.1
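
The same read-then-write pattern, runnable in isolation against sqlite3 to
show that the stream-id pointer only ever moves forwards:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    txn = conn.cursor()
    txn.execute("CREATE TABLE device_max_stream_id (stream_id BIGINT NOT NULL)")

    def advance(txn, stream_id):
        txn.execute("SELECT stream_id FROM device_max_stream_id")
        row = txn.fetchone()
        if row is None:
            # no row yet: seed the table
            txn.execute(
                "INSERT INTO device_max_stream_id (stream_id) VALUES (?)",
                (stream_id,),
            )
        elif row[0] < stream_id:
            # only ever move the pointer forwards
            txn.execute("UPDATE device_max_stream_id SET stream_id = ?", (stream_id,))

    advance(txn, 5)
    advance(txn, 3)  # stale ID: no write happens
    txn.execute("SELECT stream_id FROM device_max_stream_id")
    assert txn.fetchone() == (5,)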


From 4f5ca455bf17b52d70ab08be043178b4678cc4b8 Mon Sep 17 00:00:00 2001
From: Manuel Stahl <37705355+awesome-manuel@users.noreply.github.com>
Date: Wed, 20 Nov 2019 12:49:11 +0100
Subject: Move admin endpoints into separate files (#6308)

---
 changelog.d/6308.misc          |   1 +
 synapse/rest/admin/__init__.py | 567 +----------------------------------------
 synapse/rest/admin/groups.py   |  46 ++++
 synapse/rest/admin/rooms.py    | 157 ++++++++++++
 synapse/rest/admin/users.py    | 406 ++++++++++++++++++++++++++++-
 5 files changed, 622 insertions(+), 555 deletions(-)
 create mode 100644 changelog.d/6308.misc
 create mode 100644 synapse/rest/admin/groups.py
 create mode 100644 synapse/rest/admin/rooms.py

(limited to 'changelog.d')

diff --git a/changelog.d/6308.misc b/changelog.d/6308.misc
new file mode 100644
index 0000000000..72be63ba4b
--- /dev/null
+++ b/changelog.d/6308.misc
@@ -0,0 +1 @@
+Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH.
\ No newline at end of file
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 5c2a2eb593..68a59a3424 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -14,62 +14,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import hashlib
-import hmac
 import logging
 import platform
 import re
 
-from six import text_type
-from six.moves import http_client
-
 import synapse
-from synapse.api.constants import Membership, UserTypes
 from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.server import JsonResource
-from synapse.http.servlet import (
-    RestServlet,
-    assert_params_in_dict,
-    parse_integer,
-    parse_json_object_from_request,
-    parse_string,
-)
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.rest.admin._base import (
     assert_requester_is_admin,
-    assert_user_is_admin,
     historical_admin_path_patterns,
 )
+from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
 from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
 from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet
+from synapse.rest.admin.rooms import ShutdownRoomRestServlet
 from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
-from synapse.rest.admin.users import UserAdminServlet
-from synapse.types import UserID, create_requester
-from synapse.util.async_helpers import maybe_awaitable
+from synapse.rest.admin.users import (
+    AccountValidityRenewServlet,
+    DeactivateAccountRestServlet,
+    GetUsersPaginatedRestServlet,
+    ResetPasswordRestServlet,
+    SearchUsersRestServlet,
+    UserAdminServlet,
+    UserRegisterServlet,
+    UsersRestServlet,
+    WhoisRestServlet,
+)
 from synapse.util.versionstring import get_version_string
 
 logger = logging.getLogger(__name__)
 
 
-class UsersRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns("/users/(?P[^/]*)$")
-
-    def __init__(self, hs):
-        self.hs = hs
-        self.auth = hs.get_auth()
-        self.handlers = hs.get_handlers()
-
-    async def on_GET(self, request, user_id):
-        target_user = UserID.from_string(user_id)
-        await assert_requester_is_admin(self.auth, request)
-
-        if not self.hs.is_mine(target_user):
-            raise SynapseError(400, "Can only users a local user")
-
-        ret = await self.handlers.admin_handler.get_users()
-
-        return 200, ret
-
-
 class VersionServlet(RestServlet):
     PATTERNS = (re.compile("^/_synapse/admin/v1/server_version$"),)
 
@@ -83,159 +60,6 @@ class VersionServlet(RestServlet):
         return 200, self.res
 
 
-class UserRegisterServlet(RestServlet):
-    """
-    Attributes:
-         NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted
-         nonces (dict[str, int]): The nonces that we will accept. A dict of
-             nonce to the time it was generated, in int seconds.
-    """
-
-    PATTERNS = historical_admin_path_patterns("/register")
-    NONCE_TIMEOUT = 60
-
-    def __init__(self, hs):
-        self.handlers = hs.get_handlers()
-        self.reactor = hs.get_reactor()
-        self.nonces = {}
-        self.hs = hs
-
-    def _clear_old_nonces(self):
-        """
-        Clear out old nonces that are older than NONCE_TIMEOUT.
-        """
-        now = int(self.reactor.seconds())
-
-        for k, v in list(self.nonces.items()):
-            if now - v > self.NONCE_TIMEOUT:
-                del self.nonces[k]
-
-    def on_GET(self, request):
-        """
-        Generate a new nonce.
-        """
-        self._clear_old_nonces()
-
-        nonce = self.hs.get_secrets().token_hex(64)
-        self.nonces[nonce] = int(self.reactor.seconds())
-        return 200, {"nonce": nonce}
-
-    async def on_POST(self, request):
-        self._clear_old_nonces()
-
-        if not self.hs.config.registration_shared_secret:
-            raise SynapseError(400, "Shared secret registration is not enabled")
-
-        body = parse_json_object_from_request(request)
-
-        if "nonce" not in body:
-            raise SynapseError(400, "nonce must be specified", errcode=Codes.BAD_JSON)
-
-        nonce = body["nonce"]
-
-        if nonce not in self.nonces:
-            raise SynapseError(400, "unrecognised nonce")
-
-        # Delete the nonce, so it can't be reused, even if it's invalid
-        del self.nonces[nonce]
-
-        if "username" not in body:
-            raise SynapseError(
-                400, "username must be specified", errcode=Codes.BAD_JSON
-            )
-        else:
-            if (
-                not isinstance(body["username"], text_type)
-                or len(body["username"]) > 512
-            ):
-                raise SynapseError(400, "Invalid username")
-
-            username = body["username"].encode("utf-8")
-            if b"\x00" in username:
-                raise SynapseError(400, "Invalid username")
-
-        if "password" not in body:
-            raise SynapseError(
-                400, "password must be specified", errcode=Codes.BAD_JSON
-            )
-        else:
-            if (
-                not isinstance(body["password"], text_type)
-                or len(body["password"]) > 512
-            ):
-                raise SynapseError(400, "Invalid password")
-
-            password = body["password"].encode("utf-8")
-            if b"\x00" in password:
-                raise SynapseError(400, "Invalid password")
-
-        admin = body.get("admin", None)
-        user_type = body.get("user_type", None)
-
-        if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
-            raise SynapseError(400, "Invalid user type")
-
-        got_mac = body["mac"]
-
-        want_mac = hmac.new(
-            key=self.hs.config.registration_shared_secret.encode(),
-            digestmod=hashlib.sha1,
-        )
-        want_mac.update(nonce.encode("utf8"))
-        want_mac.update(b"\x00")
-        want_mac.update(username)
-        want_mac.update(b"\x00")
-        want_mac.update(password)
-        want_mac.update(b"\x00")
-        want_mac.update(b"admin" if admin else b"notadmin")
-        if user_type:
-            want_mac.update(b"\x00")
-            want_mac.update(user_type.encode("utf8"))
-        want_mac = want_mac.hexdigest()
-
-        if not hmac.compare_digest(want_mac.encode("ascii"), got_mac.encode("ascii")):
-            raise SynapseError(403, "HMAC incorrect")
-
-        # Reuse the parts of RegisterRestServlet to reduce code duplication
-        from synapse.rest.client.v2_alpha.register import RegisterRestServlet
-
-        register = RegisterRestServlet(self.hs)
-
-        user_id = await register.registration_handler.register_user(
-            localpart=body["username"].lower(),
-            password=body["password"],
-            admin=bool(admin),
-            user_type=user_type,
-        )
-
-        result = await register._create_registration_details(user_id, body)
-        return 200, result
-
-
-class WhoisRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns("/whois/(?P[^/]*)")
-
-    def __init__(self, hs):
-        self.hs = hs
-        self.auth = hs.get_auth()
-        self.handlers = hs.get_handlers()
-
-    async def on_GET(self, request, user_id):
-        target_user = UserID.from_string(user_id)
-        requester = await self.auth.get_user_by_req(request)
-        auth_user = requester.user
-
-        if target_user != auth_user:
-            await assert_user_is_admin(self.auth, auth_user)
-
-        if not self.hs.is_mine(target_user):
-            raise SynapseError(400, "Can only whois a local user")
-
-        ret = await self.handlers.admin_handler.get_whois(target_user)
-
-        return 200, ret
-
-
 class PurgeHistoryRestServlet(RestServlet):
     PATTERNS = historical_admin_path_patterns(
         "/purge_history/(?P[^/]*)(/(?P[^/]+))?"
@@ -342,369 +166,6 @@ class PurgeHistoryStatusRestServlet(RestServlet):
         return 200, purge_status.asdict()
 
 
-class DeactivateAccountRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns("/deactivate/(?P[^/]*)")
-
-    def __init__(self, hs):
-        self._deactivate_account_handler = hs.get_deactivate_account_handler()
-        self.auth = hs.get_auth()
-
-    async def on_POST(self, request, target_user_id):
-        await assert_requester_is_admin(self.auth, request)
-        body = parse_json_object_from_request(request, allow_empty_body=True)
-        erase = body.get("erase", False)
-        if not isinstance(erase, bool):
-            raise SynapseError(
-                http_client.BAD_REQUEST,
-                "Param 'erase' must be a boolean, if given",
-                Codes.BAD_JSON,
-            )
-
-        UserID.from_string(target_user_id)
-
-        result = await self._deactivate_account_handler.deactivate_account(
-            target_user_id, erase
-        )
-        if result:
-            id_server_unbind_result = "success"
-        else:
-            id_server_unbind_result = "no-support"
-
-        return 200, {"id_server_unbind_result": id_server_unbind_result}
-
-
-class ShutdownRoomRestServlet(RestServlet):
-    """Shuts down a room by removing all local users from the room and blocking
-    all future invites and joins to the room. Any local aliases will be repointed
-    to a new room created by `new_room_user_id` and kicked users will be auto
-    joined to the new room.
-    """
-
-    PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P[^/]+)")
-
-    DEFAULT_MESSAGE = (
-        "Sharing illegal content on this server is not permitted and rooms in"
-        " violation will be blocked."
-    )
-
-    def __init__(self, hs):
-        self.hs = hs
-        self.store = hs.get_datastore()
-        self.state = hs.get_state_handler()
-        self._room_creation_handler = hs.get_room_creation_handler()
-        self.event_creation_handler = hs.get_event_creation_handler()
-        self.room_member_handler = hs.get_room_member_handler()
-        self.auth = hs.get_auth()
-
-    async def on_POST(self, request, room_id):
-        requester = await self.auth.get_user_by_req(request)
-        await assert_user_is_admin(self.auth, requester.user)
-
-        content = parse_json_object_from_request(request)
-        assert_params_in_dict(content, ["new_room_user_id"])
-        new_room_user_id = content["new_room_user_id"]
-
-        room_creator_requester = create_requester(new_room_user_id)
-
-        message = content.get("message", self.DEFAULT_MESSAGE)
-        room_name = content.get("room_name", "Content Violation Notification")
-
-        info = await self._room_creation_handler.create_room(
-            room_creator_requester,
-            config={
-                "preset": "public_chat",
-                "name": room_name,
-                "power_level_content_override": {"users_default": -10},
-            },
-            ratelimit=False,
-        )
-        new_room_id = info["room_id"]
-
-        requester_user_id = requester.user.to_string()
-
-        logger.info(
-            "Shutting down room %r, joining to new room: %r", room_id, new_room_id
-        )
-
-        # This will work even if the room is already blocked, but that is
-        # desirable in case the first attempt at blocking the room failed below.
-        await self.store.block_room(room_id, requester_user_id)
-
-        users = await self.state.get_current_users_in_room(room_id)
-        kicked_users = []
-        failed_to_kick_users = []
-        for user_id in users:
-            if not self.hs.is_mine_id(user_id):
-                continue
-
-            logger.info("Kicking %r from %r...", user_id, room_id)
-
-            try:
-                target_requester = create_requester(user_id)
-                await self.room_member_handler.update_membership(
-                    requester=target_requester,
-                    target=target_requester.user,
-                    room_id=room_id,
-                    action=Membership.LEAVE,
-                    content={},
-                    ratelimit=False,
-                    require_consent=False,
-                )
-
-                await self.room_member_handler.forget(target_requester.user, room_id)
-
-                await self.room_member_handler.update_membership(
-                    requester=target_requester,
-                    target=target_requester.user,
-                    room_id=new_room_id,
-                    action=Membership.JOIN,
-                    content={},
-                    ratelimit=False,
-                    require_consent=False,
-                )
-
-                kicked_users.append(user_id)
-            except Exception:
-                logger.exception(
-                    "Failed to leave old room and join new room for %r", user_id
-                )
-                failed_to_kick_users.append(user_id)
-
-        await self.event_creation_handler.create_and_send_nonmember_event(
-            room_creator_requester,
-            {
-                "type": "m.room.message",
-                "content": {"body": message, "msgtype": "m.text"},
-                "room_id": new_room_id,
-                "sender": new_room_user_id,
-            },
-            ratelimit=False,
-        )
-
-        aliases_for_room = await maybe_awaitable(
-            self.store.get_aliases_for_room(room_id)
-        )
-
-        await self.store.update_aliases_for_room(
-            room_id, new_room_id, requester_user_id
-        )
-
-        return (
-            200,
-            {
-                "kicked_users": kicked_users,
-                "failed_to_kick_users": failed_to_kick_users,
-                "local_aliases": aliases_for_room,
-                "new_room_id": new_room_id,
-            },
-        )
-
-
-class ResetPasswordRestServlet(RestServlet):
-    """Post request to allow an administrator reset password for a user.
-    This needs user to have administrator access in Synapse.
-        Example:
-            http://localhost:8008/_synapse/admin/v1/reset_password/
-            @user:to_reset_password?access_token=admin_access_token
-        JsonBodyToSend:
-            {
-                "new_password": "secret"
-            }
-        Returns:
-            200 OK with empty object if success otherwise an error.
-        """
-
-    PATTERNS = historical_admin_path_patterns(
-        "/reset_password/(?P[^/]*)"
-    )
-
-    def __init__(self, hs):
-        self.store = hs.get_datastore()
-        self.hs = hs
-        self.auth = hs.get_auth()
-        self._set_password_handler = hs.get_set_password_handler()
-
-    async def on_POST(self, request, target_user_id):
-        """Post request to allow an administrator reset password for a user.
-        This needs user to have administrator access in Synapse.
-        """
-        requester = await self.auth.get_user_by_req(request)
-        await assert_user_is_admin(self.auth, requester.user)
-
-        UserID.from_string(target_user_id)
-
-        params = parse_json_object_from_request(request)
-        assert_params_in_dict(params, ["new_password"])
-        new_password = params["new_password"]
-
-        await self._set_password_handler.set_password(
-            target_user_id, new_password, requester
-        )
-        return 200, {}
-
-
-class GetUsersPaginatedRestServlet(RestServlet):
-    """Get request to get specific number of users from Synapse.
-    This needs user to have administrator access in Synapse.
-        Example:
-            http://localhost:8008/_synapse/admin/v1/users_paginate/
-            @admin:user?access_token=admin_access_token&start=0&limit=10
-        Returns:
-            200 OK with json object {list[dict[str, Any]], count} or empty object.
-        """
-
-    PATTERNS = historical_admin_path_patterns(
-        "/users_paginate/(?P[^/]*)"
-    )
-
-    def __init__(self, hs):
-        self.store = hs.get_datastore()
-        self.hs = hs
-        self.auth = hs.get_auth()
-        self.handlers = hs.get_handlers()
-
-    async def on_GET(self, request, target_user_id):
-        """Get request to get specific number of users from Synapse.
-        This needs user to have administrator access in Synapse.
-        """
-        await assert_requester_is_admin(self.auth, request)
-
-        target_user = UserID.from_string(target_user_id)
-
-        if not self.hs.is_mine(target_user):
-            raise SynapseError(400, "Can only users a local user")
-
-        order = "name"  # order by name in user table
-        start = parse_integer(request, "start", required=True)
-        limit = parse_integer(request, "limit", required=True)
-
-        logger.info("limit: %s, start: %s", limit, start)
-
-        ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit)
-        return 200, ret
-
-    async def on_POST(self, request, target_user_id):
-        """Post request to get specific number of users from Synapse..
-        This needs user to have administrator access in Synapse.
-        Example:
-            http://localhost:8008/_synapse/admin/v1/users_paginate/
-            @admin:user?access_token=admin_access_token
-        JsonBodyToSend:
-            {
-                "start": "0",
-                "limit": "10
-            }
-        Returns:
-            200 OK with json object {list[dict[str, Any]], count} or empty object.
-        """
-        await assert_requester_is_admin(self.auth, request)
-        UserID.from_string(target_user_id)
-
-        order = "name"  # order by name in user table
-        params = parse_json_object_from_request(request)
-        assert_params_in_dict(params, ["limit", "start"])
-        limit = params["limit"]
-        start = params["start"]
-        logger.info("limit: %s, start: %s", limit, start)
-
-        ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit)
-        return 200, ret
-
-
-class SearchUsersRestServlet(RestServlet):
-    """Get request to search user table for specific users according to
-    search term.
-    This needs user to have administrator access in Synapse.
-        Example:
-            http://localhost:8008/_synapse/admin/v1/search_users/
-            @admin:user?access_token=admin_access_token&term=alice
-        Returns:
-            200 OK with json object {list[dict[str, Any]], count} or empty object.
-    """
-
-    PATTERNS = historical_admin_path_patterns("/search_users/(?P[^/]*)")
-
-    def __init__(self, hs):
-        self.store = hs.get_datastore()
-        self.hs = hs
-        self.auth = hs.get_auth()
-        self.handlers = hs.get_handlers()
-
-    async def on_GET(self, request, target_user_id):
-        """Get request to search user table for specific users according to
-        search term.
-        This needs user to have a administrator access in Synapse.
-        """
-        await assert_requester_is_admin(self.auth, request)
-
-        target_user = UserID.from_string(target_user_id)
-
-        # To allow all users to get the users list
-        # if not is_admin and target_user != auth_user:
-        #     raise AuthError(403, "You are not a server admin")
-
-        if not self.hs.is_mine(target_user):
-            raise SynapseError(400, "Can only users a local user")
-
-        term = parse_string(request, "term", required=True)
-        logger.info("term: %s ", term)
-
-        ret = await self.handlers.admin_handler.search_users(term)
-        return 200, ret
-
-
-class DeleteGroupAdminRestServlet(RestServlet):
-    """Allows deleting of local groups
-    """
-
-    PATTERNS = historical_admin_path_patterns("/delete_group/(?P[^/]*)")
-
-    def __init__(self, hs):
-        self.group_server = hs.get_groups_server_handler()
-        self.is_mine_id = hs.is_mine_id
-        self.auth = hs.get_auth()
-
-    async def on_POST(self, request, group_id):
-        requester = await self.auth.get_user_by_req(request)
-        await assert_user_is_admin(self.auth, requester.user)
-
-        if not self.is_mine_id(group_id):
-            raise SynapseError(400, "Can only delete local groups")
-
-        await self.group_server.delete_group(group_id, requester.user.to_string())
-        return 200, {}
-
-
-class AccountValidityRenewServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns("/account_validity/validity$")
-
-    def __init__(self, hs):
-        """
-        Args:
-            hs (synapse.server.HomeServer): server
-        """
-        self.hs = hs
-        self.account_activity_handler = hs.get_account_validity_handler()
-        self.auth = hs.get_auth()
-
-    async def on_POST(self, request):
-        await assert_requester_is_admin(self.auth, request)
-
-        body = parse_json_object_from_request(request)
-
-        if "user_id" not in body:
-            raise SynapseError(400, "Missing property 'user_id' in the request body")
-
-        expiration_ts = await self.account_activity_handler.renew_account_for_user(
-            body["user_id"],
-            body.get("expiration_ts"),
-            not body.get("enable_renewal_emails", True),
-        )
-
-        res = {"expiration_ts": expiration_ts}
-        return 200, res
-
-
 ########################################################################################
 #
 # please don't add more servlets here: this file is already long and unwieldy. Put
diff --git a/synapse/rest/admin/groups.py b/synapse/rest/admin/groups.py
new file mode 100644
index 0000000000..0b54ca09f4
--- /dev/null
+++ b/synapse/rest/admin/groups.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import RestServlet
+from synapse.rest.admin._base import (
+    assert_user_is_admin,
+    historical_admin_path_patterns,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class DeleteGroupAdminRestServlet(RestServlet):
+    """Allows deleting of local groups
+    """
+
+    PATTERNS = historical_admin_path_patterns("/delete_group/(?P[^/]*)")
+
+    def __init__(self, hs):
+        self.group_server = hs.get_groups_server_handler()
+        self.is_mine_id = hs.is_mine_id
+        self.auth = hs.get_auth()
+
+    async def on_POST(self, request, group_id):
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        if not self.is_mine_id(group_id):
+            raise SynapseError(400, "Can only delete local groups")
+
+        await self.group_server.delete_group(group_id, requester.user.to_string())
+        return 200, {}
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
new file mode 100644
index 0000000000..f7cc5e9be9
--- /dev/null
+++ b/synapse/rest/admin/rooms.py
@@ -0,0 +1,157 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from synapse.api.constants import Membership
+from synapse.http.servlet import (
+    RestServlet,
+    assert_params_in_dict,
+    parse_json_object_from_request,
+)
+from synapse.rest.admin._base import (
+    assert_user_is_admin,
+    historical_admin_path_patterns,
+)
+from synapse.types import create_requester
+from synapse.util.async_helpers import maybe_awaitable
+
+logger = logging.getLogger(__name__)
+
+
+class ShutdownRoomRestServlet(RestServlet):
+    """Shuts down a room by removing all local users from the room and blocking
+    all future invites and joins to the room. Any local aliases will be repointed
+    to a new room created by `new_room_user_id` and kicked users will be auto
+    joined to the new room.
+    """
+
+    PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P[^/]+)")
+
+    DEFAULT_MESSAGE = (
+        "Sharing illegal content on this server is not permitted and rooms in"
+        " violation will be blocked."
+    )
+
+    def __init__(self, hs):
+        self.hs = hs
+        self.store = hs.get_datastore()
+        self.state = hs.get_state_handler()
+        self._room_creation_handler = hs.get_room_creation_handler()
+        self.event_creation_handler = hs.get_event_creation_handler()
+        self.room_member_handler = hs.get_room_member_handler()
+        self.auth = hs.get_auth()
+
+    async def on_POST(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        content = parse_json_object_from_request(request)
+        assert_params_in_dict(content, ["new_room_user_id"])
+        new_room_user_id = content["new_room_user_id"]
+
+        room_creator_requester = create_requester(new_room_user_id)
+
+        message = content.get("message", self.DEFAULT_MESSAGE)
+        room_name = content.get("room_name", "Content Violation Notification")
+
+        info = await self._room_creation_handler.create_room(
+            room_creator_requester,
+            config={
+                "preset": "public_chat",
+                "name": room_name,
+                "power_level_content_override": {"users_default": -10},
+            },
+            ratelimit=False,
+        )
+        new_room_id = info["room_id"]
+
+        requester_user_id = requester.user.to_string()
+
+        logger.info(
+            "Shutting down room %r, joining to new room: %r", room_id, new_room_id
+        )
+
+        # This will work even if the room is already blocked, but that is
+        # desirable in case the first attempt at blocking the room failed below.
+        await self.store.block_room(room_id, requester_user_id)
+
+        users = await self.state.get_current_users_in_room(room_id)
+        kicked_users = []
+        failed_to_kick_users = []
+        for user_id in users:
+            if not self.hs.is_mine_id(user_id):
+                continue
+
+            logger.info("Kicking %r from %r...", user_id, room_id)
+
+            try:
+                target_requester = create_requester(user_id)
+                await self.room_member_handler.update_membership(
+                    requester=target_requester,
+                    target=target_requester.user,
+                    room_id=room_id,
+                    action=Membership.LEAVE,
+                    content={},
+                    ratelimit=False,
+                    require_consent=False,
+                )
+
+                await self.room_member_handler.forget(target_requester.user, room_id)
+
+                await self.room_member_handler.update_membership(
+                    requester=target_requester,
+                    target=target_requester.user,
+                    room_id=new_room_id,
+                    action=Membership.JOIN,
+                    content={},
+                    ratelimit=False,
+                    require_consent=False,
+                )
+
+                kicked_users.append(user_id)
+            except Exception:
+                logger.exception(
+                    "Failed to leave old room and join new room for %r", user_id
+                )
+                failed_to_kick_users.append(user_id)
+
+        await self.event_creation_handler.create_and_send_nonmember_event(
+            room_creator_requester,
+            {
+                "type": "m.room.message",
+                "content": {"body": message, "msgtype": "m.text"},
+                "room_id": new_room_id,
+                "sender": new_room_user_id,
+            },
+            ratelimit=False,
+        )
+
+        aliases_for_room = await maybe_awaitable(
+            self.store.get_aliases_for_room(room_id)
+        )
+
+        await self.store.update_aliases_for_room(
+            room_id, new_room_id, requester_user_id
+        )
+
+        return (
+            200,
+            {
+                "kicked_users": kicked_users,
+                "failed_to_kick_users": failed_to_kick_users,
+                "local_aliases": aliases_for_room,
+                "new_room_id": new_room_id,
+            },
+        )
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index d5d124a0dc..58a83f93af 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -12,17 +12,419 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import hashlib
+import hmac
+import logging
 import re
 
-from synapse.api.errors import SynapseError
+from six import text_type
+from six.moves import http_client
+
+from synapse.api.constants import UserTypes
+from synapse.api.errors import Codes, SynapseError
 from synapse.http.servlet import (
     RestServlet,
     assert_params_in_dict,
+    parse_integer,
     parse_json_object_from_request,
+    parse_string,
+)
+from synapse.rest.admin._base import (
+    assert_requester_is_admin,
+    assert_user_is_admin,
+    historical_admin_path_patterns,
 )
-from synapse.rest.admin import assert_requester_is_admin, assert_user_is_admin
 from synapse.types import UserID
 
+logger = logging.getLogger(__name__)
+
+
+class UsersRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/users/(?P[^/]*)$")
+
+    def __init__(self, hs):
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.admin_handler = hs.get_handlers().admin_handler
+
+    async def on_GET(self, request, user_id):
+        target_user = UserID.from_string(user_id)
+        await assert_requester_is_admin(self.auth, request)
+
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "Can only users a local user")
+
+        ret = await self.admin_handler.get_users()
+
+        return 200, ret
+
+
+class GetUsersPaginatedRestServlet(RestServlet):
+    """Get request to get specific number of users from Synapse.
+    This needs user to have administrator access in Synapse.
+        Example:
+            http://localhost:8008/_synapse/admin/v1/users_paginate/
+            @admin:user?access_token=admin_access_token&start=0&limit=10
+        Returns:
+            200 OK with json object {list[dict[str, Any]], count} or empty object.
+        """
+
+    PATTERNS = historical_admin_path_patterns(
+        "/users_paginate/(?P[^/]*)"
+    )
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.handlers = hs.get_handlers()
+
+    async def on_GET(self, request, target_user_id):
+        """Get request to get specific number of users from Synapse.
+        This needs user to have administrator access in Synapse.
+        """
+        await assert_requester_is_admin(self.auth, request)
+
+        target_user = UserID.from_string(target_user_id)
+
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "Can only users a local user")
+
+        order = "name"  # order by name in user table
+        start = parse_integer(request, "start", required=True)
+        limit = parse_integer(request, "limit", required=True)
+
+        logger.info("limit: %s, start: %s", limit, start)
+
+        ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit)
+        return 200, ret
+
+    async def on_POST(self, request, target_user_id):
+        """Post request to get specific number of users from Synapse..
+        This needs user to have administrator access in Synapse.
+        Example:
+            http://localhost:8008/_synapse/admin/v1/users_paginate/
+            @admin:user?access_token=admin_access_token
+        JsonBodyToSend:
+            {
+                "start": "0",
+                "limit": "10
+            }
+        Returns:
+            200 OK with json object {list[dict[str, Any]], count} or empty object.
+        """
+        await assert_requester_is_admin(self.auth, request)
+        UserID.from_string(target_user_id)
+
+        order = "name"  # order by name in user table
+        params = parse_json_object_from_request(request)
+        assert_params_in_dict(params, ["limit", "start"])
+        limit = params["limit"]
+        start = params["start"]
+        logger.info("limit: %s, start: %s", limit, start)
+
+        ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit)
+        return 200, ret
+
+
+class UserRegisterServlet(RestServlet):
+    """
+    Attributes:
+         NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted
+         nonces (dict[str, int]): The nonces that we will accept. A dict of
+             nonce to the time it was generated, in int seconds.
+    """
+
+    PATTERNS = historical_admin_path_patterns("/register")
+    NONCE_TIMEOUT = 60
+
+    def __init__(self, hs):
+        self.handlers = hs.get_handlers()
+        self.reactor = hs.get_reactor()
+        self.nonces = {}
+        self.hs = hs
+
+    def _clear_old_nonces(self):
+        """
+        Clear out old nonces that are older than NONCE_TIMEOUT.
+        """
+        now = int(self.reactor.seconds())
+
+        for k, v in list(self.nonces.items()):
+            if now - v > self.NONCE_TIMEOUT:
+                del self.nonces[k]
+
+    def on_GET(self, request):
+        """
+        Generate a new nonce.
+        """
+        self._clear_old_nonces()
+
+        nonce = self.hs.get_secrets().token_hex(64)
+        self.nonces[nonce] = int(self.reactor.seconds())
+        return 200, {"nonce": nonce}
+
+    async def on_POST(self, request):
+        self._clear_old_nonces()
+
+        if not self.hs.config.registration_shared_secret:
+            raise SynapseError(400, "Shared secret registration is not enabled")
+
+        body = parse_json_object_from_request(request)
+
+        if "nonce" not in body:
+            raise SynapseError(400, "nonce must be specified", errcode=Codes.BAD_JSON)
+
+        nonce = body["nonce"]
+
+        if nonce not in self.nonces:
+            raise SynapseError(400, "unrecognised nonce")
+
+        # Delete the nonce, so it can't be reused, even if it's invalid
+        del self.nonces[nonce]
+
+        if "username" not in body:
+            raise SynapseError(
+                400, "username must be specified", errcode=Codes.BAD_JSON
+            )
+        else:
+            if (
+                not isinstance(body["username"], text_type)
+                or len(body["username"]) > 512
+            ):
+                raise SynapseError(400, "Invalid username")
+
+            username = body["username"].encode("utf-8")
+            if b"\x00" in username:
+                raise SynapseError(400, "Invalid username")
+
+        if "password" not in body:
+            raise SynapseError(
+                400, "password must be specified", errcode=Codes.BAD_JSON
+            )
+        else:
+            if (
+                not isinstance(body["password"], text_type)
+                or len(body["password"]) > 512
+            ):
+                raise SynapseError(400, "Invalid password")
+
+            password = body["password"].encode("utf-8")
+            if b"\x00" in password:
+                raise SynapseError(400, "Invalid password")
+
+        admin = body.get("admin", None)
+        user_type = body.get("user_type", None)
+
+        if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
+            raise SynapseError(400, "Invalid user type")
+
+        got_mac = body["mac"]
+
+        want_mac = hmac.new(
+            key=self.hs.config.registration_shared_secret.encode(),
+            digestmod=hashlib.sha1,
+        )
+        want_mac.update(nonce.encode("utf8"))
+        want_mac.update(b"\x00")
+        want_mac.update(username)
+        want_mac.update(b"\x00")
+        want_mac.update(password)
+        want_mac.update(b"\x00")
+        want_mac.update(b"admin" if admin else b"notadmin")
+        if user_type:
+            want_mac.update(b"\x00")
+            want_mac.update(user_type.encode("utf8"))
+        want_mac = want_mac.hexdigest()
+
+        if not hmac.compare_digest(want_mac.encode("ascii"), got_mac.encode("ascii")):
+            raise SynapseError(403, "HMAC incorrect")
+
+        # Reuse the parts of RegisterRestServlet to reduce code duplication
+        from synapse.rest.client.v2_alpha.register import RegisterRestServlet
+
+        register = RegisterRestServlet(self.hs)
+
+        user_id = await register.registration_handler.register_user(
+            localpart=body["username"].lower(),
+            password=body["password"],
+            admin=bool(admin),
+            user_type=user_type,
+        )
+
+        result = await register._create_registration_details(user_id, body)
+        return 200, result
+
+
+class WhoisRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/whois/(?P[^/]*)")
+
+    def __init__(self, hs):
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.handlers = hs.get_handlers()
+
+    async def on_GET(self, request, user_id):
+        target_user = UserID.from_string(user_id)
+        requester = await self.auth.get_user_by_req(request)
+        auth_user = requester.user
+
+        if target_user != auth_user:
+            await assert_user_is_admin(self.auth, auth_user)
+
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "Can only whois a local user")
+
+        ret = await self.handlers.admin_handler.get_whois(target_user)
+
+        return 200, ret
+
+
+class DeactivateAccountRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/deactivate/(?P[^/]*)")
+
+    def __init__(self, hs):
+        self._deactivate_account_handler = hs.get_deactivate_account_handler()
+        self.auth = hs.get_auth()
+
+    async def on_POST(self, request, target_user_id):
+        await assert_requester_is_admin(self.auth, request)
+        body = parse_json_object_from_request(request, allow_empty_body=True)
+        erase = body.get("erase", False)
+        if not isinstance(erase, bool):
+            raise SynapseError(
+                http_client.BAD_REQUEST,
+                "Param 'erase' must be a boolean, if given",
+                Codes.BAD_JSON,
+            )
+
+        UserID.from_string(target_user_id)
+
+        result = await self._deactivate_account_handler.deactivate_account(
+            target_user_id, erase
+        )
+        if result:
+            id_server_unbind_result = "success"
+        else:
+            id_server_unbind_result = "no-support"
+
+        return 200, {"id_server_unbind_result": id_server_unbind_result}
+
+
+class AccountValidityRenewServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/account_validity/validity$")
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        self.hs = hs
+        self.account_activity_handler = hs.get_account_validity_handler()
+        self.auth = hs.get_auth()
+
+    async def on_POST(self, request):
+        await assert_requester_is_admin(self.auth, request)
+
+        body = parse_json_object_from_request(request)
+
+        if "user_id" not in body:
+            raise SynapseError(400, "Missing property 'user_id' in the request body")
+
+        expiration_ts = await self.account_activity_handler.renew_account_for_user(
+            body["user_id"],
+            body.get("expiration_ts"),
+            not body.get("enable_renewal_emails", True),
+        )
+
+        res = {"expiration_ts": expiration_ts}
+        return 200, res
+
+
+class ResetPasswordRestServlet(RestServlet):
+    """Post request to allow an administrator reset password for a user.
+    This needs user to have administrator access in Synapse.
+        Example:
+            http://localhost:8008/_synapse/admin/v1/reset_password/
+            @user:to_reset_password?access_token=admin_access_token
+        JsonBodyToSend:
+            {
+                "new_password": "secret"
+            }
+        Returns:
+            200 OK with empty object if success otherwise an error.
+        """
+
+    PATTERNS = historical_admin_path_patterns(
+        "/reset_password/(?P[^/]*)"
+    )
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self._set_password_handler = hs.get_set_password_handler()
+
+    async def on_POST(self, request, target_user_id):
+        """Post request to allow an administrator reset password for a user.
+        This needs user to have administrator access in Synapse.
+        """
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        UserID.from_string(target_user_id)
+
+        params = parse_json_object_from_request(request)
+        assert_params_in_dict(params, ["new_password"])
+        new_password = params["new_password"]
+
+        await self._set_password_handler.set_password(
+            target_user_id, new_password, requester
+        )
+        return 200, {}
+
+
+class SearchUsersRestServlet(RestServlet):
+    """Get request to search user table for specific users according to
+    search term.
+    This needs user to have administrator access in Synapse.
+        Example:
+            http://localhost:8008/_synapse/admin/v1/search_users/
+            @admin:user?access_token=admin_access_token&term=alice
+        Returns:
+            200 OK with json object {list[dict[str, Any]], count} or empty object.
+    """
+
+    PATTERNS = historical_admin_path_patterns("/search_users/(?P[^/]*)")
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.handlers = hs.get_handlers()
+
+    async def on_GET(self, request, target_user_id):
+        """Get request to search user table for specific users according to
+        search term.
+        This needs user to have a administrator access in Synapse.
+        """
+        await assert_requester_is_admin(self.auth, request)
+
+        target_user = UserID.from_string(target_user_id)
+
+        # To allow all users to get the users list
+        # if not is_admin and target_user != auth_user:
+        #     raise AuthError(403, "You are not a server admin")
+
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "Can only users a local user")
+
+        term = parse_string(request, "term", required=True)
+        logger.info("term: %s ", term)
+
+        ret = await self.handlers.admin_handler.search_users(term)
+        return 200, ret
+
 
 class UserAdminServlet(RestServlet):
     """
-- 
cgit 1.4.1

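The shared-secret registration flow in `UserRegisterServlet` above is two
requests: a GET for a single-use nonce, then a POST whose `mac` must be the
HMAC-SHA1, keyed with the configured shared secret, of the nonce, username,
password and admin flag joined by NUL bytes (plus the user type, if given).
A client-side sketch mirroring that construction; the homeserver URL, secret
and credentials below are placeholders:

    import hashlib
    import hmac

    import requests

    HOMESERVER = "http://localhost:8008"          # placeholder homeserver URL
    SHARED_SECRET = "registration_shared_secret"  # placeholder; must match the config

    # Step 1: fetch a single-use nonce; it expires after NONCE_TIMEOUT (60s).
    nonce = requests.get(f"{HOMESERVER}/_synapse/admin/v1/register").json()["nonce"]

    # Step 2: build the MAC the same way the servlet builds want_mac:
    # HMAC-SHA1 over nonce, username, password and the admin flag, NUL-separated.
    mac = hmac.new(key=SHARED_SECRET.encode(), digestmod=hashlib.sha1)
    mac.update(nonce.encode("utf8"))
    mac.update(b"\x00")
    mac.update(b"alice")        # username (placeholder)
    mac.update(b"\x00")
    mac.update(b"wonderland")   # password (placeholder)
    mac.update(b"\x00")
    mac.update(b"notadmin")     # b"admin" would create an admin account

    # Step 3: register. The servlet deletes the nonce even if this request fails.
    resp = requests.post(
        f"{HOMESERVER}/_synapse/admin/v1/register",
        json={
            "nonce": nonce,
            "username": "alice",
            "password": "wonderland",
            "admin": False,
            "mac": mac.hexdigest(),
        },
    )
    print(resp.json())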

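Similarly, the account-validity renewal endpoint above takes a JSON body whose
only required property is `user_id`; `expiration_ts` and `enable_renewal_emails`
are optional, as the handler shows. A sketch with placeholder values:

    import requests

    HOMESERVER = "http://localhost:8008"  # placeholder homeserver URL
    ADMIN_TOKEN = "admin_access_token"    # placeholder admin access token

    resp = requests.post(
        f"{HOMESERVER}/_synapse/admin/v1/account_validity/validity",
        params={"access_token": ADMIN_TOKEN},
        json={
            "user_id": "@alice:example.com",  # required
            # "expiration_ts": 1893456000000, # optional, ms since epoch
            # "enable_renewal_emails": True,  # optional, defaults to True
        },
    )
    print(resp.json())  # {"expiration_ts": <new expiry in ms>}
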
From 234f55f3c4295f08399b725cda8a8aa4b559f1f5 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 20 Nov 2019 13:32:31 +0000
Subject: Docker: Change permissions for data dir before attempting to write to
 it (#6389)

---
 changelog.d/6389.bugfix | 1 +
 docker/start.py         | 6 +++---
 2 files changed, 4 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6389.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6389.bugfix b/changelog.d/6389.bugfix
new file mode 100644
index 0000000000..c553622b02
--- /dev/null
+++ b/changelog.d/6389.bugfix
@@ -0,0 +1 @@
+Fix permission denied error when trying to generate a config file with the docker image.
\ No newline at end of file
diff --git a/docker/start.py b/docker/start.py
index 6e1cb807a1..97fd247f8f 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -169,11 +169,11 @@ def run_generate_config(environ, ownership):
     # log("running %s" % (args, ))
 
     if ownership is not None:
-        args = ["su-exec", ownership] + args
-        os.execv("/sbin/su-exec", args)
-
         # make sure that synapse has perms to write to the data dir.
         subprocess.check_output(["chown", ownership, data_dir])
+
+        args = ["su-exec", ownership] + args
+        os.execv("/sbin/su-exec", args)
     else:
         os.execv("/usr/local/bin/python", args)
 
-- 
cgit 1.4.1

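The fix works because `os.execv` replaces the current process image and never
returns, so the `chown` that previously followed it was dead code and the data
directory was never re-owned. A minimal illustration, assuming a POSIX system
with /bin/echo:

    import os

    print("before exec")
    os.execv("/bin/echo", ["echo", "after exec"])
    # Anything past a successful execv is unreachable: the interpreter
    # has been replaced by /bin/echo at this point.
    print("never reached")
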

From 41e4566682946fca8600214969c4e9562b5a9315 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Wed, 20 Nov 2019 14:12:42 +0000
Subject: 1.6.0rc1

---
 CHANGES.md               | 74 ++++++++++++++++++++++++++++++++++++++++++++++++
 changelog.d/5727.feature |  1 -
 changelog.d/6140.misc    |  1 -
 changelog.d/6164.doc     |  1 -
 changelog.d/6213.bugfix  |  1 -
 changelog.d/6218.misc    |  1 -
 changelog.d/6220.feature |  1 -
 changelog.d/6232.bugfix  |  1 -
 changelog.d/6235.bugfix  |  1 -
 changelog.d/6238.feature |  1 -
 changelog.d/6240.misc    |  1 -
 changelog.d/6250.misc    |  1 -
 changelog.d/6251.misc    |  1 -
 changelog.d/6253.bugfix  |  1 -
 changelog.d/6254.bugfix  |  1 -
 changelog.d/6257.doc     |  1 -
 changelog.d/6259.misc    |  1 -
 changelog.d/6263.misc    |  1 -
 changelog.d/6269.misc    |  1 -
 changelog.d/6270.misc    |  1 -
 changelog.d/6271.misc    |  1 -
 changelog.d/6272.doc     |  1 -
 changelog.d/6273.doc     |  1 -
 changelog.d/6274.misc    |  1 -
 changelog.d/6275.misc    |  1 -
 changelog.d/6276.misc    |  1 -
 changelog.d/6277.misc    |  1 -
 changelog.d/6278.bugfix  |  1 -
 changelog.d/6279.misc    |  1 -
 changelog.d/6280.misc    |  1 -
 changelog.d/6284.bugfix  |  1 -
 changelog.d/6291.misc    |  1 -
 changelog.d/6294.misc    |  1 -
 changelog.d/6295.misc    |  1 -
 changelog.d/6298.misc    |  1 -
 changelog.d/6300.misc    |  1 -
 changelog.d/6301.feature |  1 -
 changelog.d/6304.misc    |  1 -
 changelog.d/6305.misc    |  1 -
 changelog.d/6306.bugfix  |  1 -
 changelog.d/6307.bugfix  |  1 -
 changelog.d/6308.misc    |  1 -
 changelog.d/6310.feature |  1 -
 changelog.d/6312.misc    |  1 -
 changelog.d/6313.bugfix  |  1 -
 changelog.d/6314.misc    |  1 -
 changelog.d/6317.misc    |  1 -
 changelog.d/6318.misc    |  1 -
 changelog.d/6319.misc    |  1 -
 changelog.d/6320.bugfix  |  1 -
 changelog.d/6330.misc    |  1 -
 changelog.d/6335.bugfix  |  1 -
 changelog.d/6336.misc    |  1 -
 changelog.d/6338.bugfix  |  1 -
 changelog.d/6340.feature |  1 -
 changelog.d/6341.misc    |  1 -
 changelog.d/6357.misc    |  1 -
 changelog.d/6359.bugfix  |  1 -
 changelog.d/6361.misc    |  1 -
 changelog.d/6363.bugfix  |  1 -
 changelog.d/6389.bugfix  |  1 -
 synapse/__init__.py      |  2 +-
 62 files changed, 75 insertions(+), 61 deletions(-)
 delete mode 100644 changelog.d/5727.feature
 delete mode 100644 changelog.d/6140.misc
 delete mode 100644 changelog.d/6164.doc
 delete mode 100644 changelog.d/6213.bugfix
 delete mode 100644 changelog.d/6218.misc
 delete mode 100644 changelog.d/6220.feature
 delete mode 100644 changelog.d/6232.bugfix
 delete mode 100644 changelog.d/6235.bugfix
 delete mode 100644 changelog.d/6238.feature
 delete mode 100644 changelog.d/6240.misc
 delete mode 100644 changelog.d/6250.misc
 delete mode 100644 changelog.d/6251.misc
 delete mode 100644 changelog.d/6253.bugfix
 delete mode 100644 changelog.d/6254.bugfix
 delete mode 100644 changelog.d/6257.doc
 delete mode 100644 changelog.d/6259.misc
 delete mode 100644 changelog.d/6263.misc
 delete mode 100644 changelog.d/6269.misc
 delete mode 100644 changelog.d/6270.misc
 delete mode 100644 changelog.d/6271.misc
 delete mode 100644 changelog.d/6272.doc
 delete mode 100644 changelog.d/6273.doc
 delete mode 100644 changelog.d/6274.misc
 delete mode 100644 changelog.d/6275.misc
 delete mode 100644 changelog.d/6276.misc
 delete mode 100644 changelog.d/6277.misc
 delete mode 100644 changelog.d/6278.bugfix
 delete mode 100644 changelog.d/6279.misc
 delete mode 100644 changelog.d/6280.misc
 delete mode 100644 changelog.d/6284.bugfix
 delete mode 100644 changelog.d/6291.misc
 delete mode 100644 changelog.d/6294.misc
 delete mode 100644 changelog.d/6295.misc
 delete mode 100644 changelog.d/6298.misc
 delete mode 100644 changelog.d/6300.misc
 delete mode 100644 changelog.d/6301.feature
 delete mode 100644 changelog.d/6304.misc
 delete mode 100644 changelog.d/6305.misc
 delete mode 100644 changelog.d/6306.bugfix
 delete mode 100644 changelog.d/6307.bugfix
 delete mode 100644 changelog.d/6308.misc
 delete mode 100644 changelog.d/6310.feature
 delete mode 100644 changelog.d/6312.misc
 delete mode 100644 changelog.d/6313.bugfix
 delete mode 100644 changelog.d/6314.misc
 delete mode 100644 changelog.d/6317.misc
 delete mode 100644 changelog.d/6318.misc
 delete mode 100644 changelog.d/6319.misc
 delete mode 100644 changelog.d/6320.bugfix
 delete mode 100644 changelog.d/6330.misc
 delete mode 100644 changelog.d/6335.bugfix
 delete mode 100644 changelog.d/6336.misc
 delete mode 100644 changelog.d/6338.bugfix
 delete mode 100644 changelog.d/6340.feature
 delete mode 100644 changelog.d/6341.misc
 delete mode 100644 changelog.d/6357.misc
 delete mode 100644 changelog.d/6359.bugfix
 delete mode 100644 changelog.d/6361.misc
 delete mode 100644 changelog.d/6363.bugfix
 delete mode 100644 changelog.d/6389.bugfix

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index 9312dc2941..f4f61db5d4 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,77 @@
+Synapse 1.6.0rc1 (2019-11-20)
+=============================
+
+Features
+--------
+
+- Add federation support for cross-signing. ([\#5727](https://github.com/matrix-org/synapse/issues/5727))
+- Increase default room version from 4 to 5, thereby enforcing server key validity period checks. ([\#6220](https://github.com/matrix-org/synapse/issues/6220))
+- Add support for outbound http proxying via http_proxy/HTTPS_PROXY env vars. ([\#6238](https://github.com/matrix-org/synapse/issues/6238))
+- Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)). ([\#6301](https://github.com/matrix-org/synapse/issues/6301), [\#6310](https://github.com/matrix-org/synapse/issues/6310), [\#6340](https://github.com/matrix-org/synapse/issues/6340))
+
+
+Bugfixes
+--------
+
+- Fix LruCache callback deduplication for Python 3.8. Contributed by @V02460. ([\#6213](https://github.com/matrix-org/synapse/issues/6213))
+- Remove a room from a server's public rooms list on room upgrade. ([\#6232](https://github.com/matrix-org/synapse/issues/6232), [\#6235](https://github.com/matrix-org/synapse/issues/6235))
+- Delete keys from key backup when deleting backup versions. ([\#6253](https://github.com/matrix-org/synapse/issues/6253))
+- Make notification of cross-signing signatures work with workers. ([\#6254](https://github.com/matrix-org/synapse/issues/6254))
+- Fix exception when remote servers attempt to join a room that they're not allowed to join. ([\#6278](https://github.com/matrix-org/synapse/issues/6278))
+- Prevent errors from appearing on Synapse startup if `git` is not installed. ([\#6284](https://github.com/matrix-org/synapse/issues/6284))
+- Appservice requests will no longer contain a double slash prefix when the appservice url provided ends in a slash. ([\#6306](https://github.com/matrix-org/synapse/issues/6306))
+- Fix `/purge_room` admin API. ([\#6307](https://github.com/matrix-org/synapse/issues/6307))
+- Fix the `hidden` field in the `devices` table for SQLite versions prior to 3.23.0. ([\#6313](https://github.com/matrix-org/synapse/issues/6313))
+- Fix bug which casued rejected events to be persisted with the wrong room state. ([\#6320](https://github.com/matrix-org/synapse/issues/6320))
+- Fix bug where `rc_login` ratelimiting would prematurely kick in. ([\#6335](https://github.com/matrix-org/synapse/issues/6335))
+- Prevent the server taking a long time to start up when guest registration is enabled. ([\#6338](https://github.com/matrix-org/synapse/issues/6338))
+- Fix bug where upgrading a guest account to a full user would fail when account validity is enabled. ([\#6359](https://github.com/matrix-org/synapse/issues/6359))
+- Fix `to_device` stream ID getting reset every time Synapse restarts, which had the potential to cause unable to decrypt errors. ([\#6363](https://github.com/matrix-org/synapse/issues/6363))
+- Fix permission denied error when trying to generate a config file with the docker image. ([\#6389](https://github.com/matrix-org/synapse/issues/6389))
+
+
+Improved Documentation
+----------------------
+
+- Contributor documentation now mentions script to run linters. ([\#6164](https://github.com/matrix-org/synapse/issues/6164))
+- Modify CAPTCHA_SETUP.md to update the terms `private key` and `public key` to `secret key` and `site key` respectively. Contributed by Yash Jipkate. ([\#6257](https://github.com/matrix-org/synapse/issues/6257))
+- Update `INSTALL.md` Email section to talk about `account_threepid_delegates`. ([\#6272](https://github.com/matrix-org/synapse/issues/6272))
+- Fix a small typo in `account_threepid_delegates` configuration option. ([\#6273](https://github.com/matrix-org/synapse/issues/6273))
+
+
+Internal Changes
+----------------
+
+- Add a CI job to test the `synapse_port_db` script. ([\#6140](https://github.com/matrix-org/synapse/issues/6140), [\#6276](https://github.com/matrix-org/synapse/issues/6276))
+- Convert EventContext to an attrs. ([\#6218](https://github.com/matrix-org/synapse/issues/6218))
+- Move `persist_events` out from main data store. ([\#6240](https://github.com/matrix-org/synapse/issues/6240), [\#6300](https://github.com/matrix-org/synapse/issues/6300))
+- Reduce verbosity of user/room stats. ([\#6250](https://github.com/matrix-org/synapse/issues/6250))
+- Reduce impact of debug logging. ([\#6251](https://github.com/matrix-org/synapse/issues/6251))
+- Expose some homeserver functionality to spam checkers. ([\#6259](https://github.com/matrix-org/synapse/issues/6259))
+- Change cache descriptors to always return deferreds. ([\#6263](https://github.com/matrix-org/synapse/issues/6263), [\#6291](https://github.com/matrix-org/synapse/issues/6291))
+- Fix incorrect comment regarding the functionality of an `if` statement. ([\#6269](https://github.com/matrix-org/synapse/issues/6269))
+- Update CI to run `isort` over the `scripts` and `scripts-dev` directories. ([\#6270](https://github.com/matrix-org/synapse/issues/6270))
+- Replace every instance of `logger.warn` method with `logger.warning` as the former is deprecated. ([\#6271](https://github.com/matrix-org/synapse/issues/6271), [\#6314](https://github.com/matrix-org/synapse/issues/6314))
+- Port replication http server endpoints to async/await. ([\#6274](https://github.com/matrix-org/synapse/issues/6274))
+- Port room rest handlers to async/await. ([\#6275](https://github.com/matrix-org/synapse/issues/6275))
+- Remove redundant CLI parameters on CI's `flake8` step. ([\#6277](https://github.com/matrix-org/synapse/issues/6277))
+- Port `federation_server.py` to async/await. ([\#6279](https://github.com/matrix-org/synapse/issues/6279))
+- Port receipt and read markers to async/wait. ([\#6280](https://github.com/matrix-org/synapse/issues/6280))
+- Split out state storage into separate data store. ([\#6294](https://github.com/matrix-org/synapse/issues/6294), [\#6295](https://github.com/matrix-org/synapse/issues/6295))
+- Refactor EventContext for clarity. ([\#6298](https://github.com/matrix-org/synapse/issues/6298))
+- Update the version of black used to 19.10b0. ([\#6304](https://github.com/matrix-org/synapse/issues/6304))
+- Add some documentation about worker replication. ([\#6305](https://github.com/matrix-org/synapse/issues/6305))
+- Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#6308](https://github.com/matrix-org/synapse/issues/6308))
+- Document the use of `lint.sh` for code style enforcement & extend it to run on specified paths only. ([\#6312](https://github.com/matrix-org/synapse/issues/6312))
+- Add optional python dependencies and dependant binary libraries to snapcraft packaging. ([\#6317](https://github.com/matrix-org/synapse/issues/6317))
+- Remove the dependency on psutil and replace functionality with the stdlib `resource` module. ([\#6318](https://github.com/matrix-org/synapse/issues/6318), [\#6336](https://github.com/matrix-org/synapse/issues/6336))
+- Improve documentation for EventContext fields. ([\#6319](https://github.com/matrix-org/synapse/issues/6319))
+- Add some checks that we aren't using state from rejected events. ([\#6330](https://github.com/matrix-org/synapse/issues/6330))
+- Add continuous integration for python 3.8. ([\#6341](https://github.com/matrix-org/synapse/issues/6341))
+- Correct spacing/case of various instances of the word "homeserver". ([\#6357](https://github.com/matrix-org/synapse/issues/6357))
+- Temporarily blacklist the failing unit test PurgeRoomTestCase.test_purge_room. ([\#6361](https://github.com/matrix-org/synapse/issues/6361))
+
+
 Synapse 1.5.1 (2019-11-06)
 ==========================
 
diff --git a/changelog.d/5727.feature b/changelog.d/5727.feature
deleted file mode 100644
index 819bebf2d7..0000000000
--- a/changelog.d/5727.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add federation support for cross-signing.
diff --git a/changelog.d/6140.misc b/changelog.d/6140.misc
deleted file mode 100644
index 0feb97ec61..0000000000
--- a/changelog.d/6140.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a CI job to test the `synapse_port_db` script.
\ No newline at end of file
diff --git a/changelog.d/6164.doc b/changelog.d/6164.doc
deleted file mode 100644
index f9395b02b3..0000000000
--- a/changelog.d/6164.doc
+++ /dev/null
@@ -1 +0,0 @@
-Contributor documentation now mentions script to run linters.
diff --git a/changelog.d/6213.bugfix b/changelog.d/6213.bugfix
deleted file mode 100644
index 2bb2d08851..0000000000
--- a/changelog.d/6213.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix LruCache callback deduplication for Python 3.8. Contributed by @V02460.
diff --git a/changelog.d/6218.misc b/changelog.d/6218.misc
deleted file mode 100644
index 49d10c36cf..0000000000
--- a/changelog.d/6218.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert EventContext to an attrs.
diff --git a/changelog.d/6220.feature b/changelog.d/6220.feature
deleted file mode 100644
index 8343e9912b..0000000000
--- a/changelog.d/6220.feature
+++ /dev/null
@@ -1 +0,0 @@
-Increase default room version from 4 to 5, thereby enforcing server key validity period checks.
diff --git a/changelog.d/6232.bugfix b/changelog.d/6232.bugfix
deleted file mode 100644
index 12718ba934..0000000000
--- a/changelog.d/6232.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Remove a room from a server's public rooms list on room upgrade.
\ No newline at end of file
diff --git a/changelog.d/6235.bugfix b/changelog.d/6235.bugfix
deleted file mode 100644
index 12718ba934..0000000000
--- a/changelog.d/6235.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Remove a room from a server's public rooms list on room upgrade.
\ No newline at end of file
diff --git a/changelog.d/6238.feature b/changelog.d/6238.feature
deleted file mode 100644
index d225ac33b6..0000000000
--- a/changelog.d/6238.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for outbound http proxying via http_proxy/HTTPS_PROXY env vars.
diff --git a/changelog.d/6240.misc b/changelog.d/6240.misc
deleted file mode 100644
index 0b3d7a14a1..0000000000
--- a/changelog.d/6240.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move `persist_events` out from main data store.
diff --git a/changelog.d/6250.misc b/changelog.d/6250.misc
deleted file mode 100644
index 12e3fe66b0..0000000000
--- a/changelog.d/6250.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce verbosity of user/room stats.
diff --git a/changelog.d/6251.misc b/changelog.d/6251.misc
deleted file mode 100644
index 371c6983be..0000000000
--- a/changelog.d/6251.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce impact of debug logging.
diff --git a/changelog.d/6253.bugfix b/changelog.d/6253.bugfix
deleted file mode 100644
index 266fae381c..0000000000
--- a/changelog.d/6253.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Delete keys from key backup when deleting backup versions.
diff --git a/changelog.d/6254.bugfix b/changelog.d/6254.bugfix
deleted file mode 100644
index 3181484b88..0000000000
--- a/changelog.d/6254.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Make notification of cross-signing signatures work with workers.
diff --git a/changelog.d/6257.doc b/changelog.d/6257.doc
deleted file mode 100644
index e985afde0e..0000000000
--- a/changelog.d/6257.doc
+++ /dev/null
@@ -1 +0,0 @@
-Modify CAPTCHA_SETUP.md to update the terms `private key` and `public key` to `secret key` and `site key` respectively. Contributed by Yash Jipkate.
diff --git a/changelog.d/6259.misc b/changelog.d/6259.misc
deleted file mode 100644
index 3ff81b1ac7..0000000000
--- a/changelog.d/6259.misc
+++ /dev/null
@@ -1 +0,0 @@
-Expose some homeserver functionality to spam checkers.
diff --git a/changelog.d/6263.misc b/changelog.d/6263.misc
deleted file mode 100644
index 7b1bb4b679..0000000000
--- a/changelog.d/6263.misc
+++ /dev/null
@@ -1 +0,0 @@
-Change cache descriptors to always return deferreds.
diff --git a/changelog.d/6269.misc b/changelog.d/6269.misc
deleted file mode 100644
index 9fd333cc89..0000000000
--- a/changelog.d/6269.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix incorrect comment regarding the functionality of an `if` statement.
\ No newline at end of file
diff --git a/changelog.d/6270.misc b/changelog.d/6270.misc
deleted file mode 100644
index d1c5811323..0000000000
--- a/changelog.d/6270.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update CI to run `isort` over the `scripts` and `scripts-dev` directories.
\ No newline at end of file
diff --git a/changelog.d/6271.misc b/changelog.d/6271.misc
deleted file mode 100644
index 2369760272..0000000000
--- a/changelog.d/6271.misc
+++ /dev/null
@@ -1 +0,0 @@
-Replace every instance of `logger.warn` method with `logger.warning` as the former is deprecated.
\ No newline at end of file
diff --git a/changelog.d/6272.doc b/changelog.d/6272.doc
deleted file mode 100644
index 232180bcdc..0000000000
--- a/changelog.d/6272.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update `INSTALL.md` Email section to talk about `account_threepid_delegates`.
\ No newline at end of file
diff --git a/changelog.d/6273.doc b/changelog.d/6273.doc
deleted file mode 100644
index 21a41d987d..0000000000
--- a/changelog.d/6273.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix a small typo in `account_threepid_delegates` configuration option.
\ No newline at end of file
diff --git a/changelog.d/6274.misc b/changelog.d/6274.misc
deleted file mode 100644
index eb4966124f..0000000000
--- a/changelog.d/6274.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port replication http server endpoints to async/await.
diff --git a/changelog.d/6275.misc b/changelog.d/6275.misc
deleted file mode 100644
index f57e2c4adb..0000000000
--- a/changelog.d/6275.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port room rest handlers to async/await.
diff --git a/changelog.d/6276.misc b/changelog.d/6276.misc
deleted file mode 100644
index 4a4428251e..0000000000
--- a/changelog.d/6276.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a CI job to test the `synapse_port_db` script.
diff --git a/changelog.d/6277.misc b/changelog.d/6277.misc
deleted file mode 100644
index 490713577f..0000000000
--- a/changelog.d/6277.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove redundant CLI parameters on CI's `flake8` step.
\ No newline at end of file
diff --git a/changelog.d/6278.bugfix b/changelog.d/6278.bugfix
deleted file mode 100644
index c107270461..0000000000
--- a/changelog.d/6278.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix exception when remote servers attempt to join a room that they're not allowed to join.
diff --git a/changelog.d/6279.misc b/changelog.d/6279.misc
deleted file mode 100644
index 5f5144a9ee..0000000000
--- a/changelog.d/6279.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port `federation_server.py` to async/await.
diff --git a/changelog.d/6280.misc b/changelog.d/6280.misc
deleted file mode 100644
index 96a0eb21b2..0000000000
--- a/changelog.d/6280.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port receipt and read markers to async/wait.
diff --git a/changelog.d/6284.bugfix b/changelog.d/6284.bugfix
deleted file mode 100644
index cf15053d2d..0000000000
--- a/changelog.d/6284.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent errors from appearing on Synapse startup if `git` is not installed.
\ No newline at end of file
diff --git a/changelog.d/6291.misc b/changelog.d/6291.misc
deleted file mode 100644
index 7b1bb4b679..0000000000
--- a/changelog.d/6291.misc
+++ /dev/null
@@ -1 +0,0 @@
-Change cache descriptors to always return deferreds.
diff --git a/changelog.d/6294.misc b/changelog.d/6294.misc
deleted file mode 100644
index a3e6b8296e..0000000000
--- a/changelog.d/6294.misc
+++ /dev/null
@@ -1 +0,0 @@
-Split out state storage into separate data store.
diff --git a/changelog.d/6295.misc b/changelog.d/6295.misc
deleted file mode 100644
index a3e6b8296e..0000000000
--- a/changelog.d/6295.misc
+++ /dev/null
@@ -1 +0,0 @@
-Split out state storage into separate data store.
diff --git a/changelog.d/6298.misc b/changelog.d/6298.misc
deleted file mode 100644
index d4190730b2..0000000000
--- a/changelog.d/6298.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor EventContext for clarity.
\ No newline at end of file
diff --git a/changelog.d/6300.misc b/changelog.d/6300.misc
deleted file mode 100644
index 0b3d7a14a1..0000000000
--- a/changelog.d/6300.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move `persist_events` out from main data store.
diff --git a/changelog.d/6301.feature b/changelog.d/6301.feature
deleted file mode 100644
index 78a187a1dc..0000000000
--- a/changelog.d/6301.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)).
diff --git a/changelog.d/6304.misc b/changelog.d/6304.misc
deleted file mode 100644
index 20372b4f7c..0000000000
--- a/changelog.d/6304.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update the version of black used to 19.10b0.
diff --git a/changelog.d/6305.misc b/changelog.d/6305.misc
deleted file mode 100644
index f047fc3062..0000000000
--- a/changelog.d/6305.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some documentation about worker replication.
diff --git a/changelog.d/6306.bugfix b/changelog.d/6306.bugfix
deleted file mode 100644
index c7dcbcdce8..0000000000
--- a/changelog.d/6306.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Appservice requests will no longer contain a double slash prefix when the appservice url provided ends in a slash.
diff --git a/changelog.d/6307.bugfix b/changelog.d/6307.bugfix
deleted file mode 100644
index f2917c5053..0000000000
--- a/changelog.d/6307.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix `/purge_room` admin API.
diff --git a/changelog.d/6308.misc b/changelog.d/6308.misc
deleted file mode 100644
index 72be63ba4b..0000000000
--- a/changelog.d/6308.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH.
\ No newline at end of file
diff --git a/changelog.d/6310.feature b/changelog.d/6310.feature
deleted file mode 100644
index 78a187a1dc..0000000000
--- a/changelog.d/6310.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)).
diff --git a/changelog.d/6312.misc b/changelog.d/6312.misc
deleted file mode 100644
index 55e3e1654d..0000000000
--- a/changelog.d/6312.misc
+++ /dev/null
@@ -1 +0,0 @@
-Document the use of `lint.sh` for code style enforcement & extend it to run on specified paths only.
diff --git a/changelog.d/6313.bugfix b/changelog.d/6313.bugfix
deleted file mode 100644
index f4d4a97f00..0000000000
--- a/changelog.d/6313.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix the `hidden` field in the `devices` table for SQLite versions prior to 3.23.0.
diff --git a/changelog.d/6314.misc b/changelog.d/6314.misc
deleted file mode 100644
index 2369760272..0000000000
--- a/changelog.d/6314.misc
+++ /dev/null
@@ -1 +0,0 @@
-Replace every instance of `logger.warn` method with `logger.warning` as the former is deprecated.
\ No newline at end of file
diff --git a/changelog.d/6317.misc b/changelog.d/6317.misc
deleted file mode 100644
index a67d13fa72..0000000000
--- a/changelog.d/6317.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add optional python dependencies and dependant binary libraries to snapcraft packaging.
diff --git a/changelog.d/6318.misc b/changelog.d/6318.misc
deleted file mode 100644
index 63527ccef4..0000000000
--- a/changelog.d/6318.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove the dependency on psutil and replace functionality with the stdlib `resource` module.
diff --git a/changelog.d/6319.misc b/changelog.d/6319.misc
deleted file mode 100644
index 9711ef21ed..0000000000
--- a/changelog.d/6319.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve documentation for EventContext fields.
diff --git a/changelog.d/6320.bugfix b/changelog.d/6320.bugfix
deleted file mode 100644
index 2c3fad5655..0000000000
--- a/changelog.d/6320.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug which casued rejected events to be persisted with the wrong room state.
diff --git a/changelog.d/6330.misc b/changelog.d/6330.misc
deleted file mode 100644
index 6239cba263..0000000000
--- a/changelog.d/6330.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some checks that we aren't using state from rejected events.
diff --git a/changelog.d/6335.bugfix b/changelog.d/6335.bugfix
deleted file mode 100644
index a95f6b9eec..0000000000
--- a/changelog.d/6335.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where `rc_login` ratelimiting would prematurely kick in.
diff --git a/changelog.d/6336.misc b/changelog.d/6336.misc
deleted file mode 100644
index 63527ccef4..0000000000
--- a/changelog.d/6336.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove the dependency on psutil and replace functionality with the stdlib `resource` module.
diff --git a/changelog.d/6338.bugfix b/changelog.d/6338.bugfix
deleted file mode 100644
index 8e469f0fb6..0000000000
--- a/changelog.d/6338.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent the server taking a long time to start up when guest registration is enabled.
\ No newline at end of file
diff --git a/changelog.d/6340.feature b/changelog.d/6340.feature
deleted file mode 100644
index 78a187a1dc..0000000000
--- a/changelog.d/6340.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)).
diff --git a/changelog.d/6341.misc b/changelog.d/6341.misc
deleted file mode 100644
index 359b9bf1d7..0000000000
--- a/changelog.d/6341.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add continuous integration for python 3.8.
\ No newline at end of file
diff --git a/changelog.d/6357.misc b/changelog.d/6357.misc
deleted file mode 100644
index a68df0f384..0000000000
--- a/changelog.d/6357.misc
+++ /dev/null
@@ -1 +0,0 @@
-Correct spacing/case of various instances of the word "homeserver".
\ No newline at end of file
diff --git a/changelog.d/6359.bugfix b/changelog.d/6359.bugfix
deleted file mode 100644
index 22bf5f642a..0000000000
--- a/changelog.d/6359.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where upgrading a guest account to a full user would fail when account validity is enabled.
\ No newline at end of file
diff --git a/changelog.d/6361.misc b/changelog.d/6361.misc
deleted file mode 100644
index 324d74ebf9..0000000000
--- a/changelog.d/6361.misc
+++ /dev/null
@@ -1 +0,0 @@
-Temporarily blacklist the failing unit test PurgeRoomTestCase.test_purge_room.
diff --git a/changelog.d/6363.bugfix b/changelog.d/6363.bugfix
deleted file mode 100644
index d023b49181..0000000000
--- a/changelog.d/6363.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix `to_device` stream ID getting reset every time Synapse restarts, which had the potential to cause unable to decrypt errors.
\ No newline at end of file
diff --git a/changelog.d/6389.bugfix b/changelog.d/6389.bugfix
deleted file mode 100644
index c553622b02..0000000000
--- a/changelog.d/6389.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix permission denied error when trying to generate a config file with the docker image.
\ No newline at end of file
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 1c27d68009..1d962f5dc8 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.5.1"
+__version__ = "1.6.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 486be06f4877842dfb109caac42ab052e09fd5b0 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 20 Nov 2019 15:08:03 +0000
Subject: Changelog

---
 changelog.d/6392.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6392.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6392.misc b/changelog.d/6392.misc
new file mode 100644
index 0000000000..a00257944f
--- /dev/null
+++ b/changelog.d/6392.misc
@@ -0,0 +1 @@
+Add a test scenario to make sure room history purges don't break `/messages` in the future.
-- 
cgit 1.4.1


From 49243c55a4c0a9dd82d3ba95f111bc2df430b587 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 20 Nov 2019 16:09:11 +0000
Subject: Update changelog since this isn't going to be featured in 1.6.0

---
 changelog.d/6329.feature | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6329.feature b/changelog.d/6329.feature
index 78a187a1dc..48263cdd86 100644
--- a/changelog.d/6329.feature
+++ b/changelog.d/6329.feature
@@ -1 +1 @@
-Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)).
+Filter state, events_before and events_after in /context requests.
-- 
cgit 1.4.1


From b2f8c21a9b9389251c9343166c63b003fad278a2 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 20 Nov 2019 16:10:27 +0000
Subject: Format changelog

---
 changelog.d/6329.feature | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6329.feature b/changelog.d/6329.feature
index 48263cdd86..c27dbb06a4 100644
--- a/changelog.d/6329.feature
+++ b/changelog.d/6329.feature
@@ -1 +1 @@
-Filter state, events_before and events_after in /context requests.
+Filter `state`, `events_before` and `events_after` in `/context` requests.
-- 
cgit 1.4.1


From 3916e1b97a1ffc481dfdf66f7da58201a52140a9 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 21 Nov 2019 12:00:14 +0000
Subject: Clean up newline quote marks around the codebase (#6362)
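
Python concatenates adjacent string literals at compile time, so the split
and joined forms of these strings are identical; the split form is a leftover
of mechanical line-wrapping, and is what this commit removes. A minimal
illustration (not taken from the codebase itself):

```python
# Adjacent string literals are joined by the compiler, so these two
# expressions produce exactly the same string:
split = "SELECT stream_id FROM federation_stream_position" " WHERE type = ?"
joined = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
assert split == joined
```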

---
 changelog.d/6362.misc                                | 1 +
 synapse/app/federation_sender.py                     | 2 +-
 synapse/appservice/api.py                            | 2 +-
 synapse/config/appservice.py                         | 2 +-
 synapse/config/room_directory.py                     | 2 +-
 synapse/config/server.py                             | 6 +++---
 synapse/federation/persistence.py                    | 4 ++--
 synapse/federation/sender/__init__.py                | 2 +-
 synapse/federation/sender/transaction_manager.py     | 4 ++--
 synapse/handlers/directory.py                        | 2 +-
 synapse/http/servlet.py                              | 2 +-
 synapse/push/httppusher.py                           | 5 ++---
 synapse/push/mailer.py                               | 4 ++--
 synapse/rest/media/v1/preview_url_resource.py        | 2 +-
 synapse/server_notices/consent_server_notices.py     | 2 +-
 synapse/storage/_base.py                             | 2 +-
 synapse/storage/data_stores/main/deviceinbox.py      | 2 +-
 synapse/storage/data_stores/main/end_to_end_keys.py  | 6 +++---
 synapse/storage/data_stores/main/events.py           | 8 +++-----
 synapse/storage/data_stores/main/filtering.py        | 2 +-
 synapse/storage/data_stores/main/media_repository.py | 6 +++---
 synapse/storage/data_stores/main/registration.py     | 4 +---
 synapse/storage/data_stores/main/stream.py           | 2 +-
 synapse/storage/data_stores/main/tags.py             | 4 +---
 synapse/storage/prepare_database.py                  | 2 +-
 synapse/streams/config.py                            | 9 ++++++---
 26 files changed, 43 insertions(+), 46 deletions(-)
 create mode 100644 changelog.d/6362.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6362.misc b/changelog.d/6362.misc
new file mode 100644
index 0000000000..b79a5bea99
--- /dev/null
+++ b/changelog.d/6362.misc
@@ -0,0 +1 @@
+Clean up some unnecessary quotation marks around the codebase.
\ No newline at end of file
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index 139221ad34..448e45e00f 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -69,7 +69,7 @@ class FederationSenderSlaveStore(
         self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)
 
     def _get_federation_out_pos(self, db_conn):
-        sql = "SELECT stream_id FROM federation_stream_position" " WHERE type = ?"
+        sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
         sql = self.database_engine.convert_param_style(sql)
 
         txn = db_conn.cursor()
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 3e25bf5747..57174da021 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -185,7 +185,7 @@ class ApplicationServiceApi(SimpleHttpClient):
 
                 if not _is_valid_3pe_metadata(info):
                     logger.warning(
-                        "query_3pe_protocol to %s did not return a" " valid result", uri
+                        "query_3pe_protocol to %s did not return a valid result", uri
                     )
                     return None
 
diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index e77d3387ff..ca43e96bd1 100644
--- a/synapse/config/appservice.py
+++ b/synapse/config/appservice.py
@@ -134,7 +134,7 @@ def _load_appservice(hostname, as_info, config_filename):
             for regex_obj in as_info["namespaces"][ns]:
                 if not isinstance(regex_obj, dict):
                     raise ValueError(
-                        "Expected namespace entry in %s to be an object," " but got %s",
+                        "Expected namespace entry in %s to be an object, but got %s",
                         ns,
                         regex_obj,
                     )
diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py
index 7c9f05bde4..7ac7699676 100644
--- a/synapse/config/room_directory.py
+++ b/synapse/config/room_directory.py
@@ -170,7 +170,7 @@ class _RoomDirectoryRule(object):
             self.action = action
         else:
             raise ConfigError(
-                "%s rules can only have action of 'allow'" " or 'deny'" % (option_name,)
+                "%s rules can only have action of 'allow' or 'deny'" % (option_name,)
             )
 
         self._alias_matches_all = alias == "*"
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 00d01c43af..11336d7549 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -223,7 +223,7 @@ class ServerConfig(Config):
             self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
         except Exception as e:
             raise ConfigError(
-                "Invalid range(s) provided in " "federation_ip_range_blacklist: %s" % e
+                "Invalid range(s) provided in federation_ip_range_blacklist: %s" % e
             )
 
         if self.public_baseurl is not None:
@@ -787,14 +787,14 @@ class ServerConfig(Config):
             "--print-pidfile",
             action="store_true",
             default=None,
-            help="Print the path to the pidfile just" " before daemonizing",
+            help="Print the path to the pidfile just before daemonizing",
         )
         server_group.add_argument(
             "--manhole",
             metavar="PORT",
             dest="manhole",
             type=int,
-            help="Turn on the twisted telnet manhole" " service on the given port.",
+            help="Turn on the twisted telnet manhole service on the given port.",
         )
 
 
diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py
index 44edcabed4..d68b4bd670 100644
--- a/synapse/federation/persistence.py
+++ b/synapse/federation/persistence.py
@@ -44,7 +44,7 @@ class TransactionActions(object):
             response code and response body.
         """
         if not transaction.transaction_id:
-            raise RuntimeError("Cannot persist a transaction with no " "transaction_id")
+            raise RuntimeError("Cannot persist a transaction with no transaction_id")
 
         return self.store.get_received_txn_response(transaction.transaction_id, origin)
 
@@ -56,7 +56,7 @@ class TransactionActions(object):
             Deferred
         """
         if not transaction.transaction_id:
-            raise RuntimeError("Cannot persist a transaction with no " "transaction_id")
+            raise RuntimeError("Cannot persist a transaction with no transaction_id")
 
         return self.store.set_received_txn_response(
             transaction.transaction_id, origin, code, response
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 2b2ee8612a..4ebb0e8bc0 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -49,7 +49,7 @@ sent_pdus_destination_dist_count = Counter(
 
 sent_pdus_destination_dist_total = Counter(
     "synapse_federation_client_sent_pdu_destinations:total",
-    "" "Total number of PDUs queued for sending across all destinations",
+    "Total number of PDUs queued for sending across all destinations",
 )
 
 
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index 67b3e1ab6e..5fed626d5b 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -84,7 +84,7 @@ class TransactionManager(object):
             txn_id = str(self._next_txn_id)
 
             logger.debug(
-                "TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)",
+                "TX [%s] {%s} Attempting new transaction (pdus: %d, edus: %d)",
                 destination,
                 txn_id,
                 len(pdus),
@@ -103,7 +103,7 @@ class TransactionManager(object):
             self._next_txn_id += 1
 
             logger.info(
-                "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
+                "TX [%s] {%s} Sending transaction [%s], (PDUs: %d, EDUs: %d)",
                 destination,
                 txn_id,
                 transaction.transaction_id,
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 69051101a6..a07d2f1a17 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -119,7 +119,7 @@ class DirectoryHandler(BaseHandler):
             if not service.is_interested_in_alias(room_alias.to_string()):
                 raise SynapseError(
                     400,
-                    "This application service has not reserved" " this kind of alias.",
+                    "This application service has not reserved this kind of alias.",
                     errcode=Codes.EXCLUSIVE,
                 )
         else:
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index e9a5e46ced..13fcb408a6 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -96,7 +96,7 @@ def parse_boolean_from_args(args, name, default=None, required=False):
             return {b"true": True, b"false": False}[args[name][0]]
         except Exception:
             message = (
-                "Boolean query parameter %r must be one of" " ['true', 'false']"
+                "Boolean query parameter %r must be one of ['true', 'false']"
             ) % (name,)
             raise SynapseError(400, message)
     else:
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index e994037be6..d0879b0490 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -246,7 +246,7 @@ class HttpPusher(object):
                     # fixed, we don't suddenly deliver a load
                     # of old notifications.
                     logger.warning(
-                        "Giving up on a notification to user %s, " "pushkey %s",
+                        "Giving up on a notification to user %s, pushkey %s",
                         self.user_id,
                         self.pushkey,
                     )
@@ -299,8 +299,7 @@ class HttpPusher(object):
                     # for sanity, we only remove the pushkey if it
                     # was the one we actually sent...
                     logger.warning(
-                        ("Ignoring rejected pushkey %s because we" " didn't send it"),
-                        pk,
+                        ("Ignoring rejected pushkey %s because we didn't send it"), pk,
                     )
                 else:
                     logger.info("Pushkey %s was rejected: removing", pk)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 1d15a06a58..b13b646bfd 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -43,7 +43,7 @@ logger = logging.getLogger(__name__)
 
 
 MESSAGE_FROM_PERSON_IN_ROOM = (
-    "You have a message on %(app)s from %(person)s " "in the %(room)s room..."
+    "You have a message on %(app)s from %(person)s in the %(room)s room..."
 )
 MESSAGE_FROM_PERSON = "You have a message on %(app)s from %(person)s..."
 MESSAGES_FROM_PERSON = "You have messages on %(app)s from %(person)s..."
@@ -55,7 +55,7 @@ MESSAGES_FROM_PERSON_AND_OTHERS = (
     "You have messages on %(app)s from %(person)s and others..."
 )
 INVITE_FROM_PERSON_TO_ROOM = (
-    "%(person)s has invited you to join the " "%(room)s room on %(app)s..."
+    "%(person)s has invited you to join the %(room)s room on %(app)s..."
 )
 INVITE_FROM_PERSON = "%(person)s has invited you to chat on %(app)s..."
 
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 15c15a12f5..a23d6f5c75 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -122,7 +122,7 @@ class PreviewUrlResource(DirectServeResource):
                 pattern = entry[attrib]
                 value = getattr(url_tuple, attrib)
                 logger.debug(
-                    "Matching attrib '%s' with value '%s' against" " pattern '%s'",
+                    "Matching attrib '%s' with value '%s' against pattern '%s'",
                     attrib,
                     value,
                     pattern,
diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py
index 415e9c17d8..5736c56032 100644
--- a/synapse/server_notices/consent_server_notices.py
+++ b/synapse/server_notices/consent_server_notices.py
@@ -54,7 +54,7 @@ class ConsentServerNotices(object):
                 )
             if "body" not in self._server_notice_content:
                 raise ConfigError(
-                    "user_consent server_notice_consent must contain a 'body' " "key."
+                    "user_consent server_notice_consent must contain a 'body' key."
                 )
 
             self._consent_uri_builder = ConsentURIBuilder(hs.config)
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index ab596fa68d..6b8a9cd89a 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -851,7 +851,7 @@ class SQLBaseStore(object):
             allvalues.update(values)
             latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)
 
-        sql = ("INSERT INTO %s (%s) VALUES (%s) " "ON CONFLICT (%s) DO %s") % (
+        sql = ("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s") % (
             table,
             ", ".join(k for k in allvalues),
             ", ".join("?" for _ in allvalues),
diff --git a/synapse/storage/data_stores/main/deviceinbox.py b/synapse/storage/data_stores/main/deviceinbox.py
index 96cd0fb77a..a23744f11c 100644
--- a/synapse/storage/data_stores/main/deviceinbox.py
+++ b/synapse/storage/data_stores/main/deviceinbox.py
@@ -380,7 +380,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
             devices = list(messages_by_device.keys())
             if len(devices) == 1 and devices[0] == "*":
                 # Handle wildcard device_ids.
-                sql = "SELECT device_id FROM devices" " WHERE user_id = ?"
+                sql = "SELECT device_id FROM devices WHERE user_id = ?"
                 txn.execute(sql, (user_id,))
                 message_json = json.dumps(messages_by_device["*"])
                 for row in txn:
diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/data_stores/main/end_to_end_keys.py
index 073412a78d..d8ad59ad93 100644
--- a/synapse/storage/data_stores/main/end_to_end_keys.py
+++ b/synapse/storage/data_stores/main/end_to_end_keys.py
@@ -138,9 +138,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
                 result.setdefault(user_id, {})[device_id] = None
 
         # get signatures on the device
-        signature_sql = (
-            "SELECT * " "  FROM e2e_cross_signing_signatures " " WHERE %s"
-        ) % (" OR ".join("(" + q + ")" for q in signature_query_clauses))
+        signature_sql = ("SELECT *  FROM e2e_cross_signing_signatures WHERE %s") % (
+            " OR ".join("(" + q + ")" for q in signature_query_clauses)
+        )
 
         txn.execute(signature_sql, signature_query_params)
         rows = self.cursor_to_dict(txn)
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 878f7568a6..627c0b67f1 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -713,9 +713,7 @@ class EventsStore(
 
                 metadata_json = encode_json(event.internal_metadata.get_dict())
 
-                sql = (
-                    "UPDATE event_json SET internal_metadata = ?" " WHERE event_id = ?"
-                )
+                sql = "UPDATE event_json SET internal_metadata = ? WHERE event_id = ?"
                 txn.execute(sql, (metadata_json, event.event_id))
 
                 # Add an entry to the ex_outlier_stream table to replicate the
@@ -732,7 +730,7 @@ class EventsStore(
                     },
                 )
 
-                sql = "UPDATE events SET outlier = ?" " WHERE event_id = ?"
+                sql = "UPDATE events SET outlier = ? WHERE event_id = ?"
                 txn.execute(sql, (False, event.event_id))
 
                 # Update the event_backward_extremities table now that this
@@ -1479,7 +1477,7 @@ class EventsStore(
 
         # We do joins against events_to_purge for e.g. calculating state
         # groups to purge, etc., so lets make an index.
-        txn.execute("CREATE INDEX events_to_purge_id" " ON events_to_purge(event_id)")
+        txn.execute("CREATE INDEX events_to_purge_id ON events_to_purge(event_id)")
 
         txn.execute("SELECT event_id, should_delete FROM events_to_purge")
         event_rows = txn.fetchall()
diff --git a/synapse/storage/data_stores/main/filtering.py b/synapse/storage/data_stores/main/filtering.py
index a2a2a67927..f05ace299a 100644
--- a/synapse/storage/data_stores/main/filtering.py
+++ b/synapse/storage/data_stores/main/filtering.py
@@ -55,7 +55,7 @@ class FilteringStore(SQLBaseStore):
             if filter_id_response is not None:
                 return filter_id_response[0]
 
-            sql = "SELECT MAX(filter_id) FROM user_filters " "WHERE user_id = ?"
+            sql = "SELECT MAX(filter_id) FROM user_filters WHERE user_id = ?"
             txn.execute(sql, (user_localpart,))
             max_id = txn.fetchone()[0]
             if max_id is None:
diff --git a/synapse/storage/data_stores/main/media_repository.py b/synapse/storage/data_stores/main/media_repository.py
index 84b5f3ad5e..0f2887bdce 100644
--- a/synapse/storage/data_stores/main/media_repository.py
+++ b/synapse/storage/data_stores/main/media_repository.py
@@ -337,7 +337,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         if len(media_ids) == 0:
             return
 
-        sql = "DELETE FROM local_media_repository_url_cache" " WHERE media_id = ?"
+        sql = "DELETE FROM local_media_repository_url_cache WHERE media_id = ?"
 
         def _delete_url_cache_txn(txn):
             txn.executemany(sql, [(media_id,) for media_id in media_ids])
@@ -365,11 +365,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             return
 
         def _delete_url_cache_media_txn(txn):
-            sql = "DELETE FROM local_media_repository" " WHERE media_id = ?"
+            sql = "DELETE FROM local_media_repository WHERE media_id = ?"
 
             txn.executemany(sql, [(media_id,) for media_id in media_ids])
 
-            sql = "DELETE FROM local_media_repository_thumbnails" " WHERE media_id = ?"
+            sql = "DELETE FROM local_media_repository_thumbnails WHERE media_id = ?"
 
             txn.executemany(sql, [(media_id,) for media_id in media_ids])
 
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index ee1b2b2bbf..6a594c160c 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -377,9 +377,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         """
 
         def f(txn):
-            sql = (
-                "SELECT name, password_hash FROM users" " WHERE lower(name) = lower(?)"
-            )
+            sql = "SELECT name, password_hash FROM users WHERE lower(name) = lower(?)"
             txn.execute(sql, (user_id,))
             return dict(txn)
 
diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py
index 8780fdd989..9ae4a913a1 100644
--- a/synapse/storage/data_stores/main/stream.py
+++ b/synapse/storage/data_stores/main/stream.py
@@ -616,7 +616,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
     def _get_max_topological_txn(self, txn, room_id):
         txn.execute(
-            "SELECT MAX(topological_ordering) FROM events" " WHERE room_id = ?",
+            "SELECT MAX(topological_ordering) FROM events WHERE room_id = ?",
             (room_id,),
         )
 
diff --git a/synapse/storage/data_stores/main/tags.py b/synapse/storage/data_stores/main/tags.py
index 10d1887f75..aa24339717 100644
--- a/synapse/storage/data_stores/main/tags.py
+++ b/synapse/storage/data_stores/main/tags.py
@@ -83,9 +83,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
         )
 
         def get_tag_content(txn, tag_ids):
-            sql = (
-                "SELECT tag, content" " FROM room_tags" " WHERE user_id=? AND room_id=?"
-            )
+            sql = "SELECT tag, content FROM room_tags WHERE user_id=? AND room_id=?"
             results = []
             for stream_id, user_id, room_id in tag_ids:
                 txn.execute(sql, (user_id, room_id))
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 2e7753820e..731e1c9d9c 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -447,7 +447,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams)
         # Mark as done.
         cur.execute(
             database_engine.convert_param_style(
-                "INSERT INTO applied_module_schemas (module_name, file)" " VALUES (?,?)"
+                "INSERT INTO applied_module_schemas (module_name, file) VALUES (?,?)"
             ),
             (modname, name),
         )
diff --git a/synapse/streams/config.py b/synapse/streams/config.py
index 02994ab2a5..cd56cd91ed 100644
--- a/synapse/streams/config.py
+++ b/synapse/streams/config.py
@@ -88,9 +88,12 @@ class PaginationConfig(object):
             raise SynapseError(400, "Invalid request.")
 
     def __repr__(self):
-        return (
-            "PaginationConfig(from_tok=%r, to_tok=%r," " direction=%r, limit=%r)"
-        ) % (self.from_token, self.to_token, self.direction, self.limit)
+        return ("PaginationConfig(from_tok=%r, to_tok=%r, direction=%r, limit=%r)") % (
+            self.from_token,
+            self.to_token,
+            self.direction,
+            self.limit,
+        )
 
     def get_source_config(self, source_name):
         keyname = "%s_key" % source_name
-- 
cgit 1.4.1


From 24cc31ee967e5c387a137e22b428dcea17fc9fa5 Mon Sep 17 00:00:00 2001
From: Aaron Raimist 
Date: Thu, 21 Nov 2019 11:38:14 -0600
Subject: Fix link to user_dir_populate.sql in the user directory docs (#6388)

---
 changelog.d/6388.doc   | 1 +
 docs/user_directory.md | 3 +--
 2 files changed, 2 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6388.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6388.doc b/changelog.d/6388.doc
new file mode 100644
index 0000000000..c777cb6b8f
--- /dev/null
+++ b/changelog.d/6388.doc
@@ -0,0 +1 @@
+Fix link in the user directory documentation.
diff --git a/docs/user_directory.md b/docs/user_directory.md
index e64aa453cc..37dc71e751 100644
--- a/docs/user_directory.md
+++ b/docs/user_directory.md
@@ -7,7 +7,6 @@ who are present in a publicly viewable room present on the server.
 
 The directory info is stored in various tables, which can (typically after
 DB corruption) get stale or out of sync.  If this happens, for now the
-solution to fix it is to execute the SQL here
-https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/delta/53/user_dir_populate.sql
+solution to fix it is to execute the SQL [here](../synapse/storage/data_stores/main/schema/delta/53/user_dir_populate.sql)
 and then restart synapse. This should then start a background task to
 flush the current tables and regenerate the directory.
-- 
cgit 1.4.1


From 265c0bd2fe54db7f8a7dab05f41b27ce9a450563 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 22 Nov 2019 19:54:05 +0000
Subject: Add working build command for docker image (#6390)

* Add working build command for docker image

* Add changelog
---
 changelog.d/6390.doc |  1 +
 docker/README.md     | 12 ++++++++++++
 2 files changed, 13 insertions(+)
 create mode 100644 changelog.d/6390.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6390.doc b/changelog.d/6390.doc
new file mode 100644
index 0000000000..093411bec1
--- /dev/null
+++ b/changelog.d/6390.doc
@@ -0,0 +1 @@
+Add build instructions to the docker readme.
\ No newline at end of file
diff --git a/docker/README.md b/docker/README.md
index 24dfa77dcc..9f112a01d0 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -130,3 +130,15 @@ docker run -it --rm \
 This will generate the same configuration file as the legacy mode used, but
 will store it in `/data/homeserver.yaml` instead of a temporary location. You
 can then use it as shown above at [Running synapse](#running-synapse).
+
+## Building the image
+
+If you need to build the image from a Synapse checkout, use the following
+`docker build` command from the repo's root:
+
+```
+docker build -t matrixdotorg/synapse -f docker/Dockerfile .
+```
+
+You can choose to build a different docker image by changing the value of the `-f` flag to
+point to another Dockerfile.
-- 
cgit 1.4.1


From b7367c339db153824fb47728d3eebe2f944530e6 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 25 Nov 2019 13:26:59 +0000
Subject: Fix exceptions from background database update for event labels.
 (#6407)

Add some exception handling here so that events whose json cannot be parsed are
ignored rather than getting us stuck in a loop.

Fixes #6404.
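
A minimal sketch of the skip-and-log pattern this change introduces
(simplified row handling; not the actual Synapse code):

```python
import json
import logging

logger = logging.getLogger(__name__)

def parse_rows(rows):
    """Yield parsed events, skipping rows whose JSON cannot be decoded.

    A malformed row is logged and skipped rather than raising, so the
    background update makes progress instead of retrying the same batch
    forever.
    """
    for event_id, event_json_raw in rows:
        try:
            event_json = json.loads(event_json_raw)
        except Exception as e:
            logger.warning("Unable to load event %s: %s", event_id, e)
            continue
        yield event_id, event_json
```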
---
 changelog.d/6407.bugfix                            |  1 +
 .../storage/data_stores/main/events_bg_updates.py  | 43 +++++++++++++---------
 2 files changed, 26 insertions(+), 18 deletions(-)
 create mode 100644 changelog.d/6407.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6407.bugfix b/changelog.d/6407.bugfix
new file mode 100644
index 0000000000..0fdbf2a781
--- /dev/null
+++ b/changelog.d/6407.bugfix
@@ -0,0 +1 @@
+Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions.
diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py
index 0ed59ef48e..aa87f9abc5 100644
--- a/synapse/storage/data_stores/main/events_bg_updates.py
+++ b/synapse/storage/data_stores/main/events_bg_updates.py
@@ -530,24 +530,31 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
             nbrows = 0
             last_row_event_id = ""
             for (event_id, event_json_raw) in results:
-                event_json = json.loads(event_json_raw)
-
-                self._simple_insert_many_txn(
-                    txn=txn,
-                    table="event_labels",
-                    values=[
-                        {
-                            "event_id": event_id,
-                            "label": label,
-                            "room_id": event_json["room_id"],
-                            "topological_ordering": event_json["depth"],
-                        }
-                        for label in event_json["content"].get(
-                            EventContentFields.LABELS, []
-                        )
-                        if isinstance(label, str)
-                    ],
-                )
+                try:
+                    event_json = json.loads(event_json_raw)
+
+                    self._simple_insert_many_txn(
+                        txn=txn,
+                        table="event_labels",
+                        values=[
+                            {
+                                "event_id": event_id,
+                                "label": label,
+                                "room_id": event_json["room_id"],
+                                "topological_ordering": event_json["depth"],
+                            }
+                            for label in event_json["content"].get(
+                                EventContentFields.LABELS, []
+                            )
+                            if isinstance(label, str)
+                        ],
+                    )
+                except Exception as e:
+                    logger.warning(
+                        "Unable to load event %s (no labels will be imported): %s",
+                        event_id,
+                        e,
+                    )
 
                 nbrows += 1
                 last_row_event_id = event_id
-- 
cgit 1.4.1


From f9c9e1f07646262cb064782d4bf427dd1634617f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Mon, 25 Nov 2019 13:28:12 +0000
Subject: 1.6.0rc2

---
 CHANGES.md              | 9 +++++++++
 changelog.d/6407.bugfix | 1 -
 synapse/__init__.py     | 2 +-
 3 files changed, 10 insertions(+), 2 deletions(-)
 delete mode 100644 changelog.d/6407.bugfix

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index f4f61db5d4..d26bc7a86f 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,12 @@
+Synapse 1.6.0rc2 (2019-11-25)
+=============================
+
+Bugfixes
+--------
+
+- Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions. ([\#6407](https://github.com/matrix-org/synapse/issues/6407))
+
+
 Synapse 1.6.0rc1 (2019-11-20)
 =============================
 
diff --git a/changelog.d/6407.bugfix b/changelog.d/6407.bugfix
deleted file mode 100644
index 0fdbf2a781..0000000000
--- a/changelog.d/6407.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 1d962f5dc8..051c83774e 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.6.0rc1"
+__version__ = "1.6.0rc2"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 9eebd46048d0b34767047b2156760a1467f19ae6 Mon Sep 17 00:00:00 2001
From: Amber Brown 
Date: Tue, 26 Nov 2019 03:45:50 +1100
Subject: Improve the performance of structured logging (#6322)
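
The previous observer dumped its whole buffer on each reconnect attempt and
polled on a one-second timer; this change moves the writing into an
IPushProducer so that Twisted's flow control drives it instead. A toy
producer under the same interface (illustrative only, not the class added
below):

```python
from collections import deque

from twisted.internet.interfaces import IPushProducer
from zope.interface import implementer


@implementer(IPushProducer)
class BufferDrainer(object):
    """Drains a deque of bytes lines into a transport, pausing on demand."""

    def __init__(self, buffer: deque, transport):
        self._buffer = buffer
        self._transport = transport
        self._paused = False

    def pauseProducing(self):
        # Called by the reactor when the consumer falls behind.
        self._paused = True

    def stopProducing(self):
        self._paused = True
        self._buffer = None

    def resumeProducing(self):
        self._paused = False
        while not self._paused and self._buffer:
            self._transport.write(self._buffer.popleft() + b"\n")
```

Registering such a producer with `transport.registerProducer(producer, True)`
lets the reactor call `pauseProducing`/`resumeProducing` as the TCP send
buffer fills and drains, rather than the observer writing blindly.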

---
 changelog.d/6322.misc          |   1 +
 synapse/logging/_structured.py |  14 +++++-
 synapse/logging/_terse_json.py | 106 ++++++++++++++++++++++++++++++-----------
 tests/server.py                |   2 +
 4 files changed, 93 insertions(+), 30 deletions(-)
 create mode 100644 changelog.d/6322.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6322.misc b/changelog.d/6322.misc
new file mode 100644
index 0000000000..70ef36ca80
--- /dev/null
+++ b/changelog.d/6322.misc
@@ -0,0 +1 @@
+Improve the performance of outputting structured logging.
diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py
index 334ddaf39a..ffa7b20ca8 100644
--- a/synapse/logging/_structured.py
+++ b/synapse/logging/_structured.py
@@ -261,6 +261,18 @@ def parse_drain_configs(
             )
 
 
+class StoppableLogPublisher(LogPublisher):
+    """
+    A log publisher that can tell its observers to shut down any external
+    communications.
+    """
+
+    def stop(self):
+        for obs in self._observers:
+            if hasattr(obs, "stop"):
+                obs.stop()
+
+
 def setup_structured_logging(
     hs,
     config,
@@ -336,7 +348,7 @@ def setup_structured_logging(
             # We should never get here, but, just in case, throw an error.
             raise ConfigError("%s drain type cannot be configured" % (observer.type,))
 
-    publisher = LogPublisher(*observers)
+    publisher = StoppableLogPublisher(*observers)
     log_filter = LogLevelFilterPredicate()
 
     for namespace, namespace_config in log_config.get(
diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py
index 76ce7d8808..05fc64f409 100644
--- a/synapse/logging/_terse_json.py
+++ b/synapse/logging/_terse_json.py
@@ -17,25 +17,29 @@
 Log formatters that output terse JSON.
 """
 
+import json
 import sys
+import traceback
 from collections import deque
 from ipaddress import IPv4Address, IPv6Address, ip_address
 from math import floor
-from typing import IO
+from typing import IO, Optional
 
 import attr
-from simplejson import dumps
 from zope.interface import implementer
 
 from twisted.application.internet import ClientService
+from twisted.internet.defer import Deferred
 from twisted.internet.endpoints import (
     HostnameEndpoint,
     TCP4ClientEndpoint,
     TCP6ClientEndpoint,
 )
+from twisted.internet.interfaces import IPushProducer, ITransport
 from twisted.internet.protocol import Factory, Protocol
 from twisted.logger import FileLogObserver, ILogObserver, Logger
-from twisted.python.failure import Failure
+
+_encoder = json.JSONEncoder(ensure_ascii=False, separators=(",", ":"))
 
 
 def flatten_event(event: dict, metadata: dict, include_time: bool = False):
@@ -141,11 +145,49 @@ def TerseJSONToConsoleLogObserver(outFile: IO[str], metadata: dict) -> FileLogOb
 
     def formatEvent(_event: dict) -> str:
         flattened = flatten_event(_event, metadata)
-        return dumps(flattened, ensure_ascii=False, separators=(",", ":")) + "\n"
+        return _encoder.encode(flattened) + "\n"
 
     return FileLogObserver(outFile, formatEvent)
 
 
+@attr.s
+@implementer(IPushProducer)
+class LogProducer(object):
+    """
+    An IPushProducer that writes logs from its buffer to its transport when it
+    is resumed.
+
+    Args:
+        buffer: Log buffer to read logs from.
+        transport: Transport to write to.
+    """
+
+    transport = attr.ib(type=ITransport)
+    _buffer = attr.ib(type=deque)
+    _paused = attr.ib(default=False, type=bool, init=False)
+
+    def pauseProducing(self):
+        self._paused = True
+
+    def stopProducing(self):
+        self._paused = True
+        self._buffer = None
+
+    def resumeProducing(self):
+        self._paused = False
+
+        while self._paused is False and (self._buffer and self.transport.connected):
+            try:
+                event = self._buffer.popleft()
+                self.transport.write(_encoder.encode(event).encode("utf8"))
+                self.transport.write(b"\n")
+            except Exception:
+                # Something has gone wrong writing to the transport -- log it
+                # and break out of the while.
+                traceback.print_exc(file=sys.__stderr__)
+                break
+
+
 @attr.s
 @implementer(ILogObserver)
 class TerseJSONToTCPLogObserver(object):
@@ -165,8 +207,9 @@ class TerseJSONToTCPLogObserver(object):
     metadata = attr.ib(type=dict)
     maximum_buffer = attr.ib(type=int)
     _buffer = attr.ib(default=attr.Factory(deque), type=deque)
-    _writer = attr.ib(default=None)
+    _connection_waiter = attr.ib(default=None, type=Optional[Deferred])
     _logger = attr.ib(default=attr.Factory(Logger))
+    _producer = attr.ib(default=None, type=Optional[LogProducer])
 
     def start(self) -> None:
 
@@ -187,38 +230,43 @@ class TerseJSONToTCPLogObserver(object):
         factory = Factory.forProtocol(Protocol)
         self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor())
         self._service.startService()
+        self._connect()
 
-    def _write_loop(self) -> None:
+    def stop(self):
+        self._service.stopService()
+
+    def _connect(self) -> None:
         """
-        Implement the write loop.
+        Triggers an attempt to connect then write to the remote if not already writing.
         """
-        if self._writer:
+        if self._connection_waiter:
             return
 
-        self._writer = self._service.whenConnected()
+        self._connection_waiter = self._service.whenConnected(failAfterFailures=1)
+
+        @self._connection_waiter.addErrback
+        def fail(r):
+            r.printTraceback(file=sys.__stderr__)
+            self._connection_waiter = None
+            self._connect()
 
-        @self._writer.addBoth
+        @self._connection_waiter.addCallback
         def writer(r):
-            if isinstance(r, Failure):
-                r.printTraceback(file=sys.__stderr__)
-                self._writer = None
-                self.hs.get_reactor().callLater(1, self._write_loop)
+            # We have a connection. If we already have a producer, and its
+            # transport is the same, just trigger a resumeProducing.
+            if self._producer and r.transport is self._producer.transport:
+                self._producer.resumeProducing()
                 return
 
-            try:
-                for event in self._buffer:
-                    r.transport.write(
-                        dumps(event, ensure_ascii=False, separators=(",", ":")).encode(
-                            "utf8"
-                        )
-                    )
-                    r.transport.write(b"\n")
-                self._buffer.clear()
-            except Exception as e:
-                sys.__stderr__.write("Failed writing out logs with %s\n" % (str(e),))
-
-            self._writer = False
-            self.hs.get_reactor().callLater(1, self._write_loop)
+            # If the producer is still producing, stop it.
+            if self._producer:
+                self._producer.stopProducing()
+
+            # Make a new producer and start it.
+            self._producer = LogProducer(buffer=self._buffer, transport=r.transport)
+            r.transport.registerProducer(self._producer, True)
+            self._producer.resumeProducing()
+            self._connection_waiter = None
 
     def _handle_pressure(self) -> None:
         """
@@ -277,4 +325,4 @@ class TerseJSONToTCPLogObserver(object):
             self._logger.failure("Failed clearing backpressure")
 
         # Try and write immediately.
-        self._write_loop()
+        self._connect()
diff --git a/tests/server.py b/tests/server.py
index f878aeaada..2b7cf4242e 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -379,6 +379,7 @@ class FakeTransport(object):
 
     disconnecting = False
     disconnected = False
+    connected = True
     buffer = attr.ib(default=b"")
     producer = attr.ib(default=None)
     autoflush = attr.ib(default=True)
@@ -402,6 +403,7 @@ class FakeTransport(object):
                     "FakeTransport: Delaying disconnect until buffer is flushed"
                 )
             else:
+                self.connected = False
                 self.disconnected = True
 
     def abortConnection(self):
-- 
cgit 1.4.1


From c01d5435843ad4af3d520851e86d9938b47b2d12 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 25 Nov 2019 21:03:17 +0000
Subject: Make sure that we close cursors before returning from a query (#6408)

There are lots of words in the comment as to why this is a good idea.

Fixes #6403.
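
A minimal sketch of the resulting shape (retry handling and callbacks
omitted; not the actual Synapse code):

```python
def run_one_txn(conn, func, *args, **kwargs):
    """Run `func` in a transaction, always closing the cursor afterwards.

    Closing in `finally` guarantees nothing keeps streaming rows from the
    cursor once the connection is committed, rolled back, or reused,
    which sqlite would otherwise invalidate out from under us.
    """
    cursor = conn.cursor()
    try:
        result = func(cursor, *args, **kwargs)
        conn.commit()
        return result
    finally:
        cursor.close()
```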
---
 changelog.d/6408.bugfix                      |  1 +
 synapse/storage/_base.py                     | 51 +++++++++++++++++++++++-----
 synapse/storage/data_stores/main/receipts.py |  2 +-
 3 files changed, 44 insertions(+), 10 deletions(-)
 create mode 100644 changelog.d/6408.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6408.bugfix b/changelog.d/6408.bugfix
new file mode 100644
index 0000000000..c9babe599b
--- /dev/null
+++ b/changelog.d/6408.bugfix
@@ -0,0 +1 @@
+Fix an intermittent exception when handling read-receipts.
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 6b8a9cd89a..459901ac60 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -409,16 +409,15 @@ class SQLBaseStore(object):
             i = 0
             N = 5
             while True:
+                cursor = LoggingTransaction(
+                    conn.cursor(),
+                    name,
+                    self.database_engine,
+                    after_callbacks,
+                    exception_callbacks,
+                )
                 try:
-                    txn = conn.cursor()
-                    txn = LoggingTransaction(
-                        txn,
-                        name,
-                        self.database_engine,
-                        after_callbacks,
-                        exception_callbacks,
-                    )
-                    r = func(txn, *args, **kwargs)
+                    r = func(cursor, *args, **kwargs)
                     conn.commit()
                     return r
                 except self.database_engine.module.OperationalError as e:
@@ -456,6 +455,40 @@ class SQLBaseStore(object):
                                 )
                             continue
                     raise
+                finally:
+                    # we're either about to retry with a new cursor, or we're about to
+                    # release the connection. Once we release the connection, it could
+                    # get used for another query, which might do a conn.rollback().
+                    #
+                    # In the latter case, even though that probably wouldn't affect the
+                    # results of this transaction, python's sqlite will reset all
+                    # statements on the connection [1], which will make our cursor
+                    # invalid [2].
+                    #
+                    # In any case, continuing to read rows after commit()ing seems
+                    # dubious from the PoV of ACID transactional semantics
+                    # (sqlite explicitly says that once you commit, you may see rows
+                    # from subsequent updates.)
+                    #
+                    # In psycopg2, cursors are essentially a client-side fabrication -
+                    # all the data is transferred to the client side when the statement
+                    # finishes executing - so in theory we could go on streaming results
+                    # from the cursor, but attempting to do so would make us
+                    # incompatible with sqlite, so let's make sure we're not doing that
+                    # by closing the cursor.
+                    #
+                    # (*named* cursors in psycopg2 are different and are proper server-
+                    # side things, but (a) we don't use them and (b) they are implicitly
+                    # closed by ending the transaction anyway.)
+                    #
+                    # In short, if we haven't finished with the cursor yet, that's a
+                    # problem waiting to bite us.
+                    #
+                    # TL;DR: we're done with the cursor, so we can close it.
+                    #
+                    # [1]: https://github.com/python/cpython/blob/v3.8.0/Modules/_sqlite/connection.c#L465
+                    # [2]: https://github.com/python/cpython/blob/v3.8.0/Modules/_sqlite/cursor.c#L236
+                    cursor.close()
         except Exception as e:
             logger.debug("[TXN FAIL] {%s} %s", name, e)
             raise
diff --git a/synapse/storage/data_stores/main/receipts.py b/synapse/storage/data_stores/main/receipts.py
index 0c24430f28..8b17334ff4 100644
--- a/synapse/storage/data_stores/main/receipts.py
+++ b/synapse/storage/data_stores/main/receipts.py
@@ -280,7 +280,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
                 args.append(limit)
             txn.execute(sql, args)
 
-            return (r[0:5] + (json.loads(r[5]),) for r in txn)
+            return list(r[0:5] + (json.loads(r[5]),) for r in txn)
 
         return self.runInteraction(
             "get_all_updated_receipts", get_all_updated_receipts_txn
-- 
cgit 1.4.1


From 35f9165e96f6261e15aadb439a5d2199bede3c99 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 26 Nov 2019 12:04:48 +0000
Subject: Fixup docs

---
 changelog.d/6332.bugfix             | 2 +-
 synapse/replication/http/devices.py | 6 +++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6332.bugfix b/changelog.d/6332.bugfix
index b14bd7e43c..67d5170ba0 100644
--- a/changelog.d/6332.bugfix
+++ b/changelog.d/6332.bugfix
@@ -1 +1 @@
-Fix caching devices for remote users when using workers.
+Fix caching devices for remote users when using workers, so that we don't attempt to refetch (and potentially fail) each time a user requests devices.
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py
index 795ca7b65e..e32aac0a25 100644
--- a/synapse/replication/http/devices.py
+++ b/synapse/replication/http/devices.py
@@ -21,7 +21,11 @@ logger = logging.getLogger(__name__)
 
 
 class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
-    """Notifies that a user has joined or left the room
+    """Ask master to resync the device list for a user by contacting their
+    server.
+
+    This must happen on master so that the results can be correctly cached in
+    the database and streamed to workers.
 
     Request format:
 
-- 
cgit 1.4.1


From 65d54c5e8c1434eada5d1b670d7db2e37d68d4f1 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 26 Nov 2019 13:10:09 +0000
Subject: Fix phone home stats (#6418)

Fix phone home stats
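
The bug was a misplaced closing parenthesis: `int()` was being called with
the rusage struct as its second argument (the numeric base), which raises a
TypeError at runtime. A standalone illustration (not the Synapse code):

```python
import resource
import time

# Broken: int() receives two arguments, the second of which is not a base.
#   int(time.time(), resource.getrusage(resource.RUSAGE_SELF))  # TypeError
# Fixed: a (timestamp, rusage) tuple, as intended.
stats = (int(time.time()), resource.getrusage(resource.RUSAGE_SELF))
```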
---
 changelog.d/6418.bugfix   | 1 +
 synapse/app/homeserver.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6418.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6418.bugfix b/changelog.d/6418.bugfix
new file mode 100644
index 0000000000..a1f488d3a2
--- /dev/null
+++ b/changelog.d/6418.bugfix
@@ -0,0 +1 @@
+Fix phone home stats reporting.
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 73e2c29d06..883b3fb70b 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -585,7 +585,7 @@ def run(hs):
     def performance_stats_init():
         _stats_process.clear()
         _stats_process.append(
-            (int(hs.get_clock().time(), resource.getrusage(resource.RUSAGE_SELF)))
+            (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF))
         )
 
     def start_phone_stats_home():
-- 
cgit 1.4.1


From b98971e8a437eb3903506eadbefdf6cb2e0853d6 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Tue, 26 Nov 2019 12:15:46 +0000
Subject: 1.6.0

---
 CHANGES.md              | 9 +++++++++
 changelog.d/6418.bugfix | 1 -
 debian/changelog        | 6 ++++++
 synapse/__init__.py     | 2 +-
 4 files changed, 16 insertions(+), 2 deletions(-)
 delete mode 100644 changelog.d/6418.bugfix

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index d26bc7a86f..42281483b3 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,12 @@
+Synapse 1.6.0 (2019-11-26)
+==========================
+
+Bugfixes
+--------
+
+- Fix phone home stats reporting. ([\#6418](https://github.com/matrix-org/synapse/issues/6418))
+
+
 Synapse 1.6.0rc2 (2019-11-25)
 =============================
 
diff --git a/changelog.d/6418.bugfix b/changelog.d/6418.bugfix
deleted file mode 100644
index a1f488d3a2..0000000000
--- a/changelog.d/6418.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix phone home stats reporting.
diff --git a/debian/changelog b/debian/changelog
index c4415f460a..82dae017f1 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.6.0) stable; urgency=medium
+
+  * New synapse release 1.6.0.
+
+ -- Synapse Packaging team   Tue, 26 Nov 2019 12:15:40 +0000
+
 matrix-synapse-py3 (1.5.1) stable; urgency=medium
 
   * New synapse release 1.5.1.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 051c83774e..53eedc0048 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.6.0rc2"
+__version__ = "1.6.0"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From ba110a2030274dc785b37b1d5ad20acc4de9c612 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 26 Nov 2019 15:54:10 +0000
Subject: Newsfile

---
 changelog.d/6420.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6420.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6420.bugfix b/changelog.d/6420.bugfix
new file mode 100644
index 0000000000..aef47cccaa
--- /dev/null
+++ b/changelog.d/6420.bugfix
@@ -0,0 +1 @@
+Fix broken guest registration when there are existing blocks of numeric user IDs.
-- 
cgit 1.4.1


From ef1a85e7733bc1979f48357dd59b638110285075 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 26 Nov 2019 18:10:50 +0000
Subject: Fix startup error when http proxy is defined. (#6421)

Guess I only tested this on python 2 :/

Fixes #6419.
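
On Python 3, `os.getenv()` returns `str` (or None), while the code consuming
these proxy settings expects bytes values; `os.getenvb()` looks the key up in
`os.environb` and returns bytes instead. A standalone illustration (values
are hypothetical):

```python
import os

os.environb[b"http_proxy"] = b"http://proxy.example.com:3128"

http_proxy = os.getenvb(b"http_proxy")    # b"http://proxy.example.com:3128"
https_proxy = os.getenvb(b"HTTPS_PROXY")  # None if unset
```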
---
 changelog.d/6421.bugfix                       | 1 +
 synapse/rest/media/v1/preview_url_resource.py | 4 ++--
 synapse/server.py                             | 4 ++--
 3 files changed, 5 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6421.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6421.bugfix b/changelog.d/6421.bugfix
new file mode 100644
index 0000000000..7969f7f71d
--- /dev/null
+++ b/changelog.d/6421.bugfix
@@ -0,0 +1 @@
+Fix startup error when http proxy is defined.
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index a23d6f5c75..fb0d02aa83 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -77,8 +77,8 @@ class PreviewUrlResource(DirectServeResource):
             treq_args={"browser_like_redirects": True},
             ip_whitelist=hs.config.url_preview_ip_range_whitelist,
             ip_blacklist=hs.config.url_preview_ip_range_blacklist,
-            http_proxy=os.getenv("http_proxy"),
-            https_proxy=os.getenv("HTTPS_PROXY"),
+            http_proxy=os.getenvb(b"http_proxy"),
+            https_proxy=os.getenvb(b"HTTPS_PROXY"),
         )
         self.media_repo = media_repo
         self.primary_base_path = media_repo.primary_base_path
diff --git a/synapse/server.py b/synapse/server.py
index 90c3b072e8..be9af7f986 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -318,8 +318,8 @@ class HomeServer(object):
     def build_proxied_http_client(self):
         return SimpleHttpClient(
             self,
-            http_proxy=os.getenv("http_proxy"),
-            https_proxy=os.getenv("HTTPS_PROXY"),
+            http_proxy=os.getenvb(b"http_proxy"),
+            https_proxy=os.getenvb(b"HTTPS_PROXY"),
         )
 
     def build_room_creation_handler(self):
-- 
cgit 1.4.1


From ce578031f4d0fe6f1eb26de4cb3d30a4175468db Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 26 Nov 2019 18:42:27 +0000
Subject: Remove assertion and provide a clear warning on startup for missing
 public_baseurl (#6379)

---
 changelog.d/6379.misc                       | 1 +
 synapse/config/emailconfig.py               | 2 ++
 synapse/config/registration.py              | 7 +++++++
 tests/rest/client/v2_alpha/test_register.py | 1 +
 4 files changed, 11 insertions(+)
 create mode 100644 changelog.d/6379.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6379.misc b/changelog.d/6379.misc
new file mode 100644
index 0000000000..725c2e7d87
--- /dev/null
+++ b/changelog.d/6379.misc
@@ -0,0 +1 @@
+Complain on startup instead of 500'ing at runtime when `public_baseurl` is required but not set.
\ No newline at end of file
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 43fad0bf8b..ac1724045f 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -146,6 +146,8 @@ class EmailConfig(Config):
                 if k not in email_config:
                     missing.append("email." + k)
 
+            # public_baseurl is required to build password reset and validation links that
+            # will be emailed to users
             if config.get("public_baseurl") is None:
                 missing.append("public_baseurl")
 
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 1f6dac69da..ee9614c5f7 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -106,6 +106,13 @@ class RegistrationConfig(Config):
         account_threepid_delegates = config.get("account_threepid_delegates") or {}
         self.account_threepid_delegate_email = account_threepid_delegates.get("email")
         self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
+        if self.account_threepid_delegate_msisdn and not self.public_baseurl:
+            raise ConfigError(
+                "The configuration option `public_baseurl` is required if "
+                "`account_threepid_delegate.msisdn` is set, such that "
+                "clients know where to submit validation tokens to. Please "
+                "configure `public_baseurl`."
+            )
 
         self.default_identity_server = config.get("default_identity_server")
         self.allow_guest_access = config.get("allow_guest_access", False)
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index dab87e5edf..c0d0d2b44e 100644
--- a/tests/rest/client/v2_alpha/test_register.py
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -203,6 +203,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
 
     @unittest.override_config(
         {
+            "public_baseurl": "https://test_server",
             "enable_registration_captcha": True,
             "user_consent": {
                 "version": "1",
-- 
cgit 1.4.1


From 9b9ee75666ffca8e14b44efec6c360c2ccbcf615 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 26 Nov 2019 18:10:50 +0000
Subject: Fix startup error when http proxy is defined. (#6421)

Guess I only tested this on python 2 :/

Fixes #6419.
---
 changelog.d/6421.bugfix                       | 1 +
 synapse/rest/media/v1/preview_url_resource.py | 4 ++--
 synapse/server.py                             | 4 ++--
 3 files changed, 5 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6421.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6421.bugfix b/changelog.d/6421.bugfix
new file mode 100644
index 0000000000..7969f7f71d
--- /dev/null
+++ b/changelog.d/6421.bugfix
@@ -0,0 +1 @@
+Fix startup error when http proxy is defined.
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 15c15a12f5..87343d9db9 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -77,8 +77,8 @@ class PreviewUrlResource(DirectServeResource):
             treq_args={"browser_like_redirects": True},
             ip_whitelist=hs.config.url_preview_ip_range_whitelist,
             ip_blacklist=hs.config.url_preview_ip_range_blacklist,
-            http_proxy=os.getenv("http_proxy"),
-            https_proxy=os.getenv("HTTPS_PROXY"),
+            http_proxy=os.getenvb(b"http_proxy"),
+            https_proxy=os.getenvb(b"HTTPS_PROXY"),
         )
         self.media_repo = media_repo
         self.primary_base_path = media_repo.primary_base_path
diff --git a/synapse/server.py b/synapse/server.py
index 90c3b072e8..be9af7f986 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -318,8 +318,8 @@ class HomeServer(object):
     def build_proxied_http_client(self):
         return SimpleHttpClient(
             self,
-            http_proxy=os.getenv("http_proxy"),
-            https_proxy=os.getenv("HTTPS_PROXY"),
+            http_proxy=os.getenvb(b"http_proxy"),
+            https_proxy=os.getenvb(b"HTTPS_PROXY"),
         )
 
     def build_room_creation_handler(self):
-- 
cgit 1.4.1

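The bug fixed here is a Python 3 bytes/str mismatch: `os.getenv` returns `str`, but the proxy plumbing in `SimpleHttpClient` expects `bytes`, so the lookups switch to `os.getenvb`, which reads from `os.environb`. A small illustration of the difference (note that `os.getenvb` only exists on platforms that support `os.environb`, which excludes Windows):

    import os

    os.environ["HTTPS_PROXY"] = "http://proxy.example.com:3128"

    print(type(os.getenv("HTTPS_PROXY")))    # <class 'str'>
    print(type(os.getenvb(b"HTTPS_PROXY")))  # <class 'bytes'>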

From 6f4a63df0044a304109f7d6958be94262558a7cf Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 27 Nov 2019 18:18:33 +0000
Subject: Add more tests to the worker blacklist (#6429)

---
 .buildkite/worker-blacklist | 29 +++++++++++++++++++++++++++++
 changelog.d/6429.misc       |  1 +
 2 files changed, 30 insertions(+)
 create mode 100644 changelog.d/6429.misc

(limited to 'changelog.d')

diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist
index cda5c84e94..d7908af177 100644
--- a/.buildkite/worker-blacklist
+++ b/.buildkite/worker-blacklist
@@ -28,3 +28,32 @@ User sees updates to presence from other users in the incremental sync.
 Gapped incremental syncs include all state changes
 
 Old members are included in gappy incr LL sync if they start speaking
+
+# new failures as of https://github.com/matrix-org/sytest/pull/732
+Device list doesn't change if remote server is down
+Remote servers cannot set power levels in rooms without existing powerlevels
+Remote servers should reject attempts by non-creators to set the power levels
+
+# new failures as of https://github.com/matrix-org/sytest/pull/753
+GET /rooms/:room_id/messages returns a message
+GET /rooms/:room_id/messages lazy loads members correctly
+Read receipts are sent as events
+Only original members of the room can see messages from erased users
+Device deletion propagates over federation
+If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes
+Changing user-signing key notifies local users
+Newly updated tags appear in an incremental v2 /sync
+Server correctly handles incoming m.device_list_update
+Local device key changes get to remote servers with correct prev_id
+AS-ghosted users can use rooms via AS
+Ghost user must register before joining room
+Test that a message is pushed
+Invites are pushed
+Rooms with aliases are correctly named in pushed
+Rooms with names are correctly named in pushed
+Rooms with canonical alias are correctly named in pushed
+Rooms with many users are correctly pushed
+Don't get pushed for rooms you've muted
+Rejected events are not pushed
+Test that rejected pushers are removed.
+Events come down the correct room
diff --git a/changelog.d/6429.misc b/changelog.d/6429.misc
new file mode 100644
index 0000000000..4b32cdeac6
--- /dev/null
+++ b/changelog.d/6429.misc
@@ -0,0 +1 @@
+Add more tests to the blacklist when running in worker mode.
-- 
cgit 1.4.1

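Each non-comment line in these blacklist files is matched verbatim against a test name, with `#` starting a comment (the `sytest-blacklist` header quoted in a later patch describes the same format). A rough sketch of how such a file can be parsed, as an illustration rather than the actual sytest or buildkite code:

    def load_blacklist(path: str) -> set:
        """Return the set of blacklisted test names from a blacklist file."""
        names = set()
        with open(path) as f:
            for line in f:
                line = line.strip()
                # skip blank lines and comments
                if not line or line.startswith("#"):
                    continue
                names.add(line)
        return names

    # e.g. load_blacklist(".buildkite/worker-blacklist")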

From 0d27aba900136514a8801b902f9a8ac69150e2c0 Mon Sep 17 00:00:00 2001
From: Hubert Chathi 
Date: Wed, 27 Nov 2019 16:14:44 -0500
Subject: add etag and count to key backup endpoints (#5858)

---
 changelog.d/5858.feature                           |   1 +
 synapse/handlers/e2e_room_keys.py                  | 130 +++++++-----
 synapse/rest/client/v2_alpha/room_keys.py          |   8 +-
 synapse/storage/data_stores/main/e2e_room_keys.py  | 226 +++++++++++++++------
 .../main/schema/delta/56/room_key_etag.sql         |  17 ++
 tests/handlers/test_e2e_room_keys.py               |  31 +++
 tests/storage/test_e2e_room_keys.py                |   8 +-
 7 files changed, 297 insertions(+), 124 deletions(-)
 create mode 100644 changelog.d/5858.feature
 create mode 100644 synapse/storage/data_stores/main/schema/delta/56/room_key_etag.sql

(limited to 'changelog.d')

diff --git a/changelog.d/5858.feature b/changelog.d/5858.feature
new file mode 100644
index 0000000000..55ee93051e
--- /dev/null
+++ b/changelog.d/5858.feature
@@ -0,0 +1 @@
+Add etag and count fields to key backup endpoints to help clients guess if there are new keys.
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 0cea445f0d..f1b4424a02 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2017, 2018 New Vector Ltd
+# Copyright 2019 Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -103,14 +104,35 @@ class E2eRoomKeysHandler(object):
                 rooms
             session_id(string): session ID to delete keys for, or None to delete keys
                 for all sessions
+        Raises:
+            NotFoundError: if the backup version does not exist
         Returns:
-            A deferred of the deletion transaction
+            A dict containing the count and etag for the backup version
         """
 
         # lock for consistency with uploading
         with (yield self._upload_linearizer.queue(user_id)):
+            # make sure the backup version exists
+            try:
+                version_info = yield self.store.get_e2e_room_keys_version_info(
+                    user_id, version
+                )
+            except StoreError as e:
+                if e.code == 404:
+                    raise NotFoundError("Unknown backup version")
+                else:
+                    raise
+
             yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id)
 
+            version_etag = version_info["etag"] + 1
+            yield self.store.update_e2e_room_keys_version(
+                user_id, version, None, version_etag
+            )
+
+            count = yield self.store.count_e2e_room_keys(user_id, version)
+            return {"etag": str(version_etag), "count": count}
+
     @trace
     @defer.inlineCallbacks
     def upload_room_keys(self, user_id, version, room_keys):
@@ -138,6 +160,9 @@ class E2eRoomKeysHandler(object):
             }
         }
 
+        Returns:
+            A dict containing the count and etag for the backup version
+
         Raises:
             NotFoundError: if there are no versions defined
             RoomKeysVersionError: if the uploaded version is not the current version
@@ -171,59 +196,62 @@ class E2eRoomKeysHandler(object):
                     else:
                         raise
 
-            # go through the room_keys.
-            # XXX: this should/could be done concurrently, given we're in a lock.
+            # Fetch any existing room keys for the sessions that have been
+            # submitted.  Then compare them with the submitted keys.  If the
+            # key is new, insert it; if the key should be updated, then update
+            # it; otherwise, drop it.
+            existing_keys = yield self.store.get_e2e_room_keys_multi(
+                user_id, version, room_keys["rooms"]
+            )
+            to_insert = []  # batch the inserts together
+            changed = False  # if anything has changed, we need to update the etag
             for room_id, room in iteritems(room_keys["rooms"]):
-                for session_id, session in iteritems(room["sessions"]):
-                    yield self._upload_room_key(
-                        user_id, version, room_id, session_id, session
+                for session_id, room_key in iteritems(room["sessions"]):
+                    log_kv(
+                        {
+                            "message": "Trying to upload room key",
+                            "room_id": room_id,
+                            "session_id": session_id,
+                            "user_id": user_id,
+                        }
                     )
-
-    @defer.inlineCallbacks
-    def _upload_room_key(self, user_id, version, room_id, session_id, room_key):
-        """Upload a given room_key for a given room and session into a given
-        version of the backup.  Merges the key with any which might already exist.
-
-        Args:
-            user_id(str): the user whose backup we're setting
-            version(str): the version ID of the backup we're updating
-            room_id(str): the ID of the room whose keys we're setting
-            session_id(str): the session whose room_key we're setting
-            room_key(dict): the room_key being set
-        """
-        log_kv(
-            {
-                "message": "Trying to upload room key",
-                "room_id": room_id,
-                "session_id": session_id,
-                "user_id": user_id,
-            }
-        )
-        # get the room_key for this particular row
-        current_room_key = None
-        try:
-            current_room_key = yield self.store.get_e2e_room_key(
-                user_id, version, room_id, session_id
-            )
-        except StoreError as e:
-            if e.code == 404:
-                log_kv(
-                    {
-                        "message": "Room key not found.",
-                        "room_id": room_id,
-                        "user_id": user_id,
-                    }
+                    current_room_key = existing_keys.get(room_id, {}).get(session_id)
+                    if current_room_key:
+                        if self._should_replace_room_key(current_room_key, room_key):
+                            log_kv({"message": "Replacing room key."})
+                            # updates are done one at a time in the DB, so send
+                            # updates right away rather than batching them up,
+                            # like we do with the inserts
+                            yield self.store.update_e2e_room_key(
+                                user_id, version, room_id, session_id, room_key
+                            )
+                            changed = True
+                        else:
+                            log_kv({"message": "Not replacing room_key."})
+                    else:
+                        log_kv(
+                            {
+                                "message": "Room key not found.",
+                                "room_id": room_id,
+                                "user_id": user_id,
+                            }
+                        )
+                        log_kv({"message": "Replacing room key."})
+                        to_insert.append((room_id, session_id, room_key))
+                        changed = True
+
+            if len(to_insert):
+                yield self.store.add_e2e_room_keys(user_id, version, to_insert)
+
+            version_etag = version_info["etag"]
+            if changed:
+                version_etag = version_etag + 1
+                yield self.store.update_e2e_room_keys_version(
+                    user_id, version, None, version_etag
                 )
-            else:
-                raise
 
-        if self._should_replace_room_key(current_room_key, room_key):
-            log_kv({"message": "Replacing room key."})
-            yield self.store.set_e2e_room_key(
-                user_id, version, room_id, session_id, room_key
-            )
-        else:
-            log_kv({"message": "Not replacing room_key."})
+            count = yield self.store.count_e2e_room_keys(user_id, version)
+            return {"etag": str(version_etag), "count": count}
 
     @staticmethod
     def _should_replace_room_key(current_room_key, room_key):
@@ -314,6 +342,8 @@ class E2eRoomKeysHandler(object):
                     raise NotFoundError("Unknown backup version")
                 else:
                     raise
+
+            res["count"] = yield self.store.count_e2e_room_keys(user_id, res["version"])
             return res
 
     @trace
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index d596786430..d83ac8e3c5 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -134,8 +134,8 @@ class RoomKeysServlet(RestServlet):
         if room_id:
             body = {"rooms": {room_id: body}}
 
-        yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body)
-        return 200, {}
+        ret = yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body)
+        return 200, ret
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id, session_id):
@@ -239,10 +239,10 @@ class RoomKeysServlet(RestServlet):
         user_id = requester.user.to_string()
         version = parse_string(request, "version")
 
-        yield self.e2e_room_keys_handler.delete_room_keys(
+        ret = yield self.e2e_room_keys_handler.delete_room_keys(
             user_id, version, room_id, session_id
         )
-        return 200, {}
+        return 200, ret
 
 
 class RoomKeysNewVersionServlet(RestServlet):
diff --git a/synapse/storage/data_stores/main/e2e_room_keys.py b/synapse/storage/data_stores/main/e2e_room_keys.py
index 1cbbae5b63..113224fd7c 100644
--- a/synapse/storage/data_stores/main/e2e_room_keys.py
+++ b/synapse/storage/data_stores/main/e2e_room_keys.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
+# Copyright 2019 Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,49 +25,8 @@ from synapse.storage._base import SQLBaseStore
 
 class EndToEndRoomKeyStore(SQLBaseStore):
     @defer.inlineCallbacks
-    def get_e2e_room_key(self, user_id, version, room_id, session_id):
-        """Get the encrypted E2E room key for a given session from a given
-        backup version of room_keys.  We only store the 'best' room key for a given
-        session at a given time, as determined by the handler.
-
-        Args:
-            user_id(str): the user whose backup we're querying
-            version(str): the version ID of the backup for the set of keys we're querying
-            room_id(str): the ID of the room whose keys we're querying.
-                This is a bit redundant as it's implied by the session_id, but
-                we include for consistency with the rest of the API.
-            session_id(str): the session whose room_key we're querying.
-
-        Returns:
-            A deferred dict giving the session_data and message metadata for
-            this room key.
-        """
-
-        row = yield self._simple_select_one(
-            table="e2e_room_keys",
-            keyvalues={
-                "user_id": user_id,
-                "version": version,
-                "room_id": room_id,
-                "session_id": session_id,
-            },
-            retcols=(
-                "first_message_index",
-                "forwarded_count",
-                "is_verified",
-                "session_data",
-            ),
-            desc="get_e2e_room_key",
-        )
-
-        row["session_data"] = json.loads(row["session_data"])
-
-        return row
-
-    @defer.inlineCallbacks
-    def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key):
-        """Replaces or inserts the encrypted E2E room key for a given session in
-        a given backup
+    def update_e2e_room_key(self, user_id, version, room_id, session_id, room_key):
+        """Replaces the encrypted E2E room key for a given session in a given backup
 
         Args:
             user_id(str): the user whose backup we're setting
@@ -78,7 +38,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             StoreError
         """
 
-        yield self._simple_upsert(
+        yield self._simple_update_one(
             table="e2e_room_keys",
             keyvalues={
                 "user_id": user_id,
@@ -86,21 +46,51 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 "room_id": room_id,
                 "session_id": session_id,
             },
-            values={
+            updatevalues={
                 "first_message_index": room_key["first_message_index"],
                 "forwarded_count": room_key["forwarded_count"],
                 "is_verified": room_key["is_verified"],
                 "session_data": json.dumps(room_key["session_data"]),
             },
-            lock=False,
+            desc="update_e2e_room_key",
         )
-        log_kv(
-            {
-                "message": "Set room key",
-                "room_id": room_id,
-                "session_id": session_id,
-                "room_key": room_key,
-            }
+
+    @defer.inlineCallbacks
+    def add_e2e_room_keys(self, user_id, version, room_keys):
+        """Bulk add room keys to a given backup.
+
+        Args:
+            user_id (str): the user whose backup we're adding to
+            version (str): the version ID of the backup for the set of keys we're adding to
+            room_keys (iterable[(str, str, dict)]): the keys to add, in the form
+                (roomID, sessionID, keyData)
+        """
+
+        values = []
+        for (room_id, session_id, room_key) in room_keys:
+            values.append(
+                {
+                    "user_id": user_id,
+                    "version": version,
+                    "room_id": room_id,
+                    "session_id": session_id,
+                    "first_message_index": room_key["first_message_index"],
+                    "forwarded_count": room_key["forwarded_count"],
+                    "is_verified": room_key["is_verified"],
+                    "session_data": json.dumps(room_key["session_data"]),
+                }
+            )
+            log_kv(
+                {
+                    "message": "Set room key",
+                    "room_id": room_id,
+                    "session_id": session_id,
+                    "room_key": room_key,
+                }
+            )
+
+        yield self._simple_insert_many(
+            table="e2e_room_keys", values=values, desc="add_e2e_room_keys"
         )
 
     @trace
@@ -110,11 +100,11 @@ class EndToEndRoomKeyStore(SQLBaseStore):
         room, or a given session.
 
         Args:
-            user_id(str): the user whose backup we're querying
-            version(str): the version ID of the backup for the set of keys we're querying
-            room_id(str): Optional. the ID of the room whose keys we're querying, if any.
+            user_id (str): the user whose backup we're querying
+            version (str): the version ID of the backup for the set of keys we're querying
+            room_id (str): Optional. the ID of the room whose keys we're querying, if any.
                 If not specified, we return the keys for all the rooms in the backup.
-            session_id(str): Optional. the session whose room_key we're querying, if any.
+            session_id (str): Optional. the session whose room_key we're querying, if any.
                 If specified, we also require the room_id to be specified.
                 If not specified, we return all the keys in this version of
                 the backup (or for the specified room)
@@ -162,6 +152,95 @@ class EndToEndRoomKeyStore(SQLBaseStore):
 
         return sessions
 
+    def get_e2e_room_keys_multi(self, user_id, version, room_keys):
+        """Get multiple room keys at a time.  The difference between this function and
+        get_e2e_room_keys is that this function can be used to retrieve
+        multiple specific keys at a time, whereas get_e2e_room_keys is used for
+        getting all the keys in a backup version, all the keys for a room, or a
+        specific key.
+
+        Args:
+            user_id (str): the user whose backup we're querying
+            version (str): the version ID of the backup we're querying about
+            room_keys (dict[str, dict[str, iterable[str]]]): a map from
+                room ID -> {"session": [session ids]} indicating the session IDs
+                that we want to query
+
+        Returns:
+           Deferred[dict[str, dict[str, dict]]]: a map of room IDs to session IDs to room key
+        """
+
+        return self.runInteraction(
+            "get_e2e_room_keys_multi",
+            self._get_e2e_room_keys_multi_txn,
+            user_id,
+            version,
+            room_keys,
+        )
+
+    @staticmethod
+    def _get_e2e_room_keys_multi_txn(txn, user_id, version, room_keys):
+        if not room_keys:
+            return {}
+
+        where_clauses = []
+        params = [user_id, version]
+        for room_id, room in room_keys.items():
+            sessions = list(room["sessions"])
+            if not sessions:
+                continue
+            params.append(room_id)
+            params.extend(sessions)
+            where_clauses.append(
+                "(room_id = ? AND session_id IN (%s))"
+                % (",".join(["?" for _ in sessions]),)
+            )
+
+        # check if we're actually querying something
+        if not where_clauses:
+            return {}
+
+        sql = """
+        SELECT room_id, session_id, first_message_index, forwarded_count,
+               is_verified, session_data
+        FROM e2e_room_keys
+        WHERE user_id = ? AND version = ? AND (%s)
+        """ % (
+            " OR ".join(where_clauses)
+        )
+
+        txn.execute(sql, params)
+
+        ret = {}
+
+        for row in txn:
+            room_id = row[0]
+            session_id = row[1]
+            ret.setdefault(room_id, {})
+            ret[room_id][session_id] = {
+                "first_message_index": row[2],
+                "forwarded_count": row[3],
+                "is_verified": row[4],
+                "session_data": json.loads(row[5]),
+            }
+
+        return ret
+
+    def count_e2e_room_keys(self, user_id, version):
+        """Get the number of keys in a backup version.
+
+        Args:
+            user_id (str): the user whose backup we're querying
+            version (str): the version ID of the backup we're querying about
+        """
+
+        return self._simple_select_one_onecol(
+            table="e2e_room_keys",
+            keyvalues={"user_id": user_id, "version": version},
+            retcol="COUNT(*)",
+            desc="count_e2e_room_keys",
+        )
+
     @trace
     @defer.inlineCallbacks
     def delete_e2e_room_keys(self, user_id, version, room_id=None, session_id=None):
@@ -219,6 +298,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 version(str)
                 algorithm(str)
                 auth_data(object): opaque dict supplied by the client
+                etag(int): tag of the keys in the backup
         """
 
         def _get_e2e_room_keys_version_info_txn(txn):
@@ -236,10 +316,12 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 txn,
                 table="e2e_room_keys_versions",
                 keyvalues={"user_id": user_id, "version": this_version, "deleted": 0},
-                retcols=("version", "algorithm", "auth_data"),
+                retcols=("version", "algorithm", "auth_data", "etag"),
             )
             result["auth_data"] = json.loads(result["auth_data"])
             result["version"] = str(result["version"])
+            if result["etag"] is None:
+                result["etag"] = 0
             return result
 
         return self.runInteraction(
@@ -288,21 +370,33 @@ class EndToEndRoomKeyStore(SQLBaseStore):
         )
 
     @trace
-    def update_e2e_room_keys_version(self, user_id, version, info):
+    def update_e2e_room_keys_version(
+        self, user_id, version, info=None, version_etag=None
+    ):
         """Update a given backup version
 
         Args:
             user_id(str): the user whose backup version we're updating
             version(str): the version ID of the backup version we're updating
-            info(dict): the new backup version info to store
+            info (dict): the new backup version info to store.  If None, then
+                the backup version info is not updated
+            version_etag (Optional[int]): etag of the keys in the backup.  If
+                None, then the etag is not updated
         """
+        updatevalues = {}
 
-        return self._simple_update(
-            table="e2e_room_keys_versions",
-            keyvalues={"user_id": user_id, "version": version},
-            updatevalues={"auth_data": json.dumps(info["auth_data"])},
-            desc="update_e2e_room_keys_version",
-        )
+        if info is not None and "auth_data" in info:
+            updatevalues["auth_data"] = json.dumps(info["auth_data"])
+        if version_etag is not None:
+            updatevalues["etag"] = version_etag
+
+        if updatevalues:
+            return self._simple_update(
+                table="e2e_room_keys_versions",
+                keyvalues={"user_id": user_id, "version": version},
+                updatevalues=updatevalues,
+                desc="update_e2e_room_keys_version",
+            )
 
     @trace
     def delete_e2e_room_keys_version(self, user_id, version=None):
diff --git a/synapse/storage/data_stores/main/schema/delta/56/room_key_etag.sql b/synapse/storage/data_stores/main/schema/delta/56/room_key_etag.sql
new file mode 100644
index 0000000000..7d70dd071e
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/56/room_key_etag.sql
@@ -0,0 +1,17 @@
+/* Copyright 2019 Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- store the current etag of backup version
+ALTER TABLE e2e_room_keys_versions ADD COLUMN etag BIGINT;
diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py
index 0bb96674a2..70f172eb02 100644
--- a/tests/handlers/test_e2e_room_keys.py
+++ b/tests/handlers/test_e2e_room_keys.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2017 New Vector Ltd
+# Copyright 2019 Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -94,23 +95,29 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
 
         # check we can retrieve it as the current version
         res = yield self.handler.get_version_info(self.local_user)
+        version_etag = res["etag"]
+        del res["etag"]
         self.assertDictEqual(
             res,
             {
                 "version": "1",
                 "algorithm": "m.megolm_backup.v1",
                 "auth_data": "first_version_auth_data",
+                "count": 0,
             },
         )
 
         # check we can retrieve it as a specific version
         res = yield self.handler.get_version_info(self.local_user, "1")
+        self.assertEqual(res["etag"], version_etag)
+        del res["etag"]
         self.assertDictEqual(
             res,
             {
                 "version": "1",
                 "algorithm": "m.megolm_backup.v1",
                 "auth_data": "first_version_auth_data",
+                "count": 0,
             },
         )
 
@@ -126,12 +133,14 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
 
         # check we can retrieve it as the current version
         res = yield self.handler.get_version_info(self.local_user)
+        del res["etag"]
         self.assertDictEqual(
             res,
             {
                 "version": "2",
                 "algorithm": "m.megolm_backup.v1",
                 "auth_data": "second_version_auth_data",
+                "count": 0,
             },
         )
 
@@ -158,12 +167,14 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
 
         # check we can retrieve it as the current version
         res = yield self.handler.get_version_info(self.local_user)
+        del res["etag"]
         self.assertDictEqual(
             res,
             {
                 "algorithm": "m.megolm_backup.v1",
                 "auth_data": "revised_first_version_auth_data",
                 "version": version,
+                "count": 0,
             },
         )
 
@@ -207,12 +218,14 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
 
         # check we can retrieve it as the current version
         res = yield self.handler.get_version_info(self.local_user)
+        del res["etag"]  # etag is opaque, so don't test its contents
         self.assertDictEqual(
             res,
             {
                 "algorithm": "m.megolm_backup.v1",
                 "auth_data": "revised_first_version_auth_data",
                 "version": version,
+                "count": 0,
             },
         )
 
@@ -409,6 +422,11 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
 
         yield self.handler.upload_room_keys(self.local_user, version, room_keys)
 
+        # get the etag to compare to future versions
+        res = yield self.handler.get_version_info(self.local_user)
+        backup_etag = res["etag"]
+        self.assertEqual(res["count"], 1)
+
         new_room_keys = copy.deepcopy(room_keys)
         new_room_key = new_room_keys["rooms"]["!abc:matrix.org"]["sessions"]["c0ff33"]
 
@@ -423,6 +441,10 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
             "SSBBTSBBIEZJU0gK",
         )
 
+        # the etag should be the same since the session did not change
+        res = yield self.handler.get_version_info(self.local_user)
+        self.assertEqual(res["etag"], backup_etag)
+
         # test that marking the session as verified however /does/ replace it
         new_room_key["is_verified"] = True
         yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
@@ -432,6 +454,11 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
             res["rooms"]["!abc:matrix.org"]["sessions"]["c0ff33"]["session_data"], "new"
         )
 
+        # the etag should NOT be equal now, since the key changed
+        res = yield self.handler.get_version_info(self.local_user)
+        self.assertNotEqual(res["etag"], backup_etag)
+        backup_etag = res["etag"]
+
         # test that a session with a higher forwarded_count doesn't replace one
         # with a lower forwarding count
         new_room_key["forwarded_count"] = 2
@@ -443,6 +470,10 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase):
             res["rooms"]["!abc:matrix.org"]["sessions"]["c0ff33"]["session_data"], "new"
         )
 
+        # the etag should be the same since the session did not change
+        res = yield self.handler.get_version_info(self.local_user)
+        self.assertEqual(res["etag"], backup_etag)
+
         # TODO: check edge cases as well as the common variations here
 
     @defer.inlineCallbacks
diff --git a/tests/storage/test_e2e_room_keys.py b/tests/storage/test_e2e_room_keys.py
index d128fde441..35dafbb904 100644
--- a/tests/storage/test_e2e_room_keys.py
+++ b/tests/storage/test_e2e_room_keys.py
@@ -39,8 +39,8 @@ class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase):
         )
 
         self.get_success(
-            self.store.set_e2e_room_key(
-                "user_id", version1, "room", "session", room_key
+            self.store.add_e2e_room_keys(
+                "user_id", version1, [("room", "session", room_key)]
             )
         )
 
@@ -51,8 +51,8 @@ class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase):
         )
 
         self.get_success(
-            self.store.set_e2e_room_key(
-                "user_id", version2, "room", "session", room_key
+            self.store.add_e2e_room_keys(
+                "user_id", version2, [("room", "session", room_key)]
             )
         )
 
-- 
cgit 1.4.1

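`get_e2e_room_keys_multi` above builds a single statement with one `(room_id = ? AND session_id IN (...))` clause per room, keeping the parameter list aligned with the placeholders. A standalone sketch of that clause-building pattern (same shape as the patch, simplified for illustration):

    def build_where(room_keys: dict) -> tuple:
        """Build a WHERE fragment and parameter list for a multi-room lookup.

        room_keys maps room ID -> iterable of session IDs.
        """
        clauses, params = [], []
        for room_id, session_ids in room_keys.items():
            session_ids = list(session_ids)
            if not session_ids:
                continue
            placeholders = ",".join("?" for _ in session_ids)
            clauses.append("(room_id = ? AND session_id IN (%s))" % placeholders)
            params.append(room_id)
            params.extend(session_ids)
        return " OR ".join(clauses), params

    where, params = build_where({"!abc:matrix.org": ["c0ff33"]})
    # where  == "(room_id = ? AND session_id IN (?))"
    # params == ["!abc:matrix.org", "c0ff33"]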

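From a client's point of view, the new fields mean that upload and delete responses, and the version-info endpoint, all report `etag` and `count`; caching the pair makes it cheap to guess whether the backup holds anything new. A hypothetical client-side check (the `get_version_info` callable stands in for a real Matrix client API and is an assumption):

    def backup_changed(get_version_info, cached_etag, cached_count) -> bool:
        """Return True if the key backup may hold new or changed keys.

        get_version_info() is assumed to return the parsed JSON of the
        backup version endpoint, which now includes "etag" and "count".
        """
        info = get_version_info()
        return (info["etag"], info["count"]) != (cached_etag, cached_count)
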
From 0f87b912aba7e678041632bc9a6d1f7c2d24342c Mon Sep 17 00:00:00 2001
From: Amber Brown 
Date: Thu, 28 Nov 2019 08:54:07 +1100
Subject: Implementation of MSC2314 (#6176)

---
 changelog.d/6176.feature                   |  1 +
 synapse/federation/federation_server.py    | 26 ++++++++----
 synapse/federation/transport/server.py     |  6 +--
 sytest-blacklist                           |  6 ++-
 tests/federation/test_complexity.py        | 28 ++----------
 tests/federation/test_federation_sender.py |  4 +-
 tests/federation/test_federation_server.py | 63 +++++++++++++++++++++++++++
 tests/handlers/test_typing.py              |  3 ++
 tests/replication/slave/storage/_base.py   |  3 ++
 tests/replication/tcp/streams/_base.py     |  4 ++
 tests/storage/test_roommember.py           | 26 +-----------
 tests/unittest.py                          | 68 +++++++++++++++++++++++++++++-
 tests/utils.py                             |  1 +
 13 files changed, 174 insertions(+), 65 deletions(-)
 create mode 100644 changelog.d/6176.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6176.feature b/changelog.d/6176.feature
new file mode 100644
index 0000000000..3c66d689d4
--- /dev/null
+++ b/changelog.d/6176.feature
@@ -0,0 +1 @@
+Implement the `/_matrix/federation/unstable/net.atleastfornow/state/` API as drafted in MSC2314.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index d942d77a72..84d4eca041 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
+# Copyright 2019 Matrix.org Federation C.I.C
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -73,6 +74,7 @@ class FederationServer(FederationBase):
 
         self.auth = hs.get_auth()
         self.handler = hs.get_handlers().federation_handler
+        self.state = hs.get_state_handler()
 
         self._server_linearizer = Linearizer("fed_server")
         self._transaction_linearizer = Linearizer("fed_txn_handler")
@@ -264,9 +266,6 @@ class FederationServer(FederationBase):
         await self.registry.on_edu(edu_type, origin, content)
 
     async def on_context_state_request(self, origin, room_id, event_id):
-        if not event_id:
-            raise NotImplementedError("Specify an event")
-
         origin_host, _ = parse_server_name(origin)
         await self.check_server_matches_acl(origin_host, room_id)
 
@@ -280,13 +279,18 @@ class FederationServer(FederationBase):
         # - but that's non-trivial to get right, and anyway somewhat defeats
         # the point of the linearizer.
         with (await self._server_linearizer.queue((origin, room_id))):
-            resp = await self._state_resp_cache.wrap(
-                (room_id, event_id),
-                self._on_context_state_request_compute,
-                room_id,
-                event_id,
+            resp = dict(
+                await self._state_resp_cache.wrap(
+                    (room_id, event_id),
+                    self._on_context_state_request_compute,
+                    room_id,
+                    event_id,
+                )
             )
 
+        room_version = await self.store.get_room_version(room_id)
+        resp["room_version"] = room_version
+
         return 200, resp
 
     async def on_state_ids_request(self, origin, room_id, event_id):
@@ -306,7 +310,11 @@ class FederationServer(FederationBase):
         return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
 
     async def _on_context_state_request_compute(self, room_id, event_id):
-        pdus = await self.handler.get_state_for_pdu(room_id, event_id)
+        if event_id:
+            pdus = await self.handler.get_state_for_pdu(room_id, event_id)
+        else:
+            pdus = (await self.state.get_current_state(room_id)).values()
+
         auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])
 
         return {
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 09baa9c57d..fefc789c85 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -421,7 +421,7 @@ class FederationEventServlet(BaseFederationServlet):
         return await self.handler.on_pdu_request(origin, event_id)
 
 
-class FederationStateServlet(BaseFederationServlet):
+class FederationStateV1Servlet(BaseFederationServlet):
     PATH = "/state/(?P[^/]*)/?"
 
     # This is when someone asks for all data for a given context.
@@ -429,7 +429,7 @@ class FederationStateServlet(BaseFederationServlet):
         return await self.handler.on_context_state_request(
             origin,
             context,
-            parse_string_from_args(query, "event_id", None, required=True),
+            parse_string_from_args(query, "event_id", None, required=False),
         )
 
 
@@ -1360,7 +1360,7 @@ class RoomComplexityServlet(BaseFederationServlet):
 FEDERATION_SERVLET_CLASSES = (
     FederationSendServlet,
     FederationEventServlet,
-    FederationStateServlet,
+    FederationStateV1Servlet,
     FederationStateIdsServlet,
     FederationBackfillServlet,
     FederationQueryServlet,
diff --git a/sytest-blacklist b/sytest-blacklist
index 11785fd43f..411cce0692 100644
--- a/sytest-blacklist
+++ b/sytest-blacklist
@@ -1,6 +1,6 @@
 # This file serves as a blacklist for SyTest tests that we expect will fail in
 # Synapse.
-# 
+#
 # Each line of this file is scanned by sytest during a run and if the line
 # exactly matches the name of a test, it will be marked as "expected fail",
 # meaning the test will still run, but failure will not mark the entire test
@@ -29,3 +29,7 @@ Enabling an unknown default rule fails with 404
 
 # Blacklisted due to https://github.com/matrix-org/synapse/issues/1663
 New federated private chats get full presence information (SYN-115)
+
+# Blacklisted due to https://github.com/matrix-org/matrix-doc/pull/2314 removing
+# this requirement from the spec
+Inbound federation of state requires event_id as a mandatory paramater
diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py
index 51714a2b06..24fa8dbb45 100644
--- a/tests/federation/test_complexity.py
+++ b/tests/federation/test_complexity.py
@@ -18,17 +18,14 @@ from mock import Mock
 from twisted.internet import defer
 
 from synapse.api.errors import Codes, SynapseError
-from synapse.config.ratelimiting import FederationRateLimitConfig
-from synapse.federation.transport import server
 from synapse.rest import admin
 from synapse.rest.client.v1 import login, room
 from synapse.types import UserID
-from synapse.util.ratelimitutils import FederationRateLimiter
 
 from tests import unittest
 
 
-class RoomComplexityTests(unittest.HomeserverTestCase):
+class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
 
     servlets = [
         admin.register_servlets,
@@ -41,25 +38,6 @@ class RoomComplexityTests(unittest.HomeserverTestCase):
         config["limit_remote_rooms"] = {"enabled": True, "complexity": 0.05}
         return config
 
-    def prepare(self, reactor, clock, homeserver):
-        class Authenticator(object):
-            def authenticate_request(self, request, content):
-                return defer.succeed("otherserver.nottld")
-
-        ratelimiter = FederationRateLimiter(
-            clock,
-            FederationRateLimitConfig(
-                window_size=1,
-                sleep_limit=1,
-                sleep_msec=1,
-                reject_limit=1000,
-                concurrent_requests=1000,
-            ),
-        )
-        server.register_servlets(
-            homeserver, self.resource, Authenticator(), ratelimiter
-        )
-
     def test_complexity_simple(self):
 
         u1 = self.register_user("u1", "pass")
@@ -105,7 +83,7 @@ class RoomComplexityTests(unittest.HomeserverTestCase):
 
         d = handler._remote_join(
             None,
-            ["otherserver.example"],
+            ["other.example.com"],
             "roomid",
             UserID.from_string(u1),
             {"membership": "join"},
@@ -146,7 +124,7 @@ class RoomComplexityTests(unittest.HomeserverTestCase):
 
         d = handler._remote_join(
             None,
-            ["otherserver.example"],
+            ["other.example.com"],
             room_1,
             UserID.from_string(u1),
             {"membership": "join"},
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index cce8d8c6de..d456267b87 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -19,7 +19,7 @@ from twisted.internet import defer
 
 from synapse.types import ReadReceipt
 
-from tests.unittest import HomeserverTestCase
+from tests.unittest import HomeserverTestCase, override_config
 
 
 class FederationSenderTestCases(HomeserverTestCase):
@@ -29,6 +29,7 @@ class FederationSenderTestCases(HomeserverTestCase):
             federation_transport_client=Mock(spec=["send_transaction"]),
         )
 
+    @override_config({"send_federation": True})
     def test_send_receipts(self):
         mock_state_handler = self.hs.get_state_handler()
         mock_state_handler.get_current_hosts_in_room.return_value = ["test", "host2"]
@@ -69,6 +70,7 @@ class FederationSenderTestCases(HomeserverTestCase):
             ],
         )
 
+    @override_config({"send_federation": True})
     def test_send_receipts_with_backoff(self):
         """Send two receipts in quick succession; the second should be flushed, but
         only after 20ms"""
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index b08be451aa..1ec8c40901 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
+# Copyright 2019 Matrix.org Federation C.I.C
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,6 +17,8 @@ import logging
 
 from synapse.events import FrozenEvent
 from synapse.federation.federation_server import server_matches_acl_event
+from synapse.rest import admin
+from synapse.rest.client.v1 import login, room
 
 from tests import unittest
 
@@ -41,6 +44,66 @@ class ServerACLsTestCase(unittest.TestCase):
         self.assertTrue(server_matches_acl_event("1:2:3:4", e))
 
 
+class StateQueryTests(unittest.FederatingHomeserverTestCase):
+
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def test_without_event_id(self):
+        """
+        Querying v1/state/<room id> without an event ID will return the current
+        known state.
+        """
+        u1 = self.register_user("u1", "pass")
+        u1_token = self.login("u1", "pass")
+
+        room_1 = self.helper.create_room_as(u1, tok=u1_token)
+        self.inject_room_member(room_1, "@user:other.example.com", "join")
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/federation/v1/state/%s" % (room_1,)
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.result)
+
+        self.assertEqual(
+            channel.json_body["room_version"],
+            self.hs.config.default_room_version.identifier,
+        )
+
+        members = set(
+            map(
+                lambda x: x["state_key"],
+                filter(
+                    lambda x: x["type"] == "m.room.member", channel.json_body["pdus"]
+                ),
+            )
+        )
+
+        self.assertEqual(members, set(["@user:other.example.com", u1]))
+        self.assertEqual(len(channel.json_body["pdus"]), 6)
+
+    def test_needs_to_be_in_room(self):
+        """
+        Querying v1/state/<room id> requires the server
+        be in the room to provide data.
+        """
+        u1 = self.register_user("u1", "pass")
+        u1_token = self.login("u1", "pass")
+
+        room_1 = self.helper.create_room_as(u1, tok=u1_token)
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/federation/v1/state/%s" % (room_1,)
+        )
+        self.render(request)
+        self.assertEquals(403, channel.code, channel.result)
+        self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
+
+
 def _create_acl_event(content):
     return FrozenEvent(
         {
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 5ec568f4e6..f6d8660285 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -24,6 +24,7 @@ from synapse.api.errors import AuthError
 from synapse.types import UserID
 
 from tests import unittest
+from tests.unittest import override_config
 from tests.utils import register_federation_servlets
 
 # Some local users to test with
@@ -174,6 +175,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             ],
         )
 
+    @override_config({"send_federation": True})
     def test_started_typing_remote_send(self):
         self.room_members = [U_APPLE, U_ONION]
 
@@ -237,6 +239,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             ],
         )
 
+    @override_config({"send_federation": True})
     def test_stopped_typing(self):
         self.room_members = [U_APPLE, U_BANANA, U_ONION]
 
diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py
index 4f924ce451..e7472e3a93 100644
--- a/tests/replication/slave/storage/_base.py
+++ b/tests/replication/slave/storage/_base.py
@@ -48,7 +48,10 @@ class BaseSlavedStoreTestCase(unittest.HomeserverTestCase):
         server_factory = ReplicationStreamProtocolFactory(self.hs)
         self.streamer = server_factory.streamer
 
+        handler_factory = Mock()
         self.replication_handler = ReplicationClientHandler(self.slaved_store)
+        self.replication_handler.factory = handler_factory
+
         client_factory = ReplicationClientFactory(
             self.hs, "client_name", self.replication_handler
         )
diff --git a/tests/replication/tcp/streams/_base.py b/tests/replication/tcp/streams/_base.py
index ce3835ae6a..1d14e77255 100644
--- a/tests/replication/tcp/streams/_base.py
+++ b/tests/replication/tcp/streams/_base.py
@@ -12,6 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from mock import Mock
+
 from synapse.replication.tcp.commands import ReplicateCommand
 from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol
 from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
@@ -30,7 +32,9 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
         server = server_factory.buildProtocol(None)
 
         # build a replication client, with a dummy handler
+        handler_factory = Mock()
         self.test_handler = TestReplicationClientHandler()
+        self.test_handler.factory = handler_factory
         self.client = ClientReplicationStreamProtocol(
             "client", "test", clock, self.test_handler
         )
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 9ddd17f73d..105a0c2b02 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -16,8 +16,7 @@
 
 from unittest.mock import Mock
 
-from synapse.api.constants import EventTypes, Membership
-from synapse.api.room_versions import RoomVersions
+from synapse.api.constants import Membership
 from synapse.rest.admin import register_servlets_for_client_rest_resource
 from synapse.rest.client.v1 import login, room
 from synapse.types import Requester, UserID
@@ -44,9 +43,6 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
         # We can't test the RoomMemberStore on its own without the other event
         # storage logic
         self.store = hs.get_datastore()
-        self.storage = hs.get_storage()
-        self.event_builder_factory = hs.get_event_builder_factory()
-        self.event_creation_handler = hs.get_event_creation_handler()
 
         self.u_alice = self.register_user("alice", "pass")
         self.t_alice = self.login("alice", "pass")
@@ -55,26 +51,6 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
         # User elsewhere on another host
         self.u_charlie = UserID.from_string("@charlie:elsewhere")
 
-    def inject_room_member(self, room, user, membership, replaces_state=None):
-        builder = self.event_builder_factory.for_room_version(
-            RoomVersions.V1,
-            {
-                "type": EventTypes.Member,
-                "sender": user,
-                "state_key": user,
-                "room_id": room,
-                "content": {"membership": membership},
-            },
-        )
-
-        event, context = self.get_success(
-            self.event_creation_handler.create_new_client_event(builder)
-        )
-
-        self.get_success(self.storage.persistence.persist_event(event, context))
-
-        return event
-
     def test_one_member(self):
 
         # Alice creates the room, and is automatically joined
diff --git a/tests/unittest.py b/tests/unittest.py
index 561cebc223..31997a0f31 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2018 New Vector
+# Copyright 2019 Matrix.org Federation C.I.C
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,6 +14,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import gc
 import hashlib
 import hmac
@@ -27,13 +29,17 @@ from twisted.internet.defer import Deferred, succeed
 from twisted.python.threadpool import ThreadPool
 from twisted.trial import unittest
 
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, Membership
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.config.homeserver import HomeServerConfig
+from synapse.config.ratelimiting import FederationRateLimitConfig
+from synapse.federation.transport import server as federation_server
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import LoggingContext
 from synapse.server import HomeServer
 from synapse.types import Requester, UserID, create_requester
+from synapse.util.ratelimitutils import FederationRateLimiter
 
 from tests.server import get_clock, make_request, render, setup_test_homeserver
 from tests.test_utils.logging_setup import setup_logging
@@ -559,6 +565,66 @@ class HomeserverTestCase(TestCase):
         self.render(request)
         self.assertEqual(channel.code, 403, channel.result)
 
+    def inject_room_member(self, room: str, user: str, membership: Membership) -> None:
+        """
+        Inject a membership event into a room.
+
+        Args:
+            room: Room ID to inject the event into.
+            user: MXID of the user to inject the membership for.
+            membership: The membership type.
+        """
+        event_builder_factory = self.hs.get_event_builder_factory()
+        event_creation_handler = self.hs.get_event_creation_handler()
+
+        room_version = self.get_success(self.hs.get_datastore().get_room_version(room))
+
+        builder = event_builder_factory.for_room_version(
+            KNOWN_ROOM_VERSIONS[room_version],
+            {
+                "type": EventTypes.Member,
+                "sender": user,
+                "state_key": user,
+                "room_id": room,
+                "content": {"membership": membership},
+            },
+        )
+
+        event, context = self.get_success(
+            event_creation_handler.create_new_client_event(builder)
+        )
+
+        self.get_success(
+            self.hs.get_storage().persistence.persist_event(event, context)
+        )
+
+
+class FederatingHomeserverTestCase(HomeserverTestCase):
+    """
+    A federating homeserver that authenticates incoming requests as `other.example.com`.
+    """
+
+    def prepare(self, reactor, clock, homeserver):
+        class Authenticator(object):
+            def authenticate_request(self, request, content):
+                return succeed("other.example.com")
+
+        ratelimiter = FederationRateLimiter(
+            clock,
+            FederationRateLimitConfig(
+                window_size=1,
+                sleep_limit=1,
+                sleep_msec=1,
+                reject_limit=1000,
+                concurrent_requests=1000,
+            ),
+        )
+        federation_server.register_servlets(
+            homeserver, self.resource, Authenticator(), ratelimiter
+        )
+
+        return super().prepare(reactor, clock, homeserver)
+
 
 def override_config(extra_config):
     """A decorator which can be applied to test functions to give additional HS config
diff --git a/tests/utils.py b/tests/utils.py
index 7dc9bdc505..de2ac1ed33 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -109,6 +109,7 @@ def default_config(name, parse=False):
     """
     config_dict = {
         "server_name": name,
+        "send_federation": False,
         "media_store_path": "media",
         "uploads_path": "uploads",
         # the test signing key is just an arbitrary ed25519 key to keep the config
-- 
cgit 1.4.1

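With the `event_id` requirement dropped, a federation `GET /state/{roomId}` request with no `event_id` query parameter returns the room's current state, and the response now carries a `room_version` field alongside the PDUs. A sketch of such a request (illustrative only: real federation requests must carry a signed `X-Matrix` `Authorization` header, whose construction is elided here):

    import requests  # plain HTTP client, used here purely for illustration

    def get_current_state(server: str, room_id: str, auth_header: str) -> dict:
        """Fetch a room's current state over federation (no event_id given)."""
        resp = requests.get(
            "https://%s/_matrix/federation/v1/state/%s" % (server, room_id),
            headers={"Authorization": auth_header},  # signed X-Matrix header
        )
        resp.raise_for_status()
        # per this patch, the body includes "room_version" as well as "pdus"
        return resp.json()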

From c48ea9800769c22d763cd97ecb137141050739e1 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 28 Nov 2019 09:29:18 +0000
Subject: Clarifications for the email configuration settings. (#6423)

Cf #6422
---
 changelog.d/6423.misc         |  1 +
 docs/sample_config.yaml       | 17 ++++++++++++++++-
 synapse/config/emailconfig.py | 17 ++++++++++++++++-
 3 files changed, 33 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6423.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6423.misc b/changelog.d/6423.misc
new file mode 100644
index 0000000000..9bcd5d36c1
--- /dev/null
+++ b/changelog.d/6423.misc
@@ -0,0 +1 @@
+Clarifications for the email configuration settings.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 09dd21352f..c7391f0c48 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1333,8 +1333,23 @@ password_config:
 #   smtp_user: "exampleusername"
 #   smtp_pass: "examplepassword"
 #   require_transport_security: false
+#
+#   # notif_from defines the "From" address to use when sending emails.
+#   # It must be set if email sending is enabled.
+#   #
+#   # The placeholder '%(app)s' will be replaced by the application name,
+#   # which is normally 'app_name' (below), but may be overridden by the
+#   # Matrix client application.
+#   #
+#   # Note that the placeholder must be written '%(app)s', including the
+#   # trailing 's'.
+#   #
#   notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
-#   app_name: Matrix
+#
+#   # app_name defines the default value for '%(app)s' in notif_from. It
+#   # defaults to 'Matrix'.
+#   #
+#   #app_name: my_branded_matrix_server
 #
 #   # Enable email notifications by default
 #   #
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index ac1724045f..18f42a87f9 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -307,8 +307,23 @@ class EmailConfig(Config):
         #   smtp_user: "exampleusername"
         #   smtp_pass: "examplepassword"
         #   require_transport_security: false
+        #
+        #   # notif_from defines the "From" address to use when sending emails.
+        #   # It must be set if email sending is enabled.
+        #   #
+        #   # The placeholder '%(app)s' will be replaced by the application name,
+        #   # which is normally 'app_name' (below), but may be overridden by the
+        #   # Matrix client application.
+        #   #
+        #   # Note that the placeholder must be written '%(app)s', including the
+        #   # trailing 's'.
+        #   #
        #   notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
-        #   app_name: Matrix
+        #
+        #   # app_name defines the default value for '%(app)s' in notif_from. It
+        #   # defaults to 'Matrix'.
+        #   #
+        #   #app_name: my_branded_matrix_server
         #
         #   # Enable email notifications by default
         #   #
-- 
cgit 1.4.1
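
The `%(app)s` placeholder is plain Python `%`-interpolation with a named key, which is why the trailing `s` is required. A minimal illustration (standalone, not Synapse code):

```python
notif_from = "Your Friendly %(app)s homeserver <noreply@example.com>"

# The mapping key is 'app'; the trailing 's' is the string conversion type.
print(notif_from % {"app": "Matrix"})
# -> Your Friendly Matrix homeserver <noreply@example.com>

# Writing '%(app)' without the trailing 's' makes the interpolation fail
# with a ValueError, hence the note in the config comments above.
```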


From a9c44d4008deb29503e2de00e5aae1a56a72d630 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 28 Nov 2019 10:40:42 +0000
Subject: Remove local threepids on account deactivation (#6426)

---
 changelog.d/6426.bugfix                          |  1 +
 synapse/handlers/deactivate_account.py           |  3 +++
 synapse/storage/data_stores/main/registration.py | 13 +++++++++++++
 3 files changed, 17 insertions(+)
 create mode 100644 changelog.d/6426.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6426.bugfix b/changelog.d/6426.bugfix
new file mode 100644
index 0000000000..3acfde4211
--- /dev/null
+++ b/changelog.d/6426.bugfix
@@ -0,0 +1 @@
+Clean up local threepids from user on account deactivation.
\ No newline at end of file
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 63267a0a4c..6dedaaff8d 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -95,6 +95,9 @@ class DeactivateAccountHandler(BaseHandler):
                 user_id, threepid["medium"], threepid["address"]
             )
 
+        # Remove all 3PIDs this user has bound to the homeserver
+        yield self.store.user_delete_threepids(user_id)
+
         # delete any devices belonging to the user, which will also
         # delete corresponding access tokens.
         yield self._device_handler.delete_all_devices_for_user(user_id)
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index 0a3c1f0510..98cf6427c3 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -569,6 +569,19 @@ class RegistrationWorkerStore(SQLBaseStore):
         return self._simple_delete(
             "user_threepids",
             keyvalues={"user_id": user_id, "medium": medium, "address": address},
+            desc="user_delete_threepid",
+        )
+
+    def user_delete_threepids(self, user_id: str):
+        """Delete all threepid this user has bound
+
+        Args:
+             user_id: The user id to delete all threepids of
+
+        """
+        return self._simple_delete(
+            "user_threepids",
+            keyvalues={"user_id": user_id},
             desc="user_delete_threepids",
         )
 
-- 
cgit 1.4.1
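
A self-contained sketch (using `sqlite3`, purely illustrative) of what the new `user_delete_threepids` amounts to at the SQL level: unlike `user_delete_threepid`, which removes one `(medium, address)` binding, it deletes every row for the user.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE user_threepids (user_id TEXT, medium TEXT, address TEXT)")
conn.execute("INSERT INTO user_threepids VALUES ('@u:hs', 'email', 'a@example.com')")
conn.execute("INSERT INTO user_threepids VALUES ('@u:hs', 'msisdn', '+441234567890')")

# Equivalent of _simple_delete(table="user_threepids",
#                              keyvalues={"user_id": user_id}):
conn.execute("DELETE FROM user_threepids WHERE user_id = ?", ("@u:hs",))

assert conn.execute("SELECT COUNT(*) FROM user_threepids").fetchone()[0] == 0
```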


From 2030193e5523810c4fd6158f97b7a223cee4cb72 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 28 Nov 2019 10:40:42 +0000
Subject: Remove local threepids on account deactivation (#6426)

---
 changelog.d/6426.bugfix                          |  1 +
 synapse/handlers/deactivate_account.py           |  3 +++
 synapse/storage/data_stores/main/registration.py | 13 +++++++++++++
 3 files changed, 17 insertions(+)
 create mode 100644 changelog.d/6426.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6426.bugfix b/changelog.d/6426.bugfix
new file mode 100644
index 0000000000..3acfde4211
--- /dev/null
+++ b/changelog.d/6426.bugfix
@@ -0,0 +1 @@
+Clean up local threepids from user on account deactivation.
\ No newline at end of file
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 63267a0a4c..6dedaaff8d 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -95,6 +95,9 @@ class DeactivateAccountHandler(BaseHandler):
                 user_id, threepid["medium"], threepid["address"]
             )
 
+        # Remove all 3PIDs this user has bound to the homeserver
+        yield self.store.user_delete_threepids(user_id)
+
         # delete any devices belonging to the user, which will also
         # delete corresponding access tokens.
         yield self._device_handler.delete_all_devices_for_user(user_id)
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index ee1b2b2bbf..89147ad511 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -577,6 +577,19 @@ class RegistrationWorkerStore(SQLBaseStore):
         return self._simple_delete(
             "user_threepids",
             keyvalues={"user_id": user_id, "medium": medium, "address": address},
+            desc="user_delete_threepid",
+        )
+
+    def user_delete_threepids(self, user_id: str):
+        """Delete all threepid this user has bound
+
+        Args:
+             user_id: The user id to delete all threepids of
+
+        """
+        return self._simple_delete(
+            "user_threepids",
+            keyvalues={"user_id": user_id},
             desc="user_delete_threepids",
         )
 
-- 
cgit 1.4.1


From e7777f3668d09c87335830f785f42c851827b497 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Thu, 28 Nov 2019 11:24:11 +0000
Subject: 1.6.1

---
 CHANGES.md              | 15 +++++++++++++++
 changelog.d/6421.bugfix |  1 -
 changelog.d/6426.bugfix |  1 -
 debian/changelog        |  6 ++++++
 synapse/__init__.py     |  2 +-
 5 files changed, 22 insertions(+), 3 deletions(-)
 delete mode 100644 changelog.d/6421.bugfix
 delete mode 100644 changelog.d/6426.bugfix

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index 42281483b3..a9afd36d2c 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,18 @@
+Synapse 1.6.1 (2019-11-28)
+==========================
+
+Security updates
+----------------
+
+This release includes a security fix ([\#6426](https://github.com/matrix-org/synapse/issues/6426), below). Administrators are encouraged to upgrade as soon as possible.
+
+Bugfixes
+--------
+
+- Clean up local threepids from user on account deactivation. ([\#6426](https://github.com/matrix-org/synapse/issues/6426))
+- Fix startup error when http proxy is defined. ([\#6421](https://github.com/matrix-org/synapse/issues/6421))
+
+
 Synapse 1.6.0 (2019-11-26)
 ==========================
 
diff --git a/changelog.d/6421.bugfix b/changelog.d/6421.bugfix
deleted file mode 100644
index 7969f7f71d..0000000000
--- a/changelog.d/6421.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix startup error when http proxy is defined.
diff --git a/changelog.d/6426.bugfix b/changelog.d/6426.bugfix
deleted file mode 100644
index 3acfde4211..0000000000
--- a/changelog.d/6426.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Clean up local threepids from user on account deactivation.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index 82dae017f1..b8a43788ef 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.6.1) stable; urgency=medium
+
+  * New synapse release 1.6.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 28 Nov 2019 11:10:40 +0000
+
 matrix-synapse-py3 (1.6.0) stable; urgency=medium
 
   * New synapse release 1.6.0.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 53eedc0048..f99de2f3f3 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.6.0"
+__version__ = "1.6.1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 19ba7c142eaced06110c1cb2d22a489dae2ac155 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 28 Nov 2019 13:59:32 +0000
Subject: Newsfile

---
 changelog.d/6434.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6434.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6434.feature b/changelog.d/6434.feature
new file mode 100644
index 0000000000..affa5d50c1
--- /dev/null
+++ b/changelog.d/6434.feature
@@ -0,0 +1 @@
+Add support for MSC 2367, which allows specifying a reason on all membership events.
-- 
cgit 1.4.1


From 5ee2beeddbbcbf09ac054679de71db0e0bf9df31 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Thu, 28 Nov 2019 19:32:49 +0000
Subject: Changelog

---
 changelog.d/6436.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6436.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6436.bugfix b/changelog.d/6436.bugfix
new file mode 100644
index 0000000000..954a4e1d84
--- /dev/null
+++ b/changelog.d/6436.bugfix
@@ -0,0 +1 @@
+Fix a bug where a room with a low retention policy and low activity could become unusable.
-- 
cgit 1.4.1


From 23ea5721259059d50b80083bb7240a8cb56cf297 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 29 Nov 2019 13:51:14 +0000
Subject: Add User-Interactive Auth to /account/3pid/add (#6119)

---
 changelog.d/6119.feature                | 1 +
 synapse/rest/client/v2_alpha/account.py | 5 +++++
 2 files changed, 6 insertions(+)
 create mode 100644 changelog.d/6119.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6119.feature b/changelog.d/6119.feature
new file mode 100644
index 0000000000..1492e83c5a
--- /dev/null
+++ b/changelog.d/6119.feature
@@ -0,0 +1 @@
+Require User-Interactive Authentication for `/account/3pid/add`, meaning the user's password will be required to add a third-party ID to their account.
\ No newline at end of file
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index f26eae794c..ad674239ab 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -642,6 +642,7 @@ class ThreepidAddRestServlet(RestServlet):
         self.auth = hs.get_auth()
         self.auth_handler = hs.get_auth_handler()
 
+    @interactive_auth_handler
     @defer.inlineCallbacks
     def on_POST(self, request):
         requester = yield self.auth.get_user_by_req(request)
@@ -652,6 +653,10 @@ class ThreepidAddRestServlet(RestServlet):
         client_secret = body["client_secret"]
         sid = body["sid"]
 
+        yield self.auth_handler.validate_user_via_ui_auth(
+            requester, body, self.hs.get_ip_from_request(request)
+        )
+
         validation_session = yield self.identity_handler.validate_threepid_session(
             client_secret, sid
         )
-- 
cgit 1.4.1
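
From the client's perspective, this change means `/account/3pid/add` now goes through the standard User-Interactive Auth dance. A hedged sketch of the retry flow (all values are made up):

```python
# The first POST with just client_secret/sid now fails UIA: the server
# responds 401 with a session ID and the allowed flows, e.g.
# {"session": "...", "flows": [{"stages": ["m.login.password"]}]}.
body = {"client_secret": "d0nttellanyone", "sid": "1234"}

# The client retries the same request with an auth dict completing a stage:
body["auth"] = {
    "type": "m.login.password",
    "identifier": {"type": "m.id.user", "user": "@user:example.com"},
    "password": "correct horse battery staple",
    "session": "<session id from the 401 response>",
}
# POSTing /_matrix/client/r0/account/3pid/add with this body then passes
# validate_user_via_ui_auth above.
```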


From 81731c6e75fe904a5b44873efa361a229743d99f Mon Sep 17 00:00:00 2001
From: Filip Štědronský 
Date: Mon, 2 Dec 2019 12:12:55 +0000
Subject: Fix: Pillow error when uploading RGBA image (#3325) (#6241)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-Off-By: Filip Štědronský 
---
 changelog.d/6241.bugfix              | 1 +
 synapse/rest/media/v1/thumbnailer.py | 5 ++++-
 2 files changed, 5 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6241.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6241.bugfix b/changelog.d/6241.bugfix
new file mode 100644
index 0000000000..25109ca4a6
--- /dev/null
+++ b/changelog.d/6241.bugfix
@@ -0,0 +1 @@
+Fix error from the Pillow library when uploading RGBA images.
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
index 8cf415e29d..c234ea7421 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -129,5 +129,8 @@ class Thumbnailer(object):
 
     def _encode_image(self, output_image, output_type):
         output_bytes_io = BytesIO()
-        output_image.save(output_bytes_io, self.FORMATS[output_type], quality=80)
+        fmt = self.FORMATS[output_type]
+        if fmt == "JPEG":
+            output_image = output_image.convert("RGB")
+        output_image.save(output_bytes_io, fmt, quality=80)
         return output_bytes_io
-- 
cgit 1.4.1
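
A minimal reproduction of the bug being fixed (requires Pillow; illustrative only): JPEG has no alpha channel, so saving an RGBA image raises an error unless the image is converted to RGB first, which is exactly what the patched `_encode_image` now does.

```python
from io import BytesIO

from PIL import Image

img = Image.new("RGBA", (32, 32), (255, 0, 0, 128))
buf = BytesIO()
try:
    img.save(buf, "JPEG", quality=80)  # fails: cannot write mode RGBA as JPEG
except OSError:
    img.convert("RGB").save(buf, "JPEG", quality=80)  # the fix applied above
```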


From 0ad75fd98ef1943ebea98c6d9f2dc5770c643b0a Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 2 Dec 2019 15:09:57 +0000
Subject: Use python3 packages for Ubuntu (#6443)

---
 INSTALL.md           | 4 ++--
 changelog.d/6443.doc | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6443.doc

(limited to 'changelog.d')

diff --git a/INSTALL.md b/INSTALL.md
index 9b7360f0ef..9da2e3c734 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -109,8 +109,8 @@ Installing prerequisites on Ubuntu or Debian:
 
 ```
 sudo apt-get install build-essential python3-dev libffi-dev \
-                     python-pip python-setuptools sqlite3 \
-                     libssl-dev python-virtualenv libjpeg-dev libxslt1-dev
+                     python3-pip python3-setuptools sqlite3 \
+                     libssl-dev python3-virtualenv libjpeg-dev libxslt1-dev
 ```
 
 #### ArchLinux
diff --git a/changelog.d/6443.doc b/changelog.d/6443.doc
new file mode 100644
index 0000000000..67c59f92ee
--- /dev/null
+++ b/changelog.d/6443.doc
@@ -0,0 +1 @@
+Switch Ubuntu package install recommendation to use python3 packages in INSTALL.md.
\ No newline at end of file
-- 
cgit 1.4.1


From 72078e4be56d42421e8748e0e45d0fe1204853dd Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 2 Dec 2019 15:11:32 +0000
Subject: Transfer power level state events on room upgrade (#6237)

---
 changelog.d/6237.bugfix  |  1 +
 synapse/handlers/room.py | 36 +++++++++++++++++++++++++++++++-----
 2 files changed, 32 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/6237.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6237.bugfix b/changelog.d/6237.bugfix
new file mode 100644
index 0000000000..9285600b00
--- /dev/null
+++ b/changelog.d/6237.bugfix
@@ -0,0 +1 @@
+Transfer non-standard power levels on room upgrade.
\ No newline at end of file
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index e92b2eafd5..35a759f2fe 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -198,21 +198,21 @@ class RoomCreationHandler(BaseHandler):
         # finally, shut down the PLs in the old room, and update them in the new
         # room.
         yield self._update_upgraded_room_pls(
-            requester, old_room_id, new_room_id, old_room_state
+            requester, old_room_id, new_room_id, old_room_state,
         )
 
         return new_room_id
 
     @defer.inlineCallbacks
     def _update_upgraded_room_pls(
-        self, requester, old_room_id, new_room_id, old_room_state
+        self, requester, old_room_id, new_room_id, old_room_state,
     ):
         """Send updated power levels in both rooms after an upgrade
 
         Args:
             requester (synapse.types.Requester): the user requesting the upgrade
-            old_room_id (unicode): the id of the room to be replaced
-            new_room_id (unicode): the id of the replacement room
+            old_room_id (str): the id of the room to be replaced
+            new_room_id (str): the id of the replacement room
             old_room_state (dict[tuple[str, str], str]): the state map for the old room
 
         Returns:
@@ -298,7 +298,7 @@ class RoomCreationHandler(BaseHandler):
             tombstone_event_id (unicode|str): the ID of the tombstone event in the old
                 room.
         Returns:
-            Deferred[None]
+            Deferred
         """
         user_id = requester.user.to_string()
 
@@ -333,6 +333,7 @@ class RoomCreationHandler(BaseHandler):
             (EventTypes.Encryption, ""),
             (EventTypes.ServerACL, ""),
             (EventTypes.RelatedGroups, ""),
+            (EventTypes.PowerLevels, ""),
         )
 
         old_room_state_ids = yield self.store.get_filtered_current_state_ids(
@@ -346,6 +347,31 @@ class RoomCreationHandler(BaseHandler):
             if old_event:
                 initial_state[k] = old_event.content
 
+        # Resolve the minimum power level required to send any state event
+        # We will give the upgrading user this power level temporarily (if necessary) such that
+        # they are able to copy all of the state events over, then revert them back to their
+        # original power level afterwards in _update_upgraded_room_pls
+
+        # Copy over user power levels now as this will not be possible with >100PL users once
+        # the room has been created
+
+        power_levels = initial_state[(EventTypes.PowerLevels, "")]
+
+        # Calculate the minimum power level needed to clone the room
+        event_power_levels = power_levels.get("events", {})
+        state_default = power_levels.get("state_default", 0)
+        ban = power_levels.get("ban")
+        needed_power_level = max(state_default, ban, max(event_power_levels.values()))
+
+        # Raise the requester's power level in the new room if necessary
+        current_power_level = power_levels["users"][requester.user.to_string()]
+        if current_power_level < needed_power_level:
+            # Assign this power level to the requester
+            power_levels["users"][requester.user.to_string()] = needed_power_level
+
+        # Set the power levels to the modified state
+        initial_state[(EventTypes.PowerLevels, "")] = power_levels
+
         yield self._send_events_for_new_room(
             requester,
             new_room_id,
-- 
cgit 1.4.1
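
A worked example (hypothetical `power_levels` content) of the `needed_power_level` calculation above; note it assumes the `events` dict is non-empty and `ban` is set, as in a typical room.

```python
power_levels = {
    "users": {"@upgrader:example.com": 50},
    "events": {"m.room.name": 50, "m.room.power_levels": 100},
    "state_default": 50,
    "ban": 50,
}

event_power_levels = power_levels.get("events", {})
state_default = power_levels.get("state_default", 0)
ban = power_levels.get("ban")
needed_power_level = max(state_default, ban, max(event_power_levels.values()))

# needed_power_level == 100, so the PL-50 upgrader is temporarily raised to
# 100 in the new room, then reverted by _update_upgraded_room_pls.
assert needed_power_level == 100
```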


From 8ee62e4b98d71feef72987c94eef1ed097746f34 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Mon, 2 Dec 2019 18:43:25 +0000
Subject: Add changelog

---
 changelog.d/6449.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6449.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6449.bugfix b/changelog.d/6449.bugfix
new file mode 100644
index 0000000000..ced16544c9
--- /dev/null
+++ b/changelog.d/6449.bugfix
@@ -0,0 +1 @@
+Fix `synapse_port_db` incorrectly assuming that the `state_groups` index was missing.
\ No newline at end of file
-- 
cgit 1.4.1


From e57567f99425c51f6fabb7ba86ae86175ff59992 Mon Sep 17 00:00:00 2001
From: Hubert Chathi 
Date: Mon, 2 Dec 2019 17:10:57 -0500
Subject: add changelog

---
 changelog.d/6451.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6451.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6451.bugfix b/changelog.d/6451.bugfix
new file mode 100644
index 0000000000..e678dc240b
--- /dev/null
+++ b/changelog.d/6451.bugfix
@@ -0,0 +1 @@
+Change the index on the `e2e_cross_signing_signatures` table to be non-unique.
-- 
cgit 1.4.1


From fdec84aa427e2e3b806eb15f462d652f8554cc8d Mon Sep 17 00:00:00 2001
From: Amber Brown 
Date: Tue, 3 Dec 2019 20:21:25 +1100
Subject: Add benchmarks for structured logging performance (#6266)

---
 changelog.d/6266.misc          |   1 +
 synapse/logging/_terse_json.py |   1 +
 synmark/__init__.py            |  72 +++++++++++++++++++++++++
 synmark/__main__.py            |  90 +++++++++++++++++++++++++++++++
 synmark/suites/__init__.py     |   3 ++
 synmark/suites/logging.py      | 118 +++++++++++++++++++++++++++++++++++++++++
 tox.ini                        |   9 ++++
 7 files changed, 294 insertions(+)
 create mode 100644 changelog.d/6266.misc
 create mode 100644 synmark/__init__.py
 create mode 100644 synmark/__main__.py
 create mode 100644 synmark/suites/__init__.py
 create mode 100644 synmark/suites/logging.py

(limited to 'changelog.d')

diff --git a/changelog.d/6266.misc b/changelog.d/6266.misc
new file mode 100644
index 0000000000..634e421a79
--- /dev/null
+++ b/changelog.d/6266.misc
@@ -0,0 +1 @@
+Add benchmarks for structured logging and improve output performance.
diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py
index 05fc64f409..03934956f4 100644
--- a/synapse/logging/_terse_json.py
+++ b/synapse/logging/_terse_json.py
@@ -256,6 +256,7 @@ class TerseJSONToTCPLogObserver(object):
             # transport is the same, just trigger a resumeProducing.
             if self._producer and r.transport is self._producer.transport:
                 self._producer.resumeProducing()
+                self._connection_waiter = None
                 return
 
             # If the producer is still producing, stop it.
diff --git a/synmark/__init__.py b/synmark/__init__.py
new file mode 100644
index 0000000000..570eb818d9
--- /dev/null
+++ b/synmark/__init__.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+from twisted.internet import epollreactor
+from twisted.internet.main import installReactor
+
+from synapse.config.homeserver import HomeServerConfig
+from synapse.util import Clock
+
+from tests.utils import default_config, setup_test_homeserver
+
+
+async def make_homeserver(reactor, config=None):
+    """
+    Make a Homeserver suitable for running benchmarks against.
+
+    Args:
+        reactor: A Twisted reactor to run under.
+        config: A HomeServerConfig to use, or None.
+    """
+    cleanup_tasks = []
+    clock = Clock(reactor)
+
+    if not config:
+        config = default_config("test")
+
+    config_obj = HomeServerConfig()
+    config_obj.parse_config_dict(config, "", "")
+
+    hs = await setup_test_homeserver(
+        cleanup_tasks.append, config=config_obj, reactor=reactor, clock=clock
+    )
+    stor = hs.get_datastore()
+
+    # Run the database background updates.
+    if hasattr(stor, "do_next_background_update"):
+        while not await stor.has_completed_background_updates():
+            await stor.do_next_background_update(1)
+
+    def cleanup():
+        for i in cleanup_tasks:
+            i()
+
+    return hs, clock.sleep, cleanup
+
+
+def make_reactor():
+    """
+    Instantiate and install a Twisted reactor suitable for testing (i.e. not the
+    default global one).
+    """
+    reactor = epollreactor.EPollReactor()
+
+    if "twisted.internet.reactor" in sys.modules:
+        del sys.modules["twisted.internet.reactor"]
+    installReactor(reactor)
+
+    return reactor
diff --git a/synmark/__main__.py b/synmark/__main__.py
new file mode 100644
index 0000000000..ac59befbd4
--- /dev/null
+++ b/synmark/__main__.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+from contextlib import redirect_stderr
+from io import StringIO
+
+import pyperf
+from synmark import make_reactor
+from synmark.suites import SUITES
+
+from twisted.internet.defer import ensureDeferred
+from twisted.logger import globalLogBeginner, textFileLogObserver
+from twisted.python.failure import Failure
+
+from tests.utils import setupdb
+
+
+def make_test(main):
+    """
+    Take a benchmark function and wrap it in a reactor start and stop.
+    """
+
+    def _main(loops):
+
+        reactor = make_reactor()
+
+        file_out = StringIO()
+        with redirect_stderr(file_out):
+
+            d = ensureDeferred(main(reactor, loops))
+
+            def on_done(_):
+                if isinstance(_, Failure):
+                    _.printTraceback()
+                    print(file_out.getvalue())
+                reactor.stop()
+                return _
+
+            d.addBoth(on_done)
+            reactor.run()
+
+        return d.result
+
+    return _main
+
+
+if __name__ == "__main__":
+
+    def add_cmdline_args(cmd, args):
+        if args.log:
+            cmd.extend(["--log"])
+
+    runner = pyperf.Runner(
+        processes=3, min_time=2, show_name=True, add_cmdline_args=add_cmdline_args
+    )
+    runner.argparser.add_argument("--log", action="store_true")
+    runner.parse_args()
+
+    orig_loops = runner.args.loops
+    runner.args.inherit_environ = ["SYNAPSE_POSTGRES"]
+
+    if runner.args.worker:
+        if runner.args.log:
+            globalLogBeginner.beginLoggingTo(
+                [textFileLogObserver(sys.__stdout__)], redirectStandardIO=False
+            )
+        setupdb()
+
+    for suite, loops in SUITES:
+        if loops:
+            runner.args.loops = loops
+        else:
+            runner.args.loops = orig_loops
+            loops = "auto"
+        runner.bench_time_func(
+            suite.__name__ + "_" + str(loops), make_test(suite.main),
+        )
diff --git a/synmark/suites/__init__.py b/synmark/suites/__init__.py
new file mode 100644
index 0000000000..cfa3b0ba38
--- /dev/null
+++ b/synmark/suites/__init__.py
@@ -0,0 +1,3 @@
+from . import logging
+
+SUITES = [(logging, 1000), (logging, 10000), (logging, None)]
diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py
new file mode 100644
index 0000000000..d8e4c7d58f
--- /dev/null
+++ b/synmark/suites/logging.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+from io import StringIO
+
+from mock import Mock
+
+from pyperf import perf_counter
+from synmark import make_homeserver
+
+from twisted.internet.defer import Deferred
+from twisted.internet.protocol import ServerFactory
+from twisted.logger import LogBeginner, Logger, LogPublisher
+from twisted.protocols.basic import LineOnlyReceiver
+
+from synapse.logging._structured import setup_structured_logging
+
+
+class LineCounter(LineOnlyReceiver):
+
+    delimiter = b"\n"
+
+    def __init__(self, *args, **kwargs):
+        self.count = 0
+        super().__init__(*args, **kwargs)
+
+    def lineReceived(self, line):
+        self.count += 1
+
+        if self.count >= self.factory.wait_for and self.factory.on_done:
+            on_done = self.factory.on_done
+            self.factory.on_done = None
+            on_done.callback(True)
+
+
+async def main(reactor, loops):
+    """
+    Benchmark how long it takes to send `loops` messages.
+    """
+    servers = []
+
+    def protocol():
+        p = LineCounter()
+        servers.append(p)
+        return p
+
+    logger_factory = ServerFactory.forProtocol(protocol)
+    logger_factory.wait_for = loops
+    logger_factory.on_done = Deferred()
+    port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1")
+
+    hs, wait, cleanup = await make_homeserver(reactor)
+
+    errors = StringIO()
+    publisher = LogPublisher()
+    mock_sys = Mock()
+    beginner = LogBeginner(
+        publisher, errors, mock_sys, warnings, initialBufferSize=loops
+    )
+
+    log_config = {
+        "loggers": {"synapse": {"level": "DEBUG"}},
+        "drains": {
+            "tersejson": {
+                "type": "network_json_terse",
+                "host": "127.0.0.1",
+                "port": port.getHost().port,
+                "maximum_buffer": 100,
+            }
+        },
+    }
+
+    logger = Logger(namespace="synapse.logging.test_terse_json", observer=publisher)
+    logging_system = setup_structured_logging(
+        hs, hs.config, log_config, logBeginner=beginner, redirect_stdlib_logging=False
+    )
+
+    # Wait for it to connect...
+    await logging_system._observers[0]._service.whenConnected()
+
+    start = perf_counter()
+
+    # Send a bunch of useful messages
+    for i in range(0, loops):
+        logger.info("test message %s" % (i,))
+
+        if (
+            len(logging_system._observers[0]._buffer)
+            == logging_system._observers[0].maximum_buffer
+        ):
+            while (
+                len(logging_system._observers[0]._buffer)
+                > logging_system._observers[0].maximum_buffer / 2
+            ):
+                await wait(0.01)
+
+    await logger_factory.on_done
+
+    end = perf_counter() - start
+
+    logging_system.stop()
+    port.stopListening()
+    cleanup()
+
+    return end
diff --git a/tox.ini b/tox.ini
index 62b350ea6a..903a245fb0 100644
--- a/tox.ini
+++ b/tox.ini
@@ -102,6 +102,15 @@ commands =
 
     {envbindir}/coverage run "{envbindir}/trial"  {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
 
+[testenv:benchmark]
+deps =
+    {[base]deps}
+    pyperf
+setenv =
+    SYNAPSE_POSTGRES = 1
+commands =
+    python -m synmark {posargs:}
+
 [testenv:packaging]
 skip_install=True
 deps =
-- 
cgit 1.4.1
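
The benchmark harness builds on pyperf's timing contract: `bench_time_func` invokes the benchmark function with a loop count and expects the elapsed wall-clock time back. A minimal standalone sketch of that pattern (independent of synmark); the tox.ini hunk above wires the real thing up as `tox -e benchmark`, which runs `python -m synmark`.

```python
import pyperf


def bench_noop(loops):
    t0 = pyperf.perf_counter()
    for _ in range(loops):
        pass  # the workload under test goes here
    return pyperf.perf_counter() - t0


if __name__ == "__main__":
    runner = pyperf.Runner()
    runner.bench_time_func("noop_loop", bench_noop)
```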


From 620f98b65b43404ea6bf99f5907170de72707f8a Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 3 Dec 2019 18:20:39 +0000
Subject: write some docs for the quarantine_media api (#6458)

---
 changelog.d/6458.doc              |  1 +
 docs/admin_api/media_admin_api.md | 17 +++++++++++++++++
 2 files changed, 18 insertions(+)
 create mode 100644 changelog.d/6458.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6458.doc b/changelog.d/6458.doc
new file mode 100644
index 0000000000..3a9f831d89
--- /dev/null
+++ b/changelog.d/6458.doc
@@ -0,0 +1 @@
+Write some docs for the quarantine_media api.
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index 5e9f8e5d84..8b3666d5f5 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -21,3 +21,20 @@ It returns a JSON body like the following:
     ]
 }
 ```
+
+# Quarantine media in a room
+
+This API 'quarantines' all the media in a room.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/quarantine_media/<room_id>
+
+{}
+```
+
+Quarantining media means that it is marked as inaccessible to users. It applies
+to any local media, and any locally-cached copies of remote media.
+
+The media file itself (and any thumbnails) is not deleted from the server.
-- 
cgit 1.4.1
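
A hypothetical invocation of the quarantine API documented above, using the `requests` library (the hostname, token and room ID are made up; the room ID is URL-encoded in the path):

```python
import requests

resp = requests.post(
    "https://synapse.example.com/_synapse/admin/v1"
    "/quarantine_media/!room%3Aexample.com",
    headers={"Authorization": "Bearer <admin access token>"},
    json={},
)
resp.raise_for_status()
```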


From 54dd5dc12b0ac5c48303144c4a73ce3822209488 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 3 Dec 2019 19:19:45 +0000
Subject: Add ephemeral messages support (MSC2228) (#6409)

Implement part of [MSC2228](https://github.com/matrix-org/matrix-doc/pull/2228). The parts that differ from the MSC are:

* the feature is hidden behind a configuration flag (`enable_ephemeral_messages`)
* self-destruction doesn't happen for state events
* only implement support for the `m.self_destruct_after` field (not the `m.self_destruct` one)
* doesn't send synthetic redactions to clients, because for this specific case we consider clients able to destroy an event themselves; instead, we just censor it (by pruning its JSON) in the database
---
 changelog.d/6409.feature                           |   1 +
 synapse/api/constants.py                           |   4 +
 synapse/config/server.py                           |   2 +
 synapse/handlers/federation.py                     |   8 ++
 synapse/handlers/message.py                        | 123 +++++++++++++++++++-
 synapse/storage/data_stores/main/events.py         | 126 ++++++++++++++++++++-
 .../main/schema/delta/56/event_expiry.sql          |  21 ++++
 tests/rest/client/test_ephemeral_message.py        | 101 +++++++++++++++++
 8 files changed, 379 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6409.feature
 create mode 100644 synapse/storage/data_stores/main/schema/delta/56/event_expiry.sql
 create mode 100644 tests/rest/client/test_ephemeral_message.py

(limited to 'changelog.d')

diff --git a/changelog.d/6409.feature b/changelog.d/6409.feature
new file mode 100644
index 0000000000..653ff5a5ad
--- /dev/null
+++ b/changelog.d/6409.feature
@@ -0,0 +1 @@
+Add ephemeral messages support by partially implementing [MSC2228](https://github.com/matrix-org/matrix-doc/pull/2228).
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index e3f086f1c3..69cef369a5 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -147,3 +147,7 @@ class EventContentFields(object):
 
     # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326
     LABELS = "org.matrix.labels"
+
+    # Timestamp to delete the event after
+    # cf https://github.com/matrix-org/matrix-doc/pull/2228
+    SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 7a9d711669..837fbe1582 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -490,6 +490,8 @@ class ServerConfig(Config):
             "cleanup_extremities_with_dummy_events", True
         )
 
+        self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)
+
     def has_tls_listener(self) -> bool:
         return any(l["tls"] for l in self.listeners)
 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index d3267734f7..d9d0cd9eef 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -121,6 +121,7 @@ class FederationHandler(BaseHandler):
         self.pusher_pool = hs.get_pusherpool()
         self.spam_checker = hs.get_spam_checker()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self._message_handler = hs.get_message_handler()
         self._server_notices_mxid = hs.config.server_notices_mxid
         self.config = hs.config
         self.http_client = hs.get_simple_http_client()
@@ -141,6 +142,8 @@ class FederationHandler(BaseHandler):
 
         self.third_party_event_rules = hs.get_third_party_event_rules()
 
+        self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
+
     @defer.inlineCallbacks
     def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False):
         """ Process a PDU received via a federation /send/ transaction, or
@@ -2715,6 +2718,11 @@ class FederationHandler(BaseHandler):
                 event_and_contexts, backfilled=backfilled
             )
 
+            if self._ephemeral_messages_enabled:
+                for (event, context) in event_and_contexts:
+                    # If there's an expiry timestamp on the event, schedule its expiry.
+                    self._message_handler.maybe_schedule_expiry(event)
+
             if not backfilled:  # Never notify for backfilled events
                 for event, _ in event_and_contexts:
                     yield self._notify_persisted_event(event, max_stream_id)
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 3b0156f516..4f53a5f5dc 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import Optional
 
 from six import iteritems, itervalues, string_types
 
@@ -22,9 +23,16 @@ from canonicaljson import encode_canonical_json, json
 
 from twisted.internet import defer
 from twisted.internet.defer import succeed
+from twisted.internet.interfaces import IDelayedCall
 
 from synapse import event_auth
-from synapse.api.constants import EventTypes, Membership, RelationTypes, UserTypes
+from synapse.api.constants import (
+    EventContentFields,
+    EventTypes,
+    Membership,
+    RelationTypes,
+    UserTypes,
+)
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -62,6 +70,17 @@ class MessageHandler(object):
         self.storage = hs.get_storage()
         self.state_store = self.storage.state
         self._event_serializer = hs.get_event_client_serializer()
+        self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages
+        self._is_worker_app = bool(hs.config.worker_app)
+
+        # The scheduled call to self._expire_event. None if no call is currently
+        # scheduled.
+        self._scheduled_expiry = None  # type: Optional[IDelayedCall]
+
+        if not hs.config.worker_app:
+            run_as_background_process(
+                "_schedule_next_expiry", self._schedule_next_expiry
+            )
 
     @defer.inlineCallbacks
     def get_room_data(
@@ -225,6 +244,100 @@ class MessageHandler(object):
             for user_id, profile in iteritems(users_with_profile)
         }
 
+    def maybe_schedule_expiry(self, event):
+        """Schedule the expiry of an event if there's not already one scheduled,
+        or if the one running is for an event that will expire after the provided
+        timestamp.
+
+        This function needs to invalidate the event cache, which is only possible on
+        the master process, and therefore needs to be run on there.
+
+        Args:
+            event (EventBase): The event to schedule the expiry of.
+        """
+        assert not self._is_worker_app
+
+        expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
+        if not isinstance(expiry_ts, int) or event.is_state():
+            return
+
+        # _schedule_expiry_for_event won't actually schedule anything if there's already
+        # a task scheduled for a timestamp that's sooner than the provided one.
+        self._schedule_expiry_for_event(event.event_id, expiry_ts)
+
+    @defer.inlineCallbacks
+    def _schedule_next_expiry(self):
+        """Retrieve the ID and the expiry timestamp of the next event to be expired,
+        and schedule an expiry task for it.
+
+        If there's no event left to expire, set _scheduled_expiry to None so that a
+        future call to maybe_schedule_expiry can schedule a new expiry task.
+        """
+        # Try to get the expiry timestamp of the next event to expire.
+        res = yield self.store.get_next_event_to_expire()
+        if res:
+            event_id, expiry_ts = res
+            self._schedule_expiry_for_event(event_id, expiry_ts)
+
+    def _schedule_expiry_for_event(self, event_id, expiry_ts):
+        """Schedule an expiry task for the provided event if there's not already one
+        scheduled at a timestamp that's sooner than the provided one.
+
+        Args:
+            event_id (str): The ID of the event to expire.
+            expiry_ts (int): The timestamp at which to expire the event.
+        """
+        if self._scheduled_expiry:
+            # If the provided timestamp refers to a time before the scheduled time of the
+            # next expiry task, cancel that task and reschedule it for this timestamp.
+            next_scheduled_expiry_ts = self._scheduled_expiry.getTime() * 1000
+            if expiry_ts < next_scheduled_expiry_ts:
+                self._scheduled_expiry.cancel()
+            else:
+                return
+
+        # Figure out how many seconds we need to wait before expiring the event.
+        now_ms = self.clock.time_msec()
+        delay = (expiry_ts - now_ms) / 1000
+
+        # callLater doesn't support negative delays, so trim the delay to 0 if we're
+        # in that case.
+        if delay < 0:
+            delay = 0
+
+        logger.info("Scheduling expiry for event %s in %.3fs", event_id, delay)
+
+        self._scheduled_expiry = self.clock.call_later(
+            delay,
+            run_as_background_process,
+            "_expire_event",
+            self._expire_event,
+            event_id,
+        )
+
+    @defer.inlineCallbacks
+    def _expire_event(self, event_id):
+        """Retrieve and expire an event that needs to be expired from the database.
+
+        If the event doesn't exist in the database, log it and delete the expiry date
+        from the database (so that we don't try to expire it again).
+        """
+        assert self._ephemeral_events_enabled
+
+        self._scheduled_expiry = None
+
+        logger.info("Expiring event %s", event_id)
+
+        try:
+            # Expire the event if we know about it. This function also deletes the expiry
+            # date from the database in the same database transaction.
+            yield self.store.expire_event(event_id)
+        except Exception as e:
+            logger.error("Could not expire event %s: %r", event_id, e)
+
+        # Schedule the expiry of the next event to expire.
+        yield self._schedule_next_expiry()
+
 
 # The duration (in ms) after which rooms should be removed
 # `_rooms_to_exclude_from_dummy_event_insertion` (with the effect that we will try
@@ -295,6 +408,10 @@ class EventCreationHandler(object):
                 5 * 60 * 1000,
             )
 
+        self._message_handler = hs.get_message_handler()
+
+        self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages
+
     @defer.inlineCallbacks
     def create_event(
         self,
@@ -877,6 +994,10 @@ class EventCreationHandler(object):
             event, context=context
         )
 
+        if self._ephemeral_events_enabled:
+            # If there's an expiry timestamp on the event, schedule its expiry.
+            self._message_handler.maybe_schedule_expiry(event)
+
         yield self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id)
 
         def _notify():
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 2737a1d3ae..79c91fe284 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -130,6 +130,8 @@ class EventsStore(
         if self.hs.config.redaction_retention_period is not None:
             hs.get_clock().looping_call(_censor_redactions, 5 * 60 * 1000)
 
+        self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
+
     @defer.inlineCallbacks
     def _read_forward_extremities(self):
         def fetch(txn):
@@ -940,6 +942,12 @@ class EventsStore(
                     txn, event.event_id, labels, event.room_id, event.depth
                 )
 
+            if self._ephemeral_messages_enabled:
+                # If there's an expiry timestamp on the event, store it.
+                expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
+                if isinstance(expiry_ts, int) and not event.is_state():
+                    self._insert_event_expiry_txn(txn, event.event_id, expiry_ts)
+
         # Insert into the room_memberships table.
         self._store_room_members_txn(
             txn,
@@ -1101,12 +1109,7 @@ class EventsStore(
         def _update_censor_txn(txn):
             for redaction_id, event_id, pruned_json in updates:
                 if pruned_json:
-                    self._simple_update_one_txn(
-                        txn,
-                        table="event_json",
-                        keyvalues={"event_id": event_id},
-                        updatevalues={"json": pruned_json},
-                    )
+                    self._censor_event_txn(txn, event_id, pruned_json)
 
                 self._simple_update_one_txn(
                     txn,
@@ -1117,6 +1120,22 @@ class EventsStore(
 
         yield self.runInteraction("_update_censor_txn", _update_censor_txn)
 
+    def _censor_event_txn(self, txn, event_id, pruned_json):
+        """Censor an event by replacing its JSON in the event_json table with the
+        provided pruned JSON.
+
+        Args:
+            txn (LoggingTransaction): The database transaction.
+            event_id (str): The ID of the event to censor.
+            pruned_json (str): The pruned JSON
+        """
+        self._simple_update_one_txn(
+            txn,
+            table="event_json",
+            keyvalues={"event_id": event_id},
+            updatevalues={"json": pruned_json},
+        )
+
     @defer.inlineCallbacks
     def count_daily_messages(self):
         """
@@ -1957,6 +1976,101 @@ class EventsStore(
             ],
         )
 
+    def _insert_event_expiry_txn(self, txn, event_id, expiry_ts):
+        """Save the expiry timestamp associated with a given event ID.
+
+        Args:
+            txn (LoggingTransaction): The database transaction to use.
+            event_id (str): The event ID the expiry timestamp is associated with.
+            expiry_ts (int): The timestamp at which to expire (delete) the event.
+        """
+        return self._simple_insert_txn(
+            txn=txn,
+            table="event_expiry",
+            values={"event_id": event_id, "expiry_ts": expiry_ts},
+        )
+
+    @defer.inlineCallbacks
+    def expire_event(self, event_id):
+        """Retrieve and expire an event that has expired, and delete its associated
+        expiry timestamp. If the event can't be retrieved, delete its associated
+        timestamp so we don't try to expire it again in the future.
+
+        Args:
+             event_id (str): The ID of the event to delete.
+        """
+        # Try to retrieve the event's content from the database or the event cache.
+        event = yield self.get_event(event_id)
+
+        def delete_expired_event_txn(txn):
+            # Delete the expiry timestamp associated with this event from the database.
+            self._delete_event_expiry_txn(txn, event_id)
+
+            if not event:
+                # If we can't find the event, log a warning and delete the expiry date
+                # from the database so that we don't try to expire it again in the
+                # future.
+                logger.warning(
+                    "Can't expire event %s because we don't have it.", event_id
+                )
+                return
+
+            # Prune the event's dict then convert it to JSON.
+            pruned_json = encode_json(prune_event_dict(event.get_dict()))
+
+            # Update the event_json table to replace the event's JSON with the pruned
+            # JSON.
+            self._censor_event_txn(txn, event.event_id, pruned_json)
+
+            # We need to invalidate the event cache entry for this event because we
+            # changed its content in the database. We can't call
+            # self._invalidate_cache_and_stream because self.get_event_cache isn't of the
+            # right type.
+            txn.call_after(self._get_event_cache.invalidate, (event.event_id,))
+            # Send that invalidation to replication so that other workers also invalidate
+            # the event cache.
+            self._send_invalidation_to_replication(
+                txn, "_get_event_cache", (event.event_id,)
+            )
+
+        yield self.runInteraction("delete_expired_event", delete_expired_event_txn)
+
+    def _delete_event_expiry_txn(self, txn, event_id):
+        """Delete the expiry timestamp associated with an event ID without deleting the
+        actual event.
+
+        Args:
+            txn (LoggingTransaction): The transaction to use to perform the deletion.
+            event_id (str): The event ID to delete the associated expiry timestamp of.
+        """
+        return self._simple_delete_txn(
+            txn=txn, table="event_expiry", keyvalues={"event_id": event_id}
+        )
+
+    def get_next_event_to_expire(self):
+        """Retrieve the entry with the lowest expiry timestamp in the event_expiry
+        table, or None if there are no more events to expire.
+
+        Returns: Deferred[Optional[Tuple[str, int]]]
+            A tuple containing the event ID as its first element and an expiry timestamp
+            as its second one, if there's at least one row in the event_expiry table.
+            None otherwise.
+        """
+
+        def get_next_event_to_expire_txn(txn):
+            txn.execute(
+                """
+                SELECT event_id, expiry_ts FROM event_expiry
+                ORDER BY expiry_ts ASC LIMIT 1
+                """
+            )
+
+            return txn.fetchone()
+
+        return self.runInteraction(
+            desc="get_next_event_to_expire", func=get_next_event_to_expire_txn
+        )
+
 
 AllNewEventsResult = namedtuple(
     "AllNewEventsResult",
diff --git a/synapse/storage/data_stores/main/schema/delta/56/event_expiry.sql b/synapse/storage/data_stores/main/schema/delta/56/event_expiry.sql
new file mode 100644
index 0000000000..81a36a8b1d
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/56/event_expiry.sql
@@ -0,0 +1,21 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS event_expiry (
+    event_id TEXT PRIMARY KEY,
+    expiry_ts BIGINT NOT NULL
+);
+
+CREATE INDEX event_expiry_expiry_ts_idx ON event_expiry(expiry_ts);
diff --git a/tests/rest/client/test_ephemeral_message.py b/tests/rest/client/test_ephemeral_message.py
new file mode 100644
index 0000000000..5e9c07ebf3
--- /dev/null
+++ b/tests/rest/client/test_ephemeral_message.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.api.constants import EventContentFields, EventTypes
+from synapse.rest import admin
+from synapse.rest.client.v1 import room
+
+from tests import unittest
+
+
+class EphemeralMessageTestCase(unittest.HomeserverTestCase):
+
+    user_id = "@user:test"
+
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        config = self.default_config()
+
+        config["enable_ephemeral_messages"] = True
+
+        self.hs = self.setup_test_homeserver(config=config)
+        return self.hs
+
+    def prepare(self, reactor, clock, homeserver):
+        self.room_id = self.helper.create_room_as(self.user_id)
+
+    def test_message_expiry_no_delay(self):
+        """Tests that sending a message sent with a m.self_destruct_after field set to the
+        past results in that event being deleted right away.
+        """
+        # Send a message in the room that has expired. From here, the reactor clock is
+        # at 200ms, so 0 is in the past, and even if that wasn't the case and the clock
+        # is at 0ms the code path is the same if the event's expiry timestamp is the
+        # current timestamp.
+        res = self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={
+                "msgtype": "m.text",
+                "body": "hello",
+                EventContentFields.SELF_DESTRUCT_AFTER: 0,
+            },
+        )
+        event_id = res["event_id"]
+
+        # Check that we can't retrieve the content of the event.
+        event_content = self.get_event(self.room_id, event_id)["content"]
+        self.assertFalse(bool(event_content), event_content)
+
+    def test_message_expiry_delay(self):
+        """Tests that sending a message with a m.self_destruct_after field set to the
+        future results in that event not being deleted right away, but advancing the
+        clock to after that expiry timestamp causes the event to be deleted.
+        """
+        # Send a message in the room that'll expire in 1s.
+        res = self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={
+                "msgtype": "m.text",
+                "body": "hello",
+                EventContentFields.SELF_DESTRUCT_AFTER: self.clock.time_msec() + 1000,
+            },
+        )
+        event_id = res["event_id"]
+
+        # Check that we can retrieve the content of the event before it has expired.
+        event_content = self.get_event(self.room_id, event_id)["content"]
+        self.assertTrue(bool(event_content), event_content)
+
+        # Advance the clock to after the deletion.
+        self.reactor.advance(1)
+
+        # Check that we can't retrieve the content of the event anymore.
+        event_content = self.get_event(self.room_id, event_id)["content"]
+        self.assertFalse(bool(event_content), event_content)
+
+    def get_event(self, room_id, event_id, expected_code=200):
+        url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id)
+
+        request, channel = self.make_request("GET", url)
+        self.render(request)
+
+        self.assertEqual(channel.code, expected_code, channel.result)
+
+        return channel.json_body
-- 
cgit 1.4.1
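
A worked example (made-up timestamps) of the scheduling arithmetic in `_schedule_expiry_for_event` above, together with the event content shape that triggers it:

```python
now_ms = 1_575_000_000_000  # "current" server time, in milliseconds

content = {
    "msgtype": "m.text",
    "body": "this message will self-destruct",
    "org.matrix.self_destruct_after": now_ms + 60_000,  # expire in one minute
}

expiry_ts = content["org.matrix.self_destruct_after"]
delay = (expiry_ts - now_ms) / 1000  # 60.0 seconds, handed to clock.call_later
if delay < 0:  # callLater rejects negative delays, so clamp to "expire now"
    delay = 0
```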


From 418813b205b7b6f8c9d21f639d740fd2e1016ab7 Mon Sep 17 00:00:00 2001
From: Hubert Chathi 
Date: Tue, 3 Dec 2019 15:27:00 -0500
Subject: apply changes from review

---
 changelog.d/6451.bugfix                                           | 2 +-
 synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6451.bugfix b/changelog.d/6451.bugfix
index e678dc240b..23b67583ec 100644
--- a/changelog.d/6451.bugfix
+++ b/changelog.d/6451.bugfix
@@ -1 +1 @@
-Change the index on the `e2e_cross_signing_signatures` table to be non-unique.
+Fix uploading multiple cross signing signatures for the same user.
diff --git a/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql b/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql
index bee0a5da91..5c5fffcafb 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql
+++ b/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql
@@ -40,7 +40,8 @@ CREATE TABLE IF NOT EXISTS e2e_cross_signing_signatures (
     signature TEXT NOT NULL
 );
 
-CREATE INDEX e2e_cross_signing_signatures2_idx ON e2e_cross_signing_signatures(user_id, target_user_id, target_device_id);
+-- replaced by the index created in signing_keys_nonunique_signatures.sql
+-- CREATE UNIQUE INDEX e2e_cross_signing_signatures_idx ON e2e_cross_signing_signatures(user_id, target_user_id, target_device_id);
 
 -- stream of user signature updates
 CREATE TABLE IF NOT EXISTS user_signature_stream (
-- 
cgit 1.4.1
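
The migration named in the comment, `signing_keys_nonunique_signatures.sql`,
is not shown in this patch, but judging from the index line removed above it
presumably recreates the same three-column index without the `UNIQUE`
constraint:

```sql
-- Presumed contents of signing_keys_nonunique_signatures.sql (not shown in
-- this patch): the same index, but non-unique, so that several signatures
-- can be stored for the same (user, target user, target device) triple.
CREATE INDEX e2e_cross_signing_signatures2_idx ON e2e_cross_signing_signatures(user_id, target_user_id, target_device_id);
```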


From b62c9db8d77d76a962300aa740d2e61d57dc6888 Mon Sep 17 00:00:00 2001
From: Syam G Krishnan 
Date: Fri, 29 Nov 2019 23:06:44 +0530
Subject: Add changelog file

Signed-off-by: Syam G Krishnan 
---
 changelog.d/6406.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6406.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6406.bugfix b/changelog.d/6406.bugfix
new file mode 100644
index 0000000000..ca9bee084b
--- /dev/null
+++ b/changelog.d/6406.bugfix
@@ -0,0 +1 @@
+Fix bug: TypeError in `register_user()` while using LDAP auth module.
-- 
cgit 1.4.1


From 012087546227e566eb7234faae54ab7674e017de Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 4 Dec 2019 07:38:35 +0000
Subject: Fix exception when a cross-signed device is deleted (#6462)

(hopefully)

... and deobfuscate the relevant bit of code.
---
 changelog.d/6462.bugfix                            |  1 +
 .../storage/data_stores/main/end_to_end_keys.py    | 23 ++++++++++++++++++----
 2 files changed, 20 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6462.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6462.bugfix b/changelog.d/6462.bugfix
new file mode 100644
index 0000000000..c435939526
--- /dev/null
+++ b/changelog.d/6462.bugfix
@@ -0,0 +1 @@
+Fix bug which led to exceptions being thrown in a loop when a cross-signed device is deleted.
diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/data_stores/main/end_to_end_keys.py
index d8ad59ad93..643327b57b 100644
--- a/synapse/storage/data_stores/main/end_to_end_keys.py
+++ b/synapse/storage/data_stores/main/end_to_end_keys.py
@@ -145,13 +145,28 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
         txn.execute(signature_sql, signature_query_params)
         rows = self.cursor_to_dict(txn)
 
+        # add each cross-signing signature to the correct device in the result dict.
         for row in rows:
+            signing_user_id = row["user_id"]
+            signing_key_id = row["key_id"]
             target_user_id = row["target_user_id"]
             target_device_id = row["target_device_id"]
-            if target_user_id in result and target_device_id in result[target_user_id]:
-                result[target_user_id][target_device_id].setdefault(
-                    "signatures", {}
-                ).setdefault(row["user_id"], {})[row["key_id"]] = row["signature"]
+            signature = row["signature"]
+
+            target_user_result = result.get(target_user_id)
+            if not target_user_result:
+                continue
+
+            target_device_result = target_user_result.get(target_device_id)
+            if not target_device_result:
+                # note that target_device_result will be None for deleted devices.
+                continue
+
+            target_device_signatures = target_device_result.setdefault("signatures", {})
+            signing_user_signatures = target_device_signatures.setdefault(
+                signing_user_id, {}
+            )
+            signing_user_signatures[signing_key_id] = signature
 
         log_kv(result)
         return result
-- 
cgit 1.4.1
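
The deobfuscated loop above is easiest to read with the shape of `result` in
mind: signatures nest under the target device, keyed by signing user and then
signing key. With illustrative IDs only:

```python
# Shape of `result` after the loop (illustrative values):
result = {
    "@target:example.com": {                # target_user_id
        "TARGETDEVICEID": {                 # target_device_id
            # ... the device's own key data ...
            "signatures": {
                "@signer:example.com": {    # signing_user_id
                    "ed25519:SIGNKEYID": "<base64 signature>",  # signing_key_id
                },
            },
        },
    },
}
```

Deleted devices simply have no entry under their user, hence the early
`continue` instead of the old one-line `setdefault` chain.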


From cb0aeb147e3b3defc27866ad0e4982e63600a7ee Mon Sep 17 00:00:00 2001
From: Neil Johnson 
Date: Wed, 4 Dec 2019 09:46:16 +0000
Subject: privacy by default for room dir (#6355)

Ensure that the room directory is hidden from public view by default.
---
 UPGRADE.rst                               | 17 ++++++++++
 changelog.d/6354.feature                  |  1 +
 docs/sample_config.yaml                   | 13 ++++----
 synapse/config/server.py                  | 26 +++++++++-------
 tests/federation/transport/test_server.py | 52 +++++++++++++++++++++++++++++++
 5 files changed, 91 insertions(+), 18 deletions(-)
 create mode 100644 changelog.d/6354.feature
 create mode 100644 tests/federation/transport/test_server.py

(limited to 'changelog.d')

diff --git a/UPGRADE.rst b/UPGRADE.rst
index 5ebf16a73e..d9020f2663 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -75,6 +75,23 @@ for example:
      wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
      dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+Upgrading to v1.7.0
+===================
+
+In an attempt to configure Synapse in a privacy-preserving way, the default
+behaviours of ``allow_public_rooms_without_auth`` and
+``allow_public_rooms_over_federation`` have been inverted. This means that by
+default, only authenticated users querying the Client/Server API will be able
+to query the room directory, and relatedly that the server will not share
+room directory information with other servers over federation.
+
+If your installation does not explicitly set these settings one way or the other
+and you want either setting to be ``true``, then it will be necessary to update
+your homeserver configuration file accordingly.
+
+For more details on the surrounding context see our `explainer
+`_.
+
 
 Upgrading to v1.5.0
 ===================
diff --git a/changelog.d/6354.feature b/changelog.d/6354.feature
new file mode 100644
index 0000000000..fed9db884b
--- /dev/null
+++ b/changelog.d/6354.feature
@@ -0,0 +1 @@
+Configure privacy-preserving settings by default for the room directory.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index c7391f0c48..10664ae8f7 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -54,15 +54,16 @@ pid_file: DATADIR/homeserver.pid
 #
 #require_auth_for_profile_requests: true
 
-# If set to 'false', requires authentication to access the server's public rooms
-# directory through the client API. Defaults to 'true'.
+# If set to 'true', removes the need for authentication to access the server's
+# public rooms directory through the client API, meaning that anyone can
+# query the room directory. Defaults to 'false'.
 #
-#allow_public_rooms_without_auth: false
+#allow_public_rooms_without_auth: true
 
-# If set to 'false', forbids any other homeserver to fetch the server's public
-# rooms directory via federation. Defaults to 'true'.
+# If set to 'true', allows any other homeserver to fetch the server's public
+# rooms directory via federation. Defaults to 'false'.
 #
-#allow_public_rooms_over_federation: false
+#allow_public_rooms_over_federation: true
 
 # The default room version for newly created rooms.
 #
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 837fbe1582..a4bef00936 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -118,15 +118,16 @@ class ServerConfig(Config):
             self.allow_public_rooms_without_auth = False
             self.allow_public_rooms_over_federation = False
         else:
-            # If set to 'False', requires authentication to access the server's public
-            # rooms directory through the client API. Defaults to 'True'.
+            # If set to 'true', removes the need for authentication to access the server's
+            # public rooms directory through the client API, meaning that anyone can
+            # query the room directory. Defaults to 'false'.
             self.allow_public_rooms_without_auth = config.get(
-                "allow_public_rooms_without_auth", True
+                "allow_public_rooms_without_auth", False
             )
-            # If set to 'False', forbids any other homeserver to fetch the server's public
-            # rooms directory via federation. Defaults to 'True'.
+            # If set to 'true', allows any other homeserver to fetch the server's public
+            # rooms directory via federation. Defaults to 'false'.
             self.allow_public_rooms_over_federation = config.get(
-                "allow_public_rooms_over_federation", True
+                "allow_public_rooms_over_federation", False
             )
 
         default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)
@@ -620,15 +621,16 @@ class ServerConfig(Config):
         #
         #require_auth_for_profile_requests: true
 
-        # If set to 'false', requires authentication to access the server's public rooms
-        # directory through the client API. Defaults to 'true'.
+        # If set to 'true', removes the need for authentication to access the server's
+        # public rooms directory through the client API, meaning that anyone can
+        # query the room directory. Defaults to 'false'.
         #
-        #allow_public_rooms_without_auth: false
+        #allow_public_rooms_without_auth: true
 
-        # If set to 'false', forbids any other homeserver to fetch the server's public
-        # rooms directory via federation. Defaults to 'true'.
+        # If set to 'true', allows any other homeserver to fetch the server's public
+        # rooms directory via federation. Defaults to 'false'.
         #
-        #allow_public_rooms_over_federation: false
+        #allow_public_rooms_over_federation: true
 
         # The default room version for newly created rooms.
         #
diff --git a/tests/federation/transport/test_server.py b/tests/federation/transport/test_server.py
new file mode 100644
index 0000000000..27d83bb7d9
--- /dev/null
+++ b/tests/federation/transport/test_server.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+
+from synapse.config.ratelimiting import FederationRateLimitConfig
+from synapse.federation.transport import server
+from synapse.util.ratelimitutils import FederationRateLimiter
+
+from tests import unittest
+from tests.unittest import override_config
+
+
+class RoomDirectoryFederationTests(unittest.HomeserverTestCase):
+    def prepare(self, reactor, clock, homeserver):
+        class Authenticator(object):
+            def authenticate_request(self, request, content):
+                return defer.succeed("otherserver.nottld")
+
+        ratelimiter = FederationRateLimiter(clock, FederationRateLimitConfig())
+        server.register_servlets(
+            homeserver, self.resource, Authenticator(), ratelimiter
+        )
+
+    @override_config({"allow_public_rooms_over_federation": False})
+    def test_blocked_public_room_list_over_federation(self):
+        request, channel = self.make_request(
+            "GET", "/_matrix/federation/v1/publicRooms"
+        )
+        self.render(request)
+        self.assertEquals(403, channel.code)
+
+    @override_config({"allow_public_rooms_over_federation": True})
+    def test_open_public_room_list_over_federation(self):
+        request, channel = self.make_request(
+            "GET", "/_matrix/federation/v1/publicRooms"
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code)
-- 
cgit 1.4.1
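
As the upgrade notes in this patch explain, deployments that relied on the old
defaults must now opt in explicitly. A minimal `homeserver.yaml` fragment
restoring the pre-1.7.0 behaviour (only set these if you actually want a
publicly readable room directory):

```yaml
# Restore the pre-1.7.0 defaults: anyone may read the room directory,
# both through the client API and over federation.
allow_public_rooms_without_auth: true
allow_public_rooms_over_federation: true
```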


From 768b84409b6ad516796d6452394ff1fe32b9abb1 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 4 Dec 2019 10:45:56 +0000
Subject: Update changelog.d/6449.bugfix

Co-Authored-By: Erik Johnston 
---
 changelog.d/6449.bugfix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6449.bugfix b/changelog.d/6449.bugfix
index ced16544c9..002f33c450 100644
--- a/changelog.d/6449.bugfix
+++ b/changelog.d/6449.bugfix
@@ -1 +1 @@
-Fix assumed missing state_groups index in synapse_port_db.
\ No newline at end of file
+Fix error when using synapse_port_db on a vanilla synapse db.
-- 
cgit 1.4.1


From c1ae453932da8b5761cd1644b4b0bbaa039ae6ab Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 4 Dec 2019 12:21:48 +0000
Subject: Markdownification and other fixes to CONTRIBUTING (#6461)

---
 .github/PULL_REQUEST_TEMPLATE.md |   8 +-
 CONTRIBUTING.md                  | 210 +++++++++++++++++++++++++++++++++++++++
 CONTRIBUTING.rst                 | 206 --------------------------------------
 changelog.d/6461.doc             |   1 +
 4 files changed, 215 insertions(+), 210 deletions(-)
 create mode 100644 CONTRIBUTING.md
 delete mode 100644 CONTRIBUTING.rst
 create mode 100644 changelog.d/6461.doc

(limited to 'changelog.d')

diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 8939fda67d..11fb05ca96 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,8 +1,8 @@
 ### Pull Request Checklist
 
-
+
 
 * [ ] Pull request is based on the develop branch
-* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#changelog)
-* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#sign-off)
-* [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#code-style))
+* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog)
+* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#sign-off)
+* [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#code-style))
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..c0091346f3
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,210 @@
+# Contributing code to Matrix
+
+Everyone is welcome to contribute code to Matrix
+(https://github.com/matrix-org), provided that they are willing to license
+their contributions under the same license as the project itself. We follow a
+simple 'inbound=outbound' model for contributions: the act of submitting an
+'inbound' contribution means that the contributor agrees to license the code
+under the same terms as the project's overall 'outbound' license - in our
+case, this is almost always Apache Software License v2 (see [LICENSE](LICENSE)).
+
+## How to contribute
+
+The preferred and easiest way to contribute changes to Matrix is to fork the
+relevant project on github, and then [create a pull request](
+https://help.github.com/articles/using-pull-requests/) to ask us to pull
+your changes into our repo.
+
+**The single biggest thing you need to know is: please base your changes on
+the develop branch - *not* master.**
+
+We use the master branch to track the most recent release, so that folks who
+blindly clone the repo and automatically check out master get something that
+works. Develop is the unstable branch where all the development actually
+happens: the workflow is that contributors should fork the develop branch to
+make a 'feature' branch for a particular contribution, and then make a pull
+request to merge this back into the matrix.org 'official' develop branch. We
+use github's pull request workflow to review the contribution, and either ask
+you to make any refinements needed or merge it and make them ourselves. The
+changes will then land on master when we next do a release.
+
+We use [Buildkite](https://buildkite.com/matrix-dot-org/synapse) for continuous
+integration. If your change breaks the build, this will be shown in GitHub, so
+please keep an eye on the pull request for feedback.
+
+To run unit tests in a local development environment, you can use:
+
+- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
+  for SQLite-backed Synapse on Python 3.5.
+- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
+- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
+  (requires a running local PostgreSQL with access to create databases).
+- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
+  (requires Docker). Entirely self-contained, recommended if you don't want to
+  set up PostgreSQL yourself.
+
+Docker images are available for running the integration tests (SyTest) locally,
+see the [documentation in the SyTest repo](
+https://github.com/matrix-org/sytest/blob/develop/docker/README.md) for more
+information.
+
+## Code style
+
+All Matrix projects have a well-defined code-style - and sometimes we've even
+got as far as documenting it... For instance, synapse's code style doc lives
+[here](docs/code_style.md).
+
+To facilitate meeting these criteria you can run `scripts-dev/lint.sh`
+locally. Since this runs the tools listed in the above document, you'll need
+Python 3.6 and each of those tools installed:
+
+```
+# Install the dependencies
+pip install -U black flake8 isort
+
+# Run the linter script
+./scripts-dev/lint.sh
+```
+
+**Note that the script does not just test/check, but also reformats code, so you
+may wish to ensure any new code is committed first**. By default this script
+checks all files and can take some time; if you alter only certain files, you
+might wish to specify paths as arguments to reduce the run-time:
+
+```
+./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
+```
+
+Before pushing new changes, ensure they don't produce linting errors. Commit any
+files that were corrected.
+
+Please ensure your changes match the cosmetic style of the existing project,
+and **never** mix cosmetic and functional changes in the same commit, as it
+makes it horribly hard to review otherwise.
+
+
+## Changelog
+
+All changes, even minor ones, need a corresponding changelog / newsfragment
+entry. These are managed by [Towncrier](https://github.com/hawkowl/towncrier).
+
+To create a changelog entry, make a new file in the `changelog.d` directory named
+in the format of `PRnumber.type`. The type can be one of the following:
+
+* `feature`
+* `bugfix`
+* `docker` (for updates to the Docker image)
+* `doc` (for updates to the documentation)
+* `removal` (also used for deprecations)
+* `misc` (for internal-only changes)
+
+The content of the file is your changelog entry, which should be a short
+description of your change in the same style as the rest of our [changelog](
+https://github.com/matrix-org/synapse/blob/master/CHANGES.md). The file can
+contain Markdown formatting, and should end with a full stop ('.') for
+consistency.
+
+Adding credits to the changelog is encouraged; we value your
+contributions and would like to have you shouted out in the release notes!
+
+For example, a fix in PR #1234 would have its changelog entry in
+`changelog.d/1234.bugfix`, and contain content like "The security levels of
+Florbs are now validated when received over federation. Contributed by Jane
+Matrix.".
+
+## Debian changelog
+
+Changes which affect the debian packaging files (in `debian`) are an
+exception.
+
+In this case, you will need to add an entry to the debian changelog for the
+next release. For this, run the following command:
+
+```
+dch
+```
+
+This will make up a new version number (if there isn't already an unreleased
+version in flight), and open an editor where you can add a new changelog entry.
+(Our release process will ensure that the version number and maintainer name is
+corrected for the release.)
+
+If your change affects both the debian packaging *and* files outside the debian
+directory, you will need both a regular newsfragment *and* an entry in the
+debian changelog. (Though typically such changes should be submitted as two
+separate pull requests.)
+
+## Sign off
+
+In order to have a concrete record that your contribution is intentional
+and you agree to license it under the same terms as the project's license, we've adopted the
+same lightweight approach that the Linux Kernel
+[submitting patches process](
+https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin),
+[Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
+projects use: the DCO (Developer Certificate of Origin:
+http://developercertificate.org/). This is a simple declaration that you wrote
+the contribution or otherwise have the right to contribute it to Matrix:
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+If you agree to this for your contribution, then all that's needed is to
+include the line in your commit or pull request comment:
+
+```
+Signed-off-by: Your Name 
+```
+
+We accept contributions under a legally identifiable name, such as
+your name on government documentation or common-law names (names
+claimed by legitimate usage or repute). Unfortunately, we cannot
+accept anonymous contributions at this time.
+
+Git allows you to add this signoff automatically when using the `-s`
+flag to `git commit`, which uses the name and email set in your
+`user.name` and `user.email` git configs.
+
+## Conclusion
+
+That's it! Matrix is a very open and collaborative project as you might expect
+given our obsession with open communication. If we're going to successfully
+matrix together all the fragmented communication technologies out there we are
+reliant on contributions and collaboration from the community to do so. So
+please get involved - and we hope you have as much fun hacking on Matrix as we
+do!
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
deleted file mode 100644
index df81f6e54f..0000000000
--- a/CONTRIBUTING.rst
+++ /dev/null
@@ -1,206 +0,0 @@
-Contributing code to Matrix
-===========================
-
-Everyone is welcome to contribute code to Matrix
-(https://github.com/matrix-org), provided that they are willing to license
-their contributions under the same license as the project itself. We follow a
-simple 'inbound=outbound' model for contributions: the act of submitting an
-'inbound' contribution means that the contributor agrees to license the code
-under the same terms as the project's overall 'outbound' license - in our
-case, this is almost always Apache Software License v2 (see LICENSE).
-
-How to contribute
-~~~~~~~~~~~~~~~~~
-
-The preferred and easiest way to contribute changes to Matrix is to fork the
-relevant project on github, and then create a pull request to ask us to pull
-your changes into our repo
-(https://help.github.com/articles/using-pull-requests/)
-
-**The single biggest thing you need to know is: please base your changes on
-the develop branch - /not/ master.**
-
-We use the master branch to track the most recent release, so that folks who
-blindly clone the repo and automatically check out master get something that
-works. Develop is the unstable branch where all the development actually
-happens: the workflow is that contributors should fork the develop branch to
-make a 'feature' branch for a particular contribution, and then make a pull
-request to merge this back into the matrix.org 'official' develop branch. We
-use github's pull request workflow to review the contribution, and either ask
-you to make any refinements needed or merge it and make them ourselves. The
-changes will then land on master when we next do a release.
-
-We use `Buildkite `_ for
-continuous integration.  Buildkite builds need to be authorised by a
-maintainer. If your change breaks the build, this will be shown in GitHub, so
-please keep an eye on the pull request for feedback.
-
-To run unit tests in a local development environment, you can use:
-
-- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
-  for SQLite-backed Synapse on Python 3.5.
-- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
-- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
-  (requires a running local PostgreSQL with access to create databases).
-- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
-  (requires Docker). Entirely self-contained, recommended if you don't want to
-  set up PostgreSQL yourself.
-
-Docker images are available for running the integration tests (SyTest) locally,
-see the `documentation in the SyTest repo
-`_ for more
-information.
-
-Code style
-~~~~~~~~~~
-
-All Matrix projects have a well-defined code-style - and sometimes we've even
-got as far as documenting it... For instance, synapse's code style doc lives
-at https://github.com/matrix-org/synapse/tree/master/docs/code_style.md.
-
-To facilitate meeting these criteria you can run ``scripts-dev/lint.sh``
-locally. Since this runs the tools listed in the above document, you'll need
-python 3.6 and to install each tool. **Note that the script does not just
-test/check, but also reformats code, so you may wish to ensure any new code is
-committed first**. By default this script checks all files and can take some
-time; if you alter only certain files, you might wish to specify paths as
-arguments to reduce the run-time.
-
-Please ensure your changes match the cosmetic style of the existing project,
-and **never** mix cosmetic and functional changes in the same commit, as it
-makes it horribly hard to review otherwise.
-
-Before doing a commit, ensure the changes you've made don't produce
-linting errors. You can do this by running the linters as follows. Ensure to
-commit any files that were corrected.
-
-::
-    # Install the dependencies
-    pip install -U black flake8 isort
-    
-    # Run the linter script
-    ./scripts-dev/lint.sh
-
-Changelog
-~~~~~~~~~
-
-All changes, even minor ones, need a corresponding changelog / newsfragment
-entry. These are managed by Towncrier
-(https://github.com/hawkowl/towncrier).
-
-To create a changelog entry, make a new file in the ``changelog.d`` file named
-in the format of ``PRnumber.type``. The type can be one of the following:
-
-* ``feature``.
-* ``bugfix``.
-* ``docker`` (for updates to the Docker image).
-* ``doc`` (for updates to the documentation).
-* ``removal`` (also used for deprecations).
-* ``misc`` (for internal-only changes).
-
-The content of the file is your changelog entry, which should be a short
-description of your change in the same style as the rest of our `changelog
-`_. The file can
-contain Markdown formatting, and should end with a full stop ('.') for
-consistency.
-
-Adding credits to the changelog is encouraged, we value your
-contributions and would like to have you shouted out in the release notes!
-
-For example, a fix in PR #1234 would have its changelog entry in
-``changelog.d/1234.bugfix``, and contain content like "The security levels of
-Florbs are now validated when recieved over federation. Contributed by Jane
-Matrix.".
-
-Debian changelog
-----------------
-
-Changes which affect the debian packaging files (in ``debian``) are an
-exception.
-
-In this case, you will need to add an entry to the debian changelog for the
-next release. For this, run the following command::
-
-  dch
-
-This will make up a new version number (if there isn't already an unreleased
-version in flight), and open an editor where you can add a new changelog entry.
-(Our release process will ensure that the version number and maintainer name is
-corrected for the release.)
-
-If your change affects both the debian packaging *and* files outside the debian
-directory, you will need both a regular newsfragment *and* an entry in the
-debian changelog. (Though typically such changes should be submitted as two
-separate pull requests.)
-
-Sign off
-~~~~~~~~
-
-In order to have a concrete record that your contribution is intentional
-and you agree to license it under the same terms as the project's license, we've adopted the
-same lightweight approach that the Linux Kernel
-`submitting patches process `_, Docker
-(https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
-projects use: the DCO (Developer Certificate of Origin:
-http://developercertificate.org/). This is a simple declaration that you wrote
-the contribution or otherwise have the right to contribute it to Matrix::
-
-    Developer Certificate of Origin
-    Version 1.1
-
-    Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-    660 York Street, Suite 102,
-    San Francisco, CA 94110 USA
-
-    Everyone is permitted to copy and distribute verbatim copies of this
-    license document, but changing it is not allowed.
-
-    Developer's Certificate of Origin 1.1
-
-    By making a contribution to this project, I certify that:
-
-    (a) The contribution was created in whole or in part by me and I
-        have the right to submit it under the open source license
-        indicated in the file; or
-
-    (b) The contribution is based upon previous work that, to the best
-        of my knowledge, is covered under an appropriate open source
-        license and I have the right under that license to submit that
-        work with modifications, whether created in whole or in part
-        by me, under the same open source license (unless I am
-        permitted to submit under a different license), as indicated
-        in the file; or
-
-    (c) The contribution was provided directly to me by some other
-        person who certified (a), (b) or (c) and I have not modified
-        it.
-
-    (d) I understand and agree that this project and the contribution
-        are public and that a record of the contribution (including all
-        personal information I submit with it, including my sign-off) is
-        maintained indefinitely and may be redistributed consistent with
-        this project or the open source license(s) involved.
-
-If you agree to this for your contribution, then all that's needed is to
-include the line in your commit or pull request comment::
-
-    Signed-off-by: Your Name 
-
-We accept contributions under a legally identifiable name, such as
-your name on government documentation or common-law names (names
-claimed by legitimate usage or repute). Unfortunately, we cannot
-accept anonymous contributions at this time.
-
-Git allows you to add this signoff automatically when using the ``-s``
-flag to ``git commit``, which uses the name and email set in your
-``user.name`` and ``user.email`` git configs.
-
-Conclusion
-~~~~~~~~~~
-
-That's it!  Matrix is a very open and collaborative project as you might expect
-given our obsession with open communication.  If we're going to successfully
-matrix together all the fragmented communication technologies out there we are
-reliant on contributions and collaboration from the community to do so.  So
-please get involved - and we hope you have as much fun hacking on Matrix as we
-do!
diff --git a/changelog.d/6461.doc b/changelog.d/6461.doc
new file mode 100644
index 0000000000..1502fa2855
--- /dev/null
+++ b/changelog.d/6461.doc
@@ -0,0 +1 @@
+Convert CONTRIBUTING.rst to markdown (among other small fixes).
\ No newline at end of file
-- 
cgit 1.4.1
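
In practice the workflow described in the new CONTRIBUTING.md condenses to a
few commands. A sketch for a hypothetical bugfix PR #1234 (the file paths and
commit message are illustrative):

```sh
# Newsfragment: named <PRnumber>.<type>, content is the changelog entry.
echo "The security levels of Florbs are now validated when received over federation. Contributed by Jane Matrix." \
    > changelog.d/1234.bugfix

# Lint just the files you touched (note: the script also reformats them).
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py

# Commit with a DCO sign-off (-s uses your git user.name/user.email).
git commit -s -m "Validate Florb security levels"
```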


From 08a436ecb25de2c4c8f2daf423bfcaf72e985143 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 4 Dec 2019 14:18:46 +0000
Subject: Incorporate review

---
 changelog.d/6329.bugfix  | 1 +
 changelog.d/6329.feature | 1 -
 synapse/handlers/room.py | 3 +--
 3 files changed, 2 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6329.bugfix
 delete mode 100644 changelog.d/6329.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6329.bugfix b/changelog.d/6329.bugfix
new file mode 100644
index 0000000000..e558d13b7d
--- /dev/null
+++ b/changelog.d/6329.bugfix
@@ -0,0 +1 @@
+Correctly apply the event filter to the `state`, `events_before` and `events_after` fields in the response to `/context` requests.
\ No newline at end of file
diff --git a/changelog.d/6329.feature b/changelog.d/6329.feature
deleted file mode 100644
index c27dbb06a4..0000000000
--- a/changelog.d/6329.feature
+++ /dev/null
@@ -1 +0,0 @@
-Filter `state`, `events_before` and `events_after` in `/context` requests.
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 3148df0de9..fd3ea8daf8 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -908,12 +908,11 @@ class RoomContextHandler(object):
             [last_event_id], state_filter=state_filter
         )
 
-        # Apply the filter on state events.
         state_events = list(state[last_event_id].values())
         if event_filter:
             state_events = event_filter.filter(state_events)
 
-        results["state"] = list(state_events)
+        results["state"] = state_events
 
         # We use a dummy token here as we only care about the room portion of
         # the token, which we replace.
-- 
cgit 1.4.1


From a785a2febe6783bfa800504c6750028bc61c84ea Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 3 Dec 2019 14:32:00 +0000
Subject: Newsfile

---
 changelog.d/6454.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6454.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6454.misc b/changelog.d/6454.misc
new file mode 100644
index 0000000000..9e5259157c
--- /dev/null
+++ b/changelog.d/6454.misc
@@ -0,0 +1 @@
+Move data store specific code out of `SQLBaseStore`.
-- 
cgit 1.4.1


From 685fae1ba5a173279faf2b89cad62798c60d3aec Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 4 Dec 2019 16:25:34 +0000
Subject: Newsfile

---
 changelog.d/6464.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6464.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6464.misc b/changelog.d/6464.misc
new file mode 100644
index 0000000000..bd65276ef6
--- /dev/null
+++ b/changelog.d/6464.misc
@@ -0,0 +1 @@
+Prepare SQLBaseStore functions being moved out of the stores.
-- 
cgit 1.4.1


From e203874caaae2a378ccbb6b827b6847b3d9a06b8 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 4 Dec 2019 17:27:32 +0000
Subject: get rid of (most of) have_events from
 _update_auth_events_and_context_for_auth (#6468)

have_events was a map from event_id to rejection reason (or None) for events
which are in our local database. It was used as a filter on the list of
event_ids being passed into get_events_as_list. However, since
get_events_as_list will ignore any event_ids that are unknown or rejected, we
can equivalently just leave it to get_events_as_list to do the filtering.

That means that we don't have to keep `have_events` up-to-date, and can use
`have_seen_events` instead of `get_seen_events_with_rejection` in the one place
we do need it.
---
 changelog.d/6468.misc                             |  1 +
 synapse/handlers/federation.py                    | 62 +++++++++--------------
 synapse/storage/data_stores/main/events_worker.py | 34 -------------
 3 files changed, 25 insertions(+), 72 deletions(-)
 create mode 100644 changelog.d/6468.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6468.misc b/changelog.d/6468.misc
new file mode 100644
index 0000000000..d9a44389b9
--- /dev/null
+++ b/changelog.d/6468.misc
@@ -0,0 +1 @@
+Refactor some code in the event authentication path for clarity.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index d9d0cd9eef..7784b80b77 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -2122,14 +2122,9 @@ class FederationHandler(BaseHandler):
         #
         # we start by checking if they are in the store, and then try calling /event_auth/.
         if missing_auth:
-            # TODO: can we use store.have_seen_events here instead?
-            have_events = yield self.store.get_seen_events_with_rejections(missing_auth)
-            logger.debug("Found events %s in the store", have_events)
-            missing_auth.difference_update(have_events.keys())
-        else:
-            have_events = {}
-
-        have_events.update({e.event_id: "" for e in auth_events.values()})
+            have_events = yield self.store.have_seen_events(missing_auth)
+            logger.debug("Events %s are in the store", have_events)
+            missing_auth.difference_update(have_events)
 
         if missing_auth:
             # If we don't have all the auth events, we need to get them.
@@ -2175,9 +2170,6 @@ class FederationHandler(BaseHandler):
                     except AuthError:
                         pass
 
-                have_events = yield self.store.get_seen_events_with_rejections(
-                    event.auth_event_ids()
-                )
             except Exception:
                 logger.exception("Failed to get auth chain")
 
@@ -2207,39 +2199,33 @@ class FederationHandler(BaseHandler):
         # idea of them.
 
         room_version = yield self.store.get_room_version(event.room_id)
-        different_event_ids = [
-            d for d in different_auth if d in have_events and not have_events[d]
-        ]
 
-        if different_event_ids:
-            # XXX: currently this checks for redactions but I'm not convinced that is
-            # necessary?
-            different_events = yield self.store.get_events_as_list(different_event_ids)
+        # XXX: currently this checks for redactions but I'm not convinced that is
+        # necessary?
+        different_events = yield self.store.get_events_as_list(different_auth)
 
-            local_view = dict(auth_events)
-            remote_view = dict(auth_events)
-            remote_view.update({(d.type, d.state_key): d for d in different_events})
+        local_view = dict(auth_events)
+        remote_view = dict(auth_events)
+        remote_view.update({(d.type, d.state_key): d for d in different_events})
 
-            new_state = yield self.state_handler.resolve_events(
-                room_version,
-                [list(local_view.values()), list(remote_view.values())],
-                event,
-            )
+        new_state = yield self.state_handler.resolve_events(
+            room_version, [list(local_view.values()), list(remote_view.values())], event
+        )
 
-            logger.info(
-                "After state res: updating auth_events with new state %s",
-                {
-                    (d.type, d.state_key): d.event_id
-                    for d in new_state.values()
-                    if auth_events.get((d.type, d.state_key)) != d
-                },
-            )
+        logger.info(
+            "After state res: updating auth_events with new state %s",
+            {
+                (d.type, d.state_key): d.event_id
+                for d in new_state.values()
+                if auth_events.get((d.type, d.state_key)) != d
+            },
+        )
 
-            auth_events.update(new_state)
+        auth_events.update(new_state)
 
-            context = yield self._update_context_for_auth_events(
-                event, context, auth_events
-            )
+        context = yield self._update_context_for_auth_events(
+            event, context, auth_events
+        )
 
         return context
 
diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py
index e782e8f481..eaddca65b7 100644
--- a/synapse/storage/data_stores/main/events_worker.py
+++ b/synapse/storage/data_stores/main/events_worker.py
@@ -783,40 +783,6 @@ class EventsWorkerStore(SQLBaseStore):
             yield self.runInteraction("have_seen_events", have_seen_events_txn, chunk)
         return results
 
-    def get_seen_events_with_rejections(self, event_ids):
-        """Given a list of event ids, check if we rejected them.
-
-        Args:
-            event_ids (list[str])
-
-        Returns:
-            Deferred[dict[str, str|None):
-                Has an entry for each event id we already have seen. Maps to
-                the rejected reason string if we rejected the event, else maps
-                to None.
-        """
-        if not event_ids:
-            return defer.succeed({})
-
-        def f(txn):
-            sql = (
-                "SELECT e.event_id, reason FROM events as e "
-                "LEFT JOIN rejections as r ON e.event_id = r.event_id "
-                "WHERE e.event_id = ?"
-            )
-
-            res = {}
-            for event_id in event_ids:
-                txn.execute(sql, (event_id,))
-                row = txn.fetchone()
-                if row:
-                    _, rejected = row
-                    res[event_id] = rejected
-
-            return res
-
-        return self.runInteraction("get_seen_events_with_rejections", f)
-
     def _get_total_state_event_counts_txn(self, txn, room_id):
         """
         See get_total_state_event_counts.
-- 
cgit 1.4.1
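
The crux of the refactor is the return shape: `get_seen_events_with_rejections`
mapped each seen event ID to its rejection reason (or `None`), while
`have_seen_events` reports only which IDs are known, which is all this caller
needs. With illustrative values:

```python
# Old shape (get_seen_events_with_rejections): event_id -> reason or None
seen_with_rejections = {
    "$event_a": None,          # seen and accepted
    "$event_b": "auth_error",  # seen but rejected (illustrative reason)
}

# New shape (have_seen_events): just the IDs we have seen; rejected and
# unknown events are filtered downstream by get_events_as_list anyway.
have_seen = {"$event_a", "$event_b"}

missing_auth = {"$event_a", "$event_c"}
missing_auth.difference_update(have_seen)
assert missing_auth == {"$event_c"}
```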


From 02c1f36ccd5676922f718e82bb2bce0e9cb77ca8 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 4 Dec 2019 17:49:28 +0000
Subject: Changelog

---
 changelog.d/6470.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6470.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6470.bugfix b/changelog.d/6470.bugfix
new file mode 100644
index 0000000000..c08b34c14c
--- /dev/null
+++ b/changelog.d/6470.bugfix
@@ -0,0 +1 @@
+Fix `synapse_port_db` not exiting with a 0 code if something went wrong during the port process.
-- 
cgit 1.4.1


From 6dcd6c40a05b55dad420f7dce13f145e09992f00 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 5 Dec 2019 10:51:26 +0000
Subject: Newsfile

---
 changelog.d/6469.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6469.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6469.misc b/changelog.d/6469.misc
new file mode 100644
index 0000000000..32216b9046
--- /dev/null
+++ b/changelog.d/6469.misc
@@ -0,0 +1 @@
+Move per database functionality out of the data stores and into a dedicated `Database` class.
-- 
cgit 1.4.1


From ba7af15d4eb88712742edbf129667996cf3a59b3 Mon Sep 17 00:00:00 2001
From: Clifford Garwood II 
Date: Thu, 5 Dec 2019 08:13:47 -0500
Subject: Modify systemd unit file reference to align with installation
 instruction (#6369)

Signed-off-by: Clifford Garwood II cliff@cigii.com
---
 changelog.d/6369.doc                   |  1 +
 contrib/systemd/README.md              | 17 +++++++++++++++++
 contrib/systemd/matrix-synapse.service |  7 +++++--
 3 files changed, 23 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6369.doc
 create mode 100644 contrib/systemd/README.md

(limited to 'changelog.d')

diff --git a/changelog.d/6369.doc b/changelog.d/6369.doc
new file mode 100644
index 0000000000..6db351d7db
--- /dev/null
+++ b/changelog.d/6369.doc
@@ -0,0 +1 @@
+Update documentation and variables in user contributed systemd reference file.
diff --git a/contrib/systemd/README.md b/contrib/systemd/README.md
new file mode 100644
index 0000000000..5d42b3464f
--- /dev/null
+++ b/contrib/systemd/README.md
@@ -0,0 +1,17 @@
+# Setup Synapse with Systemd
+This is a setup for managing Synapse with a user-contributed systemd unit
+file. It provides a `matrix-synapse` systemd unit file that should be tailored
+to your installation in accordance with the [installation instructions](../../INSTALL.md).
+
+## Setup
+1. Under the service section, ensure the `User` variable matches the user
+you installed Synapse under and wish to run it as.
+2. Under the service section, ensure the `WorkingDirectory` variable matches
+where you have installed synapse.
+3. Under the service section, ensure the `ExecStart` variable matches the
+appropriate locations of your installation.
+4. Copy the `matrix-synapse.service` to `/etc/systemd/system/`
+5. Start Synapse: `sudo systemctl start matrix-synapse`
+6. Verify Synapse is running: `sudo systemctl status matrix-synapse`
+7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse`
diff --git a/contrib/systemd/matrix-synapse.service b/contrib/systemd/matrix-synapse.service
index 38d369ea3d..bd492544b6 100644
--- a/contrib/systemd/matrix-synapse.service
+++ b/contrib/systemd/matrix-synapse.service
@@ -4,8 +4,11 @@
 #    systemctl enable matrix-synapse
 #    systemctl start matrix-synapse
 #
+# This assumes that Synapse has been installed by a user named
+# synapse.
+#
 # This assumes that Synapse has been installed in a virtualenv in
-# /opt/synapse/env.
+# the user's home directory: `/home/synapse/synapse/env`.
 #
 # **NOTE:** This is an example service file that may change in the future. If you
 # wish to use this please copy rather than symlink it.
@@ -23,7 +26,7 @@ User=synapse
 Group=nogroup
 
 WorkingDirectory=/opt/synapse
-ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
+ExecStart=/home/synapse/synapse/env/bin/python -m synapse.app.homeserver --config-path=/home/synapse/synapse/homeserver.yaml
 SyslogIdentifier=matrix-synapse
 
 # adjust the cache factor if necessary
-- 
cgit 1.4.1
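
The numbered steps in the README above condense to the following, assuming you
run them from a checkout of the Synapse repository and have already tailored
the unit file to your installation:

```sh
sudo cp contrib/systemd/matrix-synapse.service /etc/systemd/system/
sudo systemctl start matrix-synapse
sudo systemctl status matrix-synapse   # verify it is running
sudo systemctl enable matrix-synapse   # optional: start at system boot
```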


From e1f4c83f41bf6f06bef3d160eb94eacabe59eff1 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 5 Dec 2019 14:14:45 +0000
Subject: Sanity-check the rooms of auth events before pulling them in. (#6472)

---
 changelog.d/6472.bugfix        |  1 +
 synapse/handlers/federation.py | 34 +++++++++++++++++++++++++---------
 2 files changed, 26 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/6472.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6472.bugfix b/changelog.d/6472.bugfix
new file mode 100644
index 0000000000..598efb79fc
--- /dev/null
+++ b/changelog.d/6472.bugfix
@@ -0,0 +1 @@
+Improve sanity-checking when receiving events over federation.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 7784b80b77..f5d04cdf91 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -2195,21 +2195,37 @@ class FederationHandler(BaseHandler):
             different_auth,
         )
 
-        # now we state-resolve between our own idea of the auth events, and the remote's
-        # idea of them.
-
-        room_version = yield self.store.get_room_version(event.room_id)
-
         # XXX: currently this checks for redactions but I'm not convinced that is
         # necessary?
         different_events = yield self.store.get_events_as_list(different_auth)
 
-        local_view = dict(auth_events)
-        remote_view = dict(auth_events)
-        remote_view.update({(d.type, d.state_key): d for d in different_events})
+        for d in different_events:
+            if d.room_id != event.room_id:
+                logger.warning(
+                    "Event %s refers to auth_event %s which is in a different room",
+                    event.event_id,
+                    d.event_id,
+                )
+
+                # don't attempt to resolve the claimed auth events against our own
+                # in this case: just use our own auth events.
+                #
+                # XXX: should we reject the event in this case? It feels like we should,
+                # but then shouldn't we also do so if we've failed to fetch any of the
+                # auth events?
+                return context
 
+        # now we state-resolve between our own idea of the auth events, and the remote's
+        # idea of them.
+
+        local_state = auth_events.values()
+        remote_auth_events = dict(auth_events)
+        remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
+        remote_state = remote_auth_events.values()
+
+        room_version = yield self.store.get_room_version(event.room_id)
         new_state = yield self.state_handler.resolve_events(
-            room_version, [list(local_view.values()), list(remote_view.values())], event
+            room_version, (local_state, remote_state), event
         )
 
         logger.info(
-- 
cgit 1.4.1


From 63d6ad1064c1a5fe23da3b6b64474a2b211f5eea Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 5 Dec 2019 15:02:35 +0000
Subject: Stronger typing in the federation handler (#6480)

replace the event_info dict with an attrs thing
---
 changelog.d/6480.misc          |  1 +
 synapse/handlers/federation.py | 81 +++++++++++++++++++++++++++++-------------
 2 files changed, 58 insertions(+), 24 deletions(-)
 create mode 100644 changelog.d/6480.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6480.misc b/changelog.d/6480.misc
new file mode 100644
index 0000000000..d9a44389b9
--- /dev/null
+++ b/changelog.d/6480.misc
@@ -0,0 +1 @@
+Refactor some code in the event authentication path for clarity.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index f5d04cdf91..bc26921768 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -19,11 +19,13 @@
 
 import itertools
 import logging
+from typing import Dict, Iterable, Optional, Sequence, Tuple
 
 import six
 from six import iteritems, itervalues
 from six.moves import http_client, zip
 
+import attr
 from signedjson.key import decode_verify_key_bytes
 from signedjson.sign import verify_signed_json
 from unpaddedbase64 import decode_base64
@@ -45,6 +47,7 @@ from synapse.api.errors import (
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
 from synapse.crypto.event_signing import compute_event_signature
 from synapse.event_auth import auth_types_for_event
+from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.events.validator import EventValidator
 from synapse.logging.context import (
@@ -72,6 +75,23 @@ from ._base import BaseHandler
 logger = logging.getLogger(__name__)
 
 
+@attr.s
+class _NewEventInfo:
+    """Holds information about a received event, ready for passing to _handle_new_events
+
+    Attributes:
+        event: the received event
+
+        state: the state at that event
+
+        auth_events: the auth_event map for that event
+    """
+
+    event = attr.ib(type=EventBase)
+    state = attr.ib(type=Optional[Sequence[EventBase]], default=None)
+    auth_events = attr.ib(type=Optional[Dict[Tuple[str, str], EventBase]], default=None)
+
+
 def shortstr(iterable, maxitems=5):
     """If iterable has maxitems or fewer, return the stringification of a list
     containing those items.
@@ -597,14 +617,14 @@ class FederationHandler(BaseHandler):
                     for e in auth_chain
                     if e.event_id in auth_ids or e.type == EventTypes.Create
                 }
-                event_infos.append({"event": e, "auth_events": auth})
+                event_infos.append(_NewEventInfo(event=e, auth_events=auth))
                 seen_ids.add(e.event_id)
 
             logger.info(
                 "[%s %s] persisting newly-received auth/state events %s",
                 room_id,
                 event_id,
-                [e["event"].event_id for e in event_infos],
+                [e.event.event_id for e in event_infos],
             )
             yield self._handle_new_events(origin, event_infos)
 
@@ -795,9 +815,9 @@ class FederationHandler(BaseHandler):
 
             a.internal_metadata.outlier = True
             ev_infos.append(
-                {
-                    "event": a,
-                    "auth_events": {
+                _NewEventInfo(
+                    event=a,
+                    auth_events={
                         (
                             auth_events[a_id].type,
                             auth_events[a_id].state_key,
@@ -805,7 +825,7 @@ class FederationHandler(BaseHandler):
                         for a_id in a.auth_event_ids()
                         if a_id in auth_events
                     },
-                }
+                )
             )
 
         # Step 1b: persist the events in the chunk we fetched state for (i.e.
@@ -817,10 +837,10 @@ class FederationHandler(BaseHandler):
             assert not ev.internal_metadata.is_outlier()
 
             ev_infos.append(
-                {
-                    "event": ev,
-                    "state": events_to_state[e_id],
-                    "auth_events": {
+                _NewEventInfo(
+                    event=ev,
+                    state=events_to_state[e_id],
+                    auth_events={
                         (
                             auth_events[a_id].type,
                             auth_events[a_id].state_key,
@@ -828,7 +848,7 @@ class FederationHandler(BaseHandler):
                         for a_id in ev.auth_event_ids()
                         if a_id in auth_events
                     },
-                }
+                )
             )
 
         yield self._handle_new_events(dest, ev_infos, backfilled=True)
@@ -1713,7 +1733,12 @@ class FederationHandler(BaseHandler):
         return context
 
     @defer.inlineCallbacks
-    def _handle_new_events(self, origin, event_infos, backfilled=False):
+    def _handle_new_events(
+        self,
+        origin: str,
+        event_infos: Iterable[_NewEventInfo],
+        backfilled: bool = False,
+    ):
         """Creates the appropriate contexts and persists events. The events
         should not depend on one another, e.g. this should be used to persist
         a bunch of outliers, but not a chunk of individual events that depend
@@ -1723,14 +1748,14 @@ class FederationHandler(BaseHandler):
         """
 
         @defer.inlineCallbacks
-        def prep(ev_info):
-            event = ev_info["event"]
+        def prep(ev_info: _NewEventInfo):
+            event = ev_info.event
             with nested_logging_context(suffix=event.event_id):
                 res = yield self._prep_event(
                     origin,
                     event,
-                    state=ev_info.get("state"),
-                    auth_events=ev_info.get("auth_events"),
+                    state=ev_info.state,
+                    auth_events=ev_info.auth_events,
                     backfilled=backfilled,
                 )
             return res
@@ -1744,7 +1769,7 @@ class FederationHandler(BaseHandler):
 
         yield self.persist_events_and_notify(
             [
-                (ev_info["event"], context)
+                (ev_info.event, context)
                 for ev_info, context in zip(event_infos, contexts)
             ],
             backfilled=backfilled,
@@ -1846,7 +1871,14 @@ class FederationHandler(BaseHandler):
         yield self.persist_events_and_notify([(event, new_event_context)])
 
     @defer.inlineCallbacks
-    def _prep_event(self, origin, event, state, auth_events, backfilled):
+    def _prep_event(
+        self,
+        origin: str,
+        event: EventBase,
+        state: Optional[Iterable[EventBase]],
+        auth_events: Optional[Dict[Tuple[str, str], EventBase]],
+        backfilled: bool,
+    ):
         """
 
         Args:
@@ -1854,7 +1886,7 @@ class FederationHandler(BaseHandler):
             event:
             state:
             auth_events:
-            backfilled (bool)
+            backfilled:
 
         Returns:
             Deferred, which resolves to synapse.events.snapshot.EventContext
@@ -1890,15 +1922,16 @@ class FederationHandler(BaseHandler):
         return context
 
     @defer.inlineCallbacks
-    def _check_for_soft_fail(self, event, state, backfilled):
+    def _check_for_soft_fail(
+        self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool
+    ):
         """Checks if we should soft fail the event, if so marks the event as
         such.
 
         Args:
-            event (FrozenEvent)
-            state (dict|None): The state at the event if we don't have all the
-                event's prev events
-            backfilled (bool): Whether the event is from backfill
+            event
+            state: The state at the event if we don't have all the event's prev events
+            backfilled: Whether the event is from backfill
 
         Returns:
             Deferred
-- 
cgit 1.4.1
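
The refactor above swaps ad-hoc dicts for a typed value class, so a
mistyped key such as ev_info["auth_event"] becomes an attribute error
that mypy and linters can catch. A minimal sketch of such a class,
assuming the attrs library (the real definition in
synapse/handlers/federation.py may carry more fields and validators):

    import attr

    @attr.s(slots=True, frozen=True)
    class _NewEventInfo:
        """Bundles an event with the state and auth events needed to
        persist it (sketch; the field values are EventBase objects and
        collections thereof in the real code)."""

        event = attr.ib()
        state = attr.ib(default=None)        # Optional[Sequence[EventBase]]
        auth_events = attr.ib(default=None)  # Optional[Dict[Tuple[str, str], EventBase]]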


From dc8747895ec026c365e687853b5ca12225fb881e Mon Sep 17 00:00:00 2001
From: Clifford Garwood II 
Date: Thu, 5 Dec 2019 08:13:47 -0500
Subject: Modify systemd unit file reference to align with installation
 instruction (#6369)

Signed-off-by: Clifford Garwood II cliff@cigii.com
---
 changelog.d/6369.doc                   |  1 +
 contrib/systemd/README.md              | 17 +++++++++++++++++
 contrib/systemd/matrix-synapse.service |  7 +++++--
 3 files changed, 23 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6369.doc
 create mode 100644 contrib/systemd/README.md

(limited to 'changelog.d')

diff --git a/changelog.d/6369.doc b/changelog.d/6369.doc
new file mode 100644
index 0000000000..6db351d7db
--- /dev/null
+++ b/changelog.d/6369.doc
@@ -0,0 +1 @@
+Update documentation and variables in user contributed systemd reference file.
diff --git a/contrib/systemd/README.md b/contrib/systemd/README.md
new file mode 100644
index 0000000000..5d42b3464f
--- /dev/null
+++ b/contrib/systemd/README.md
@@ -0,0 +1,17 @@
+# Setup Synapse with Systemd
+This is a setup for managing Synapse with a user-contributed systemd unit
+file. It provides a `matrix-synapse` systemd unit file that should be
+tailored to your installation, in accordance with the
+[installation instructions](../../INSTALL.md).
+
+## Setup
+1. Under the service section, ensure the `User` variable matches the user
+under which you installed Synapse and wish to run it as.
+2. Under the service section, ensure the `WorkingDirectory` variable matches
+where you have installed synapse.
+3. Under the service section, ensure the `ExecStart` variable matches the
+appropriate locations of your installation.
+4. Copy the `matrix-synapse.service` to `/etc/systemd/system/`
+5. Start Synapse: `sudo systemctl start matrix-synapse`
+6. Verify Synapse is running: `sudo systemctl status matrix-synapse`
+7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse`
diff --git a/contrib/systemd/matrix-synapse.service b/contrib/systemd/matrix-synapse.service
index 38d369ea3d..bd492544b6 100644
--- a/contrib/systemd/matrix-synapse.service
+++ b/contrib/systemd/matrix-synapse.service
@@ -4,8 +4,11 @@
 #    systemctl enable matrix-synapse
 #    systemctl start matrix-synapse
 #
+# This assumes that Synapse has been installed by a user named
+# synapse.
+#
 # This assumes that Synapse has been installed in a virtualenv in
-# /opt/synapse/env.
+# the user's home directory: `/home/synapse/synapse/env`.
 #
 # **NOTE:** This is an example service file that may change in the future. If you
 # wish to use this please copy rather than symlink it.
@@ -23,7 +26,7 @@ User=synapse
 Group=nogroup
 
 WorkingDirectory=/opt/synapse
-ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
+ExecStart=/home/synapse/synapse/env/bin/python -m synapse.app.homeserver --config-path=/home/synapse/synapse/homeserver.yaml
 SyslogIdentifier=matrix-synapse
 
 # adjust the cache factor if necessary
-- 
cgit 1.4.1


From e2cce15af16cd85d5379e8d961680028bfc9e754 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Thu, 5 Dec 2019 15:44:02 +0000
Subject: Remove #6369 changelog

---
 changelog.d/6369.doc | 1 -
 1 file changed, 1 deletion(-)
 delete mode 100644 changelog.d/6369.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6369.doc b/changelog.d/6369.doc
deleted file mode 100644
index 6db351d7db..0000000000
--- a/changelog.d/6369.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update documentation and variables in user contributed systemd reference file.
-- 
cgit 1.4.1


From ff119879d618c60e1292152724d90b160514a76f Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Thu, 5 Dec 2019 15:46:19 +0000
Subject: Revert "Modify systemd unit file reference to align with installation
 instruction (#6369)"

This reverts commit dc8747895ec026c365e687853b5ca12225fb881e.
---
 changelog.d/6369.doc                   |  1 -
 contrib/systemd/README.md              | 17 -----------------
 contrib/systemd/matrix-synapse.service |  7 ++-----
 3 files changed, 2 insertions(+), 23 deletions(-)
 delete mode 100644 changelog.d/6369.doc
 delete mode 100644 contrib/systemd/README.md

(limited to 'changelog.d')

diff --git a/changelog.d/6369.doc b/changelog.d/6369.doc
deleted file mode 100644
index 6db351d7db..0000000000
--- a/changelog.d/6369.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update documentation and variables in user contributed systemd reference file.
diff --git a/contrib/systemd/README.md b/contrib/systemd/README.md
deleted file mode 100644
index 5d42b3464f..0000000000
--- a/contrib/systemd/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Setup Synapse with Systemd
-This is a setup for managing Synapse with a user-contributed systemd unit
-file. It provides a `matrix-synapse` systemd unit file that should be
-tailored to your installation, in accordance with the
-[installation instructions](../../INSTALL.md).
-
-## Setup
-1. Under the service section, ensure the `User` variable matches the user
-under which you installed Synapse and wish to run it as.
-2. Under the service section, ensure the `WorkingDirectory` variable matches
-where you have installed synapse.
-3. Under the service section, ensure the `ExecStart` variable matches the
-appropriate locations of your installation.
-4. Copy the `matrix-synapse.service` to `/etc/systemd/system/`
-5. Start Synapse: `sudo systemctl start matrix-synapse`
-6. Verify Synapse is running: `sudo systemctl status matrix-synapse`
-7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse`
diff --git a/contrib/systemd/matrix-synapse.service b/contrib/systemd/matrix-synapse.service
index bd492544b6..38d369ea3d 100644
--- a/contrib/systemd/matrix-synapse.service
+++ b/contrib/systemd/matrix-synapse.service
@@ -4,11 +4,8 @@
 #    systemctl enable matrix-synapse
 #    systemctl start matrix-synapse
 #
-# This assumes that Synapse has been installed by a user named
-# synapse.
-#
 # This assumes that Synapse has been installed in a virtualenv in
-# the user's home directory: `/home/synapse/synapse/env`.
+# /opt/synapse/env.
 #
 # **NOTE:** This is an example service file that may change in the future. If you
 # wish to use this please copy rather than symlink it.
@@ -26,7 +23,7 @@ User=synapse
 Group=nogroup
 
 WorkingDirectory=/opt/synapse
-ExecStart=/home/synapse/synapse/env/bin/python -m synapse.app.homeserver --config-path=/home/synapse/synapse/homeserver.yaml
+ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
 SyslogIdentifier=matrix-synapse
 
 # adjust the cache factor if necessary
-- 
cgit 1.4.1


From 410bfd035a5f2b77ad94d297f689fc29b9197218 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 5 Dec 2019 15:54:15 +0000
Subject: Newsfile

---
 changelog.d/6482.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6482.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6482.misc b/changelog.d/6482.misc
new file mode 100644
index 0000000000..bdef9cf40a
--- /dev/null
+++ b/changelog.d/6482.misc
@@ -0,0 +1 @@
+Port synapse.rest.client.v1 to async/await.
-- 
cgit 1.4.1


From edb8b6af9ad1f5bf26c5116d909f39020a785670 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 5 Dec 2019 16:51:48 +0000
Subject: Newsfile

---
 changelog.d/6483.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6483.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6483.misc b/changelog.d/6483.misc
new file mode 100644
index 0000000000..cb2cd2bc39
--- /dev/null
+++ b/changelog.d/6483.misc
@@ -0,0 +1 @@
+Port synapse.rest.client.v2_alpha to async/await.
-- 
cgit 1.4.1


From b2ee65ea8c29ce698906351c458f40bb2eadc65e Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 5 Dec 2019 17:59:37 +0000
Subject: Newsfile

---
 changelog.d/6484.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6484.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6484.misc b/changelog.d/6484.misc
new file mode 100644
index 0000000000..b7cd600012
--- /dev/null
+++ b/changelog.d/6484.misc
@@ -0,0 +1 @@
+Port SyncHandler to async/await.
-- 
cgit 1.4.1
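
The three ports in the newsfiles above all follow the same mechanical
pattern: a @defer.inlineCallbacks generator becomes a native coroutine,
and each yield becomes an await. A sketch of the transformation
(ExampleHandler and self.store are illustrative stand-ins, not Synapse
classes):

    from twisted.internet import defer

    class ExampleHandler:
        # Before: Twisted's generator-based coroutine.
        @defer.inlineCallbacks
        def get_profile(self, user_id):
            profile = yield self.store.get_profile(user_id)
            return profile

        # After: a native coroutine. Twisted Deferreds implement
        # __await__, so Deferred-returning store methods can be awaited
        # directly; Deferred-based callers wrap the coroutine with
        # defer.ensureDeferred().
        async def get_profile_async(self, user_id):
            profile = await self.store.get_profile(user_id)
            return profile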


From 649b6bc0888bb1f8c408d72dd92b0c025535a866 Mon Sep 17 00:00:00 2001
From: Manuel Stahl <37705355+awesome-manuel@users.noreply.github.com>
Date: Thu, 5 Dec 2019 19:12:23 +0100
Subject: Replace /admin/v1/users_paginate endpoint with /admin/v2/users
 (#5925)

---
 changelog.d/5925.feature                     |  1 +
 changelog.d/5925.removal                     |  1 +
 docs/admin_api/user_admin_api.rst            | 45 +++++++++++++++
 synapse/handlers/admin.py                    | 21 ++++---
 synapse/rest/admin/__init__.py               |  4 +-
 synapse/rest/admin/users.py                  | 83 ++++++++++------------------
 synapse/storage/_base.py                     | 50 +++++++++--------
 synapse/storage/data_stores/main/__init__.py | 63 ++++++++++++++-------
 synapse/storage/data_stores/main/stats.py    |  2 +-
 9 files changed, 161 insertions(+), 109 deletions(-)
 create mode 100644 changelog.d/5925.feature
 create mode 100644 changelog.d/5925.removal

(limited to 'changelog.d')

diff --git a/changelog.d/5925.feature b/changelog.d/5925.feature
new file mode 100644
index 0000000000..8025cc8231
--- /dev/null
+++ b/changelog.d/5925.feature
@@ -0,0 +1 @@
+Add admin/v2/users endpoint with pagination. Contributed by Awesome Technologies Innovationslabor GmbH.
diff --git a/changelog.d/5925.removal b/changelog.d/5925.removal
new file mode 100644
index 0000000000..cbba2855cb
--- /dev/null
+++ b/changelog.d/5925.removal
@@ -0,0 +1 @@
+Remove admin/v1/users_paginate endpoint. Contributed by Awesome Technologies Innovationslabor GmbH.
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index d0871f9438..b451dc5014 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -1,3 +1,48 @@
+List Accounts
+=============
+
+This API returns all local user accounts.
+
+The API is::
+
+    GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
+
+including an ``access_token`` of a server admin.
+The parameters ``from`` and ``limit`` are required only for pagination.
+By default, a ``limit`` of 100 is used.
+The parameter ``user_id`` can be used to select only users with user IDs
+that contain this value.
+The parameter ``guests=false`` can be used to exclude guest users;
+by default, guest users are included.
+The parameter ``deactivated=true`` can be used to include deactivated users;
+by default, deactivated users are excluded.
+If the endpoint does not return a ``next_token``, there are no more users left.
+It returns a JSON body like the following:
+
+.. code:: json
+
+    {
+        "users": [
+            {
+                "name": "",
+                "password_hash": "",
+                "is_guest": 0,
+                "admin": 0,
+                "user_type": null,
+                "deactivated": 0
+            }, {
+                "name": "",
+                "password_hash": "",
+                "is_guest": 0,
+                "admin": 1,
+                "user_type": null,
+                "deactivated": 0
+            }
+        ],
+        "next_token": "100"
+    }
+
+
 Query Account
 =============
 
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 6407d56f8e..14449b9a1e 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -56,7 +56,7 @@ class AdminHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def get_users(self):
-        """Function to reterive a list of users in users table.
+        """Function to retrieve a list of users in users table.
 
         Args:
         Returns:
@@ -67,19 +67,22 @@ class AdminHandler(BaseHandler):
         return ret
 
     @defer.inlineCallbacks
-    def get_users_paginate(self, order, start, limit):
-        """Function to reterive a paginated list of users from
-        users list. This will return a json object, which contains
-        list of users and the total number of users in users table.
+    def get_users_paginate(self, start, limit, name, guests, deactivated):
+        """Function to retrieve a paginated list of users from
+        users list. This will return a json list of users.
 
         Args:
-            order (str): column name to order the select by this column
             start (int): start number to begin the query from
-            limit (int): number of rows to reterive
+            limit (int): number of rows to retrieve
+            name (string): filter for user names
+            guests (bool): whether to include guest users
+            deactivated (bool): whether to include deactivated users
         Returns:
-            defer.Deferred: resolves to json object {list[dict[str, Any]], count}
+            defer.Deferred: resolves to json list[dict[str, Any]]
         """
-        ret = yield self.store.get_users_paginate(order, start, limit)
+        ret = yield self.store.get_users_paginate(
+            start, limit, name, guests, deactivated
+        )
 
         return ret
 
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 68a59a3424..c122c449f4 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -34,12 +34,12 @@ from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
 from synapse.rest.admin.users import (
     AccountValidityRenewServlet,
     DeactivateAccountRestServlet,
-    GetUsersPaginatedRestServlet,
     ResetPasswordRestServlet,
     SearchUsersRestServlet,
     UserAdminServlet,
     UserRegisterServlet,
     UsersRestServlet,
+    UsersRestServletV2,
     WhoisRestServlet,
 )
 from synapse.util.versionstring import get_version_string
@@ -191,6 +191,7 @@ def register_servlets(hs, http_server):
     SendServerNoticeServlet(hs).register(http_server)
     VersionServlet(hs).register(http_server)
     UserAdminServlet(hs).register(http_server)
+    UsersRestServletV2(hs).register(http_server)
 
 
 def register_servlets_for_client_rest_resource(hs, http_server):
@@ -201,7 +202,6 @@ def register_servlets_for_client_rest_resource(hs, http_server):
     PurgeHistoryRestServlet(hs).register(http_server)
     UsersRestServlet(hs).register(http_server)
     ResetPasswordRestServlet(hs).register(http_server)
-    GetUsersPaginatedRestServlet(hs).register(http_server)
     SearchUsersRestServlet(hs).register(http_server)
     ShutdownRoomRestServlet(hs).register(http_server)
     UserRegisterServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 58a83f93af..1937879dbe 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -25,6 +25,7 @@ from synapse.api.errors import Codes, SynapseError
 from synapse.http.servlet import (
     RestServlet,
     assert_params_in_dict,
+    parse_boolean,
     parse_integer,
     parse_json_object_from_request,
     parse_string,
@@ -59,71 +60,45 @@ class UsersRestServlet(RestServlet):
         return 200, ret
 
 
-class GetUsersPaginatedRestServlet(RestServlet):
-    """Get request to get specific number of users from Synapse.
+class UsersRestServletV2(RestServlet):
+    PATTERNS = (re.compile("^/_synapse/admin/v2/users$"),)
+
+    """Get request to list all local users.
     This needs the user to have administrator access in Synapse.
-        Example:
-            http://localhost:8008/_synapse/admin/v1/users_paginate/
-            @admin:user?access_token=admin_access_token&start=0&limit=10
-        Returns:
-            200 OK with json object {list[dict[str, Any]], count} or empty object.
-        """
 
-    PATTERNS = historical_admin_path_patterns(
-        "/users_paginate/(?P[^/]*)"
-    )
+    GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
+
+    Returns:
+        200 OK with a list of users on success, otherwise an error.
+
+    The parameters `from` and `limit` are required only for pagination.
+    By default, a `limit` of 100 is used.
+    The parameter `user_id` can be used to filter by user id.
+    The parameter `guests` can be used to exclude guest users.
+    The parameter `deactivated` can be used to include deactivated users.
+    """
 
     def __init__(self, hs):
-        self.store = hs.get_datastore()
         self.hs = hs
         self.auth = hs.get_auth()
-        self.handlers = hs.get_handlers()
+        self.admin_handler = hs.get_handlers().admin_handler
 
-    async def on_GET(self, request, target_user_id):
-        """Get request to get specific number of users from Synapse.
-        This needs user to have administrator access in Synapse.
-        """
+    async def on_GET(self, request):
         await assert_requester_is_admin(self.auth, request)
 
-        target_user = UserID.from_string(target_user_id)
-
-        if not self.hs.is_mine(target_user):
-            raise SynapseError(400, "Can only users a local user")
-
-        order = "name"  # order by name in user table
-        start = parse_integer(request, "start", required=True)
-        limit = parse_integer(request, "limit", required=True)
-
-        logger.info("limit: %s, start: %s", limit, start)
-
-        ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit)
-        return 200, ret
+        start = parse_integer(request, "from", default=0)
+        limit = parse_integer(request, "limit", default=100)
+        user_id = parse_string(request, "user_id", default=None)
+        guests = parse_boolean(request, "guests", default=True)
+        deactivated = parse_boolean(request, "deactivated", default=False)
 
-    async def on_POST(self, request, target_user_id):
-        """Post request to get specific number of users from Synapse..
-        This needs user to have administrator access in Synapse.
-        Example:
-            http://localhost:8008/_synapse/admin/v1/users_paginate/
-            @admin:user?access_token=admin_access_token
-        JsonBodyToSend:
-            {
-                "start": "0",
-                "limit": "10
-            }
-        Returns:
-            200 OK with json object {list[dict[str, Any]], count} or empty object.
-        """
-        await assert_requester_is_admin(self.auth, request)
-        UserID.from_string(target_user_id)
-
-        order = "name"  # order by name in user table
-        params = parse_json_object_from_request(request)
-        assert_params_in_dict(params, ["limit", "start"])
-        limit = params["limit"]
-        start = params["start"]
-        logger.info("limit: %s, start: %s", limit, start)
+        users = await self.admin_handler.get_users_paginate(
+            start, limit, user_id, guests, deactivated
+        )
+        ret = {"users": users}
+        if len(users) >= limit:
+            ret["next_token"] = str(start + len(users))
 
-        ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit)
         return 200, ret
 
 
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 9205e550bb..0d7c7dff27 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -1350,11 +1350,12 @@ class SQLBaseStore(object):
     def simple_select_list_paginate(
         self,
         table,
-        keyvalues,
         orderby,
         start,
         limit,
         retcols,
+        filters=None,
+        keyvalues=None,
         order_direction="ASC",
         desc="simple_select_list_paginate",
     ):
@@ -1365,6 +1366,9 @@ class SQLBaseStore(object):
 
         Args:
             table (str): the table name
+            filters (dict[str, T] | None):
+                column names and values to filter the rows with, or None to not
+                apply a WHERE ... LIKE ? clause.
             keyvalues (dict[str, T] | None):
                 column names and values to select the rows with, or None to not
                 apply a WHERE clause.
@@ -1380,11 +1384,12 @@ class SQLBaseStore(object):
             desc,
             self.simple_select_list_paginate_txn,
             table,
-            keyvalues,
             orderby,
             start,
             limit,
             retcols,
+            filters=filters,
+            keyvalues=keyvalues,
             order_direction=order_direction,
         )
 
@@ -1393,11 +1398,12 @@ class SQLBaseStore(object):
         cls,
         txn,
         table,
-        keyvalues,
         orderby,
         start,
         limit,
         retcols,
+        filters=None,
+        keyvalues=None,
         order_direction="ASC",
     ):
         """
@@ -1405,16 +1411,23 @@ class SQLBaseStore(object):
         of row numbers, which may return zero or number of rows from start to limit,
         returning the result as a list of dicts.
 
+        Use `filters` to search attributes using SQL wildcards and/or `keyvalues` to
+        select attributes with exact matches. All constraints are joined together
+        using 'AND'.
+
         Args:
             txn : Transaction object
             table (str): the table name
-            keyvalues (dict[str, T] | None):
-                column names and values to select the rows with, or None to not
-                apply a WHERE clause.
             orderby (str): Column to order the results by.
             start (int): Index to begin the query at.
             limit (int): Number of results to return.
             retcols (iterable[str]): the names of the columns to return
+            filters (dict[str, T] | None):
+                column names and values to filter the rows with, or None to not
+                apply a WHERE ... LIKE ? clause.
+            keyvalues (dict[str, T] | None):
+                column names and values to select the rows with, or None to not
+                apply a WHERE clause.
             order_direction (str): Whether the results should be ordered "ASC" or "DESC".
         Returns:
             defer.Deferred: resolves to list[dict[str, Any]]
@@ -1422,10 +1435,15 @@ class SQLBaseStore(object):
         if order_direction not in ["ASC", "DESC"]:
             raise ValueError("order_direction must be one of 'ASC' or 'DESC'.")
 
+        where_clause = "WHERE " if filters or keyvalues else ""
+        arg_list = []
+        if filters:
+            where_clause += " AND ".join("%s LIKE ?" % (k,) for k in filters)
+            arg_list += list(filters.values())
+        where_clause += " AND " if filters and keyvalues else ""
         if keyvalues:
-            where_clause = "WHERE " + " AND ".join("%s = ?" % (k,) for k in keyvalues)
-        else:
-            where_clause = ""
+            where_clause += " AND ".join("%s = ?" % (k,) for k in keyvalues)
+            arg_list += list(keyvalues.values())
 
         sql = "SELECT %s FROM %s %s ORDER BY %s %s LIMIT ? OFFSET ?" % (
             ", ".join(retcols),
@@ -1434,22 +1452,10 @@ class SQLBaseStore(object):
             orderby,
             order_direction,
         )
-        txn.execute(sql, list(keyvalues.values()) + [limit, start])
+        txn.execute(sql, arg_list + [limit, start])
 
         return cls.cursor_to_dict(txn)
 
-    def get_user_count_txn(self, txn):
-        """Get a total number of registered users in the users list.
-
-        Args:
-            txn : Transaction object
-        Returns:
-            int : number of users
-        """
-        sql_count = "SELECT COUNT(*) FROM users WHERE is_guest = 0;"
-        txn.execute(sql_count)
-        return txn.fetchone()[0]
-
     def simple_search_list(self, table, term, col, retcols, desc="simple_search_list"):
         """Executes a SELECT query on the named table, which may return zero or
         more rows, returning the result as a list of dicts.
diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py
index 2a5b33dda1..3720ff3088 100644
--- a/synapse/storage/data_stores/main/__init__.py
+++ b/synapse/storage/data_stores/main/__init__.py
@@ -19,8 +19,6 @@ import calendar
 import logging
 import time
 
-from twisted.internet import defer
-
 from synapse.api.constants import PresenceState
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import (
@@ -476,7 +474,7 @@ class DataStore(
         )
 
     def get_users(self):
-        """Function to reterive a list of users in users table.
+        """Function to retrieve a list of users in users table.
 
         Args:
         Returns:
@@ -485,36 +483,59 @@ class DataStore(
         return self.simple_select_list(
             table="users",
             keyvalues={},
-            retcols=["name", "password_hash", "is_guest", "admin", "user_type"],
+            retcols=[
+                "name",
+                "password_hash",
+                "is_guest",
+                "admin",
+                "user_type",
+                "deactivated",
+            ],
             desc="get_users",
         )
 
-    @defer.inlineCallbacks
-    def get_users_paginate(self, order, start, limit):
-        """Function to reterive a paginated list of users from
-        users list. This will return a json object, which contains
-        list of users and the total number of users in users table.
+    def get_users_paginate(
+        self, start, limit, name=None, guests=True, deactivated=False
+    ):
+        """Function to retrieve a paginated list of users from
+        users list. This will return a json list of users.
 
         Args:
-            order (str): column name to order the select by this column
             start (int): start number to begin the query from
-            limit (int): number of rows to reterive
+            limit (int): number of rows to retrieve
+            name (string): filter for user names
+            guests (bool): whether to include guest users
+            deactivated (bool): whether to include deactivated users
         Returns:
-            defer.Deferred: resolves to json object {list[dict[str, Any]], count}
+            defer.Deferred: resolves to list[dict[str, Any]]
         """
-        users = yield self.runInteraction(
-            "get_users_paginate",
-            self.simple_select_list_paginate_txn,
+        name_filter = {}
+        if name:
+            name_filter["name"] = "%" + name + "%"
+
+        attr_filter = {}
+        if not guests:
+            attr_filter["is_guest"] = False
+        if not deactivated:
+            attr_filter["deactivated"] = False
+
+        return self.simple_select_list_paginate(
+            desc="get_users_paginate",
             table="users",
-            keyvalues={"is_guest": False},
-            orderby=order,
+            orderby="name",
             start=start,
             limit=limit,
-            retcols=["name", "password_hash", "is_guest", "admin", "user_type"],
+            filters=name_filter,
+            keyvalues=attr_filter,
+            retcols=[
+                "name",
+                "password_hash",
+                "is_guest",
+                "admin",
+                "user_type",
+                "deactivated",
+            ],
         )
-        count = yield self.runInteraction("get_users_paginate", self.get_user_count_txn)
-        retval = {"users": users, "total": count}
-        return retval
 
     def search_users(self, term):
         """Function to search users list for one or more users with
diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py
index 3aeba859fd..b306478824 100644
--- a/synapse/storage/data_stores/main/stats.py
+++ b/synapse/storage/data_stores/main/stats.py
@@ -260,11 +260,11 @@ class StatsStore(StateDeltasStore):
         slice_list = self.simple_select_list_paginate_txn(
             txn,
             table + "_historical",
-            {id_col: stats_id},
             "end_ts",
             start,
             size,
             retcols=selected_columns + ["bucket_size", "end_ts"],
+            keyvalues={id_col: stats_id},
             order_direction="DESC",
         )
 
-- 
cgit 1.4.1
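
From a client's perspective, the new endpoint pages with `from` and
`limit`, and signals the end of the list by omitting `next_token`. A
sketch of walking the full user list with the requests library (the
base URL and admin token are placeholders):

    import requests

    BASE_URL = "https://synapse.example.com"  # placeholder homeserver
    ADMIN_TOKEN = "..."                       # placeholder admin access token

    def list_all_users():
        """Page through /_synapse/admin/v2/users until next_token disappears."""
        users, start = [], 0
        while True:
            resp = requests.get(
                BASE_URL + "/_synapse/admin/v2/users",
                headers={"Authorization": "Bearer " + ADMIN_TOKEN},
                params={"from": start, "limit": 100, "guests": "false"},
            )
            resp.raise_for_status()
            body = resp.json()
            users.extend(body["users"])
            if "next_token" not in body:  # no token: no more users left
                return users
            start = int(body["next_token"])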


From 5e35f69ac35cbe12d2e7e033e68ec507222c40b3 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 6 Dec 2019 16:13:41 +0000
Subject: Newsfile

---
 changelog.d/6487.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6487.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6487.misc b/changelog.d/6487.misc
new file mode 100644
index 0000000000..18b49b9cbd
--- /dev/null
+++ b/changelog.d/6487.misc
@@ -0,0 +1 @@
+Pass in `Database` object to data stores.
-- 
cgit 1.4.1


From e519489fc43865a0a01e2295782389e322ba5100 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 9 Dec 2019 11:37:26 +0000
Subject: Remove fallback for missing /federation/v1/state_ids API (#6488)

This API was added way back in 0.17.0; the code here is annoying to maintain
and entirely redundant.
---
 changelog.d/6488.removal                |  1 +
 synapse/federation/federation_client.py | 89 +++++++--------------------------
 synapse/federation/transport/client.py  | 24 ---------
 3 files changed, 18 insertions(+), 96 deletions(-)
 create mode 100644 changelog.d/6488.removal

(limited to 'changelog.d')

diff --git a/changelog.d/6488.removal b/changelog.d/6488.removal
new file mode 100644
index 0000000000..06e034a213
--- /dev/null
+++ b/changelog.d/6488.removal
@@ -0,0 +1 @@
+Remove fallback for federation with old servers which lack the /federation/v1/state_ids API.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 27f6aff004..709449c9e3 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -324,87 +324,32 @@ class FederationClient(FederationBase):
                 A list of events in the state, and a list of events in the auth chain
                 for the given event.
         """
-        try:
-            # First we try and ask for just the IDs, as thats far quicker if
-            # we have most of the state and auth_chain already.
-            # However, this may 404 if the other side has an old synapse.
-            result = yield self.transport_layer.get_room_state_ids(
-                destination, room_id, event_id=event_id
-            )
-
-            state_event_ids = result["pdu_ids"]
-            auth_event_ids = result.get("auth_chain_ids", [])
-
-            fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest(
-                destination, room_id, set(state_event_ids + auth_event_ids)
-            )
-
-            if failed_to_fetch:
-                logger.warning(
-                    "Failed to fetch missing state/auth events for %s: %s",
-                    room_id,
-                    failed_to_fetch,
-                )
-
-            event_map = {ev.event_id: ev for ev in fetched_events}
-
-            pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
-            auth_chain = [
-                event_map[e_id] for e_id in auth_event_ids if e_id in event_map
-            ]
-
-            auth_chain.sort(key=lambda e: e.depth)
-
-            return pdus, auth_chain
-        except HttpResponseException as e:
-            if e.code == 400 or e.code == 404:
-                logger.info("Failed to use get_room_state_ids API, falling back")
-            else:
-                raise e
-
-        result = yield self.transport_layer.get_room_state(
+        result = yield self.transport_layer.get_room_state_ids(
             destination, room_id, event_id=event_id
         )
 
-        room_version = yield self.store.get_room_version(room_id)
-        format_ver = room_version_to_event_format(room_version)
-
-        pdus = [
-            event_from_pdu_json(p, format_ver, outlier=True) for p in result["pdus"]
-        ]
+        state_event_ids = result["pdu_ids"]
+        auth_event_ids = result.get("auth_chain_ids", [])
 
-        auth_chain = [
-            event_from_pdu_json(p, format_ver, outlier=True)
-            for p in result.get("auth_chain", [])
-        ]
-
-        seen_events = yield self.store.get_events(
-            [ev.event_id for ev in itertools.chain(pdus, auth_chain)]
+        fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest(
+            destination, room_id, set(state_event_ids + auth_event_ids)
         )
 
-        signed_pdus = yield self._check_sigs_and_hash_and_fetch(
-            destination,
-            [p for p in pdus if p.event_id not in seen_events],
-            outlier=True,
-            room_version=room_version,
-        )
-        signed_pdus.extend(
-            seen_events[p.event_id] for p in pdus if p.event_id in seen_events
-        )
+        if failed_to_fetch:
+            logger.warning(
+                "Failed to fetch missing state/auth events for %s: %s",
+                room_id,
+                failed_to_fetch,
+            )
 
-        signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            destination,
-            [p for p in auth_chain if p.event_id not in seen_events],
-            outlier=True,
-            room_version=room_version,
-        )
-        signed_auth.extend(
-            seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
-        )
+        event_map = {ev.event_id: ev for ev in fetched_events}
 
-        signed_auth.sort(key=lambda e: e.depth)
+        pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
+        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
+
+        auth_chain.sort(key=lambda e: e.depth)
 
-        return signed_pdus, signed_auth
+        return pdus, auth_chain
 
     @defer.inlineCallbacks
     def get_events_from_store_or_dest(self, destination, room_id, event_ids):
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index dc95ab2113..46dba84cac 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -38,30 +38,6 @@ class TransportLayerClient(object):
         self.server_name = hs.hostname
         self.client = hs.get_http_client()
 
-    @log_function
-    def get_room_state(self, destination, room_id, event_id):
-        """ Requests all state for a given room from the given server at the
-        given event.
-
-        Args:
-            destination (str): The host name of the remote homeserver we want
-                to get the state from.
-            context (str): The name of the context we want the state of
-            event_id (str): The event we want the context at.
-
-        Returns:
-            Deferred: Results in a dict received from the remote homeserver.
-        """
-        logger.debug("get_room_state dest=%s, room=%s", destination, room_id)
-
-        path = _create_v1_path("/state/%s", room_id)
-        return self.client.get_json(
-            destination,
-            path=path,
-            args={"event_id": event_id},
-            try_trailing_slash_on_400=True,
-        )
-
     @log_function
     def get_room_state_ids(self, destination, room_id, event_id):
         """ Requests all state for a given room from the given server at the
-- 
cgit 1.4.1
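
The ids-first flow that the commit above makes mandatory is cheap
because most of the referenced events are usually already in the local
store. A sketch of the shape of the response the client code consumes
(the event IDs are placeholders):

    # GET /_matrix/federation/v1/state_ids/{roomId}?event_id=...
    # returns a body of the form:
    state_ids_response = {
        "pdu_ids": ["$event_a", "$event_b"],  # state event IDs
        "auth_chain_ids": ["$event_c"],       # auth chain event IDs
    }

    # Only the IDs not already in the local store need fetching from
    # the remote server afterwards.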


From 4a161a29aca6d73f7f226ca147e4e46e70e29d5b Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 9 Dec 2019 11:54:43 +0000
Subject: Newsfile

---
 changelog.d/6493.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6493.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6493.bugfix b/changelog.d/6493.bugfix
new file mode 100644
index 0000000000..440c02efbe
--- /dev/null
+++ b/changelog.d/6493.bugfix
@@ -0,0 +1 @@
+Fix small performance regression for sending invites.
-- 
cgit 1.4.1


From 18660a34d82ccb120efd2fa2480b42ac62dbe2b4 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 9 Dec 2019 11:55:30 +0000
Subject: Fix inaccurate per-block metrics (#6491)

`Measure` incorrectly assumed that it was the only thing being done by the parent `LoggingContext`. For instance, during a "renew group attestations" operation, hundreds of `outbound_request` calls could take place in parallel, all using the same `LoggingContext`. This would mean that any resources used during *any* of those calls would be reported against *all* of them, producing wildly inaccurate results.

Instead, we now give each `Measure` block its own `LoggingContext` (using the parent `LoggingContext` mechanism to ensure that the log lines look correct and that the metrics are ultimately propagated to the top level for reporting against requests/background tasks).
---
 changelog.d/6491.bugfix |  1 +
 synapse/util/metrics.py | 60 +++++++++++++++----------------------------------
 2 files changed, 19 insertions(+), 42 deletions(-)
 create mode 100644 changelog.d/6491.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6491.bugfix b/changelog.d/6491.bugfix
new file mode 100644
index 0000000000..78204693b0
--- /dev/null
+++ b/changelog.d/6491.bugfix
@@ -0,0 +1 @@
+Fix inaccurate per-block Prometheus metrics.
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 63ddaaba87..7b18455469 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -91,72 +91,48 @@ class Measure(object):
     __slots__ = [
         "clock",
         "name",
-        "start_context",
+        "_logging_context",
         "start",
-        "created_context",
-        "start_usage",
     ]
 
     def __init__(self, clock, name):
         self.clock = clock
         self.name = name
-        self.start_context = None
+        self._logging_context = None
         self.start = None
-        self.created_context = False
 
     def __enter__(self):
-        self.start = self.clock.time()
-        self.start_context = LoggingContext.current_context()
-        if not self.start_context:
-            self.start_context = LoggingContext("Measure")
-            self.start_context.__enter__()
-            self.created_context = True
-
-        self.start_usage = self.start_context.get_resource_usage()
+        if self._logging_context:
+            raise RuntimeError("Measure() objects cannot be re-used")
 
+        self.start = self.clock.time()
+        parent_context = LoggingContext.current_context()
+        self._logging_context = LoggingContext(
+            "Measure[%s]" % (self.name,), parent_context
+        )
+        self._logging_context.__enter__()
         in_flight.register((self.name,), self._update_in_flight)
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        if isinstance(exc_type, Exception) or not self.start_context:
-            return
-
-        in_flight.unregister((self.name,), self._update_in_flight)
+        if not self._logging_context:
+            raise RuntimeError("Measure() block exited without being entered")
 
         duration = self.clock.time() - self.start
+        usage = self._logging_context.get_resource_usage()
 
-        block_counter.labels(self.name).inc()
-        block_timer.labels(self.name).inc(duration)
-
-        context = LoggingContext.current_context()
-
-        if context != self.start_context:
-            logger.warning(
-                "Context has unexpectedly changed from '%s' to '%s'. (%r)",
-                self.start_context,
-                context,
-                self.name,
-            )
-            return
-
-        if not context:
-            logger.warning("Expected context. (%r)", self.name)
-            return
+        in_flight.unregister((self.name,), self._update_in_flight)
+        self._logging_context.__exit__(exc_type, exc_val, exc_tb)
 
-        current = context.get_resource_usage()
-        usage = current - self.start_usage
         try:
+            block_counter.labels(self.name).inc()
+            block_timer.labels(self.name).inc(duration)
             block_ru_utime.labels(self.name).inc(usage.ru_utime)
             block_ru_stime.labels(self.name).inc(usage.ru_stime)
             block_db_txn_count.labels(self.name).inc(usage.db_txn_count)
             block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec)
             block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec)
         except ValueError:
-            logger.warning(
-                "Failed to save metrics! OLD: %r, NEW: %r", self.start_usage, current
-            )
-
-        if self.created_context:
-            self.start_context.__exit__(exc_type, exc_val, exc_tb)
+            logger.warning("Failed to save metrics! Usage: %s", usage)
 
     def _update_in_flight(self, metrics):
         """Gets called when processing in flight metrics
-- 
cgit 1.4.1
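
The heart of the fix is giving each measured block a scope that nothing
else can write into, so usage is read from exactly one block. A toy
version of that idea, tracking only wall-clock time (Synapse's Measure
additionally pulls CPU and database usage out of its per-block
LoggingContext):

    import time
    from contextlib import contextmanager

    usage_by_block = {}

    @contextmanager
    def measure(name):
        """Attribute elapsed time to a single named block, even when
        many blocks run concurrently under one parent context."""
        start = time.monotonic()
        try:
            yield
        finally:
            elapsed = time.monotonic() - start
            usage_by_block[name] = usage_by_block.get(name, 0.0) + elapsed

    with measure("outbound_request"):
        time.sleep(0.01)

    print(usage_by_block)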


From aeaeb72ee41076b0f08b07ea4686d1c38acf2d6b Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 9 Dec 2019 13:48:14 +0000
Subject: Newsfile

---
 changelog.d/6496.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6496.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6496.misc b/changelog.d/6496.misc
new file mode 100644
index 0000000000..19c6e926b8
--- /dev/null
+++ b/changelog.d/6496.misc
@@ -0,0 +1 @@
+Port synapse.handlers.initial_sync to async/await.
-- 
cgit 1.4.1


From adfdd82b21ae296ed77453b2f51d55414890f162 Mon Sep 17 00:00:00 2001
From: Neil Johnson 
Date: Mon, 9 Dec 2019 13:59:27 +0000
Subject: Back out perf regression from get_cross_signing_keys_from_cache.
 (#6494)

Back out cross-signing code added in Synapse 1.5.0, which caused a performance regression.
---
 changelog.d/6494.bugfix         |  1 +
 synapse/handlers/e2e_keys.py    | 38 ++++++++------------------------------
 sytest-blacklist                |  3 +++
 tests/handlers/test_e2e_keys.py |  8 ++++++++
 4 files changed, 20 insertions(+), 30 deletions(-)
 create mode 100644 changelog.d/6494.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6494.bugfix b/changelog.d/6494.bugfix
new file mode 100644
index 0000000000..78726d5d7f
--- /dev/null
+++ b/changelog.d/6494.bugfix
@@ -0,0 +1 @@
+Back out cross-signing code added in Synapse 1.5.0, which caused a performance regression.
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 28c12753c1..57a10daefd 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -264,7 +264,6 @@ class E2eKeysHandler(object):
 
         return ret
 
-    @defer.inlineCallbacks
     def get_cross_signing_keys_from_cache(self, query, from_user_id):
         """Get cross-signing keys for users from the database
 
@@ -284,35 +283,14 @@ class E2eKeysHandler(object):
         self_signing_keys = {}
         user_signing_keys = {}
 
-        for user_id in query:
-            # XXX: consider changing the store functions to allow querying
-            # multiple users simultaneously.
-            key = yield self.store.get_e2e_cross_signing_key(
-                user_id, "master", from_user_id
-            )
-            if key:
-                master_keys[user_id] = key
-
-            key = yield self.store.get_e2e_cross_signing_key(
-                user_id, "self_signing", from_user_id
-            )
-            if key:
-                self_signing_keys[user_id] = key
-
-            # users can see other users' master and self-signing keys, but can
-            # only see their own user-signing keys
-            if from_user_id == user_id:
-                key = yield self.store.get_e2e_cross_signing_key(
-                    user_id, "user_signing", from_user_id
-                )
-                if key:
-                    user_signing_keys[user_id] = key
-
-        return {
-            "master_keys": master_keys,
-            "self_signing_keys": self_signing_keys,
-            "user_signing_keys": user_signing_keys,
-        }
+        # Currently a stub, implementation coming in https://github.com/matrix-org/synapse/pull/6486
+        return defer.succeed(
+            {
+                "master_keys": master_keys,
+                "self_signing_keys": self_signing_keys,
+                "user_signing_keys": user_signing_keys,
+            }
+        )
 
     @trace
     @defer.inlineCallbacks
diff --git a/sytest-blacklist b/sytest-blacklist
index 411cce0692..79b2d4402a 100644
--- a/sytest-blacklist
+++ b/sytest-blacklist
@@ -33,3 +33,6 @@ New federated private chats get full presence information (SYN-115)
 # Blacklisted due to https://github.com/matrix-org/matrix-doc/pull/2314 removing
 # this requirement from the spec
 Inbound federation of state requires event_id as a mandatory paramater
+
+# Blacklisted until https://github.com/matrix-org/synapse/pull/6486 lands
+Can upload self-signing keys
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index 854eb6c024..fdfa2cbbc4 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -183,6 +183,10 @@ class E2eKeysHandlerTestCase(unittest.TestCase):
         )
         self.assertDictEqual(devices["master_keys"], {local_user: keys2["master_key"]})
 
+    test_replace_master_key.skip = (
+        "Disabled waiting on #https://github.com/matrix-org/synapse/pull/6486"
+    )
+
     @defer.inlineCallbacks
     def test_reupload_signatures(self):
         """re-uploading a signature should not fail"""
@@ -503,3 +507,7 @@ class E2eKeysHandlerTestCase(unittest.TestCase):
             ],
             other_master_key["signatures"][local_user]["ed25519:" + usersigning_pubkey],
         )
+
+    test_upload_signatures.skip = (
+        "Disabled waiting on #https://github.com/matrix-org/synapse/pull/6486"
+    )
-- 
cgit 1.4.1


From 96d35f1028833ae700f39bb12c6f77a7de2c30bf Mon Sep 17 00:00:00 2001
From: Clifford Garwood II 
Date: Mon, 9 Dec 2019 09:40:37 -0500
Subject: Systemd documentation (#6490)

---
 changelog.d/6490.doc                   | 1 +
 contrib/systemd/matrix-synapse.service | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6490.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6490.doc b/changelog.d/6490.doc
new file mode 100644
index 0000000000..6db351d7db
--- /dev/null
+++ b/changelog.d/6490.doc
@@ -0,0 +1 @@
+Update documentation and variables in user contributed systemd reference file.
diff --git a/contrib/systemd/matrix-synapse.service b/contrib/systemd/matrix-synapse.service
index bd492544b6..813717b032 100644
--- a/contrib/systemd/matrix-synapse.service
+++ b/contrib/systemd/matrix-synapse.service
@@ -25,7 +25,7 @@ Restart=on-abort
 User=synapse
 Group=nogroup
 
-WorkingDirectory=/opt/synapse
+WorkingDirectory=/home/synapse/synapse
 ExecStart=/home/synapse/synapse/env/bin/python -m synapse.app.homeserver --config-path=/home/synapse/synapse/homeserver.yaml
 SyslogIdentifier=matrix-synapse
 
-- 
cgit 1.4.1


From 24da1ffcb615ecde30c413b73434688c0d5963b9 Mon Sep 17 00:00:00 2001
From: Neil Johnson 
Date: Mon, 9 Dec 2019 14:46:20 +0000
Subject: 1.7.0rc1

---
 CHANGES.md               | 78 ++++++++++++++++++++++++++++++++++++++++++++++++
 changelog.d/5815.feature |  1 -
 changelog.d/5858.feature |  1 -
 changelog.d/5925.feature |  1 -
 changelog.d/5925.removal |  1 -
 changelog.d/6119.feature |  1 -
 changelog.d/6176.feature |  1 -
 changelog.d/6237.bugfix  |  1 -
 changelog.d/6241.bugfix  |  1 -
 changelog.d/6266.misc    |  1 -
 changelog.d/6322.misc    |  1 -
 changelog.d/6329.bugfix  |  1 -
 changelog.d/6332.bugfix  |  1 -
 changelog.d/6333.bugfix  |  1 -
 changelog.d/6343.misc    |  1 -
 changelog.d/6354.feature |  1 -
 changelog.d/6362.misc    |  1 -
 changelog.d/6369.doc     |  1 -
 changelog.d/6379.misc    |  1 -
 changelog.d/6388.doc     |  1 -
 changelog.d/6390.doc     |  1 -
 changelog.d/6392.misc    |  1 -
 changelog.d/6406.bugfix  |  1 -
 changelog.d/6408.bugfix  |  1 -
 changelog.d/6409.feature |  1 -
 changelog.d/6420.bugfix  |  1 -
 changelog.d/6421.bugfix  |  1 -
 changelog.d/6423.misc    |  1 -
 changelog.d/6426.bugfix  |  1 -
 changelog.d/6429.misc    |  1 -
 changelog.d/6434.feature |  1 -
 changelog.d/6436.bugfix  |  1 -
 changelog.d/6443.doc     |  1 -
 changelog.d/6449.bugfix  |  1 -
 changelog.d/6451.bugfix  |  1 -
 changelog.d/6454.misc    |  1 -
 changelog.d/6458.doc     |  1 -
 changelog.d/6461.doc     |  1 -
 changelog.d/6462.bugfix  |  1 -
 changelog.d/6464.misc    |  1 -
 changelog.d/6468.misc    |  1 -
 changelog.d/6469.misc    |  1 -
 changelog.d/6470.bugfix  |  1 -
 changelog.d/6472.bugfix  |  1 -
 changelog.d/6480.misc    |  1 -
 changelog.d/6482.misc    |  1 -
 changelog.d/6483.misc    |  1 -
 changelog.d/6484.misc    |  1 -
 changelog.d/6487.misc    |  1 -
 changelog.d/6488.removal |  1 -
 changelog.d/6490.doc     |  1 -
 changelog.d/6491.bugfix  |  1 -
 changelog.d/6493.bugfix  |  1 -
 changelog.d/6494.bugfix  |  1 -
 synapse/__init__.py      |  2 +-
 55 files changed, 79 insertions(+), 54 deletions(-)
 delete mode 100644 changelog.d/5815.feature
 delete mode 100644 changelog.d/5858.feature
 delete mode 100644 changelog.d/5925.feature
 delete mode 100644 changelog.d/5925.removal
 delete mode 100644 changelog.d/6119.feature
 delete mode 100644 changelog.d/6176.feature
 delete mode 100644 changelog.d/6237.bugfix
 delete mode 100644 changelog.d/6241.bugfix
 delete mode 100644 changelog.d/6266.misc
 delete mode 100644 changelog.d/6322.misc
 delete mode 100644 changelog.d/6329.bugfix
 delete mode 100644 changelog.d/6332.bugfix
 delete mode 100644 changelog.d/6333.bugfix
 delete mode 100644 changelog.d/6343.misc
 delete mode 100644 changelog.d/6354.feature
 delete mode 100644 changelog.d/6362.misc
 delete mode 100644 changelog.d/6369.doc
 delete mode 100644 changelog.d/6379.misc
 delete mode 100644 changelog.d/6388.doc
 delete mode 100644 changelog.d/6390.doc
 delete mode 100644 changelog.d/6392.misc
 delete mode 100644 changelog.d/6406.bugfix
 delete mode 100644 changelog.d/6408.bugfix
 delete mode 100644 changelog.d/6409.feature
 delete mode 100644 changelog.d/6420.bugfix
 delete mode 100644 changelog.d/6421.bugfix
 delete mode 100644 changelog.d/6423.misc
 delete mode 100644 changelog.d/6426.bugfix
 delete mode 100644 changelog.d/6429.misc
 delete mode 100644 changelog.d/6434.feature
 delete mode 100644 changelog.d/6436.bugfix
 delete mode 100644 changelog.d/6443.doc
 delete mode 100644 changelog.d/6449.bugfix
 delete mode 100644 changelog.d/6451.bugfix
 delete mode 100644 changelog.d/6454.misc
 delete mode 100644 changelog.d/6458.doc
 delete mode 100644 changelog.d/6461.doc
 delete mode 100644 changelog.d/6462.bugfix
 delete mode 100644 changelog.d/6464.misc
 delete mode 100644 changelog.d/6468.misc
 delete mode 100644 changelog.d/6469.misc
 delete mode 100644 changelog.d/6470.bugfix
 delete mode 100644 changelog.d/6472.bugfix
 delete mode 100644 changelog.d/6480.misc
 delete mode 100644 changelog.d/6482.misc
 delete mode 100644 changelog.d/6483.misc
 delete mode 100644 changelog.d/6484.misc
 delete mode 100644 changelog.d/6487.misc
 delete mode 100644 changelog.d/6488.removal
 delete mode 100644 changelog.d/6490.doc
 delete mode 100644 changelog.d/6491.bugfix
 delete mode 100644 changelog.d/6493.bugfix
 delete mode 100644 changelog.d/6494.bugfix

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index a9afd36d2c..0ef9794aac 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,81 @@
+Synapse 1.7.0rc1 (2019-12-09)
+=============================
+
+Features
+--------
+
+- Implement per-room message retention policies. ([\#5815](https://github.com/matrix-org/synapse/issues/5815))
+- Add etag and count fields to key backup endpoints to help clients guess if there are new keys. ([\#5858](https://github.com/matrix-org/synapse/issues/5858))
+- Add admin/v2/users endpoint with pagination. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5925](https://github.com/matrix-org/synapse/issues/5925))
+- Require User-Interactive Authentication for `/account/3pid/add`, meaning the user's password will be required to add a third-party ID to their account. ([\#6119](https://github.com/matrix-org/synapse/issues/6119))
+- Implement the `/_matrix/federation/unstable/net.atleastfornow/state/` API as drafted in MSC2314. ([\#6176](https://github.com/matrix-org/synapse/issues/6176))
+- Configure privacy preserving settings by default for the room directory. ([\#6354](https://github.com/matrix-org/synapse/issues/6354))
+- Add ephemeral messages support by partially implementing [MSC2228](https://github.com/matrix-org/matrix-doc/pull/2228). ([\#6409](https://github.com/matrix-org/synapse/issues/6409))
+- Add support for MSC 2367, which allows specifying a reason on all membership events. ([\#6434](https://github.com/matrix-org/synapse/issues/6434))
+
+
+Bugfixes
+--------
+
+- Transfer non-standard power levels on room upgrade. ([\#6237](https://github.com/matrix-org/synapse/issues/6237))
+- Fix error from the Pillow library when uploading RGBA images. ([\#6241](https://github.com/matrix-org/synapse/issues/6241))
+- Correctly apply the event filter to the `state`, `events_before` and `events_after` fields in the response to `/context` requests. ([\#6329](https://github.com/matrix-org/synapse/issues/6329))
+- Fix caching devices for remote users when using workers, so that we don't attempt to refetch (and potentially fail) each time a user requests devices. ([\#6332](https://github.com/matrix-org/synapse/issues/6332))
+- Prevent account data syncs getting lost across TCP replication. ([\#6333](https://github.com/matrix-org/synapse/issues/6333))
+- Fix bug: TypeError in `register_user()` while using LDAP auth module. ([\#6406](https://github.com/matrix-org/synapse/issues/6406))
+- Fix an intermittent exception when handling read-receipts. ([\#6408](https://github.com/matrix-org/synapse/issues/6408))
+- Fix broken guest registration when there are existing blocks of numeric user IDs. ([\#6420](https://github.com/matrix-org/synapse/issues/6420))
+- Fix startup error when http proxy is defined. ([\#6421](https://github.com/matrix-org/synapse/issues/6421))
+- Clean up local threepids from user on account deactivation. ([\#6426](https://github.com/matrix-org/synapse/issues/6426))
+- Fix a bug where a room could become unusable with a low retention policy and low activity. ([\#6436](https://github.com/matrix-org/synapse/issues/6436))
+- Fix error when using synapse_port_db on a vanilla synapse db. ([\#6449](https://github.com/matrix-org/synapse/issues/6449))
+- Fix uploading multiple cross signing signatures for the same user. ([\#6451](https://github.com/matrix-org/synapse/issues/6451))
+- Fix a bug which led to exceptions being thrown in a loop when a cross-signed device is deleted. ([\#6462](https://github.com/matrix-org/synapse/issues/6462))
+- Fix `synapse_port_db` not exiting with a 0 code if something went wrong during the port process. ([\#6470](https://github.com/matrix-org/synapse/issues/6470))
+- Improve sanity-checking when receiving events over federation. ([\#6472](https://github.com/matrix-org/synapse/issues/6472))
+- Fix inaccurate per-block Prometheus metrics. ([\#6491](https://github.com/matrix-org/synapse/issues/6491))
+- Fix small performance regression for sending invites. ([\#6493](https://github.com/matrix-org/synapse/issues/6493))
+- Back out cross-signing code added in Synapse 1.5.0, which caused a performance regression. ([\#6494](https://github.com/matrix-org/synapse/issues/6494))
+
+
+Improved Documentation
+----------------------
+
+- Update documentation and variables in user contributed systemd reference file. ([\#6369](https://github.com/matrix-org/synapse/issues/6369), [\#6490](https://github.com/matrix-org/synapse/issues/6490))
+- Fix link in the user directory documentation. ([\#6388](https://github.com/matrix-org/synapse/issues/6388))
+- Add build instructions to the docker readme. ([\#6390](https://github.com/matrix-org/synapse/issues/6390))
+- Switch Ubuntu package install recommendation to use python3 packages in INSTALL.md. ([\#6443](https://github.com/matrix-org/synapse/issues/6443))
+- Write documentation for the `quarantine_media` API. ([\#6458](https://github.com/matrix-org/synapse/issues/6458))
+- Convert CONTRIBUTING.rst to markdown (among other small fixes). ([\#6461](https://github.com/matrix-org/synapse/issues/6461))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove admin/v1/users_paginate endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5925](https://github.com/matrix-org/synapse/issues/5925))
+- Remove fallback for federation with old servers which lack the /federation/v1/state_ids API. ([\#6488](https://github.com/matrix-org/synapse/issues/6488))
+
+
+Internal Changes
+----------------
+
+- Add benchmarks for structured logging and improve output performance. ([\#6266](https://github.com/matrix-org/synapse/issues/6266))
+- Improve the performance of outputting structured logging. ([\#6322](https://github.com/matrix-org/synapse/issues/6322))
+- Refactor some code in the event authentication path for clarity. ([\#6343](https://github.com/matrix-org/synapse/issues/6343), [\#6468](https://github.com/matrix-org/synapse/issues/6468), [\#6480](https://github.com/matrix-org/synapse/issues/6480))
+- Clean up some unnecessary quotation marks around the codebase. ([\#6362](https://github.com/matrix-org/synapse/issues/6362))
+- Complain on startup instead of 500'ing during runtime when `public_baseurl` isn't set when necessary. ([\#6379](https://github.com/matrix-org/synapse/issues/6379))
+- Add a test scenario to make sure room history purges don't break `/messages` in the future. ([\#6392](https://github.com/matrix-org/synapse/issues/6392))
+- Clarifications for the email configuration settings. ([\#6423](https://github.com/matrix-org/synapse/issues/6423))
+- Add more tests to the blacklist when running in worker mode. ([\#6429](https://github.com/matrix-org/synapse/issues/6429))
+- Move data store specific code out of `SQLBaseStore`. ([\#6454](https://github.com/matrix-org/synapse/issues/6454))
+- Prepare for `SQLBaseStore` functions to be moved out of the stores. ([\#6464](https://github.com/matrix-org/synapse/issues/6464))
+- Move per database functionality out of the data stores and into a dedicated `Database` class. ([\#6469](https://github.com/matrix-org/synapse/issues/6469))
+- Port synapse.rest.client.v1 to async/await. ([\#6482](https://github.com/matrix-org/synapse/issues/6482))
+- Port synapse.rest.client.v2_alpha to async/await. ([\#6483](https://github.com/matrix-org/synapse/issues/6483))
+- Port SyncHandler to async/await. ([\#6484](https://github.com/matrix-org/synapse/issues/6484))
+- Pass in `Database` object to data stores. ([\#6487](https://github.com/matrix-org/synapse/issues/6487))
+
+
 Synapse 1.6.1 (2019-11-28)
 ==========================
 
diff --git a/changelog.d/5815.feature b/changelog.d/5815.feature
deleted file mode 100644
index ca4df4e7f6..0000000000
--- a/changelog.d/5815.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement per-room message retention policies.
diff --git a/changelog.d/5858.feature b/changelog.d/5858.feature
deleted file mode 100644
index 55ee93051e..0000000000
--- a/changelog.d/5858.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add etag and count fields to key backup endpoints to help clients guess if there are new keys.
diff --git a/changelog.d/5925.feature b/changelog.d/5925.feature
deleted file mode 100644
index 8025cc8231..0000000000
--- a/changelog.d/5925.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add admin/v2/users endpoint with pagination. Contributed by Awesome Technologies Innovationslabor GmbH.
diff --git a/changelog.d/5925.removal b/changelog.d/5925.removal
deleted file mode 100644
index cbba2855cb..0000000000
--- a/changelog.d/5925.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove admin/v1/users_paginate endpoint. Contributed by Awesome Technologies Innovationslabor GmbH.
diff --git a/changelog.d/6119.feature b/changelog.d/6119.feature
deleted file mode 100644
index 1492e83c5a..0000000000
--- a/changelog.d/6119.feature
+++ /dev/null
@@ -1 +0,0 @@
-Require User-Interactive Authentication for `/account/3pid/add`, meaning the user's password will be required to add a third-party ID to their account.
\ No newline at end of file
diff --git a/changelog.d/6176.feature b/changelog.d/6176.feature
deleted file mode 100644
index 3c66d689d4..0000000000
--- a/changelog.d/6176.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement the `/_matrix/federation/unstable/net.atleastfornow/state/` API as drafted in MSC2314.
diff --git a/changelog.d/6237.bugfix b/changelog.d/6237.bugfix
deleted file mode 100644
index 9285600b00..0000000000
--- a/changelog.d/6237.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Transfer non-standard power levels on room upgrade.
\ No newline at end of file
diff --git a/changelog.d/6241.bugfix b/changelog.d/6241.bugfix
deleted file mode 100644
index 25109ca4a6..0000000000
--- a/changelog.d/6241.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix error from the Pillow library when uploading RGBA images.
diff --git a/changelog.d/6266.misc b/changelog.d/6266.misc
deleted file mode 100644
index 634e421a79..0000000000
--- a/changelog.d/6266.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add benchmarks for structured logging and improve output performance.
diff --git a/changelog.d/6322.misc b/changelog.d/6322.misc
deleted file mode 100644
index 70ef36ca80..0000000000
--- a/changelog.d/6322.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve the performance of outputting structured logging.
diff --git a/changelog.d/6329.bugfix b/changelog.d/6329.bugfix
deleted file mode 100644
index e558d13b7d..0000000000
--- a/changelog.d/6329.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Correctly apply the event filter to the `state`, `events_before` and `events_after` fields in the response to `/context` requests.
\ No newline at end of file
diff --git a/changelog.d/6332.bugfix b/changelog.d/6332.bugfix
deleted file mode 100644
index 67d5170ba0..0000000000
--- a/changelog.d/6332.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix caching devices for remote users when using workers, so that we don't attempt to refetch (and potentially fail) each time a user requests devices.
diff --git a/changelog.d/6333.bugfix b/changelog.d/6333.bugfix
deleted file mode 100644
index a25d6ef3cb..0000000000
--- a/changelog.d/6333.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent account data syncs getting lost across TCP replication.
\ No newline at end of file
diff --git a/changelog.d/6343.misc b/changelog.d/6343.misc
deleted file mode 100644
index d9a44389b9..0000000000
--- a/changelog.d/6343.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor some code in the event authentication path for clarity.
diff --git a/changelog.d/6354.feature b/changelog.d/6354.feature
deleted file mode 100644
index fed9db884b..0000000000
--- a/changelog.d/6354.feature
+++ /dev/null
@@ -1 +0,0 @@
-Configure privacy preserving settings by default for the room directory.
diff --git a/changelog.d/6362.misc b/changelog.d/6362.misc
deleted file mode 100644
index b79a5bea99..0000000000
--- a/changelog.d/6362.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up some unnecessary quotation marks around the codebase.
\ No newline at end of file
diff --git a/changelog.d/6369.doc b/changelog.d/6369.doc
deleted file mode 100644
index 6db351d7db..0000000000
--- a/changelog.d/6369.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update documentation and variables in user contributed systemd reference file.
diff --git a/changelog.d/6379.misc b/changelog.d/6379.misc
deleted file mode 100644
index 725c2e7d87..0000000000
--- a/changelog.d/6379.misc
+++ /dev/null
@@ -1 +0,0 @@
-Complain on startup instead of 500'ing during runtime when `public_baseurl` isn't set when necessary.
\ No newline at end of file
diff --git a/changelog.d/6388.doc b/changelog.d/6388.doc
deleted file mode 100644
index c777cb6b8f..0000000000
--- a/changelog.d/6388.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix link in the user directory documentation.
diff --git a/changelog.d/6390.doc b/changelog.d/6390.doc
deleted file mode 100644
index 093411bec1..0000000000
--- a/changelog.d/6390.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add build instructions to the docker readme.
\ No newline at end of file
diff --git a/changelog.d/6392.misc b/changelog.d/6392.misc
deleted file mode 100644
index a00257944f..0000000000
--- a/changelog.d/6392.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a test scenario to make sure room history purges don't break `/messages` in the future.
diff --git a/changelog.d/6406.bugfix b/changelog.d/6406.bugfix
deleted file mode 100644
index ca9bee084b..0000000000
--- a/changelog.d/6406.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug: TypeError in `register_user()` while using LDAP auth module.
diff --git a/changelog.d/6408.bugfix b/changelog.d/6408.bugfix
deleted file mode 100644
index c9babe599b..0000000000
--- a/changelog.d/6408.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix an intermittent exception when handling read-receipts.
diff --git a/changelog.d/6409.feature b/changelog.d/6409.feature
deleted file mode 100644
index 653ff5a5ad..0000000000
--- a/changelog.d/6409.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add ephemeral messages support by partially implementing [MSC2228](https://github.com/matrix-org/matrix-doc/pull/2228).
diff --git a/changelog.d/6420.bugfix b/changelog.d/6420.bugfix
deleted file mode 100644
index aef47cccaa..0000000000
--- a/changelog.d/6420.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix broken guest registration when there are existing blocks of numeric user IDs.
diff --git a/changelog.d/6421.bugfix b/changelog.d/6421.bugfix
deleted file mode 100644
index 7969f7f71d..0000000000
--- a/changelog.d/6421.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix startup error when http proxy is defined.
diff --git a/changelog.d/6423.misc b/changelog.d/6423.misc
deleted file mode 100644
index 9bcd5d36c1..0000000000
--- a/changelog.d/6423.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clarifications for the email configuration settings.
diff --git a/changelog.d/6426.bugfix b/changelog.d/6426.bugfix
deleted file mode 100644
index 3acfde4211..0000000000
--- a/changelog.d/6426.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Clean up local threepids from user on account deactivation.
\ No newline at end of file
diff --git a/changelog.d/6429.misc b/changelog.d/6429.misc
deleted file mode 100644
index 4b32cdeac6..0000000000
--- a/changelog.d/6429.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add more tests to the blacklist when running in worker mode.
diff --git a/changelog.d/6434.feature b/changelog.d/6434.feature
deleted file mode 100644
index affa5d50c1..0000000000
--- a/changelog.d/6434.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for MSC 2367, which allows specifying a reason on all membership events.
diff --git a/changelog.d/6436.bugfix b/changelog.d/6436.bugfix
deleted file mode 100644
index 954a4e1d84..0000000000
--- a/changelog.d/6436.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug where a room could become unusable with a low retention policy and a low activity.
diff --git a/changelog.d/6443.doc b/changelog.d/6443.doc
deleted file mode 100644
index 67c59f92ee..0000000000
--- a/changelog.d/6443.doc
+++ /dev/null
@@ -1 +0,0 @@
-Switch Ubuntu package install recommendation to use python3 packages in INSTALL.md.
\ No newline at end of file
diff --git a/changelog.d/6449.bugfix b/changelog.d/6449.bugfix
deleted file mode 100644
index 002f33c450..0000000000
--- a/changelog.d/6449.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix error when using synapse_port_db on a vanilla synapse db.
diff --git a/changelog.d/6451.bugfix b/changelog.d/6451.bugfix
deleted file mode 100644
index 23b67583ec..0000000000
--- a/changelog.d/6451.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix uploading multiple cross signing signatures for the same user.
diff --git a/changelog.d/6454.misc b/changelog.d/6454.misc
deleted file mode 100644
index 9e5259157c..0000000000
--- a/changelog.d/6454.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move data store specific code out of `SQLBaseStore`.
diff --git a/changelog.d/6458.doc b/changelog.d/6458.doc
deleted file mode 100644
index 3a9f831d89..0000000000
--- a/changelog.d/6458.doc
+++ /dev/null
@@ -1 +0,0 @@
-Write some docs for the quarantine_media api.
diff --git a/changelog.d/6461.doc b/changelog.d/6461.doc
deleted file mode 100644
index 1502fa2855..0000000000
--- a/changelog.d/6461.doc
+++ /dev/null
@@ -1 +0,0 @@
-Convert CONTRIBUTING.rst to markdown (among other small fixes).
\ No newline at end of file
diff --git a/changelog.d/6462.bugfix b/changelog.d/6462.bugfix
deleted file mode 100644
index c435939526..0000000000
--- a/changelog.d/6462.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug which lead to exceptions being thrown in a loop when a cross-signed device is deleted.
diff --git a/changelog.d/6464.misc b/changelog.d/6464.misc
deleted file mode 100644
index bd65276ef6..0000000000
--- a/changelog.d/6464.misc
+++ /dev/null
@@ -1 +0,0 @@
-Prepare SQLBaseStore functions being moved out of the stores.
diff --git a/changelog.d/6468.misc b/changelog.d/6468.misc
deleted file mode 100644
index d9a44389b9..0000000000
--- a/changelog.d/6468.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor some code in the event authentication path for clarity.
diff --git a/changelog.d/6469.misc b/changelog.d/6469.misc
deleted file mode 100644
index 32216b9046..0000000000
--- a/changelog.d/6469.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move per database functionality out of the data stores and into a dedicated `Database` class.
diff --git a/changelog.d/6470.bugfix b/changelog.d/6470.bugfix
deleted file mode 100644
index c08b34c14c..0000000000
--- a/changelog.d/6470.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix `synapse_port_db` not exiting with a 0 code if something went wrong during the port process.
diff --git a/changelog.d/6472.bugfix b/changelog.d/6472.bugfix
deleted file mode 100644
index 598efb79fc..0000000000
--- a/changelog.d/6472.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Improve sanity-checking when receiving events over federation.
diff --git a/changelog.d/6480.misc b/changelog.d/6480.misc
deleted file mode 100644
index d9a44389b9..0000000000
--- a/changelog.d/6480.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor some code in the event authentication path for clarity.
diff --git a/changelog.d/6482.misc b/changelog.d/6482.misc
deleted file mode 100644
index bdef9cf40a..0000000000
--- a/changelog.d/6482.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port synapse.rest.client.v1 to async/await.
diff --git a/changelog.d/6483.misc b/changelog.d/6483.misc
deleted file mode 100644
index cb2cd2bc39..0000000000
--- a/changelog.d/6483.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port synapse.rest.client.v2_alpha to async/await.
diff --git a/changelog.d/6484.misc b/changelog.d/6484.misc
deleted file mode 100644
index b7cd600012..0000000000
--- a/changelog.d/6484.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port SyncHandler to async/await.
diff --git a/changelog.d/6487.misc b/changelog.d/6487.misc
deleted file mode 100644
index 18b49b9cbd..0000000000
--- a/changelog.d/6487.misc
+++ /dev/null
@@ -1 +0,0 @@
-Pass in `Database` object to data stores.
diff --git a/changelog.d/6488.removal b/changelog.d/6488.removal
deleted file mode 100644
index 06e034a213..0000000000
--- a/changelog.d/6488.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove fallback for federation with old servers which lack the /federation/v1/state_ids API.
diff --git a/changelog.d/6490.doc b/changelog.d/6490.doc
deleted file mode 100644
index 6db351d7db..0000000000
--- a/changelog.d/6490.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update documentation and variables in user contributed systemd reference file.
diff --git a/changelog.d/6491.bugfix b/changelog.d/6491.bugfix
deleted file mode 100644
index 78204693b0..0000000000
--- a/changelog.d/6491.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix inaccurate per-block Prometheus metrics.
diff --git a/changelog.d/6493.bugfix b/changelog.d/6493.bugfix
deleted file mode 100644
index 440c02efbe..0000000000
--- a/changelog.d/6493.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix small performance regression for sending invites.
diff --git a/changelog.d/6494.bugfix b/changelog.d/6494.bugfix
deleted file mode 100644
index 78726d5d7f..0000000000
--- a/changelog.d/6494.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Back out cross-signing code added in Synapse 1.5.0, which caused a performance regression.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index f99de2f3f3..c67a51a8d5 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.6.1"
+__version__ = "1.7.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 5e8abe9013427e8ad452c4652dfcb40da05c246e Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 9 Dec 2019 14:54:33 +0000
Subject: Better errors regarding changing avatar_url (#6497)

---
 changelog.d/6497.bugfix           |  1 +
 synapse/rest/client/v1/profile.py | 11 ++++++++---
 2 files changed, 9 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6497.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6497.bugfix b/changelog.d/6497.bugfix
new file mode 100644
index 0000000000..92ed08fc40
--- /dev/null
+++ b/changelog.d/6497.bugfix
@@ -0,0 +1 @@
+Fix the error message returned when setting a profile's avatar URL so that it no longer mentions displaynames, and prevent `None` avatar URLs from being set.
\ No newline at end of file
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index 1eac8a44c5..4f47562c1b 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -103,11 +103,16 @@ class ProfileAvatarURLRestServlet(RestServlet):
 
         content = parse_json_object_from_request(request)
         try:
-            new_name = content["avatar_url"]
+            new_avatar_url = content.get("avatar_url")
         except Exception:
-            return 400, "Unable to parse name"
+            return 400, "Unable to parse avatar_url"
+
+        if new_avatar_url is None:
+            return 400, "Missing required key: avatar_url"
 
-        await self.profile_handler.set_avatar_url(user, requester, new_name, is_admin)
+        await self.profile_handler.set_avatar_url(
+            user, requester, new_avatar_url, is_admin
+        )
 
         return 200, {}
 
-- 
cgit 1.4.1


From 52fe9788bcc2c4b422f387421667698187fc2135 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 9 Dec 2019 15:19:32 +0000
Subject: Newsfile

---
 changelog.d/6499.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6499.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6499.bugfix b/changelog.d/6499.bugfix
new file mode 100644
index 0000000000..299feba0f8
--- /dev/null
+++ b/changelog.d/6499.bugfix
@@ -0,0 +1 @@
+Fix support for SQLite 3.7.
-- 
cgit 1.4.1


From f5bb1531b7307bc2d826789746e7c82fa4dbf36c Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 10 Dec 2019 11:23:52 +0000
Subject: Newsfile

---
 changelog.d/6505.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6505.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6505.misc b/changelog.d/6505.misc
new file mode 100644
index 0000000000..3a75b2d9dd
--- /dev/null
+++ b/changelog.d/6505.misc
@@ -0,0 +1 @@
+Make `make_deferred_yieldable` work with async/await.
-- 
cgit 1.4.1


From b1e7012deea2d254ecbf92d1fed429c34a65db54 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 10 Dec 2019 11:29:44 +0000
Subject: Newsfile

---
 changelog.d/6506.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6506.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6506.misc b/changelog.d/6506.misc
new file mode 100644
index 0000000000..99d7a70bcf
--- /dev/null
+++ b/changelog.d/6506.misc
@@ -0,0 +1 @@
+Remove `SnapshotCache` in favour of `ResponseCache`.
-- 
cgit 1.4.1


From ec5fdd13339cb9bd2bb87537fdac524da2cad614 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 10 Dec 2019 12:34:33 +0000
Subject: Changelog

---
 changelog.d/6507.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6507.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6507.bugfix b/changelog.d/6507.bugfix
new file mode 100644
index 0000000000..7f95da52c9
--- /dev/null
+++ b/changelog.d/6507.bugfix
@@ -0,0 +1 @@
+Fix pusher worker failing because it can't retrieve retention policies for rooms.
-- 
cgit 1.4.1


From 451ec9b8b96c17dcd465981fb8715f071a9316b4 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 10 Dec 2019 13:06:41 +0000
Subject: Changelog

---
 changelog.d/6509.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6509.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6509.bugfix b/changelog.d/6509.bugfix
new file mode 100644
index 0000000000..7f95da52c9
--- /dev/null
+++ b/changelog.d/6509.bugfix
@@ -0,0 +1 @@
+Fix pusher worker failing because it can't retrieve retention policies for rooms.
-- 
cgit 1.4.1


From accd343f9104653c0e89d79101ae6ae767f7aa35 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 10 Dec 2019 13:22:42 +0000
Subject: Newsfile

---
 changelog.d/6510.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6510.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6510.misc b/changelog.d/6510.misc
new file mode 100644
index 0000000000..214f06539b
--- /dev/null
+++ b/changelog.d/6510.misc
@@ -0,0 +1 @@
+Change phone home stats so that they do not assume there is a single database, and report information about the database used by the main data store.
-- 
cgit 1.4.1


From 4643bb2a37dcd6302fd5b9e185ea5d0f4eb0df8c Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 10 Dec 2019 13:36:00 +0000
Subject: Newsfile

---
 changelog.d/6512.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6512.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6512.misc b/changelog.d/6512.misc
new file mode 100644
index 0000000000..37a8099eec
--- /dev/null
+++ b/changelog.d/6512.misc
@@ -0,0 +1 @@
+Silence mypy errors for files outside those specified.
-- 
cgit 1.4.1


From ae49d29ef1dabec25dc348bf11c2919d28d1cef3 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 10 Dec 2019 13:55:03 +0000
Subject: Fixup changelogs

---
 changelog.d/6507.bugfix | 2 +-
 changelog.d/6509.bugfix | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6507.bugfix b/changelog.d/6507.bugfix
index 7f95da52c9..d767a6237f 100644
--- a/changelog.d/6507.bugfix
+++ b/changelog.d/6507.bugfix
@@ -1 +1 @@
-Fix pusher worker failing because it can't retrieve retention policies for rooms.
+Fix regression where sending email push would not work when using a pusher worker.
diff --git a/changelog.d/6509.bugfix b/changelog.d/6509.bugfix
index 7f95da52c9..d767a6237f 100644
--- a/changelog.d/6509.bugfix
+++ b/changelog.d/6509.bugfix
@@ -1 +1 @@
-Fix pusher worker failing because it can't retrieve retention policies for rooms.
+Fix regression where sending email push would not work when using a pusher worker.
-- 
cgit 1.4.1


From d630c8234928c008e12c79ebef10cee570779fa5 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 10 Dec 2019 13:34:17 +0000
Subject: Newsfile

---
 changelog.d/6511.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6511.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6511.misc b/changelog.d/6511.misc
new file mode 100644
index 0000000000..19ce435e68
--- /dev/null
+++ b/changelog.d/6511.misc
@@ -0,0 +1 @@
+Move database config from apps into HomeServer object.
-- 
cgit 1.4.1


From 3f97b4c16bbe8a32fc465bd59421f3ba879d2124 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 10 Dec 2019 11:17:13 +0000
Subject: Newsfile

---
 changelog.d/6504.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6504.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6504.misc b/changelog.d/6504.misc
new file mode 100644
index 0000000000..7c873459af
--- /dev/null
+++ b/changelog.d/6504.misc
@@ -0,0 +1 @@
+Port handlers.account_data and handlers.account_validity to async/await.
-- 
cgit 1.4.1


From 424fd58237d4a40c6f94772be136298d326fcd69 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 10 Dec 2019 15:09:45 +0000
Subject: Remove redundant code from event authorisation implementation.
 (#6502)

---
 changelog.d/6502.removal | 1 +
 synapse/event_auth.py    | 8 ++------
 2 files changed, 3 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/6502.removal

(limited to 'changelog.d')

diff --git a/changelog.d/6502.removal b/changelog.d/6502.removal
new file mode 100644
index 0000000000..0b72261d58
--- /dev/null
+++ b/changelog.d/6502.removal
@@ -0,0 +1 @@
+Remove redundant code from event authorisation implementation.
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index ec3243b27b..c940b84470 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -42,6 +42,8 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
     Returns:
          if the auth checks pass.
     """
+    assert isinstance(auth_events, dict)
+
     if do_size_check:
         _check_size_limits(event)
 
@@ -74,12 +76,6 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
             if not event.signatures.get(event_id_domain):
                 raise AuthError(403, "Event not signed by sending server")
 
-    if auth_events is None:
-        # Oh, we don't know what the state of the room was, so we
-        # are trusting that this is allowed (at least for now)
-        logger.warning("Trusting event: %s", event.event_id)
-        return
-
     if event.type == EventTypes.Create:
         sender_domain = get_domain_from_id(event.sender)
         room_id_domain = get_domain_from_id(event.room_id)
-- 
cgit 1.4.1


From c3dda2874d78790525f47e502aaed22b64961873 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 10 Dec 2019 16:22:00 +0000
Subject: Refactor get_events_from_store_or_dest to return a dict (#6501)

There was a bunch of unnecessary conversion back and forth between dict and
list going on here. We can simplify a bunch of the code.
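
A toy sketch of the new shape (illustrative only, not the Synapse code
itself): with a dict keyed by event_id, the failures fall out as a single set
difference instead of being threaded through as a second return value.

    desired_events = {"$a", "$b", "$c"}
    # whatever we managed to fetch, from the store or the remote server:
    event_map = {"$a": "<event A>", "$c": "<event C>"}
    failed_to_fetch = desired_events - event_map.keys()  # -> {"$b"}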
---
 changelog.d/6501.misc                   |  1 +
 synapse/federation/federation_client.py | 44 +++++++++++----------------------
 2 files changed, 16 insertions(+), 29 deletions(-)
 create mode 100644 changelog.d/6501.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6501.misc b/changelog.d/6501.misc
new file mode 100644
index 0000000000..255f45a9c3
--- /dev/null
+++ b/changelog.d/6501.misc
@@ -0,0 +1 @@
+Refactor `get_events_from_store_or_dest` to return a dict.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 709449c9e3..73e1dda6a3 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -18,8 +18,6 @@ import copy
 import itertools
 import logging
 
-from six.moves import range
-
 from prometheus_client import Counter
 
 from twisted.internet import defer
@@ -41,7 +39,7 @@ from synapse.events import builder, room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.logging.utils import log_function
-from synapse.util import unwrapFirstError
+from synapse.util import batch_iter, unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.retryutils import NotRetryingDestination
 
@@ -331,10 +329,12 @@ class FederationClient(FederationBase):
         state_event_ids = result["pdu_ids"]
         auth_event_ids = result.get("auth_chain_ids", [])
 
-        fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest(
-            destination, room_id, set(state_event_ids + auth_event_ids)
+        desired_events = set(state_event_ids + auth_event_ids)
+        event_map = yield self.get_events_from_store_or_dest(
+            destination, room_id, desired_events
         )
 
+        failed_to_fetch = desired_events - event_map.keys()
         if failed_to_fetch:
             logger.warning(
                 "Failed to fetch missing state/auth events for %s: %s",
@@ -342,8 +342,6 @@ class FederationClient(FederationBase):
                 failed_to_fetch,
             )
 
-        event_map = {ev.event_id: ev for ev in fetched_events}
-
         pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
         auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
 
@@ -358,23 +356,18 @@ class FederationClient(FederationBase):
         Args:
             destination (str)
             room_id (str)
-            event_ids (list)
+            event_ids (Iterable[str])
 
         Returns:
-            Deferred: A deferred resolving to a 2-tuple where the first is a list of
-            events and the second is a list of event ids that we failed to fetch.
+            Deferred[dict[str, EventBase]]: A deferred resolving to a map
+            from event_id to event
         """
-        seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
-        signed_events = list(seen_events.values())
-
-        failed_to_fetch = set()
+        fetched_events = yield self.store.get_events(event_ids, allow_rejected=True)
 
-        missing_events = set(event_ids)
-        for k in seen_events:
-            missing_events.discard(k)
+        missing_events = set(event_ids) - fetched_events.keys()
 
         if not missing_events:
-            return signed_events, failed_to_fetch
+            return fetched_events
 
         logger.debug(
             "Fetching unknown state/auth events %s for room %s",
@@ -384,11 +377,8 @@ class FederationClient(FederationBase):
 
         room_version = yield self.store.get_room_version(room_id)
 
-        batch_size = 20
-        missing_events = list(missing_events)
-        for i in range(0, len(missing_events), batch_size):
-            batch = set(missing_events[i : i + batch_size])
-
+        # XXX 20 requests at once? really?
+        for batch in batch_iter(missing_events, 20):
             deferreds = [
                 run_in_background(
                     self.get_pdu,
@@ -404,13 +394,9 @@ class FederationClient(FederationBase):
             )
             for success, result in res:
                 if success and result:
-                    signed_events.append(result)
-                    batch.discard(result.event_id)
-
-            # We removed all events we successfully fetched from `batch`
-            failed_to_fetch.update(batch)
+                    fetched_events[result.event_id] = result
 
-        return signed_events, failed_to_fetch
+        return fetched_events
 
     @defer.inlineCallbacks
     @log_function
-- 
cgit 1.4.1


From 40eda849338b6e47a5804b4cf7000e9d2417c4d8 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 10 Dec 2019 16:22:29 +0000
Subject: Fix race which caused deleted devices to reappear (#6514)

Stop the `update_client_ips` background job from recreating deleted devices.
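
The underlying distinction: a plain UPDATE against a row that has been deleted
matches zero rows, whereas an upsert would silently re-create it. A minimal
sketch of that behaviour (illustrative only, using sqlite3 directly rather
than Synapse's database layer):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE devices (user_id TEXT, device_id TEXT, ip TEXT,"
        " PRIMARY KEY (user_id, device_id))"
    )

    # The device was deleted before the job ran: the UPDATE touches no rows,
    # so the device stays deleted...
    cur = conn.execute(
        "UPDATE devices SET ip = ? WHERE user_id = ? AND device_id = ?",
        ("1.2.3.4", "@user:id", "MY_DEVICE"),
    )
    assert cur.rowcount == 0

    # ...whereas an upsert (SQLite >= 3.24) would re-insert it.
    conn.execute(
        "INSERT INTO devices (user_id, device_id, ip) VALUES (?, ?, ?)"
        " ON CONFLICT (user_id, device_id) DO UPDATE SET ip = excluded.ip",
        ("@user:id", "MY_DEVICE", "1.2.3.4"),
    )
    assert conn.execute("SELECT count(*) FROM devices").fetchone()[0] == 1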
---
 changelog.d/6514.bugfix                        |  1 +
 synapse/storage/data_stores/main/client_ips.py |  8 +++--
 tests/storage/test_client_ips.py               | 49 +++++++++++++++-----------
 3 files changed, 35 insertions(+), 23 deletions(-)
 create mode 100644 changelog.d/6514.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6514.bugfix b/changelog.d/6514.bugfix
new file mode 100644
index 0000000000..6dc1985c24
--- /dev/null
+++ b/changelog.d/6514.bugfix
@@ -0,0 +1 @@
+Fix race which occasionally caused deleted devices to reappear.
diff --git a/synapse/storage/data_stores/main/client_ips.py b/synapse/storage/data_stores/main/client_ips.py
index 320c5b0f07..add3037b69 100644
--- a/synapse/storage/data_stores/main/client_ips.py
+++ b/synapse/storage/data_stores/main/client_ips.py
@@ -451,16 +451,18 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
                 # Technically an access token might not be associated with
                 # a device so we need to check.
                 if device_id:
-                    self.db.simple_upsert_txn(
+                    # this is always an update rather than an upsert: the row should
+                    # already exist, and if it doesn't, that may be because it has been
+                    # deleted, and we don't want to re-create it.
+                    self.db.simple_update_txn(
                         txn,
                         table="devices",
                         keyvalues={"user_id": user_id, "device_id": device_id},
-                        values={
+                        updatevalues={
                             "user_agent": user_agent,
                             "last_seen": last_seen,
                             "ip": ip,
                         },
-                        lock=False,
                     )
             except Exception as e:
                 # Failed to upsert, log and continue
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index fc279340d4..bf674dd184 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -37,9 +37,13 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
         self.reactor.advance(12345678)
 
         user_id = "@user:id"
+        device_id = "MY_DEVICE"
+
+        # Insert a user IP
+        self.get_success(self.store.store_device(user_id, device_id, "display name",))
         self.get_success(
             self.store.insert_client_ip(
-                user_id, "access_token", "ip", "user_agent", "device_id"
+                user_id, "access_token", "ip", "user_agent", device_id
             )
         )
 
@@ -47,14 +51,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
         self.reactor.advance(10)
 
         result = self.get_success(
-            self.store.get_last_client_ip_by_device(user_id, "device_id")
+            self.store.get_last_client_ip_by_device(user_id, device_id)
         )
 
-        r = result[(user_id, "device_id")]
+        r = result[(user_id, device_id)]
         self.assertDictContainsSubset(
             {
                 "user_id": user_id,
-                "device_id": "device_id",
+                "device_id": device_id,
                 "ip": "ip",
                 "user_agent": "user_agent",
                 "last_seen": 12345678000,
@@ -209,14 +213,16 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
                 self.store.db.updates.do_next_background_update(100), by=0.1
             )
 
-        # Insert a user IP
         user_id = "@user:id"
+        device_id = "MY_DEVICE"
+
+        # Insert a user IP
+        self.get_success(self.store.store_device(user_id, device_id, "display name",))
         self.get_success(
             self.store.insert_client_ip(
-                user_id, "access_token", "ip", "user_agent", "device_id"
+                user_id, "access_token", "ip", "user_agent", device_id
             )
         )
-
         # Force persisting to disk
         self.reactor.advance(200)
 
@@ -224,7 +230,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
         self.get_success(
             self.store.db.simple_update(
                 table="devices",
-                keyvalues={"user_id": user_id, "device_id": "device_id"},
+                keyvalues={"user_id": user_id, "device_id": device_id},
                 updatevalues={"last_seen": None, "ip": None, "user_agent": None},
                 desc="test_devices_last_seen_bg_update",
             )
@@ -232,14 +238,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
 
         # We should now get nulls when querying
         result = self.get_success(
-            self.store.get_last_client_ip_by_device(user_id, "device_id")
+            self.store.get_last_client_ip_by_device(user_id, device_id)
         )
 
-        r = result[(user_id, "device_id")]
+        r = result[(user_id, device_id)]
         self.assertDictContainsSubset(
             {
                 "user_id": user_id,
-                "device_id": "device_id",
+                "device_id": device_id,
                 "ip": None,
                 "user_agent": None,
                 "last_seen": None,
@@ -272,14 +278,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
 
         # We should now get the correct result again
         result = self.get_success(
-            self.store.get_last_client_ip_by_device(user_id, "device_id")
+            self.store.get_last_client_ip_by_device(user_id, device_id)
         )
 
-        r = result[(user_id, "device_id")]
+        r = result[(user_id, device_id)]
         self.assertDictContainsSubset(
             {
                 "user_id": user_id,
-                "device_id": "device_id",
+                "device_id": device_id,
                 "ip": "ip",
                 "user_agent": "user_agent",
                 "last_seen": 0,
@@ -296,11 +302,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
                 self.store.db.updates.do_next_background_update(100), by=0.1
             )
 
-        # Insert a user IP
         user_id = "@user:id"
+        device_id = "MY_DEVICE"
+
+        # Insert a user IP
+        self.get_success(self.store.store_device(user_id, device_id, "display name",))
         self.get_success(
             self.store.insert_client_ip(
-                user_id, "access_token", "ip", "user_agent", "device_id"
+                user_id, "access_token", "ip", "user_agent", device_id
             )
         )
 
@@ -324,7 +333,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
                     "access_token": "access_token",
                     "ip": "ip",
                     "user_agent": "user_agent",
-                    "device_id": "device_id",
+                    "device_id": device_id,
                     "last_seen": 0,
                 }
             ],
@@ -347,14 +356,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
 
         # But we should still get the correct values for the device
         result = self.get_success(
-            self.store.get_last_client_ip_by_device(user_id, "device_id")
+            self.store.get_last_client_ip_by_device(user_id, device_id)
         )
 
-        r = result[(user_id, "device_id")]
+        r = result[(user_id, device_id)]
         self.assertDictContainsSubset(
             {
                 "user_id": user_id,
-                "device_id": "device_id",
+                "device_id": device_id,
                 "ip": "ip",
                 "user_agent": "user_agent",
                 "last_seen": 0,
-- 
cgit 1.4.1


From 4947de5a147d4f6a4e60aecac1284714fb64df8a Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 10 Dec 2019 17:30:16 +0000
Subject: Allow SAML username provider plugins (#6411)

---
 changelog.d/6411.feature         |   1 +
 docs/saml_mapping_providers.md   |  77 +++++++++++++++
 docs/sample_config.yaml          |  61 ++++++++----
 synapse/config/saml2_config.py   | 186 ++++++++++++++++++++++++------------
 synapse/handlers/saml_handler.py | 198 +++++++++++++++++++++++++++++++++------
 5 files changed, 417 insertions(+), 106 deletions(-)
 create mode 100644 changelog.d/6411.feature
 create mode 100644 docs/saml_mapping_providers.md

(limited to 'changelog.d')

diff --git a/changelog.d/6411.feature b/changelog.d/6411.feature
new file mode 100644
index 0000000000..ebea4a208d
--- /dev/null
+++ b/changelog.d/6411.feature
@@ -0,0 +1 @@
+Allow custom SAML username mapping functionality through an external provider plugin.
\ No newline at end of file
diff --git a/docs/saml_mapping_providers.md b/docs/saml_mapping_providers.md
new file mode 100644
index 0000000000..92f2380488
--- /dev/null
+++ b/docs/saml_mapping_providers.md
@@ -0,0 +1,77 @@
+# SAML Mapping Providers
+
+A SAML mapping provider is a Python class (loaded via a Python module) that
+works out how to map attributes of a SAML response object to Matrix-specific
+user attributes. Details such as the user ID localpart, displayname, and even
+avatar URLs can all be mapped from attributes returned by an SSO service.
+
+As an example, an SSO service may return the email address
+"john.smith@example.com" for a user, whereas Synapse will need to figure out how
+to turn that into a displayname when creating a Matrix user for this individual.
+It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
+variations. As each Synapse configuration may want something different, this is
+where SAML mapping providers come into play.
+
+## Enabling Providers
+
+External mapping providers are provided to Synapse in the form of an external
+Python module. Retrieve this module from [PyPI](https://pypi.org) or elsewhere,
+then tell Synapse where to look for the handler class by editing the
+`saml2_config.user_mapping_provider.module` config option.
+
+`saml2_config.user_mapping_provider.config` allows you to provide custom
+configuration options to the module. Check with the module's documentation for
+what options it provides (if any). The options listed by default are for the
+user mapping provider built in to Synapse. If using a custom module, you should
+comment these options out and use those specified by the module instead.
+
+## Building a Custom Mapping Provider
+
+A custom mapping provider must specify the following methods (a minimal
+example provider is sketched after the list):
+
+* `__init__(self, parsed_config)`
+   - Arguments:
+     - `parsed_config` - A configuration object that is the return value of the
+       `parse_config` method. You should set any configuration options needed by
+       the module here.
+* `saml_response_to_user_attributes(self, saml_response, failures)`
+    - Arguments:
+      - `saml_response` - A `saml2.response.AuthnResponse` object to extract user
+                          information from.
+      - `failures` - An `int` that represents the number of times the returned
+                     mxid localpart mapping has failed.  This should be used
+                     to create a deduplicated mxid localpart which should be
+                     returned instead. For example, if this method returns
+                     `john.doe` as the value of `mxid_localpart` in the returned
+                     dict, and that is already taken on the homeserver, this
+                     method will be called again with the same parameters but
+                     with failures=1. The method should then return a different
+                     `mxid_localpart` value, such as `john.doe1`.
+    - This method must return a dictionary, which will then be used by Synapse
+      to build a new user. The following keys are allowed:
+       * `mxid_localpart` - Required. The mxid localpart of the new user.
+       * `displayname` - The displayname of the new user. If not provided, will default to
+                         the value of `mxid_localpart`.
+* `parse_config(config)`
+    - This method should have the `@staticmethod` decorator.
+    - Arguments:
+        - `config` - A `dict` representing the parsed content of the
+          `saml2_config.user_mapping_provider.config` homeserver config option.
+           This method runs on homeserver startup. Providers should extract
+           any option values they need here.
+    - Whatever is returned will be passed back to the user mapping provider module's
+      `__init__` method during construction.
+* `get_saml_attributes(config)`
+    - This method should have the `@staticmethod` decorator.
+    - Arguments:
+        - `config` - An object resulting from a call to `parse_config`.
+    - Returns a tuple of two sets. The first set contains the SAML auth
+      response attributes that are required for the module to function, whereas
+      the second set consists of those attributes which can be used if available
+      but are not necessary.
+
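+For illustration, a minimal provider implementing this interface might look
+like the following. This is only a sketch: the class names are invented, it
+assumes the `mxid_source_attribute` config option described above, and a real
+provider would want proper validation and error handling.
+
+```python
+import attr
+
+
+@attr.s
+class ExampleProviderConfig:
+    mxid_source_attribute = attr.ib()
+
+
+class ExampleSamlMappingProvider:
+    def __init__(self, parsed_config):
+        self._config = parsed_config
+
+    def saml_response_to_user_attributes(self, saml_response, failures):
+        # `ava` maps SAML attribute names to lists of values.
+        localpart = saml_response.ava[self._config.mxid_source_attribute][0]
+        if failures:
+            # An earlier attempt at this localpart collided: deduplicate by
+            # appending a counter.
+            localpart += str(failures)
+        attributes = {"mxid_localpart": localpart}
+        displayname = saml_response.ava.get("displayName", [None])[0]
+        if displayname:
+            attributes["displayname"] = displayname
+        return attributes
+
+    @staticmethod
+    def parse_config(config):
+        return ExampleProviderConfig(
+            mxid_source_attribute=config.get("mxid_source_attribute", "uid"),
+        )
+
+    @staticmethod
+    def get_saml_attributes(config):
+        return {config.mxid_source_attribute}, {"displayName"}
+```
+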
+## Synapse's Default Provider
+
+Synapse ships with a built-in SAML mapping provider, which is used if a custom
+provider isn't specified in the config. It is located at
+[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py).
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 10664ae8f7..4d44e631d1 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1250,33 +1250,58 @@ saml2_config:
   #
   #config_path: "CONFDIR/sp_conf.py"
 
-  # the lifetime of a SAML session. This defines how long a user has to
+  # The lifetime of a SAML session. This defines how long a user has to
   # complete the authentication process, if allow_unsolicited is unset.
   # The default is 5 minutes.
   #
   #saml_session_lifetime: 5m
 
-  # The SAML attribute (after mapping via the attribute maps) to use to derive
-  # the Matrix ID from. 'uid' by default.
+  # An external module can be provided here as a custom solution to
+  # mapping attributes returned from a SAML provider onto a Matrix user.
   #
-  #mxid_source_attribute: displayName
-
-  # The mapping system to use for mapping the saml attribute onto a matrix ID.
-  # Options include:
-  #  * 'hexencode' (which maps unpermitted characters to '=xx')
-  #  * 'dotreplace' (which replaces unpermitted characters with '.').
-  # The default is 'hexencode'.
-  #
-  #mxid_mapping: dotreplace
+  user_mapping_provider:
+    # The custom module's class. Uncomment to use a custom module.
+    #
+    #module: mapping_provider.SamlMappingProvider
 
-  # In previous versions of synapse, the mapping from SAML attribute to MXID was
-  # always calculated dynamically rather than stored in a table. For backwards-
-  # compatibility, we will look for user_ids matching such a pattern before
-  # creating a new account.
+    # Custom configuration values for the module. Below options are
+    # intended for the built-in provider, they should be changed if
+    # using a custom module. This section will be passed as a Python
+    # dictionary to the module's `parse_config` method.
+    #
+    config:
+      # The SAML attribute (after mapping via the attribute maps) to use
+      # to derive the Matrix ID from. 'uid' by default.
+      #
+      # Note: This used to be configured by the
+      # saml2_config.mxid_source_attribute option. If that is still
+      # defined, its value will be used instead.
+      #
+      #mxid_source_attribute: displayName
+
+      # The mapping system to use for mapping the saml attribute onto a
+      # matrix ID.
+      #
+      # Options include:
+      #  * 'hexencode' (which maps unpermitted characters to '=xx')
+      #  * 'dotreplace' (which replaces unpermitted characters with
+      #     '.').
+      # The default is 'hexencode'.
+      #
+      # Note: This used to be configured by the
+      # saml2_config.mxid_mapping option. If that is still defined, its
+      # value will be used instead.
+      #
+      #mxid_mapping: dotreplace
+
+  # In previous versions of synapse, the mapping from SAML attribute to
+  # MXID was always calculated dynamically rather than stored in a
+  # table. For backwards-compatibility, we will look for user_ids
+  # matching such a pattern before creating a new account.
   #
   # This setting controls the SAML attribute which will be used for this
-  # backwards-compatibility lookup. Typically it should be 'uid', but if the
-  # attribute maps are changed, it may be necessary to change it.
+  # backwards-compatibility lookup. Typically it should be 'uid', but if
+  # the attribute maps are changed, it may be necessary to change it.
   #
   # The default is 'uid'.
   #
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
index c5ea2d43a1..b91414aa35 100644
--- a/synapse/config/saml2_config.py
+++ b/synapse/config/saml2_config.py
@@ -14,17 +14,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import re
+import logging
 
 from synapse.python_dependencies import DependencyException, check_requirements
-from synapse.types import (
-    map_username_to_mxid_localpart,
-    mxid_localpart_allowed_characters,
-)
-from synapse.util.module_loader import load_python_module
+from synapse.util.module_loader import load_module, load_python_module
 
 from ._base import Config, ConfigError
 
+logger = logging.getLogger(__name__)
+
+DEFAULT_USER_MAPPING_PROVIDER = (
+    "synapse.handlers.saml_handler.DefaultSamlMappingProvider"
+)
+
 
 def _dict_merge(merge_dict, into_dict):
     """Do a deep merge of two dicts
@@ -75,15 +77,69 @@ class SAML2Config(Config):
 
         self.saml2_enabled = True
 
-        self.saml2_mxid_source_attribute = saml2_config.get(
-            "mxid_source_attribute", "uid"
-        )
-
         self.saml2_grandfathered_mxid_source_attribute = saml2_config.get(
             "grandfathered_mxid_source_attribute", "uid"
         )
 
-        saml2_config_dict = self._default_saml_config_dict()
+        # user_mapping_provider may be None if the key is present but has no value
+        ump_dict = saml2_config.get("user_mapping_provider") or {}
+
+        # Use the default user mapping provider if not set
+        ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
+
+        # Ensure a config is present
+        ump_dict["config"] = ump_dict.get("config") or {}
+
+        if ump_dict["module"] == DEFAULT_USER_MAPPING_PROVIDER:
+            # Load deprecated options for use by the default module
+            old_mxid_source_attribute = saml2_config.get("mxid_source_attribute")
+            if old_mxid_source_attribute:
+                logger.warning(
+                    "The config option saml2_config.mxid_source_attribute is deprecated. "
+                    "Please use saml2_config.user_mapping_provider.config"
+                    ".mxid_source_attribute instead."
+                )
+                ump_dict["config"]["mxid_source_attribute"] = old_mxid_source_attribute
+
+            old_mxid_mapping = saml2_config.get("mxid_mapping")
+            if old_mxid_mapping:
+                logger.warning(
+                    "The config option saml2_config.mxid_mapping is deprecated. Please "
+                    "use saml2_config.user_mapping_provider.config.mxid_mapping instead."
+                )
+                ump_dict["config"]["mxid_mapping"] = old_mxid_mapping
+
+        # Retrieve an instance of the module's class
+        # Pass the config dictionary to the module for processing
+        (
+            self.saml2_user_mapping_provider_class,
+            self.saml2_user_mapping_provider_config,
+        ) = load_module(ump_dict)
+
+        # Ensure loaded user mapping module has defined all necessary methods
+        # Note parse_config() is already checked during the call to load_module
+        required_methods = [
+            "get_saml_attributes",
+            "saml_response_to_user_attributes",
+        ]
+        missing_methods = [
+            method
+            for method in required_methods
+            if not hasattr(self.saml2_user_mapping_provider_class, method)
+        ]
+        if missing_methods:
+            raise ConfigError(
+                "Class specified by saml2_config."
+                "user_mapping_provider.module is missing required "
+                "methods: %s" % (", ".join(missing_methods),)
+            )
+
+        # Get the desired saml auth response attributes from the module
+        saml2_config_dict = self._default_saml_config_dict(
+            *self.saml2_user_mapping_provider_class.get_saml_attributes(
+                self.saml2_user_mapping_provider_config
+            )
+        )
         _dict_merge(
             merge_dict=saml2_config.get("sp_config", {}), into_dict=saml2_config_dict
         )
@@ -103,22 +159,27 @@ class SAML2Config(Config):
             saml2_config.get("saml_session_lifetime", "5m")
         )
 
-        mapping = saml2_config.get("mxid_mapping", "hexencode")
-        try:
-            self.saml2_mxid_mapper = MXID_MAPPER_MAP[mapping]
-        except KeyError:
-            raise ConfigError("%s is not a known mxid_mapping" % (mapping,))
-
-    def _default_saml_config_dict(self):
+    def _default_saml_config_dict(
+        self, required_attributes: set, optional_attributes: set
+    ):
+        """Generate a configuration dictionary with required and optional attributes that
+        will be needed to process new user registration
+
+        Args:
+            required_attributes: SAML auth response attributes that are
+                necessary to function
+            optional_attributes: SAML auth response attributes that can be used to add
+                additional information to Synapse user accounts, but are not required
+
+        Returns:
+            dict: A SAML configuration dictionary
+        """
         import saml2
 
         public_baseurl = self.public_baseurl
         if public_baseurl is None:
             raise ConfigError("saml2_config requires a public_baseurl to be set")
 
-        required_attributes = {"uid", self.saml2_mxid_source_attribute}
-
-        optional_attributes = {"displayName"}
         if self.saml2_grandfathered_mxid_source_attribute:
             optional_attributes.add(self.saml2_grandfathered_mxid_source_attribute)
         optional_attributes -= required_attributes
@@ -207,33 +268,58 @@ class SAML2Config(Config):
           #
           #config_path: "%(config_dir_path)s/sp_conf.py"
 
-          # the lifetime of a SAML session. This defines how long a user has to
+          # The lifetime of a SAML session. This defines how long a user has to
           # complete the authentication process, if allow_unsolicited is unset.
           # The default is 5 minutes.
           #
           #saml_session_lifetime: 5m
 
-          # The SAML attribute (after mapping via the attribute maps) to use to derive
-          # the Matrix ID from. 'uid' by default.
+          # An external module can be provided here as a custom solution to
+          # mapping attributes returned from a SAML provider onto a Matrix user.
           #
-          #mxid_source_attribute: displayName
-
-          # The mapping system to use for mapping the saml attribute onto a matrix ID.
-          # Options include:
-          #  * 'hexencode' (which maps unpermitted characters to '=xx')
-          #  * 'dotreplace' (which replaces unpermitted characters with '.').
-          # The default is 'hexencode'.
-          #
-          #mxid_mapping: dotreplace
-
-          # In previous versions of synapse, the mapping from SAML attribute to MXID was
-          # always calculated dynamically rather than stored in a table. For backwards-
-          # compatibility, we will look for user_ids matching such a pattern before
-          # creating a new account.
+          user_mapping_provider:
+            # The custom module's class. Uncomment to use a custom module.
+            #
+            #module: mapping_provider.SamlMappingProvider
+
+            # Custom configuration values for the module. Below options are
+            # intended for the built-in provider, they should be changed if
+            # using a custom module. This section will be passed as a Python
+            # dictionary to the module's `parse_config` method.
+            #
+            config:
+              # The SAML attribute (after mapping via the attribute maps) to use
+              # to derive the Matrix ID from. 'uid' by default.
+              #
+              # Note: This used to be configured by the
+              # saml2_config.mxid_source_attribute option. If that is still
+              # defined, its value will be used instead.
+              #
+              #mxid_source_attribute: displayName
+
+              # The mapping system to use for mapping the saml attribute onto a
+              # matrix ID.
+              #
+              # Options include:
+              #  * 'hexencode' (which maps unpermitted characters to '=xx')
+              #  * 'dotreplace' (which replaces unpermitted characters with
+              #     '.').
+              # The default is 'hexencode'.
+              #
+              # Note: This used to be configured by the
+              # saml2_config.mxid_mapping option. If that is still defined, its
+              # value will be used instead.
+              #
+              #mxid_mapping: dotreplace
+
+          # In previous versions of synapse, the mapping from SAML attribute to
+          # MXID was always calculated dynamically rather than stored in a
+          # table. For backwards-compatibility, we will look for user_ids
+          # matching such a pattern before creating a new account.
           #
           # This setting controls the SAML attribute which will be used for this
-          # backwards-compatibility lookup. Typically it should be 'uid', but if the
-          # attribute maps are changed, it may be necessary to change it.
+          # backwards-compatibility lookup. Typically it should be 'uid', but if
+          # the attribute maps are changed, it may be necessary to change it.
           #
           # The default is 'uid'.
           #
@@ -241,23 +327,3 @@ class SAML2Config(Config):
         """ % {
             "config_dir_path": config_dir_path
         }
-
-
-DOT_REPLACE_PATTERN = re.compile(
-    ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),))
-)
-
-
-def dot_replace_for_mxid(username: str) -> str:
-    username = username.lower()
-    username = DOT_REPLACE_PATTERN.sub(".", username)
-
-    # regular mxids aren't allowed to start with an underscore either
-    username = re.sub("^_", "", username)
-    return username
-
-
-MXID_MAPPER_MAP = {
-    "hexencode": map_username_to_mxid_localpart,
-    "dotreplace": dot_replace_for_mxid,
-}
diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py
index cc9e6b9bd0..0082f85c26 100644
--- a/synapse/handlers/saml_handler.py
+++ b/synapse/handlers/saml_handler.py
@@ -13,20 +13,36 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+import re
+from typing import Tuple
 
 import attr
 import saml2
+import saml2.response
 from saml2.client import Saml2Client
 
 from synapse.api.errors import SynapseError
+from synapse.config import ConfigError
 from synapse.http.servlet import parse_string
 from synapse.rest.client.v1.login import SSOAuthHandler
-from synapse.types import UserID, map_username_to_mxid_localpart
+from synapse.types import (
+    UserID,
+    map_username_to_mxid_localpart,
+    mxid_localpart_allowed_characters,
+)
 from synapse.util.async_helpers import Linearizer
 
 logger = logging.getLogger(__name__)
 
 
+@attr.s
+class Saml2SessionData:
+    """Data we track about SAML2 sessions"""
+
+    # time the session was created, in milliseconds
+    creation_time = attr.ib()
+
+
 class SamlHandler:
     def __init__(self, hs):
         self._saml_client = Saml2Client(hs.config.saml2_sp_config)
@@ -37,11 +53,14 @@ class SamlHandler:
         self._datastore = hs.get_datastore()
         self._hostname = hs.hostname
         self._saml2_session_lifetime = hs.config.saml2_session_lifetime
-        self._mxid_source_attribute = hs.config.saml2_mxid_source_attribute
         self._grandfathered_mxid_source_attribute = (
             hs.config.saml2_grandfathered_mxid_source_attribute
         )
-        self._mxid_mapper = hs.config.saml2_mxid_mapper
+
+        # plugin to do custom mapping from saml response to mxid
+        self._user_mapping_provider = hs.config.saml2_user_mapping_provider_class(
+            hs.config.saml2_user_mapping_provider_config
+        )
 
         # identifier for the external_ids table
         self._auth_provider_id = "saml"
@@ -118,22 +137,10 @@ class SamlHandler:
             remote_user_id = saml2_auth.ava["uid"][0]
         except KeyError:
             logger.warning("SAML2 response lacks a 'uid' attestation")
-            raise SynapseError(400, "uid not in SAML2 response")
-
-        try:
-            mxid_source = saml2_auth.ava[self._mxid_source_attribute][0]
-        except KeyError:
-            logger.warning(
-                "SAML2 response lacks a '%s' attestation", self._mxid_source_attribute
-            )
-            raise SynapseError(
-                400, "%s not in SAML2 response" % (self._mxid_source_attribute,)
-            )
+            raise SynapseError(400, "'uid' not in SAML2 response")
 
         self._outstanding_requests_dict.pop(saml2_auth.in_response_to, None)
 
-        displayName = saml2_auth.ava.get("displayName", [None])[0]
-
         with (await self._mapping_lock.queue(self._auth_provider_id)):
             # first of all, check if we already have a mapping for this user
             logger.info(
@@ -173,22 +180,46 @@ class SamlHandler:
                     )
                     return registered_user_id
 
-            # figure out a new mxid for this user
-            base_mxid_localpart = self._mxid_mapper(mxid_source)
+            # Map saml response to user attributes using the configured mapping provider
+            for i in range(1000):
+                attribute_dict = self._user_mapping_provider.saml_response_to_user_attributes(
+                    saml2_auth, i
+                )
+
+                logger.debug(
+                    "Retrieved SAML attributes from user mapping provider: %s "
+                    "(attempt %d)",
+                    attribute_dict,
+                    i,
+                )
+
+                localpart = attribute_dict.get("mxid_localpart")
+                if not localpart:
+                    logger.error(
+                        "SAML mapping provider plugin did not return a "
+                        "mxid_localpart object"
+                    )
+                    raise SynapseError(500, "Error parsing SAML2 response")
 
-            suffix = 0
-            while True:
-                localpart = base_mxid_localpart + (str(suffix) if suffix else "")
+                displayname = attribute_dict.get("displayname")
+
+                # Check if this mxid already exists
                 if not await self._datastore.get_users_by_id_case_insensitive(
                     UserID(localpart, self._hostname).to_string()
                 ):
+                    # This mxid is free
                     break
-                suffix += 1
-            logger.info("Allocating mxid for new user with localpart %s", localpart)
+            else:
+                # Unable to generate a username in 1000 iterations;
+                # give up and return an error to the user
+                raise SynapseError(
+                    500, "Unable to generate a Matrix ID from the SAML response"
+                )
 
             registered_user_id = await self._registration_handler.register_user(
-                localpart=localpart, default_display_name=displayName
+                localpart=localpart, default_display_name=displayname
             )
+
             await self._datastore.record_user_external_id(
                 self._auth_provider_id, remote_user_id, registered_user_id
             )
@@ -205,9 +236,120 @@ class SamlHandler:
             del self._outstanding_requests_dict[reqid]
 
 
+DOT_REPLACE_PATTERN = re.compile(
+    ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),))
+)
+
+
+def dot_replace_for_mxid(username: str) -> str:
+    username = username.lower()
+    username = DOT_REPLACE_PATTERN.sub(".", username)
+
+    # regular mxids aren't allowed to start with an underscore either
+    username = re.sub("^_", "", username)
+    return username
+
+
+MXID_MAPPER_MAP = {
+    "hexencode": map_username_to_mxid_localpart,
+    "dotreplace": dot_replace_for_mxid,
+}
+
+
 @attr.s
-class Saml2SessionData:
-    """Data we track about SAML2 sessions"""
+class SamlConfig(object):
+    mxid_source_attribute = attr.ib()
+    mxid_mapper = attr.ib()
 
-    # time the session was created, in milliseconds
-    creation_time = attr.ib()
+
+class DefaultSamlMappingProvider(object):
+    __version__ = "0.0.1"
+
+    def __init__(self, parsed_config: SamlConfig):
+        """The default SAML user mapping provider
+
+        Args:
+            parsed_config: Module configuration
+        """
+        self._mxid_source_attribute = parsed_config.mxid_source_attribute
+        self._mxid_mapper = parsed_config.mxid_mapper
+
+    def saml_response_to_user_attributes(
+        self, saml_response: saml2.response.AuthnResponse, failures: int = 0,
+    ) -> dict:
+        """Maps some text from a SAML response to attributes of a new user
+
+        Args:
+            saml_response: A SAML auth response object
+
+            failures: How many times a call to this function with this
+                saml_response has resulted in a failure
+
+        Returns:
+            dict: A dict containing new user attributes. Possible keys:
+                * mxid_localpart (str): Required. The localpart of the user's mxid
+                * displayname (str): The displayname of the user
+        """
+        try:
+            mxid_source = saml_response.ava[self._mxid_source_attribute][0]
+        except KeyError:
+            logger.warning(
+                "SAML2 response lacks a '%s' attestation", self._mxid_source_attribute,
+            )
+            raise SynapseError(
+                400, "%s not in SAML2 response" % (self._mxid_source_attribute,)
+            )
+
+        # Use the configured mapper for this mxid_source
+        base_mxid_localpart = self._mxid_mapper(mxid_source)
+
+        # Append suffix integer if last call to this function failed to produce
+        # a usable mxid
+        localpart = base_mxid_localpart + (str(failures) if failures else "")
+
+        # Retrieve the display name from the saml response
+        # If displayname is None, the mxid_localpart will be used instead
+        displayname = saml_response.ava.get("displayName", [None])[0]
+
+        return {
+            "mxid_localpart": localpart,
+            "displayname": displayname,
+        }
+
+    @staticmethod
+    def parse_config(config: dict) -> SamlConfig:
+        """Parse the dict provided by the homeserver's config
+        Args:
+            config: A dictionary containing configuration options for this provider
+        Returns:
+            SamlConfig: A custom config object for this module
+        """
+        # Parse config options and use defaults where necessary
+        mxid_source_attribute = config.get("mxid_source_attribute", "uid")
+        mapping_type = config.get("mxid_mapping", "hexencode")
+
+        # Retrieve the associating mapping function
+        try:
+            mxid_mapper = MXID_MAPPER_MAP[mapping_type]
+        except KeyError:
+            raise ConfigError(
+                "saml2_config.user_mapping_provider.config: '%s' is not a valid "
+                "mxid_mapping value" % (mapping_type,)
+            )
+
+        return SamlConfig(mxid_source_attribute, mxid_mapper)
+
+    @staticmethod
+    def get_saml_attributes(config: SamlConfig) -> Tuple[set, set]:
+        """Returns the required attributes of a SAML
+
+        Args:
+            config: A SamlConfig object containing configuration params for this provider
+
+        Returns:
+            tuple[set,set]: The first set consists of the SAML auth response
+                attributes that are required for the module to function, whereas
+                the second set consists of those attributes which can be used
+                if available, but are not necessary
+        """
+        return {"uid", config.mxid_source_attribute}, {"displayName"}
-- 
cgit 1.4.1
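
The user mapping provider interface above needs only three entry points: parse_config, saml_response_to_user_attributes and get_saml_attributes. As a sketch only, here is what a hypothetical third-party provider deriving the localpart from an "email" SAML attribute might look like (none of these names are part of Synapse):

    import re


    class EmailSamlMappingProvider:
        """Hypothetical provider: derive the mxid localpart from an email."""

        def __init__(self, parsed_config: dict):
            self._source_attribute = parsed_config["source_attribute"]

        @staticmethod
        def parse_config(config: dict) -> dict:
            # `config` is the user_mapping_provider.config dict from the YAML.
            return {"source_attribute": config.get("source_attribute", "email")}

        def saml_response_to_user_attributes(self, saml_response, failures: int = 0) -> dict:
            email = saml_response.ava[self._source_attribute][0]
            # Localpart is the mailbox name; append the failure count so a
            # retry after a collision produces a different candidate.
            localpart = re.sub(r"@.*", "", email).lower()
            if failures:
                localpart += str(failures)
            return {"mxid_localpart": localpart, "displayname": None}

        @staticmethod
        def get_saml_attributes(config: dict) -> tuple:
            # (required, optional) SAML attributes for this provider.
            return {config["source_attribute"]}, set()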


From f8bc2ae8830615698ae683cafe4fdddb9a05a1f9 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 10 Dec 2019 17:42:46 +0000
Subject: Move get_state methods into FederationHandler (#6503)

This is a non-functional refactor as a precursor to some other work.
---
 changelog.d/6503.misc                   |   1 +
 synapse/federation/federation_client.py |  91 ++++------------------------
 synapse/handlers/federation.py          | 101 ++++++++++++++++++++++++++++++--
 3 files changed, 107 insertions(+), 86 deletions(-)
 create mode 100644 changelog.d/6503.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6503.misc b/changelog.d/6503.misc
new file mode 100644
index 0000000000..e4e9a5a3d4
--- /dev/null
+++ b/changelog.d/6503.misc
@@ -0,0 +1 @@
+Move get_state methods into FederationHandler.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 73e1dda6a3..d396e6564f 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -37,9 +37,9 @@ from synapse.api.room_versions import (
 )
 from synapse.events import builder, room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
-from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.logging.context import make_deferred_yieldable
 from synapse.logging.utils import log_function
-from synapse.util import batch_iter, unwrapFirstError
+from synapse.util import unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.retryutils import NotRetryingDestination
 
@@ -308,19 +308,12 @@ class FederationClient(FederationBase):
         return signed_pdu
 
     @defer.inlineCallbacks
-    @log_function
-    def get_state_for_room(self, destination, room_id, event_id):
-        """Requests all of the room state at a given event from a remote homeserver.
-
-        Args:
-            destination (str): The remote homeserver to query for the state.
-            room_id (str): The id of the room we're interested in.
-            event_id (str): The id of the event we want the state at.
+    def get_room_state_ids(self, destination: str, room_id: str, event_id: str):
+        """Calls the /state_ids endpoint to fetch the state at a particular point
+        in the room, and the auth events for the given event
 
         Returns:
-            Deferred[Tuple[List[EventBase], List[EventBase]]]:
-                A list of events in the state, and a list of events in the auth chain
-                for the given event.
+            Tuple[List[str], List[str]]: a tuple of (state event_ids, auth event_ids)
         """
         result = yield self.transport_layer.get_room_state_ids(
             destination, room_id, event_id=event_id
@@ -329,74 +322,12 @@ class FederationClient(FederationBase):
         state_event_ids = result["pdu_ids"]
         auth_event_ids = result.get("auth_chain_ids", [])
 
-        desired_events = set(state_event_ids + auth_event_ids)
-        event_map = yield self.get_events_from_store_or_dest(
-            destination, room_id, desired_events
-        )
-
-        failed_to_fetch = desired_events - event_map.keys()
-        if failed_to_fetch:
-            logger.warning(
-                "Failed to fetch missing state/auth events for %s: %s",
-                room_id,
-                failed_to_fetch,
-            )
-
-        pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
-        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
-
-        auth_chain.sort(key=lambda e: e.depth)
-
-        return pdus, auth_chain
-
-    @defer.inlineCallbacks
-    def get_events_from_store_or_dest(self, destination, room_id, event_ids):
-        """Fetch events from a remote destination, checking if we already have them.
-
-        Args:
-            destination (str)
-            room_id (str)
-            event_ids (Iterable[str])
-
-        Returns:
-            Deferred[dict[str, EventBase]]: A deferred resolving to a map
-            from event_id to event
-        """
-        fetched_events = yield self.store.get_events(event_ids, allow_rejected=True)
-
-        missing_events = set(event_ids) - fetched_events.keys()
-
-        if not missing_events:
-            return fetched_events
-
-        logger.debug(
-            "Fetching unknown state/auth events %s for room %s",
-            missing_events,
-            event_ids,
-        )
-
-        room_version = yield self.store.get_room_version(room_id)
-
-        # XXX 20 requests at once? really?
-        for batch in batch_iter(missing_events, 20):
-            deferreds = [
-                run_in_background(
-                    self.get_pdu,
-                    destinations=[destination],
-                    event_id=e_id,
-                    room_version=room_version,
-                )
-                for e_id in batch
-            ]
-
-            res = yield make_deferred_yieldable(
-                defer.DeferredList(deferreds, consumeErrors=True)
-            )
-            for success, result in res:
-                if success and result:
-                    fetched_events[result.event_id] = result
+        if not isinstance(state_event_ids, list) or not isinstance(
+            auth_event_ids, list
+        ):
+            raise Exception("invalid response from /state_ids")
 
-        return fetched_events
+        return state_event_ids, auth_event_ids
 
     @defer.inlineCallbacks
     @log_function
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index bc26921768..c0dcf9abf8 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -64,7 +64,7 @@ from synapse.replication.http.federation import (
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
 from synapse.state import StateResolutionStore, resolve_events_with_store
 from synapse.types import UserID, get_domain_from_id
-from synapse.util import unwrapFirstError
+from synapse.util import batch_iter, unwrapFirstError
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_joined_room
 from synapse.util.retryutils import NotRetryingDestination
@@ -379,11 +379,9 @@ class FederationHandler(BaseHandler):
                             (
                                 remote_state,
                                 got_auth_chain,
-                            ) = yield self.federation_client.get_state_for_room(
-                                origin, room_id, p
-                            )
+                            ) = yield self._get_state_for_room(origin, room_id, p)
 
-                            # we want the state *after* p; get_state_for_room returns the
+                            # we want the state *after* p; _get_state_for_room returns the
                             # state *before* p.
                             remote_event = yield self.federation_client.get_pdu(
                                 [origin], p, room_version, outlier=True
@@ -583,6 +581,97 @@ class FederationHandler(BaseHandler):
                     else:
                         raise
 
+    @defer.inlineCallbacks
+    @log_function
+    def _get_state_for_room(self, destination, room_id, event_id):
+        """Requests all of the room state at a given event from a remote homeserver.
+
+        Args:
+            destination (str): The remote homeserver to query for the state.
+            room_id (str): The id of the room we're interested in.
+            event_id (str): The id of the event we want the state at.
+
+        Returns:
+            Deferred[Tuple[List[EventBase], List[EventBase]]]:
+                A list of events in the state, and a list of events in the auth chain
+                for the given event.
+        """
+        (
+            state_event_ids,
+            auth_event_ids,
+        ) = yield self.federation_client.get_room_state_ids(
+            destination, room_id, event_id=event_id
+        )
+
+        desired_events = set(state_event_ids + auth_event_ids)
+        event_map = yield self._get_events_from_store_or_dest(
+            destination, room_id, desired_events
+        )
+
+        failed_to_fetch = desired_events - event_map.keys()
+        if failed_to_fetch:
+            logger.warning(
+                "Failed to fetch missing state/auth events for %s: %s",
+                room_id,
+                failed_to_fetch,
+            )
+
+        pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
+        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
+
+        auth_chain.sort(key=lambda e: e.depth)
+
+        return pdus, auth_chain
+
+    @defer.inlineCallbacks
+    def _get_events_from_store_or_dest(self, destination, room_id, event_ids):
+        """Fetch events from a remote destination, checking if we already have them.
+
+        Args:
+            destination (str)
+            room_id (str)
+            event_ids (Iterable[str])
+
+        Returns:
+            Deferred[dict[str, EventBase]]: A deferred resolving to a map
+            from event_id to event
+        """
+        fetched_events = yield self.store.get_events(event_ids, allow_rejected=True)
+
+        missing_events = set(event_ids) - fetched_events.keys()
+
+        if not missing_events:
+            return fetched_events
+
+        logger.debug(
+            "Fetching unknown state/auth events %s for room %s",
+            missing_events,
+            room_id,
+        )
+
+        room_version = yield self.store.get_room_version(room_id)
+
+        # XXX 20 requests at once? really?
+        for batch in batch_iter(missing_events, 20):
+            deferreds = [
+                run_in_background(
+                    self.federation_client.get_pdu,
+                    destinations=[destination],
+                    event_id=e_id,
+                    room_version=room_version,
+                )
+                for e_id in batch
+            ]
+
+            res = yield make_deferred_yieldable(
+                defer.DeferredList(deferreds, consumeErrors=True)
+            )
+            for success, result in res:
+                if success and result:
+                    fetched_events[result.event_id] = result
+
+        return fetched_events
+
     @defer.inlineCallbacks
     def _process_received_pdu(self, origin, event, state, auth_chain):
         """ Called when we have a new pdu. We need to do auth checks and put it
@@ -723,7 +812,7 @@ class FederationHandler(BaseHandler):
         state_events = {}
         events_to_state = {}
         for e_id in edges:
-            state, auth = yield self.federation_client.get_state_for_room(
+            state, auth = yield self._get_state_for_room(
                 destination=dest, room_id=room_id, event_id=e_id
             )
             auth_events.update({a.event_id: a for a in auth})
-- 
cgit 1.4.1
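
The new get_room_state_ids deliberately validates the shape of the response before handing the IDs onward. A standalone sketch of that check, with the transport call left out (nothing here is a Synapse API):

    def parse_state_ids_response(result: dict) -> tuple:
        # /state_ids returns {"pdu_ids": [...], "auth_chain_ids": [...]};
        # reject anything that is not a pair of lists.
        state_event_ids = result.get("pdu_ids")
        auth_event_ids = result.get("auth_chain_ids", [])
        if not isinstance(state_event_ids, list) or not isinstance(auth_event_ids, list):
            raise ValueError("invalid response from /state_ids")
        return state_event_ids, auth_event_ids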


From 72acca6a32697a53f8f659e641d65dbf25ff6b4d Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 11 Dec 2019 11:46:55 +0000
Subject: Back out change preventing setting null avatar URLs

---
 changelog.d/6497.bugfix           |  2 +-
 synapse/rest/client/v1/profile.py | 12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6497.bugfix b/changelog.d/6497.bugfix
index 92ed08fc40..6a2644d8e6 100644
--- a/changelog.d/6497.bugfix
+++ b/changelog.d/6497.bugfix
@@ -1 +1 @@
-Fix error message when setting your profile's avatar URL mentioning displaynames, and prevent NoneType avatar_urls.
\ No newline at end of file
+Fix incorrect error message for invalid requests when setting user's avatar URL.
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index 4f47562c1b..e7fe50ed72 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -15,6 +15,7 @@
 
 """ This module contains REST servlets to do with profile: /profile/ """
 
+from synapse.api.errors import Codes, SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.types import UserID
@@ -103,12 +104,11 @@ class ProfileAvatarURLRestServlet(RestServlet):
 
         content = parse_json_object_from_request(request)
         try:
-            new_avatar_url = content.get("avatar_url")
-        except Exception:
-            return 400, "Unable to parse avatar_url"
-
-        if new_avatar_url is None:
-            return 400, "Missing required key: avatar_url"
+            new_avatar_url = content["avatar_url"]
+        except KeyError:
+            raise SynapseError(
+                400, "Missing key 'avatar_url'", errcode=Codes.MISSING_PARAM
+            )
 
         await self.profile_handler.set_avatar_url(
             user, requester, new_avatar_url, is_admin
-- 
cgit 1.4.1
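
The corrected servlet turns a missing avatar_url into a structured Matrix error instead of a bare (400, message) tuple. A minimal sketch of that pattern, assuming M_MISSING_PARAM is the desired errcode (the helper class below is illustrative, not Synapse code):

    class ApiError(Exception):
        """Stand-in for SynapseError: an HTTP status plus a Matrix errcode."""

        def __init__(self, code: int, msg: str, errcode: str):
            super().__init__(msg)
            self.code = code
            self.errcode = errcode


    def require_key(content: dict, key: str):
        try:
            return content[key]
        except KeyError:
            raise ApiError(400, "Missing key '%s'" % (key,), errcode="M_MISSING_PARAM")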


From ea0f0ad4144e3ce0cf10f3ec461ecd8f654955a2 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 11 Dec 2019 13:07:25 +0000
Subject: Prevent message search in upgraded rooms we're not in (#6385)

---
 changelog.d/6385.bugfix                   |  1 +
 synapse/handlers/federation.py            |  4 ++--
 synapse/handlers/search.py                | 34 +++++++++++++++++++++++--------
 synapse/storage/data_stores/main/state.py | 18 +++++++++++-----
 4 files changed, 41 insertions(+), 16 deletions(-)
 create mode 100644 changelog.d/6385.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6385.bugfix b/changelog.d/6385.bugfix
new file mode 100644
index 0000000000..7a2bc02170
--- /dev/null
+++ b/changelog.d/6385.bugfix
@@ -0,0 +1 @@
+Prevent error on trying to search an upgraded room when the server is not in the predecessor room.
\ No newline at end of file
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index c0dcf9abf8..13865c470c 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1299,7 +1299,7 @@ class FederationHandler(BaseHandler):
             # Check whether this room is the result of an upgrade of a room we already know
             # about. If so, migrate over user information
             predecessor = yield self.store.get_room_predecessor(room_id)
-            if not predecessor:
+            if not predecessor or not isinstance(predecessor.get("room_id"), str):
                 return
             old_room_id = predecessor["room_id"]
             logger.debug(
@@ -1542,7 +1542,7 @@ class FederationHandler(BaseHandler):
     @defer.inlineCallbacks
     def do_remotely_reject_invite(self, target_hosts, room_id, user_id, content):
         origin, event, event_format_version = yield self._make_and_verify_event(
-            target_hosts, room_id, user_id, "leave", content=content,
+            target_hosts, room_id, user_id, "leave", content=content
         )
         # Mark as outlier as we don't have any state for this event; we're not
         # even in the room.
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 56ed262a1f..ef750d1497 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -21,7 +21,7 @@ from unpaddedbase64 import decode_base64, encode_base64
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import SynapseError
+from synapse.api.errors import NotFoundError, SynapseError
 from synapse.api.filtering import Filter
 from synapse.storage.state import StateFilter
 from synapse.visibility import filter_events_for_client
@@ -37,6 +37,7 @@ class SearchHandler(BaseHandler):
         self._event_serializer = hs.get_event_client_serializer()
         self.storage = hs.get_storage()
         self.state_store = self.storage.state
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def get_old_rooms_from_upgraded_room(self, room_id):
@@ -53,23 +54,38 @@ class SearchHandler(BaseHandler):
             room_id (str): id of the room to search through.
 
         Returns:
-            Deferred[iterable[unicode]]: predecessor room ids
+            Deferred[iterable[str]]: predecessor room ids
         """
 
         historical_room_ids = []
 
-        while True:
-            predecessor = yield self.store.get_room_predecessor(room_id)
+        # The initial room must have been known for us to get this far
+        predecessor = yield self.store.get_room_predecessor(room_id)
 
-            # If no predecessor, assume we've hit a dead end
+        while True:
             if not predecessor:
+                # We have reached the end of the chain of predecessors
+                break
+
+            if not isinstance(predecessor.get("room_id"), str):
+                # This predecessor object is malformed. Exit here
+                break
+
+            predecessor_room_id = predecessor["room_id"]
+
+            # Don't add it to the list until we have checked that we are in the room
+            try:
+                next_predecessor_room = yield self.store.get_room_predecessor(
+                    predecessor_room_id
+                )
+            except NotFoundError:
+                # The predecessor is not a known room, so we are done here
                 break
 
-            # Add predecessor's room ID
-            historical_room_ids.append(predecessor["room_id"])
+            historical_room_ids.append(predecessor_room_id)
 
-            # Scan through the old room for further predecessors
-            room_id = predecessor["room_id"]
+            # And repeat
+            predecessor = next_predecessor_room
 
         return historical_room_ids
 
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index 9ef7b48c74..dcc6b43cdf 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -278,7 +278,7 @@ class StateGroupWorkerStore(
 
     @defer.inlineCallbacks
     def get_room_predecessor(self, room_id):
-        """Get the predecessor room of an upgraded room if one exists.
+        """Get the predecessor of an upgraded room if it exists.
         Otherwise return None.
 
         Args:
@@ -291,14 +291,22 @@ class StateGroupWorkerStore(
                     * room_id (str): The room ID of the predecessor room
                     * event_id (str): The ID of the tombstone event in the predecessor room
 
+                None if the predecessor key is not found, or is not a dictionary.
+
         Raises:
-            NotFoundError if the room is unknown
+            NotFoundError if the given room is unknown
         """
         # Retrieve the room's create event
         create_event = yield self.get_create_event_for_room(room_id)
 
-        # Return predecessor if present
-        return create_event.content.get("predecessor", None)
+        # Retrieve the predecessor key of the create event
+        predecessor = create_event.content.get("predecessor", None)
+
+        # Ensure the key is a dictionary
+        if not isinstance(predecessor, dict):
+            return None
+
+        return predecessor
 
     @defer.inlineCallbacks
     def get_create_event_for_room(self, room_id):
@@ -318,7 +326,7 @@ class StateGroupWorkerStore(
 
         # If we can't find the create event, assume we've hit a dead end
         if not create_id:
-            raise NotFoundError("Unknown room %s" % (room_id))
+            raise NotFoundError("Unknown room %s" % (room_id,))
 
         # Retrieve the room's create event and return
         create_event = yield self.get_event(create_id)
-- 
cgit 1.4.1
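
The loop above is easier to see in isolation: walk the predecessor chain, stop on a malformed entry, and only record a room once its own lookup has succeeded. A framework-free sketch (get_predecessor stands in for the storage call; the hop cap is an extra safety net, not in the original):

    def walk_predecessors(get_predecessor, room_id: str, max_hops: int = 100) -> list:
        historical_room_ids = []
        predecessor = get_predecessor(room_id)
        for _ in range(max_hops):
            if not predecessor:
                break  # end of the chain of predecessors
            prev_room_id = predecessor.get("room_id")
            if not isinstance(prev_room_id, str):
                break  # malformed predecessor content; stop rather than crash
            try:
                predecessor = get_predecessor(prev_room_id)
            except KeyError:  # stand-in for NotFoundError: room unknown to us
                break
            historical_room_ids.append(prev_room_id)
        return historical_room_ids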


From 6676ee9c4a74e15afdd752e05ca38d82da94c2c1 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 11 Dec 2019 13:16:01 +0000
Subject: Add dev script to generate full SQL schema files (#6394)

---
 changelog.d/6394.feature                           |   1 +
 scripts-dev/make_full_schema.sh                    | 184 +++++++++++++++++++++
 .../data_stores/main/schema/full_schemas/README.md |  13 ++
 .../main/schema/full_schemas/README.txt            |  19 ---
 4 files changed, 198 insertions(+), 19 deletions(-)
 create mode 100644 changelog.d/6394.feature
 create mode 100755 scripts-dev/make_full_schema.sh
 create mode 100644 synapse/storage/data_stores/main/schema/full_schemas/README.md
 delete mode 100644 synapse/storage/data_stores/main/schema/full_schemas/README.txt

(limited to 'changelog.d')

diff --git a/changelog.d/6394.feature b/changelog.d/6394.feature
new file mode 100644
index 0000000000..1a0e8845ad
--- /dev/null
+++ b/changelog.d/6394.feature
@@ -0,0 +1 @@
+Add a dev script to generate full SQL schema files.
\ No newline at end of file
diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh
new file mode 100755
index 0000000000..60e8970a35
--- /dev/null
+++ b/scripts-dev/make_full_schema.sh
@@ -0,0 +1,184 @@
+#!/bin/bash
+#
+# This script generates SQL files for creating a brand new Synapse DB with the latest
+# schema, on both SQLite3 and Postgres.
+#
+# It does so by having Synapse generate an up-to-date SQLite DB, then running
+# synapse_port_db to convert it to Postgres. It then dumps the contents of both.
+
+POSTGRES_HOST="localhost"
+POSTGRES_DB_NAME="synapse_full_schema.$$"
+
+SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite"
+POSTGRES_FULL_SCHEMA_OUTPUT_FILE="full.sql.postgres"
+
+REQUIRED_DEPS=("matrix-synapse" "psycopg2")
+
+usage() {
+  echo
+  echo "Usage: $0 -p  -o  [-c] [-n] [-h]"
+  echo
+  echo "-p "
+  echo "  Username to connect to local postgres instance. The password will be requested"
+  echo "  during script execution."
+  echo "-c"
+  echo "  CI mode. Enables coverage tracking and prints every command that the script runs."
+  echo "-o "
+  echo "  Directory to output full schema files to."
+  echo "-h"
+  echo "  Display this help text."
+}
+
+while getopts "p:co:h" opt; do
+  case $opt in
+    p)
+      POSTGRES_USERNAME=$OPTARG
+      ;;
+    c)
+      # Print all commands that are being executed
+      set -x
+
+      # Modify required dependencies for coverage
+      REQUIRED_DEPS+=("coverage" "coverage-enable-subprocess")
+
+      COVERAGE=1
+      ;;
+    o)
+      command -v realpath > /dev/null || { echo "The -o flag requires the 'realpath' binary to be installed" >&2; exit 1; }
+      OUTPUT_DIR="$(realpath "$OPTARG")"
+      ;;
+    h)
+      usage
+      exit
+      ;;
+    \?)
+      echo "ERROR: Invalid option: -$OPTARG" >&2
+      usage
+      exit
+      ;;
+  esac
+done
+
+# Check that required dependencies are installed
+unsatisfied_requirements=()
+for dep in "${REQUIRED_DEPS[@]}"; do
+  pip show "$dep" --quiet || unsatisfied_requirements+=("$dep")
+done
+if [ ${#unsatisfied_requirements[@]} -ne 0 ]; then
+  echo "Please install the following python packages: ${unsatisfied_requirements[*]}"
+  exit 1
+fi
+
+if [ -z "$POSTGRES_USERNAME" ]; then
+  echo "No postgres username supplied"
+  usage
+  exit 1
+fi
+
+if [ -z "$OUTPUT_DIR" ]; then
+  echo "No output directory supplied"
+  usage
+  exit 1
+fi
+
+# Create the output directory if it doesn't exist
+mkdir -p "$OUTPUT_DIR"
+
+read -rsp "Postgres password for '$POSTGRES_USERNAME': " POSTGRES_PASSWORD
+echo ""
+
+# Exit immediately if a command fails
+set -e
+
+# cd to root of the synapse directory
+cd "$(dirname "$0")/.."
+
+# Create temporary SQLite and Postgres homeserver db configs and key file
+TMPDIR=$(mktemp -d)
+KEY_FILE=$TMPDIR/test.signing.key # default Synapse signing key path
+SQLITE_CONFIG=$TMPDIR/sqlite.conf
+SQLITE_DB=$TMPDIR/homeserver.db
+POSTGRES_CONFIG=$TMPDIR/postgres.conf
+
+# Ensure these files are deleted on script exit
+trap 'rm -rf $TMPDIR' EXIT
+
+cat > "$SQLITE_CONFIG" < "$POSTGRES_CONFIG" < "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE"
+
+echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE'..."
+pg_dump --format=plain --no-tablespaces --no-acl --no-owner $POSTGRES_DB_NAME | sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE"
+
+echo "Cleaning up temporary Postgres database..."
+dropdb $POSTGRES_DB_NAME
+
+echo "Done! Files dumped to: $OUTPUT_DIR"
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/README.md b/synapse/storage/data_stores/main/schema/full_schemas/README.md
new file mode 100644
index 0000000000..bbd3f18604
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/full_schemas/README.md
@@ -0,0 +1,13 @@
+# Building full schema dumps
+
+These schemas need to be made from a database that has had all background updates run.
+
+To do so, use `scripts-dev/make_full_schema.sh`. This will produce
+`full.sql.postgres` and `full.sql.sqlite` files.
+
+Ensure postgres is installed and your user has the ability to run postgres
+commands such as `createdb`.
+
+```
+./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/
+```
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/README.txt b/synapse/storage/data_stores/main/schema/full_schemas/README.txt
deleted file mode 100644
index d3f6401344..0000000000
--- a/synapse/storage/data_stores/main/schema/full_schemas/README.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Building full schema dumps
-==========================
-
-These schemas need to be made from a database that has had all background updates run.
-
-Postgres
---------
-
-$ pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner $DATABASE_NAME| sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > full.sql.postgres
-
-SQLite
-------
-
-$ sqlite3 $DATABASE_FILE ".schema" > full.sql.sqlite
-
-After
------
-
-Delete the CREATE statements for "sqlite_stat1", "schema_version", "applied_schema_deltas", and "applied_module_schemas".
\ No newline at end of file
-- 
cgit 1.4.1
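
The script's dependency check shells out to `pip show` once per package. An equivalent check from inside Python, offered only as a sketch of the same idea, can ask importlib.metadata instead:

    from importlib.metadata import PackageNotFoundError, version

    REQUIRED_DEPS = ["matrix-synapse", "psycopg2"]


    def _installed(dist_name: str) -> bool:
        # A distribution with no metadata is treated as not installed,
        # mirroring the script's `pip show` loop.
        try:
            version(dist_name)
            return True
        except PackageNotFoundError:
            return False


    missing = [dep for dep in REQUIRED_DEPS if not _installed(dep)]
    if missing:
        raise SystemExit(
            "Please install the following python packages: %s" % " ".join(missing)
        )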


From fc316a4894912f49f5d0321e533aabca5624b0ba Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 11 Dec 2019 13:39:47 +0000
Subject: Prevent redacted events from appearing in message search (#6377)

---
 changelog.d/6377.bugfix                           |  1 +
 synapse/handlers/federation.py                    |  7 +-
 synapse/handlers/message.py                       |  5 +-
 synapse/state/__init__.py                         |  3 +-
 synapse/storage/data_stores/main/events_worker.py | 97 ++++++++++++++---------
 synapse/storage/data_stores/main/search.py        |  8 +-
 6 files changed, 78 insertions(+), 43 deletions(-)
 create mode 100644 changelog.d/6377.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6377.bugfix b/changelog.d/6377.bugfix
new file mode 100644
index 0000000000..ccda96962f
--- /dev/null
+++ b/changelog.d/6377.bugfix
@@ -0,0 +1 @@
+Prevent redacted events from being returned during message search.
\ No newline at end of file
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 13865c470c..8f3c9d7702 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -63,6 +63,7 @@ from synapse.replication.http.federation import (
 )
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
 from synapse.state import StateResolutionStore, resolve_events_with_store
+from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
 from synapse.types import UserID, get_domain_from_id
 from synapse.util import batch_iter, unwrapFirstError
 from synapse.util.async_helpers import Linearizer
@@ -423,7 +424,7 @@ class FederationHandler(BaseHandler):
                     evs = yield self.store.get_events(
                         list(state_map.values()),
                         get_prev_content=False,
-                        check_redacted=False,
+                        redact_behaviour=EventRedactBehaviour.AS_IS,
                     )
                     event_map.update(evs)
 
@@ -1000,7 +1001,9 @@ class FederationHandler(BaseHandler):
         forward_events = yield self.store.get_successor_events(list(extremities))
 
         extremities_events = yield self.store.get_events(
-            forward_events, check_redacted=False, get_prev_content=False
+            forward_events,
+            redact_behaviour=EventRedactBehaviour.AS_IS,
+            get_prev_content=False,
         )
 
         # We set `check_history_visibility_only` as we might otherwise get false
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 54fa216d83..bf9add7fe2 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -46,6 +46,7 @@ from synapse.events.validator import EventValidator
 from synapse.logging.context import run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.send_event import ReplicationSendEventRestServlet
+from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
 from synapse.storage.state import StateFilter
 from synapse.types import RoomAlias, UserID, create_requester
 from synapse.util.async_helpers import Linearizer
@@ -875,7 +876,7 @@ class EventCreationHandler(object):
             if event.type == EventTypes.Redaction:
                 original_event = yield self.store.get_event(
                     event.redacts,
-                    check_redacted=False,
+                    redact_behaviour=EventRedactBehaviour.AS_IS,
                     get_prev_content=False,
                     allow_rejected=False,
                     allow_none=True,
@@ -952,7 +953,7 @@ class EventCreationHandler(object):
         if event.type == EventTypes.Redaction:
             original_event = yield self.store.get_event(
                 event.redacts,
-                check_redacted=False,
+                redact_behaviour=EventRedactBehaviour.AS_IS,
                 get_prev_content=False,
                 allow_rejected=False,
                 allow_none=True,
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 139beef8ed..3e6d62eef1 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -32,6 +32,7 @@ from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.logging.utils import log_function
 from synapse.state import v1, v2
+from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches import get_cache_factor_for
 from synapse.util.caches.expiringcache import ExpiringCache
@@ -645,7 +646,7 @@ class StateResolutionStore(object):
 
         return self.store.get_events(
             event_ids,
-            check_redacted=False,
+            redact_behaviour=EventRedactBehaviour.AS_IS,
             get_prev_content=False,
             allow_rejected=allow_rejected,
         )
diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py
index 9ee117ce0f..2c9142814c 100644
--- a/synapse/storage/data_stores/main/events_worker.py
+++ b/synapse/storage/data_stores/main/events_worker.py
@@ -19,8 +19,10 @@ import itertools
 import logging
 import threading
 from collections import namedtuple
+from typing import List, Optional
 
 from canonicaljson import json
+from constantly import NamedConstant, Names
 
 from twisted.internet import defer
 
@@ -55,6 +57,16 @@ EVENT_QUEUE_TIMEOUT_S = 0.1  # Timeout when waiting for requests for events
 _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
 
 
+class EventRedactBehaviour(Names):
+    """
+    What to do when retrieving a redacted event from the database.
+    """
+
+    AS_IS = NamedConstant()
+    REDACT = NamedConstant()
+    BLOCK = NamedConstant()
+
+
 class EventsWorkerStore(SQLBaseStore):
     def __init__(self, database: Database, db_conn, hs):
         super(EventsWorkerStore, self).__init__(database, db_conn, hs)
@@ -125,25 +137,27 @@ class EventsWorkerStore(SQLBaseStore):
     @defer.inlineCallbacks
     def get_event(
         self,
-        event_id,
-        check_redacted=True,
-        get_prev_content=False,
-        allow_rejected=False,
-        allow_none=False,
-        check_room_id=None,
+        event_id: str,
+        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
+        get_prev_content: bool = False,
+        allow_rejected: bool = False,
+        allow_none: bool = False,
+        check_room_id: Optional[str] = None,
     ):
         """Get an event from the database by event_id.
 
         Args:
-            event_id (str): The event_id of the event to fetch
-            check_redacted (bool): If True, check if event has been redacted
-                and redact it.
-            get_prev_content (bool): If True and event is a state event,
+            event_id: The event_id of the event to fetch
+            redact_behaviour: Determine what to do with a redacted event. Possible values:
+                * AS_IS - Return the full event body with no redacted content
+                * REDACT - Return the event but with a redacted body
+                * BLOCK - Do not return redacted events
+            get_prev_content: If True and event is a state event,
                 include the previous states content in the unsigned field.
-            allow_rejected (bool): If True return rejected events.
-            allow_none (bool): If True, return None if no event found, if
+            allow_rejected: If True, return rejected events.
+            allow_none: If True, return None if no event found; if
                 False throw a NotFoundError
-            check_room_id (str|None): if not None, check the room of the found event.
+            check_room_id: if not None, check the room of the found event.
                 If there is a mismatch, behave as per allow_none.
 
         Returns:
@@ -154,7 +168,7 @@ class EventsWorkerStore(SQLBaseStore):
 
         events = yield self.get_events_as_list(
             [event_id],
-            check_redacted=check_redacted,
+            redact_behaviour=redact_behaviour,
             get_prev_content=get_prev_content,
             allow_rejected=allow_rejected,
         )
@@ -173,27 +187,30 @@ class EventsWorkerStore(SQLBaseStore):
     @defer.inlineCallbacks
     def get_events(
         self,
-        event_ids,
-        check_redacted=True,
-        get_prev_content=False,
-        allow_rejected=False,
+        event_ids: List[str],
+        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
+        get_prev_content: bool = False,
+        allow_rejected: bool = False,
     ):
         """Get events from the database
 
         Args:
-            event_ids (list): The event_ids of the events to fetch
-            check_redacted (bool): If True, check if event has been redacted
-                and redact it.
-            get_prev_content (bool): If True and event is a state event,
+            event_ids: The event_ids of the events to fetch
+            redact_behaviour: Determine what to do with a redacted event. Possible
+                values:
+                * AS_IS - Return the full event body with no redacted content
+                * REDACT - Return the event but with a redacted body
+                * BLOCK - Do not return redacted events
+            get_prev_content: If True and event is a state event,
                 include the previous states content in the unsigned field.
-            allow_rejected (bool): If True return rejected events.
+            allow_rejected: If True return rejected events.
 
         Returns:
             Deferred: Dict from event_id to event.
         """
         events = yield self.get_events_as_list(
             event_ids,
-            check_redacted=check_redacted,
+            redact_behaviour=redact_behaviour,
             get_prev_content=get_prev_content,
             allow_rejected=allow_rejected,
         )
@@ -203,21 +220,23 @@ class EventsWorkerStore(SQLBaseStore):
     @defer.inlineCallbacks
     def get_events_as_list(
         self,
-        event_ids,
-        check_redacted=True,
-        get_prev_content=False,
-        allow_rejected=False,
+        event_ids: List[str],
+        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
+        get_prev_content: bool = False,
+        allow_rejected: bool = False,
     ):
         """Get events from the database and return in a list in the same order
         as given by `event_ids` arg.
 
         Args:
-            event_ids (list): The event_ids of the events to fetch
-            check_redacted (bool): If True, check if event has been redacted
-                and redact it.
-            get_prev_content (bool): If True and event is a state event,
+            event_ids: The event_ids of the events to fetch
+            redact_behaviour: Determine what to do with a redacted event. Possible values:
+                * AS_IS - Return the full event body with no redacted content
+                * REDACT - Return the event but with a redacted body
+                * BLOCK - Do not return redacted events
+            get_prev_content: If True and event is a state event,
                 include the previous states content in the unsigned field.
-            allow_rejected (bool): If True return rejected events.
+            allow_rejected: If True, return rejected events.
 
         Returns:
             Deferred[list[EventBase]]: List of events fetched from the database. The
@@ -319,10 +338,14 @@ class EventsWorkerStore(SQLBaseStore):
                     # Update the cache to save doing the checks again.
                     entry.event.internal_metadata.recheck_redaction = False
 
-            if check_redacted and entry.redacted_event:
-                event = entry.redacted_event
-            else:
-                event = entry.event
+            event = entry.event
+
+            if entry.redacted_event:
+                if redact_behaviour == EventRedactBehaviour.BLOCK:
+                    # Skip this event
+                    continue
+                elif redact_behaviour == EventRedactBehaviour.REDACT:
+                    event = entry.redacted_event
 
             events.append(event)
 
diff --git a/synapse/storage/data_stores/main/search.py b/synapse/storage/data_stores/main/search.py
index 4eec2fae5e..dfb46ee0f8 100644
--- a/synapse/storage/data_stores/main/search.py
+++ b/synapse/storage/data_stores/main/search.py
@@ -25,6 +25,7 @@ from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
 from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
+from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
 from synapse.storage.database import Database
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 
@@ -453,7 +454,12 @@ class SearchStore(SearchBackgroundUpdateStore):
 
         results = list(filter(lambda row: row["room_id"] in room_ids, results))
 
-        events = yield self.get_events_as_list([r["event_id"] for r in results])
+        # We set redact_behaviour to BLOCK here to prevent redacted events
+        # from being returned in search results (which would be a data leak)
+        events = yield self.get_events_as_list(
+            [r["event_id"] for r in results],
+            redact_behaviour=EventRedactBehaviour.BLOCK,
+        )
 
         event_map = {ev.event_id: ev for ev in events}
 
-- 
cgit 1.4.1
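
The three-way switch introduced by EventRedactBehaviour reduces to a small dispatch over (event, redacted_event) pairs. A sketch using the stdlib Enum rather than constantly (the pair layout mirrors the _EventCacheEntry tuple above):

    from enum import Enum, auto


    class RedactBehaviour(Enum):
        AS_IS = auto()
        REDACT = auto()
        BLOCK = auto()


    def apply_redact_behaviour(entries, behaviour: RedactBehaviour) -> list:
        events = []
        for event, redacted_event in entries:
            if redacted_event is not None:
                if behaviour is RedactBehaviour.BLOCK:
                    continue  # drop the event entirely, as search now does
                if behaviour is RedactBehaviour.REDACT:
                    event = redacted_event  # serve the pruned copy instead
            events.append(event)
        return events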


From d156912c4c4f65b821eab202654d740422008d82 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 11 Dec 2019 13:56:50 +0000
Subject: 1.7.0rc2

---
 CHANGES.md              | 11 +++++++++++
 changelog.d/6497.bugfix |  1 -
 changelog.d/6499.bugfix |  1 -
 changelog.d/6507.bugfix |  1 -
 changelog.d/6509.bugfix |  1 -
 synapse/__init__.py     |  2 +-
 6 files changed, 12 insertions(+), 5 deletions(-)
 delete mode 100644 changelog.d/6497.bugfix
 delete mode 100644 changelog.d/6499.bugfix
 delete mode 100644 changelog.d/6507.bugfix
 delete mode 100644 changelog.d/6509.bugfix

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index c30ea4718d..c83a6afbcd 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,14 @@
+Synapse 1.7.0rc2 (2019-12-11)
+=============================
+
+Bugfixes
+--------
+
+- Fix incorrect error message for invalid requests when setting user's avatar URL. ([\#6497](https://github.com/matrix-org/synapse/issues/6497))
+- Fix support for SQLite 3.7. ([\#6499](https://github.com/matrix-org/synapse/issues/6499))
+- Fix regression where sending email push would not work when using a pusher worker. ([\#6507](https://github.com/matrix-org/synapse/issues/6507), [\#6509](https://github.com/matrix-org/synapse/issues/6509))
+
+
 Synapse 1.7.0rc1 (2019-12-09)
 =============================
 
diff --git a/changelog.d/6497.bugfix b/changelog.d/6497.bugfix
deleted file mode 100644
index 6a2644d8e6..0000000000
--- a/changelog.d/6497.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix incorrect error message for invalid requests when setting user's avatar URL.
diff --git a/changelog.d/6499.bugfix b/changelog.d/6499.bugfix
deleted file mode 100644
index 299feba0f8..0000000000
--- a/changelog.d/6499.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix support for SQLite 3.7.
diff --git a/changelog.d/6507.bugfix b/changelog.d/6507.bugfix
deleted file mode 100644
index d767a6237f..0000000000
--- a/changelog.d/6507.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix regression where sending email push would not work when using a pusher worker.
diff --git a/changelog.d/6509.bugfix b/changelog.d/6509.bugfix
deleted file mode 100644
index d767a6237f..0000000000
--- a/changelog.d/6509.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix regression where sending email push would not work when using a pusher worker.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index c67a51a8d5..fc2a6e4ee6 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.7.0rc1"
+__version__ = "1.7.0rc2"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 7c429f92d6935a3e9e0140fdd82801edc43b66b8 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 11 Dec 2019 14:32:25 +0000
Subject: Clean up some logging (#6515)

This just makes some of the logging easier to follow when things start going
wrong.
---
 changelog.d/6515.misc          |  1 +
 synapse/handlers/federation.py | 37 +++++++++++++++++++------------------
 2 files changed, 20 insertions(+), 18 deletions(-)
 create mode 100644 changelog.d/6515.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6515.misc b/changelog.d/6515.misc
new file mode 100644
index 0000000000..a9c303ed1c
--- /dev/null
+++ b/changelog.d/6515.misc
@@ -0,0 +1 @@
+Clean up some logging when handling incoming events over federation.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 8f3c9d7702..cf9c46d027 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -183,7 +183,7 @@ class FederationHandler(BaseHandler):
         room_id = pdu.room_id
         event_id = pdu.event_id
 
-        logger.info("[%s %s] handling received PDU: %s", room_id, event_id, pdu)
+        logger.info("handling received PDU: %s", pdu)
 
         # We reprocess pdus when we have seen them only as outliers
         existing = yield self.store.get_event(
@@ -279,9 +279,15 @@ class FederationHandler(BaseHandler):
                             len(missing_prevs),
                         )
 
-                        yield self._get_missing_events_for_pdu(
-                            origin, pdu, prevs, min_depth
-                        )
+                        try:
+                            yield self._get_missing_events_for_pdu(
+                                origin, pdu, prevs, min_depth
+                            )
+                        except Exception as e:
+                            raise Exception(
+                                "Error fetching missing prev_events for %s: %s"
+                                % (event_id, e)
+                            )
 
                         # Update the set of things we've seen after trying to
                         # fetch the missing stuff
@@ -293,14 +299,6 @@ class FederationHandler(BaseHandler):
                                 room_id,
                                 event_id,
                             )
-                elif missing_prevs:
-                    logger.info(
-                        "[%s %s] Not recursively fetching %d missing prev_events: %s",
-                        room_id,
-                        event_id,
-                        len(missing_prevs),
-                        shortstr(missing_prevs),
-                    )
 
             if prevs - seen:
                 # We've still not been able to get all of the prev_events for this event.
@@ -345,6 +343,12 @@ class FederationHandler(BaseHandler):
                         affected=pdu.event_id,
                     )
 
+                logger.info(
+                    "Event %s is missing prev_events: calculating state for a "
+                    "backwards extremity",
+                    event_id,
+                )
+
                 # Calculate the state after each of the previous events, and
                 # resolve them to find the correct state at the current event.
                 auth_chains = set()
@@ -365,10 +369,7 @@ class FederationHandler(BaseHandler):
                     # know about
                     for p in prevs - seen:
                         logger.info(
-                            "[%s %s] Requesting state at missing prev_event %s",
-                            room_id,
-                            event_id,
-                            p,
+                            "Requesting state at missing prev_event %s", p,
                         )
 
                         room_version = yield self.store.get_room_version(room_id)
@@ -612,8 +613,8 @@ class FederationHandler(BaseHandler):
         failed_to_fetch = desired_events - event_map.keys()
         if failed_to_fetch:
             logger.warning(
-                "Failed to fetch missing state/auth events for %s: %s",
-                room_id,
+                "Failed to fetch missing state/auth events for %s: %s",
+                event_id,
                 failed_to_fetch,
             )
 
-- 
cgit 1.4.1


From 5324bc20a628116aecf4781744cadd611f51d7a6 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Tue, 10 Dec 2019 17:59:46 +0000
Subject: changelog

---
 changelog.d/6517.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6517.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6517.misc b/changelog.d/6517.misc
new file mode 100644
index 0000000000..c6ffed9952
--- /dev/null
+++ b/changelog.d/6517.misc
@@ -0,0 +1 @@
+Port some of FederationHandler to async/await.
\ No newline at end of file
-- 
cgit 1.4.1


From 20453565176cfd358212a23cf89dfd2deab1d690 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 11 Dec 2019 16:37:51 +0000
Subject: Add `include_event_in_state` to _get_state_for_room (#6521)

Make it return the state *after* the requested event, rather than the one
before it. This is a bit easier and requires fewer calls to
get_events_from_store_or_dest.
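
As a rough sketch of the new behaviour (hypothetical helper; assumes an event
object with an `is_state()` method, as Synapse's EventBase has): the state
*after* an event is the state before it, plus the event itself when the event
is a state event.

    def state_after_event(state_before, event):
        # Sketch of include_event_in_state=True: append the event itself to
        # the state that held before it, if it is a state event.
        state = list(state_before)
        if event.is_state():
            state.append(event)
        return state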
---
 changelog.d/6521.misc          |  1 +
 synapse/handlers/federation.py | 50 +++++++++++++++++++++++-------------------
 2 files changed, 29 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/6521.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6521.misc b/changelog.d/6521.misc
new file mode 100644
index 0000000000..d9a44389b9
--- /dev/null
+++ b/changelog.d/6521.misc
@@ -0,0 +1 @@
+Refactor some code in the event authentication path for clarity.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index bcd3b422aa..62985bab9f 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -378,22 +378,10 @@ class FederationHandler(BaseHandler):
                             (
                                 remote_state,
                                 got_auth_chain,
-                            ) = await self._get_state_for_room(origin, room_id, p)
-
-                            # we want the state *after* p; _get_state_for_room returns the
-                            # state *before* p.
-                            remote_event = await self.federation_client.get_pdu(
-                                [origin], p, room_version, outlier=True
+                            ) = await self._get_state_for_room(
+                                origin, room_id, p, include_event_in_state=True
                             )
 
-                            if remote_event is None:
-                                raise Exception(
-                                    "Unable to get missing prev_event %s" % (p,)
-                                )
-
-                            if remote_event.is_state():
-                                remote_state.append(remote_event)
-
                             # XXX hrm I'm not convinced that duplicate events will compare
                             # for equality, so I'm not sure this does what the author
                             # hoped.
@@ -579,20 +567,25 @@ class FederationHandler(BaseHandler):
                     else:
                         raise
 
-    @log_function
     async def _get_state_for_room(
-        self, destination: str, room_id: str, event_id: str
+        self,
+        destination: str,
+        room_id: str,
+        event_id: str,
+        include_event_in_state: bool = False,
     ) -> Tuple[List[EventBase], List[EventBase]]:
         """Requests all of the room state at a given event from a remote homeserver.
 
         Args:
-            destination:: The remote homeserver to query for the state.
+            destination: The remote homeserver to query for the state.
             room_id: The id of the room we're interested in.
             event_id: The id of the event we want the state at.
+            include_event_in_state: if true, the event itself will be included in the
+                returned state event list.
 
         Returns:
-            A list of events in the state, and a list of events in the auth chain
-            for the given event.
+            A list of events in the state, possibly including the event itself, and
+            a list of events in the auth chain for the given event.
         """
         (
             state_event_ids,
@@ -602,6 +595,10 @@ class FederationHandler(BaseHandler):
         )
 
         desired_events = set(state_event_ids + auth_event_ids)
+
+        if include_event_in_state:
+            desired_events.add(event_id)
+
         event_map = await self._get_events_from_store_or_dest(
             destination, room_id, desired_events
         )
@@ -614,12 +611,21 @@ class FederationHandler(BaseHandler):
                 failed_to_fetch,
             )
 
-        pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
-        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
+        remote_state = [
+            event_map[e_id] for e_id in state_event_ids if e_id in event_map
+        ]
+
+        if include_event_in_state:
+            remote_event = event_map.get(event_id)
+            if not remote_event:
+                raise Exception("Unable to get missing prev_event %s" % (event_id,))
+            if remote_event.is_state():
+                remote_state.append(remote_event)
 
+        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
         auth_chain.sort(key=lambda e: e.depth)
 
-        return pdus, auth_chain
+        return remote_state, auth_chain
 
     async def _get_events_from_store_or_dest(
         self, destination: str, room_id: str, event_ids: Iterable[str]
-- 
cgit 1.4.1


From 25f12443298f4995613a475ac304e84d50317e18 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 12 Dec 2019 12:57:45 +0000
Subject: Check the room_id of events when fetching room state/auth (#6524)

When we request the state/auth_events to populate a backwards extremity (on
backfill or in the case of missing events in a transaction push), we should
check that the returned events are in the right room rather than blindly using
them in the room state or auth chain.

Given that _get_events_from_store_or_dest takes a room_id, it seems clear that
it should be sanity-checking the room_id of the requested events, so let's do
it there.
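
A minimal sketch of the check (hypothetical standalone helper; the actual
change lives in _get_events_from_store_or_dest below): drop any fetched event
whose room_id differs from the room being processed.

    def drop_events_from_wrong_room(fetched_events, room_id):
        # Omit events which a remote server claims are part of this room's
        # state/auth chain but which actually belong to another room.
        return {
            event_id: event
            for event_id, event in fetched_events.items()
            if event.room_id == room_id
        }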
---
 changelog.d/6524.misc          |  2 ++
 synapse/handlers/federation.py | 74 +++++++++++++++++++++++++++++-------------
 2 files changed, 53 insertions(+), 23 deletions(-)
 create mode 100644 changelog.d/6524.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6524.misc b/changelog.d/6524.misc
new file mode 100644
index 0000000000..f885597426
--- /dev/null
+++ b/changelog.d/6524.misc
@@ -0,0 +1,2 @@
+Improve sanity-checking when receiving events over federation.
+
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 62985bab9f..2ea69c5468 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -637,6 +637,10 @@ class FederationHandler(BaseHandler):
             room_id
             event_ids
 
+        If we fail to fetch any of the events, a warning will be logged, and the event
+        will be omitted from the result. Likewise, any events which turn out not to
+        be in the given room.
+
         Returns:
             map from event_id to event
         """
@@ -644,35 +648,59 @@ class FederationHandler(BaseHandler):
 
         missing_events = set(event_ids) - fetched_events.keys()
 
-        if not missing_events:
-            return fetched_events
+        if missing_events:
+            logger.debug(
+                "Fetching unknown state/auth events %s for room %s",
+                missing_events,
+                room_id,
+            )
 
-        logger.debug(
-            "Fetching unknown state/auth events %s for room %s",
-            missing_events,
-            event_ids,
-        )
+            room_version = await self.store.get_room_version(room_id)
 
-        room_version = await self.store.get_room_version(room_id)
+            # XXX 20 requests at once? really?
+            for batch in batch_iter(missing_events, 20):
+                deferreds = [
+                    run_in_background(
+                        self.federation_client.get_pdu,
+                        destinations=[destination],
+                        event_id=e_id,
+                        room_version=room_version,
+                    )
+                    for e_id in batch
+                ]
 
-        # XXX 20 requests at once? really?
-        for batch in batch_iter(missing_events, 20):
-            deferreds = [
-                run_in_background(
-                    self.federation_client.get_pdu,
-                    destinations=[destination],
-                    event_id=e_id,
-                    room_version=room_version,
+                res = await make_deferred_yieldable(
+                    defer.DeferredList(deferreds, consumeErrors=True)
                 )
-                for e_id in batch
-            ]
 
-            res = await make_deferred_yieldable(
-                defer.DeferredList(deferreds, consumeErrors=True)
+                for success, result in res:
+                    if success and result:
+                        fetched_events[result.event_id] = result
+
+        # check for events which were in the wrong room.
+        #
+        # this can happen if a remote server claims that the state or
+        # auth_events at an event in room A are actually events in room B
+
+        bad_events = list(
+            (event_id, event.room_id)
+            for event_id, event in fetched_events.items()
+            if event.room_id != room_id
+        )
+
+        for bad_event_id, bad_room_id in bad_events:
+            # This is a bogus situation, but since we may only discover it a long time
+            # after it happened, we try our best to carry on, by just omitting the
+            # bad events from the returned auth/state set.
+            logger.warning(
+                "Remote server %s claims event %s in room %s is an auth/state "
+                "event in room %s",
+                destination,
+                bad_event_id,
+                bad_room_id,
+                room_id,
             )
-            for success, result in res:
-                if success and result:
-                    fetched_events[result.event_id] = result
+            del fetched_events[bad_event_id]
 
         return fetched_events
 
-- 
cgit 1.4.1


From c965253e4b38b9d941793469ef64b014e5a25947 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 12 Dec 2019 14:54:03 +0000
Subject: Newsfile

---
 changelog.d/6534.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6534.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6534.misc b/changelog.d/6534.misc
new file mode 100644
index 0000000000..7df6bb442a
--- /dev/null
+++ b/changelog.d/6534.misc
@@ -0,0 +1 @@
+Test more folders against mypy.
-- 
cgit 1.4.1


From 5056d6d90a58edc2a70d19233082f24fded4c73f Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 12 Dec 2019 15:22:46 +0000
Subject: Newsfile

---
 changelog.d/6537.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6537.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6537.misc b/changelog.d/6537.misc
new file mode 100644
index 0000000000..3543153584
--- /dev/null
+++ b/changelog.d/6537.misc
@@ -0,0 +1 @@
+Update `mypy` to new version.
-- 
cgit 1.4.1


From 5bfd8855d6b9ed8bcf28a107e6654c7cd7d3da2b Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 12 Dec 2019 15:53:49 +0000
Subject: Fix redacted events being returned in search results ordered by
 "recent" (#6522)

---
 changelog.d/6522.bugfix                    |  1 +
 synapse/storage/data_stores/main/search.py | 11 ++++++++---
 2 files changed, 9 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6522.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6522.bugfix b/changelog.d/6522.bugfix
new file mode 100644
index 0000000000..ccda96962f
--- /dev/null
+++ b/changelog.d/6522.bugfix
@@ -0,0 +1 @@
+Prevent redacted events from being returned during message search.
\ No newline at end of file
diff --git a/synapse/storage/data_stores/main/search.py b/synapse/storage/data_stores/main/search.py
index dfb46ee0f8..47ebb8a214 100644
--- a/synapse/storage/data_stores/main/search.py
+++ b/synapse/storage/data_stores/main/search.py
@@ -385,7 +385,7 @@ class SearchStore(SearchBackgroundUpdateStore):
         """
         clauses = []
 
-        search_query = search_query = _parse_query(self.database_engine, search_term)
+        search_query = _parse_query(self.database_engine, search_term)
 
         args = []
 
@@ -501,7 +501,7 @@ class SearchStore(SearchBackgroundUpdateStore):
         """
         clauses = []
 
-        search_query = search_query = _parse_query(self.database_engine, search_term)
+        search_query = _parse_query(self.database_engine, search_term)
 
         args = []
 
@@ -606,7 +606,12 @@ class SearchStore(SearchBackgroundUpdateStore):
 
         results = list(filter(lambda row: row["room_id"] in room_ids, results))
 
-        events = yield self.get_events_as_list([r["event_id"] for r in results])
+        # We set redact_behaviour to BLOCK here to prevent redacted events being returned in
+        # search results (which is a data leak)
+        events = yield self.get_events_as_list(
+            [r["event_id"] for r in results],
+            redact_behaviour=EventRedactBehaviour.BLOCK,
+        )
 
         event_map = {ev.event_id: ev for ev in events}
 
-- 
cgit 1.4.1


From cb2db179945f567410b565f29725dff28449f013 Mon Sep 17 00:00:00 2001
From: Hubert Chathi 
Date: Thu, 12 Dec 2019 12:03:28 -0500
Subject: look up cross-signing keys from the DB in bulk (#6486)
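
The core idea, sketched as a standalone function (hypothetical and simplified:
it omits the MAX(stream_id) deduplication the real query below performs):
chunk the user list and issue one parameterised IN (...) query per chunk,
rather than one query per user.

    def bulk_select_keys(txn, user_ids, batch_size=100):
        # One round-trip per chunk of up to batch_size users.
        result = {}
        for i in range(0, len(user_ids), batch_size):
            chunk = user_ids[i : i + batch_size]
            sql = (
                "SELECT user_id, keytype, keydata FROM e2e_cross_signing_keys"
                " WHERE user_id IN (%s)" % (",".join("?" for _ in chunk),)
            )
            txn.execute(sql, chunk)
            for user_id, key_type, key_data in txn.fetchall():
                result.setdefault(user_id, {})[key_type] = key_data
        return result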

---
 changelog.d/6486.bugfix                            |   1 +
 synapse/handlers/e2e_keys.py                       |  35 +++-
 .../storage/data_stores/main/end_to_end_keys.py    | 217 ++++++++++++++++++++-
 synapse/util/caches/descriptors.py                 |   2 +-
 tests/handlers/test_e2e_keys.py                    |   8 -
 5 files changed, 242 insertions(+), 21 deletions(-)
 create mode 100644 changelog.d/6486.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6486.bugfix b/changelog.d/6486.bugfix
new file mode 100644
index 0000000000..b98c5a9ae5
--- /dev/null
+++ b/changelog.d/6486.bugfix
@@ -0,0 +1 @@
+Improve performance of looking up cross-signing keys.
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 57a10daefd..2d889364d4 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -264,6 +264,7 @@ class E2eKeysHandler(object):
 
         return ret
 
+    @defer.inlineCallbacks
     def get_cross_signing_keys_from_cache(self, query, from_user_id):
         """Get cross-signing keys for users from the database
 
@@ -283,14 +284,32 @@ class E2eKeysHandler(object):
         self_signing_keys = {}
         user_signing_keys = {}
 
-        # Currently a stub, implementation coming in https://github.com/matrix-org/synapse/pull/6486
-        return defer.succeed(
-            {
-                "master_keys": master_keys,
-                "self_signing_keys": self_signing_keys,
-                "user_signing_keys": user_signing_keys,
-            }
-        )
+        user_ids = list(query)
+
+        keys = yield self.store.get_e2e_cross_signing_keys_bulk(user_ids, from_user_id)
+
+        for user_id, user_info in keys.items():
+            if user_info is None:
+                continue
+            if "master" in user_info:
+                master_keys[user_id] = user_info["master"]
+            if "self_signing" in user_info:
+                self_signing_keys[user_id] = user_info["self_signing"]
+
+        if (
+            from_user_id in keys
+            and keys[from_user_id] is not None
+            and "user_signing" in keys[from_user_id]
+        ):
+            # users can see other users' master and self-signing keys, but can
+            # only see their own user-signing keys
+            user_signing_keys[from_user_id] = keys[from_user_id]["user_signing"]
+
+        return {
+            "master_keys": master_keys,
+            "self_signing_keys": self_signing_keys,
+            "user_signing_keys": user_signing_keys,
+        }
 
     @trace
     @defer.inlineCallbacks
diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/data_stores/main/end_to_end_keys.py
index 38cd0ca9b8..e551606f9d 100644
--- a/synapse/storage/data_stores/main/end_to_end_keys.py
+++ b/synapse/storage/data_stores/main/end_to_end_keys.py
@@ -14,15 +14,18 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Dict, List, Optional
+
 from six import iteritems
 
 from canonicaljson import encode_canonical_json, json
 
+from twisted.enterprise.adbapi import Connection
 from twisted.internet import defer
 
 from synapse.logging.opentracing import log_kv, set_tag, trace
 from synapse.storage._base import SQLBaseStore, db_to_json
-from synapse.util.caches.descriptors import cached
+from synapse.util.caches.descriptors import cached, cachedList
 
 
 class EndToEndKeyWorkerStore(SQLBaseStore):
@@ -271,7 +274,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
         Args:
             txn (twisted.enterprise.adbapi.Connection): db connection
             user_id (str): the user whose key is being requested
-            key_type (str): the type of key that is being set: either 'master'
+            key_type (str): the type of key that is being requested: either 'master'
                 for a master key, 'self_signing' for a self-signing key, or
                 'user_signing' for a user-signing key
             from_user_id (str): if specified, signatures made by this user on
@@ -316,8 +319,10 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
         """Returns a user's cross-signing key.
 
         Args:
-            user_id (str): the user whose self-signing key is being requested
-            key_type (str): the type of cross-signing key to get
+            user_id (str): the user whose key is being requested
+            key_type (str): the type of key that is being requested: either 'master'
+                for a master key, 'self_signing' for a self-signing key, or
+                'user_signing' for a user-signing key
             from_user_id (str): if specified, signatures made by this user on
                 the self-signing key will be included in the result
 
@@ -332,6 +337,206 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
             from_user_id,
         )
 
+    @cached(num_args=1)
+    def _get_bare_e2e_cross_signing_keys(self, user_id):
+        """Dummy function.  Only used to make a cache for
+        _get_bare_e2e_cross_signing_keys_bulk.
+        """
+        raise NotImplementedError()
+
+    @cachedList(
+        cached_method_name="_get_bare_e2e_cross_signing_keys",
+        list_name="user_ids",
+        num_args=1,
+    )
+    def _get_bare_e2e_cross_signing_keys_bulk(
+        self, user_ids: List[str]
+    ) -> Dict[str, Dict[str, dict]]:
+        """Returns the cross-signing keys for a set of users.  The output of this
+        function should be passed to _get_e2e_cross_signing_signatures_txn if
+        the signatures for the calling user need to be fetched.
+
+        Args:
+            user_ids (list[str]): the users whose keys are being requested
+
+        Returns:
+            dict[str, dict[str, dict]]: mapping from user ID to key type to key
+                data.  If a user's cross-signing keys were not found, either
+                their user ID will not be in the dict, or their user ID will map
+                to None.
+
+        """
+        return self.db.runInteraction(
+            "get_bare_e2e_cross_signing_keys_bulk",
+            self._get_bare_e2e_cross_signing_keys_bulk_txn,
+            user_ids,
+        )
+
+    def _get_bare_e2e_cross_signing_keys_bulk_txn(
+        self, txn: Connection, user_ids: List[str],
+    ) -> Dict[str, Dict[str, dict]]:
+        """Returns the cross-signing keys for a set of users.  The output of this
+        function should be passed to _get_e2e_cross_signing_signatures_txn if
+        the signatures for the calling user need to be fetched.
+
+        Args:
+            txn (twisted.enterprise.adbapi.Connection): db connection
+            user_ids (list[str]): the users whose keys are being requested
+
+        Returns:
+            dict[str, dict[str, dict]]: mapping from user ID to key type to key
+                data.  If a user's cross-signing keys were not found, their user
+                ID will not be in the dict.
+
+        """
+        result = {}
+
+        batch_size = 100
+        chunks = [
+            user_ids[i : i + batch_size] for i in range(0, len(user_ids), batch_size)
+        ]
+        for user_chunk in chunks:
+            sql = """
+                SELECT k.user_id, k.keytype, k.keydata, k.stream_id
+                  FROM e2e_cross_signing_keys k
+                  INNER JOIN (SELECT user_id, keytype, MAX(stream_id) AS stream_id
+                                FROM e2e_cross_signing_keys
+                               GROUP BY user_id, keytype) s
+                 USING (user_id, stream_id, keytype)
+                 WHERE k.user_id IN (%s)
+            """ % (
+                ",".join("?" for u in user_chunk),
+            )
+            query_params = []
+            query_params.extend(user_chunk)
+
+            txn.execute(sql, query_params)
+            rows = self.db.cursor_to_dict(txn)
+
+            for row in rows:
+                user_id = row["user_id"]
+                key_type = row["keytype"]
+                key = json.loads(row["keydata"])
+                user_info = result.setdefault(user_id, {})
+                user_info[key_type] = key
+
+        return result
+
+    def _get_e2e_cross_signing_signatures_txn(
+        self, txn: Connection, keys: Dict[str, Dict[str, dict]], from_user_id: str,
+    ) -> Dict[str, Dict[str, dict]]:
+        """Returns the cross-signing signatures made by a user on a set of keys.
+
+        Args:
+            txn (twisted.enterprise.adbapi.Connection): db connection
+            keys (dict[str, dict[str, dict]]): a map of user ID to key type to
+                key data.  This dict will be modified to add signatures.
+            from_user_id (str): fetch the signatures made by this user
+
+        Returns:
+            dict[str, dict[str, dict]]: mapping from user ID to key type to key
+                data.  The return value will be the same as the keys argument,
+                with the modifications included.
+        """
+
+        # find out what cross-signing keys (a.k.a. devices) we need to get
+        # signatures for.  This is a map of (user_id, device_id) to key type
+        # (device_id is the key's public part).
+        devices = {}
+
+        for user_id, user_info in keys.items():
+            if user_info is None:
+                continue
+            for key_type, key in user_info.items():
+                device_id = None
+                for k in key["keys"].values():
+                    device_id = k
+                devices[(user_id, device_id)] = key_type
+
+        device_list = list(devices)
+
+        # split into batches
+        batch_size = 100
+        chunks = [
+            device_list[i : i + batch_size]
+            for i in range(0, len(device_list), batch_size)
+        ]
+        for device_chunk in chunks:
+            sql = """
+                SELECT target_user_id, target_device_id, key_id, signature
+                  FROM e2e_cross_signing_signatures
+                 WHERE user_id = ?
+                   AND (%s)
+            """ % (
+                " OR ".join(
+                    "(target_user_id = ? AND target_device_id = ?)" for d in device_chunk
+                )
+            )
+            query_params = [from_user_id]
+            for item in device_chunk:
+                # item is a (user_id, device_id) tuple
+                query_params.extend(item)
+
+            txn.execute(sql, query_params)
+            rows = self.db.cursor_to_dict(txn)
+
+            # and add the signatures to the appropriate keys
+            for row in rows:
+                key_id = row["key_id"]
+                target_user_id = row["target_user_id"]
+                target_device_id = row["target_device_id"]
+                key_type = devices[(target_user_id, target_device_id)]
+                # We need to copy everything, because the result may have come
+                # from the cache.  dict.copy only does a shallow copy, so we
+                # need to recursively copy the dicts that will be modified.
+                user_info = keys[target_user_id] = keys[target_user_id].copy()
+                target_user_key = user_info[key_type] = user_info[key_type].copy()
+                if "signatures" in target_user_key:
+                    signatures = target_user_key["signatures"] = target_user_key[
+                        "signatures"
+                    ].copy()
+                    if from_user_id in signatures:
+                        user_sigs = signatures[from_user_id] = signatures[
+                            from_user_id
+                        ].copy()
+                        user_sigs[key_id] = row["signature"]
+                    else:
+                        signatures[from_user_id] = {key_id: row["signature"]}
+                else:
+                    target_user_key["signatures"] = {
+                        from_user_id: {key_id: row["signature"]}
+                    }
+
+        return keys
+
+    @defer.inlineCallbacks
+    def get_e2e_cross_signing_keys_bulk(
+        self, user_ids: List[str], from_user_id: Optional[str] = None
+    ) -> defer.Deferred:
+        """Returns the cross-signing keys for a set of users.
+
+        Args:
+            user_ids (list[str]): the users whose keys are being requested
+            from_user_id (str): if specified, signatures made by this user on
+                the self-signing keys will be included in the result
+
+        Returns:
+            Deferred[dict[str, dict[str, dict]]]: map of user ID to key type to
+                key data.  If a user's cross-signing keys were not found, either
+                their user ID will not be in the dict, or their user ID will map
+                to None.
+        """
+
+        result = yield self._get_bare_e2e_cross_signing_keys_bulk(user_ids)
+
+        if from_user_id:
+            result = yield self.db.runInteraction(
+                "get_e2e_cross_signing_signatures",
+                self._get_e2e_cross_signing_signatures_txn,
+                result,
+                from_user_id,
+            )
+
+        return result
+
     def get_all_user_signature_changes_for_remotes(self, from_key, to_key):
         """Return a list of changes from the user signature stream to notify remotes.
         Note that the user signature stream represents when a user signs their
@@ -520,6 +725,10 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
                 },
             )
 
+        self._invalidate_cache_and_stream(
+            txn, self._get_bare_e2e_cross_signing_keys, (user_id,)
+        )
+
     def set_e2e_cross_signing_key(self, user_id, key_type, key):
         """Set a user's cross-signing key.
 
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 84f5ae22c3..2e8f6543e5 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -271,7 +271,7 @@ class _CacheDescriptorBase(object):
         else:
             self.function_to_call = orig
 
-        arg_spec = inspect.getargspec(orig)
+        arg_spec = inspect.getfullargspec(orig)
         all_args = arg_spec.args
 
         if "cache_context" in all_args:
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index fdfa2cbbc4..854eb6c024 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -183,10 +183,6 @@ class E2eKeysHandlerTestCase(unittest.TestCase):
         )
         self.assertDictEqual(devices["master_keys"], {local_user: keys2["master_key"]})
 
-    test_replace_master_key.skip = (
-        "Disabled waiting on #https://github.com/matrix-org/synapse/pull/6486"
-    )
-
     @defer.inlineCallbacks
     def test_reupload_signatures(self):
         """re-uploading a signature should not fail"""
@@ -507,7 +503,3 @@ class E2eKeysHandlerTestCase(unittest.TestCase):
             ],
             other_master_key["signatures"][local_user]["ed25519:" + usersigning_pubkey],
         )
-
-    test_upload_signatures.skip = (
-        "Disabled waiting on #https://github.com/matrix-org/synapse/pull/6486"
-    )
-- 
cgit 1.4.1


From 4ce05ec1716f757eb15c02e615ea9c84cb289b77 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 13 Dec 2019 10:15:20 +0000
Subject: Adjust the sytest blacklist for worker mode (#6538)

Remove tests that were blacklisted while torture mode was enabled, and add one
that still fails.
---
 .buildkite/worker-blacklist | 33 ++++-----------------------------
 changelog.d/6538.misc       |  1 +
 2 files changed, 5 insertions(+), 29 deletions(-)
 create mode 100644 changelog.d/6538.misc

(limited to 'changelog.d')

diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist
index 7950d19db3..158ab79154 100644
--- a/.buildkite/worker-blacklist
+++ b/.buildkite/worker-blacklist
@@ -34,33 +34,8 @@ Device list doesn't change if remote server is down
 Remote servers cannot set power levels in rooms without existing powerlevels
 Remote servers should reject attempts by non-creators to set the power levels
 
-# new failures as of https://github.com/matrix-org/sytest/pull/753
-GET /rooms/:room_id/messages returns a message
-GET /rooms/:room_id/messages lazy loads members correctly
-Read receipts are sent as events
-Only original members of the room can see messages from erased users
-Device deletion propagates over federation
-If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes
-Changing user-signing key notifies local users
-Newly updated tags appear in an incremental v2 /sync
+# https://buildkite.com/matrix-dot-org/synapse/builds/6134#6f67bf47-e234-474d-80e8-c6e1868b15c5
 Server correctly handles incoming m.device_list_update
-Local device key changes get to remote servers with correct prev_id
-AS-ghosted users can use rooms via AS
-Ghost user must register before joining room
-Test that a message is pushed
-Invites are pushed
-Rooms with aliases are correctly named in pushed
-Rooms with names are correctly named in pushed
-Rooms with canonical alias are correctly named in pushed
-Rooms with many users are correctly pushed
-Don't get pushed for rooms you've muted
-Rejected events are not pushed
-Test that rejected pushers are removed.
-Events come down the correct room
-
-# https://buildkite.com/matrix-dot-org/sytest/builds/326#cca62404-a88a-4fcb-ad41-175fd3377603
-Presence changes to UNAVAILABLE are reported to remote room members
-If remote user leaves room, changes device and rejoins we see update in sync
-uploading self-signing key notifies over federation
-Inbound federation can receive redacted events
-Outbound federation can request missing events
+
+# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
+Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state
diff --git a/changelog.d/6538.misc b/changelog.d/6538.misc
new file mode 100644
index 0000000000..cb4fd56948
--- /dev/null
+++ b/changelog.d/6538.misc
@@ -0,0 +1 @@
+Adjust the sytest blacklist for worker mode.
-- 
cgit 1.4.1


From 971a0702b5fce743c8bb61424a5f1002d3eb63ff Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 13 Dec 2019 11:44:41 +0000
Subject: Sanity-check room ids in event auth (#6530)

When we do an event auth operation, check that all of the events involved are
in the right room.
---
 changelog.d/6530.misc |  2 ++
 synapse/event_auth.py | 12 ++++++++++++
 2 files changed, 14 insertions(+)
 create mode 100644 changelog.d/6530.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6530.misc b/changelog.d/6530.misc
new file mode 100644
index 0000000000..f885597426
--- /dev/null
+++ b/changelog.d/6530.misc
@@ -0,0 +1,2 @@
+Improve sanity-checking when receiving events over federation.
+
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index c940b84470..80ec911b3d 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -50,6 +50,18 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
     if not hasattr(event, "room_id"):
         raise AuthError(500, "Event has no room_id: %s" % event)
 
+    room_id = event.room_id
+
+    # I'm not really expecting to get auth events in the wrong room, but let's
+    # sanity-check it
+    for auth_event in auth_events.values():
+        if auth_event.room_id != room_id:
+            raise Exception(
+                "During auth for event %s in room %s, found event %s in the state "
+                "which is in room %s"
+                % (event.event_id, room_id, auth_event.event_id, auth_event.room_id)
+            )
+
     if do_sig_check:
         sender_domain = get_domain_from_id(event.sender)
 
-- 
cgit 1.4.1


From 1da15f05f5c9c1e47c9fd1323caff869c2e55aa3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 13 Dec 2019 12:55:32 +0000
Subject: sanity-checking for events used in state res (#6531)

When we perform state resolution, check that all of the events involved are in
the right room.
---
 changelog.d/6531.misc          |   1 +
 synapse/handlers/federation.py |   1 +
 synapse/state/__init__.py      |  32 ++++++++-----
 synapse/state/v1.py            |  34 +++++++++++---
 synapse/state/v2.py            | 100 ++++++++++++++++++++++++++++++-----------
 tests/state/test_v2.py         |   3 ++
 6 files changed, 128 insertions(+), 43 deletions(-)
 create mode 100644 changelog.d/6531.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6531.misc b/changelog.d/6531.misc
new file mode 100644
index 0000000000..598efb79fc
--- /dev/null
+++ b/changelog.d/6531.misc
@@ -0,0 +1 @@
+Improve sanity-checking when receiving events over federation.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 2ea69c5468..1d39a9a4f5 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -396,6 +396,7 @@ class FederationHandler(BaseHandler):
                                 event_map[x.event_id] = x
 
                     state_map = await resolve_events_with_store(
+                        room_id,
                         room_version,
                         state_maps,
                         event_map,
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 3e6d62eef1..5accc071ab 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -16,7 +16,7 @@
 
 import logging
 from collections import namedtuple
-from typing import Iterable, Optional
+from typing import Dict, Iterable, List, Optional, Tuple
 
 from six import iteritems, itervalues
 
@@ -417,6 +417,7 @@ class StateHandler(object):
 
         with Measure(self.clock, "state._resolve_events"):
             new_state = yield resolve_events_with_store(
+                event.room_id,
                 room_version,
                 state_set_ids,
                 event_map=state_map,
@@ -462,7 +463,7 @@ class StateResolutionHandler(object):
         not be called for a single state group
 
         Args:
-            room_id (str): room we are resolving for (used for logging)
+            room_id (str): room we are resolving for (used for logging and sanity checks)
             room_version (str): version of the room
             state_groups_ids (dict[int, dict[(str, str), str]]):
                  map from state group id to the state in that state group
@@ -518,6 +519,7 @@ class StateResolutionHandler(object):
                 logger.info("Resolving conflicted state for %r", room_id)
                 with Measure(self.clock, "state._resolve_events"):
                     new_state = yield resolve_events_with_store(
+                        room_id,
                         room_version,
                         list(itervalues(state_groups_ids)),
                         event_map=event_map,
@@ -589,36 +591,44 @@ def _make_state_cache_entry(new_state, state_groups_ids):
     )
 
 
-def resolve_events_with_store(room_version, state_sets, event_map, state_res_store):
+def resolve_events_with_store(
+    room_id: str,
+    room_version: str,
+    state_sets: List[Dict[Tuple[str, str], str]],
+    event_map: Optional[Dict[str, EventBase]],
+    state_res_store: "StateResolutionStore",
+):
     """
     Args:
-        room_version(str): Version of the room
+        room_id: the room we are working in
+
+        room_version: Version of the room
 
-        state_sets(list): List of dicts of (type, state_key) -> event_id,
+        state_sets: List of dicts of (type, state_key) -> event_id,
             which are the different state groups to resolve.
 
-        event_map(dict[str,FrozenEvent]|None):
+        event_map:
             a dict from event_id to event, for any events that we happen to
             have in flight (eg, those currently being persisted). This will be
             used as a starting point fof finding the state we need; any missing
             events will be requested via state_map_factory.
 
-            If None, all events will be fetched via state_map_factory.
+            If None, all events will be fetched via state_res_store.
 
-        state_res_store (StateResolutionStore)
+        state_res_store: a place to fetch events from
 
-    Returns
+    Returns:
         Deferred[dict[(str, str), str]]:
             a map from (type, state_key) to event_id.
     """
     v = KNOWN_ROOM_VERSIONS[room_version]
     if v.state_res == StateResolutionVersions.V1:
         return v1.resolve_events_with_store(
-            state_sets, event_map, state_res_store.get_events
+            room_id, state_sets, event_map, state_res_store.get_events
         )
     else:
         return v2.resolve_events_with_store(
-            room_version, state_sets, event_map, state_res_store
+            room_id, room_version, state_sets, event_map, state_res_store
         )
 
 
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index a2f92d9ff9..b2f9865f39 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -15,6 +15,7 @@
 
 import hashlib
 import logging
+from typing import Callable, Dict, List, Optional, Tuple
 
 from six import iteritems, iterkeys, itervalues
 
@@ -24,6 +25,7 @@ from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
 from synapse.api.room_versions import RoomVersions
+from synapse.events import EventBase
 
 logger = logging.getLogger(__name__)
 
@@ -32,13 +34,20 @@ POWER_KEY = (EventTypes.PowerLevels, "")
 
 
 @defer.inlineCallbacks
-def resolve_events_with_store(state_sets, event_map, state_map_factory):
+def resolve_events_with_store(
+    room_id: str,
+    state_sets: List[Dict[Tuple[str, str], str]],
+    event_map: Optional[Dict[str, EventBase]],
+    state_map_factory: Callable,
+):
     """
     Args:
-        state_sets(list): List of dicts of (type, state_key) -> event_id,
+        room_id: the room we are working in
+
+        state_sets: List of dicts of (type, state_key) -> event_id,
             which are the different state groups to resolve.
 
-        event_map(dict[str,FrozenEvent]|None):
+        event_map:
             a dict from event_id to event, for any events that we happen to
             have in flight (eg, those currently being persisted). This will be
             used as a starting point fof finding the state we need; any missing
@@ -46,11 +55,11 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory):
 
             If None, all events will be fetched via state_map_factory.
 
-        state_map_factory(func): will be called
+        state_map_factory: will be called
             with a list of event_ids that are needed, and should return with
             a Deferred of dict of event_id to event.
 
-    Returns
+    Returns:
         Deferred[dict[(str, str), str]]:
             a map from (type, state_key) to event_id.
     """
@@ -76,6 +85,14 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory):
     if event_map is not None:
         state_map.update(event_map)
 
+    # everything in the state map should be in the right room
+    for event in state_map.values():
+        if event.room_id != room_id:
+            raise Exception(
+                "Attempting to state-resolve for room %s with event %s which is in %s"
+                % (room_id, event.event_id, event.room_id,)
+            )
+
     # get the ids of the auth events which allow us to authenticate the
     # conflicted state, picking only from the unconflicting state.
     #
@@ -95,6 +112,13 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory):
     )
 
     state_map_new = yield state_map_factory(new_needed_events)
+    for event in state_map_new.values():
+        if event.room_id != room_id:
+            raise Exception(
+                "Attempting to state-resolve for room %s with event %s which is in %s"
+                % (room_id, event.event_id, event.room_id,)
+            )
+
     state_map.update(state_map_new)
 
     return _resolve_with_state(
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index b327c86f40..cb77ed5b78 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -16,29 +16,40 @@
 import heapq
 import itertools
 import logging
+from typing import Dict, List, Optional, Tuple
 
 from six import iteritems, itervalues
 
 from twisted.internet import defer
 
+import synapse.state
 from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
+from synapse.events import EventBase
 
 logger = logging.getLogger(__name__)
 
 
 @defer.inlineCallbacks
-def resolve_events_with_store(room_version, state_sets, event_map, state_res_store):
+def resolve_events_with_store(
+    room_id: str,
+    room_version: str,
+    state_sets: List[Dict[Tuple[str, str], str]],
+    event_map: Optional[Dict[str, EventBase]],
+    state_res_store: "synapse.state.StateResolutionStore",
+):
     """Resolves the state using the v2 state resolution algorithm
 
     Args:
-        room_version (str): The room version
+        room_id: the room we are working in
+
+        room_version: The room version
 
-        state_sets(list): List of dicts of (type, state_key) -> event_id,
+        state_sets: List of dicts of (type, state_key) -> event_id,
             which are the different state groups to resolve.
 
-        event_map(dict[str,FrozenEvent]|None):
+        event_map:
             a dict from event_id to event, for any events that we happen to
             have in flight (eg, those currently being persisted). This will be
             used as a starting point fof finding the state we need; any missing
@@ -46,9 +57,9 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
 
             If None, all events will be fetched via state_res_store.
 
-        state_res_store (StateResolutionStore)
+        state_res_store:
 
-    Returns
+    Returns:
         Deferred[dict[(str, str), str]]:
             a map from (type, state_key) to event_id.
     """
@@ -84,6 +95,14 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
     )
     event_map.update(events)
 
+    # everything in the event map should be in the right room
+    for event in event_map.values():
+        if event.room_id != room_id:
+            raise Exception(
+                "Attempting to state-resolve for room %s with event %s which is in %s"
+                % (room_id, event.event_id, event.room_id,)
+            )
+
     full_conflicted_set = set(eid for eid in full_conflicted_set if eid in event_map)
 
     logger.debug("%d full_conflicted_set entries", len(full_conflicted_set))
@@ -94,13 +113,14 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
     )
 
     sorted_power_events = yield _reverse_topological_power_sort(
-        power_events, event_map, state_res_store, full_conflicted_set
+        room_id, power_events, event_map, state_res_store, full_conflicted_set
     )
 
     logger.debug("sorted %d power events", len(sorted_power_events))
 
     # Now sequentially auth each one
     resolved_state = yield _iterative_auth_checks(
+        room_id,
         room_version,
         sorted_power_events,
         unconflicted_state,
@@ -121,13 +141,18 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
 
     pl = resolved_state.get((EventTypes.PowerLevels, ""), None)
     leftover_events = yield _mainline_sort(
-        leftover_events, pl, event_map, state_res_store
+        room_id, leftover_events, pl, event_map, state_res_store
     )
 
     logger.debug("resolving remaining events")
 
     resolved_state = yield _iterative_auth_checks(
-        room_version, leftover_events, resolved_state, event_map, state_res_store
+        room_id,
+        room_version,
+        leftover_events,
+        resolved_state,
+        event_map,
+        state_res_store,
     )
 
     logger.debug("resolved")
@@ -141,11 +166,12 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
 
 
 @defer.inlineCallbacks
-def _get_power_level_for_sender(event_id, event_map, state_res_store):
+def _get_power_level_for_sender(room_id, event_id, event_map, state_res_store):
     """Return the power level of the sender of the given event according to
     their auth events.
 
     Args:
+        room_id (str)
         event_id (str)
         event_map (dict[str,FrozenEvent])
         state_res_store (StateResolutionStore)
@@ -153,11 +179,11 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store):
     Returns:
         Deferred[int]
     """
-    event = yield _get_event(event_id, event_map, state_res_store)
+    event = yield _get_event(room_id, event_id, event_map, state_res_store)
 
     pl = None
     for aid in event.auth_event_ids():
-        aev = yield _get_event(aid, event_map, state_res_store)
+        aev = yield _get_event(room_id, aid, event_map, state_res_store)
         if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
             pl = aev
             break
@@ -165,7 +191,7 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store):
     if pl is None:
         # Couldn't find power level. Check if they're the creator of the room
         for aid in event.auth_event_ids():
-            aev = yield _get_event(aid, event_map, state_res_store)
+            aev = yield _get_event(room_id, aid, event_map, state_res_store)
             if (aev.type, aev.state_key) == (EventTypes.Create, ""):
                 if aev.content.get("creator") == event.sender:
                     return 100
@@ -279,7 +305,7 @@ def _is_power_event(event):
 
 @defer.inlineCallbacks
 def _add_event_and_auth_chain_to_graph(
-    graph, event_id, event_map, state_res_store, auth_diff
+    graph, room_id, event_id, event_map, state_res_store, auth_diff
 ):
     """Helper function for _reverse_topological_power_sort that add the event
     and its auth chain (that is in the auth diff) to the graph
@@ -287,6 +313,7 @@ def _add_event_and_auth_chain_to_graph(
     Args:
         graph (dict[str, set[str]]): A map from event ID to the events auth
             event IDs
+        room_id (str): the room we are working in
         event_id (str): Event to add to the graph
         event_map (dict[str,FrozenEvent])
         state_res_store (StateResolutionStore)
@@ -298,7 +325,7 @@ def _add_event_and_auth_chain_to_graph(
         eid = state.pop()
         graph.setdefault(eid, set())
 
-        event = yield _get_event(eid, event_map, state_res_store)
+        event = yield _get_event(room_id, eid, event_map, state_res_store)
         for aid in event.auth_event_ids():
             if aid in auth_diff:
                 if aid not in graph:
@@ -308,11 +335,14 @@ def _add_event_and_auth_chain_to_graph(
 
 
 @defer.inlineCallbacks
-def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_diff):
+def _reverse_topological_power_sort(
+    room_id, event_ids, event_map, state_res_store, auth_diff
+):
     """Returns a list of the event_ids sorted by reverse topological ordering,
     and then by power level and origin_server_ts
 
     Args:
+        room_id (str): the room we are working in
         event_ids (list[str]): The events to sort
         event_map (dict[str,FrozenEvent])
         state_res_store (StateResolutionStore)
@@ -325,12 +355,14 @@ def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_
     graph = {}
     for event_id in event_ids:
         yield _add_event_and_auth_chain_to_graph(
-            graph, event_id, event_map, state_res_store, auth_diff
+            graph, room_id, event_id, event_map, state_res_store, auth_diff
         )
 
     event_to_pl = {}
     for event_id in graph:
-        pl = yield _get_power_level_for_sender(event_id, event_map, state_res_store)
+        pl = yield _get_power_level_for_sender(
+            room_id, event_id, event_map, state_res_store
+        )
         event_to_pl[event_id] = pl
 
     def _get_power_order(event_id):
@@ -348,12 +380,13 @@ def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_
 
 @defer.inlineCallbacks
 def _iterative_auth_checks(
-    room_version, event_ids, base_state, event_map, state_res_store
+    room_id, room_version, event_ids, base_state, event_map, state_res_store
 ):
     """Sequentially apply auth checks to each event in given list, updating the
     state as it goes along.
 
     Args:
+        room_id (str)
         room_version (str)
         event_ids (list[str]): Ordered list of events to apply auth checks to
         base_state (dict[tuple[str, str], str]): The set of state to start with
@@ -370,7 +403,7 @@ def _iterative_auth_checks(
 
         auth_events = {}
         for aid in event.auth_event_ids():
-            ev = yield _get_event(aid, event_map, state_res_store)
+            ev = yield _get_event(room_id, aid, event_map, state_res_store)
 
             if ev.rejected_reason is None:
                 auth_events[(ev.type, ev.state_key)] = ev
@@ -378,7 +411,7 @@ def _iterative_auth_checks(
         for key in event_auth.auth_types_for_event(event):
             if key in resolved_state:
                 ev_id = resolved_state[key]
-                ev = yield _get_event(ev_id, event_map, state_res_store)
+                ev = yield _get_event(room_id, ev_id, event_map, state_res_store)
 
                 if ev.rejected_reason is None:
                     auth_events[key] = event_map[ev_id]
@@ -400,11 +433,14 @@ def _iterative_auth_checks(
 
 
 @defer.inlineCallbacks
-def _mainline_sort(event_ids, resolved_power_event_id, event_map, state_res_store):
+def _mainline_sort(
+    room_id, event_ids, resolved_power_event_id, event_map, state_res_store
+):
     """Returns a sorted list of event_ids sorted by mainline ordering based on
     the given event resolved_power_event_id
 
     Args:
+        room_id (str): room we're working in
         event_ids (list[str]): Events to sort
         resolved_power_event_id (str): The final resolved power level event ID
         event_map (dict[str,FrozenEvent])
@@ -417,11 +453,11 @@ def _mainline_sort(event_ids, resolved_power_event_id, event_map, state_res_stor
     pl = resolved_power_event_id
     while pl:
         mainline.append(pl)
-        pl_ev = yield _get_event(pl, event_map, state_res_store)
+        pl_ev = yield _get_event(room_id, pl, event_map, state_res_store)
         auth_events = pl_ev.auth_event_ids()
         pl = None
         for aid in auth_events:
-            ev = yield _get_event(aid, event_map, state_res_store)
+            ev = yield _get_event(room_id, aid, event_map, state_res_store)
             if (ev.type, ev.state_key) == (EventTypes.PowerLevels, ""):
                 pl = aid
                 break
@@ -457,6 +493,8 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
         Deferred[int]
     """
 
+    room_id = event.room_id
+
     # We do an iterative search, replacing `event with the power level in its
     # auth events (if any)
     while event:
@@ -468,7 +506,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
         event = None
 
         for aid in auth_events:
-            aev = yield _get_event(aid, event_map, state_res_store)
+            aev = yield _get_event(room_id, aid, event_map, state_res_store)
             if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
                 event = aev
                 break
@@ -478,11 +516,12 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
 
 
 @defer.inlineCallbacks
-def _get_event(event_id, event_map, state_res_store):
+def _get_event(room_id, event_id, event_map, state_res_store):
     """Helper function to look up event in event_map, falling back to looking
     it up in the store
 
     Args:
+        room_id (str)
         event_id (str)
         event_map (dict[str,FrozenEvent])
         state_res_store (StateResolutionStore)
@@ -493,7 +532,14 @@ def _get_event(event_id, event_map, state_res_store):
     if event_id not in event_map:
         events = yield state_res_store.get_events([event_id], allow_rejected=True)
         event_map.update(events)
-    return event_map[event_id]
+    event = event_map[event_id]
+    assert event is not None
+    if event.room_id != room_id:
+        raise Exception(
+            "In state res for room %s, event %s is in %s"
+            % (room_id, event_id, event.room_id)
+        )
+    return event
 
 
 def lexicographical_topological_sort(graph, key):
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 8d3845c870..0f341d3ac3 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -58,6 +58,7 @@ class FakeEvent(object):
         self.type = type
         self.state_key = state_key
         self.content = content
+        self.room_id = ROOM_ID
 
     def to_event(self, auth_events, prev_events):
         """Given the auth_events and prev_events, convert to a Frozen Event
@@ -418,6 +419,7 @@ class StateTestCase(unittest.TestCase):
                 state_before = dict(state_at_event[prev_events[0]])
             else:
                 state_d = resolve_events_with_store(
+                    ROOM_ID,
                     RoomVersions.V2.identifier,
                     [state_at_event[n] for n in prev_events],
                     event_map=event_map,
@@ -565,6 +567,7 @@ class SimpleParamStateTestCase(unittest.TestCase):
         # Test that we correctly handle passing `None` as the event_map
 
         state_d = resolve_events_with_store(
+            ROOM_ID,
             RoomVersions.V2.identifier,
             [self.state_at_bob, self.state_at_charlie],
             event_map=None,
-- 
cgit 1.4.1


From 0b90fc6ed22e6ebb137041a1f5006f52cea081e4 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 13 Dec 2019 15:28:48 +0000
Subject: Document Shutdown Room admin API (#6541)

---
 changelog.d/6541.doc            |  1 +
 docs/admin_api/shutdown_room.md | 72 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+)
 create mode 100644 changelog.d/6541.doc
 create mode 100644 docs/admin_api/shutdown_room.md

(limited to 'changelog.d')

diff --git a/changelog.d/6541.doc b/changelog.d/6541.doc
new file mode 100644
index 0000000000..c20029edc0
--- /dev/null
+++ b/changelog.d/6541.doc
@@ -0,0 +1 @@
+Document the Room Shutdown Admin API.
\ No newline at end of file
diff --git a/docs/admin_api/shutdown_room.md b/docs/admin_api/shutdown_room.md
new file mode 100644
index 0000000000..54ce1cd234
--- /dev/null
+++ b/docs/admin_api/shutdown_room.md
@@ -0,0 +1,72 @@
+# Shutdown room API
+
+Shuts down a room, preventing new joins, and automatically moves local users and
+room aliases to a new room. The new room will be created with the user specified by
+the `new_room_user_id` parameter as room administrator and will contain a message
+explaining what happened. Users invited to the new room will have power level
+-10 by default, and will thus be unable to speak. The old room's power levels will
+be changed to disallow any further invites or joins.
+
+The local server will only have the power to move local users and room aliases to
+the new room. Users on other servers will be unaffected.
+
+## API
+
+You will need to authenticate with an access token for an admin user.
+
+### URL
+
+`POST /_synapse/admin/v1/shutdown_room/{room_id}`
+
+### URL Parameters
+
+* `room_id` - The ID of the room (e.g. `!someroom:example.com`)
+
+### JSON Body Parameters
+
+* `new_room_user_id` - Required. A string representing the user ID of the user that will
+                       administer the new room that all users in the old room will be moved to.
+* `room_name` - Optional. A string representing the name of the room that new users will be
+                invited to.
+* `message` - Optional. A string containing the first message that will be sent as
+              `new_room_user_id` in the new room. Ideally this will clearly convey why the
+              original room was shut down.
+
+If not specified, the default value of `room_name` is "Content Violation
+Notification". The default value of `message` is "Sharing illegal content on
+this server is not permitted and rooms in violation will be blocked."
+
+### Response Parameters
+
+* `kicked_users` - An integer representing the number of users that
+                   were kicked.
+* `failed_to_kick_users` - An integer representing the number of users
+                           that could not be kicked.
+* `local_aliases` - An array of strings representing the local aliases that were migrated from
+                    the old room to the new room.
+* `new_room_id` - A string representing the room ID of the new room.
+
+## Example
+
+Request:
+
+```
+POST /_synapse/admin/v1/shutdown_room/!somebadroom%3Aexample.com
+
+{
+    "new_room_user_id": "@someuser:example.com",
+    "room_name": "Content Violation Notification",
+    "message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service."
+}
+```
+
+Response:
+
+```
+{
+    "kicked_users": 5,
+    "failed_to_kick_users": 0,
+    "local_aliases": ["#badroom:example.com", "#evilsaloon:example.com],
+    "new_room_id": "!newroomid:example.com",
+},
+```
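+
+A minimal Python sketch of the same request, using the third-party `requests`
+library, is shown below. The homeserver URL, access token, and room ID are
+hypothetical placeholders; substitute values from your own deployment.
+
+```
+import requests
+from urllib.parse import quote
+
+# Hypothetical values; replace with your homeserver's URL, an admin user's
+# access token, and the ID of the room to shut down.
+HOMESERVER = "https://matrix.example.com"
+ACCESS_TOKEN = "<admin access token>"
+ROOM_ID = "!somebadroom:example.com"
+
+resp = requests.post(
+    "%s/_synapse/admin/v1/shutdown_room/%s" % (HOMESERVER, quote(ROOM_ID, safe="")),
+    headers={"Authorization": "Bearer %s" % ACCESS_TOKEN},
+    json={
+        "new_room_user_id": "@someuser:example.com",
+        "room_name": "Content Violation Notification",
+        "message": "Bad Room has been shut down due to content violations "
+        "on this server. Please review our Terms of Service.",
+    },
+)
+resp.raise_for_status()
+print(resp.json()["new_room_id"])
+```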
-- 
cgit 1.4.1


From 9d173b312cc3ab170de3a7c58ac24778eddf93f6 Mon Sep 17 00:00:00 2001
From: Werner Sembach 
Date: Mon, 16 Dec 2019 13:12:40 +0100
Subject: Automatically delete empty groups/communities (#6453)

Signed-off-by: Werner Sembach 
---
 AUTHORS.rst                                        |  3 +++
 changelog.d/6453.feature                           |  1 +
 synapse/groups/groups_server.py                    |  5 ++++
 .../delta/56/nuke_empty_communities_from_db.sql    | 29 ++++++++++++++++++++++
 4 files changed, 38 insertions(+)
 create mode 100644 changelog.d/6453.feature
 create mode 100644 synapse/storage/data_stores/main/schema/delta/56/nuke_empty_communities_from_db.sql

(limited to 'changelog.d')

diff --git a/AUTHORS.rst b/AUTHORS.rst
index b8b31a5b47..014f16d4a2 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -46,3 +46,6 @@ Joseph Weston 
 
 Benjamin Saunders 
  * Documentation improvements
+
+Werner Sembach 
+ * Automatically remove a group/community when it is empty
diff --git a/changelog.d/6453.feature b/changelog.d/6453.feature
new file mode 100644
index 0000000000..e7bb801c6a
--- /dev/null
+++ b/changelog.d/6453.feature
@@ -0,0 +1 @@
+Automatically delete empty groups/communities.
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 29e8ffc295..0ec9be3cb5 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -773,6 +773,11 @@ class GroupsServerHandler(object):
         if not self.hs.is_mine_id(user_id):
             yield self.store.maybe_delete_remote_profile_cache(user_id)
 
+        # Delete group if the last user has left
+        users = yield self.store.get_users_in_group(group_id, include_private=True)
+        if not users:
+            yield self.store.delete_group(group_id)
+
         return {}
 
     @defer.inlineCallbacks
diff --git a/synapse/storage/data_stores/main/schema/delta/56/nuke_empty_communities_from_db.sql b/synapse/storage/data_stores/main/schema/delta/56/nuke_empty_communities_from_db.sql
new file mode 100644
index 0000000000..4f24c1405d
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/56/nuke_empty_communities_from_db.sql
@@ -0,0 +1,29 @@
+/* Copyright 2019 Werner Sembach
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Groups/communities now get deleted when the last member leaves. This is a one time cleanup to remove old groups/communities that were already empty before that change was made.
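+-- Each DELETE below uses the same subquery, which selects the IDs of groups that
+-- have no rows left in group_users, and clears that group's rows from one table.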
+DELETE FROM group_attestations_remote WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM group_attestations_renewals WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM group_invites WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM group_roles WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM group_room_categories WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM group_rooms WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM group_summary_roles WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM group_summary_room_categories WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM group_summary_rooms WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM group_summary_users WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM local_group_membership WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM local_group_updates WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
+DELETE FROM groups WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id));
-- 
cgit 1.4.1


From 8b9f5c21c35ff7a5491121ffc381bd8c97e879ce Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 16 Dec 2019 12:19:35 +0000
Subject: Changelog

---
 changelog.d/6553.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6553.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6553.bugfix b/changelog.d/6553.bugfix
new file mode 100644
index 0000000000..e8f55e2a76
--- /dev/null
+++ b/changelog.d/6553.bugfix
@@ -0,0 +1 @@
+Fix a bug causing responses to the `/context` client endpoint to not use the pruned version of the event the request is for.
-- 
cgit 1.4.1


From bc7de87650c2646cdc388f06a2a5bb6f94fc5c5c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 16 Dec 2019 12:26:28 +0000
Subject: Persist auth/state events at backwards extremities when we fetch them
 (#6526)

The main point here is to make sure that the state returned by _get_state_for_room has been authed before we try to use it as state in the room.
---
 changelog.d/6526.bugfix        |   1 +
 synapse/handlers/federation.py | 247 +++++++++++++----------------------------
 synapse/util/async_helpers.py  |   4 +-
 3 files changed, 83 insertions(+), 169 deletions(-)
 create mode 100644 changelog.d/6526.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6526.bugfix b/changelog.d/6526.bugfix
new file mode 100644
index 0000000000..53214b0748
--- /dev/null
+++ b/changelog.d/6526.bugfix
@@ -0,0 +1 @@
+Fix a bug which could cause the federation server to incorrectly return errors when handling certain obscure event graphs.
\ No newline at end of file
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 1d39a9a4f5..3f480f2056 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -65,8 +65,7 @@ from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRes
 from synapse.state import StateResolutionStore, resolve_events_with_store
 from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
 from synapse.types import UserID, get_domain_from_id
-from synapse.util import batch_iter, unwrapFirstError
-from synapse.util.async_helpers import Linearizer
+from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.distributor import user_joined_room
 from synapse.util.retryutils import NotRetryingDestination
 from synapse.visibility import filter_events_for_server
@@ -238,7 +237,6 @@ class FederationHandler(BaseHandler):
             return None
 
         state = None
-        auth_chain = []
 
         # Get missing pdus if necessary.
         if not pdu.internal_metadata.is_outlier():
@@ -348,7 +346,6 @@ class FederationHandler(BaseHandler):
 
                 # Calculate the state after each of the previous events, and
                 # resolve them to find the correct state at the current event.
-                auth_chains = set()
                 event_map = {event_id: pdu}
                 try:
                     # Get the state of the events we know about
@@ -369,24 +366,14 @@ class FederationHandler(BaseHandler):
                             "Requesting state at missing prev_event %s", event_id,
                         )
 
-                        room_version = await self.store.get_room_version(room_id)
-
                         with nested_logging_context(p):
                             # note that if any of the missing prevs share missing state or
                             # auth events, the requests to fetch those events are deduped
                             # by the get_pdu_cache in federation_client.
-                            (
-                                remote_state,
-                                got_auth_chain,
-                            ) = await self._get_state_for_room(
+                            (remote_state, _,) = await self._get_state_for_room(
                                 origin, room_id, p, include_event_in_state=True
                             )
 
-                            # XXX hrm I'm not convinced that duplicate events will compare
-                            # for equality, so I'm not sure this does what the author
-                            # hoped.
-                            auth_chains.update(got_auth_chain)
-
                             remote_state_map = {
                                 (x.type, x.state_key): x.event_id for x in remote_state
                             }
@@ -395,6 +382,7 @@ class FederationHandler(BaseHandler):
                             for x in remote_state:
                                 event_map[x.event_id] = x
 
+                    room_version = await self.store.get_room_version(room_id)
                     state_map = await resolve_events_with_store(
                         room_id,
                         room_version,
@@ -416,7 +404,6 @@ class FederationHandler(BaseHandler):
                     event_map.update(evs)
 
                     state = [event_map[e] for e in six.itervalues(state_map)]
-                    auth_chain = list(auth_chains)
                 except Exception:
                     logger.warning(
                         "[%s %s] Error attempting to resolve state at missing "
@@ -432,9 +419,7 @@ class FederationHandler(BaseHandler):
                         affected=event_id,
                     )
 
-        await self._process_received_pdu(
-            origin, pdu, state=state, auth_chain=auth_chain
-        )
+        await self._process_received_pdu(origin, pdu, state=state)
 
     async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth):
         """
@@ -633,10 +618,7 @@ class FederationHandler(BaseHandler):
     ) -> Dict[str, EventBase]:
         """Fetch events from a remote destination, checking if we already have them.
 
-        Args:
-            destination
-            room_id
-            event_ids
+        Persists any events we don't already have as outliers.
 
         If we fail to fetch any of the events, a warning will be logged, and the event
         will be omitted from the result. Likewise, any events which turn out not to
@@ -656,27 +638,15 @@ class FederationHandler(BaseHandler):
                 room_id,
             )
 
-            room_version = await self.store.get_room_version(room_id)
-
-            # XXX 20 requests at once? really?
-            for batch in batch_iter(missing_events, 20):
-                deferreds = [
-                    run_in_background(
-                        self.federation_client.get_pdu,
-                        destinations=[destination],
-                        event_id=e_id,
-                        room_version=room_version,
-                    )
-                    for e_id in batch
-                ]
-
-                res = await make_deferred_yieldable(
-                    defer.DeferredList(deferreds, consumeErrors=True)
-                )
+            await self._get_events_and_persist(
+                destination=destination, room_id=room_id, events=missing_events
+            )
 
-                for success, result in res:
-                    if success and result:
-                        fetched_events[result.event_id] = result
+            # we need to make sure we re-load from the database to get the rejected
+            # state correct.
+            fetched_events.update(
+                (await self.store.get_events(missing_events, allow_rejected=True))
+            )
 
         # check for events which were in the wrong room.
         #
@@ -705,50 +675,26 @@ class FederationHandler(BaseHandler):
 
         return fetched_events
 
-    async def _process_received_pdu(self, origin, event, state, auth_chain):
+    async def _process_received_pdu(
+        self, origin: str, event: EventBase, state: Optional[Iterable[EventBase]],
+    ):
         """ Called when we have a new pdu. We need to do auth checks and put it
         through the StateHandler.
+
+        Args:
+            origin: server sending the event
+
+            event: event to be persisted
+
+            state: Normally None, but if we are handling a gap in the graph
+                (ie, we are missing one or more prev_events), the resolved state at the
+                event
         """
         room_id = event.room_id
         event_id = event.event_id
 
         logger.debug("[%s %s] Processing event: %s", room_id, event_id, event)
 
-        event_ids = set()
-        if state:
-            event_ids |= {e.event_id for e in state}
-        if auth_chain:
-            event_ids |= {e.event_id for e in auth_chain}
-
-        seen_ids = await self.store.have_seen_events(event_ids)
-
-        if state and auth_chain is not None:
-            # If we have any state or auth_chain given to us by the replication
-            # layer, then we should handle them (if we haven't before.)
-
-            event_infos = []
-
-            for e in itertools.chain(auth_chain, state):
-                if e.event_id in seen_ids:
-                    continue
-                e.internal_metadata.outlier = True
-                auth_ids = e.auth_event_ids()
-                auth = {
-                    (e.type, e.state_key): e
-                    for e in auth_chain
-                    if e.event_id in auth_ids or e.type == EventTypes.Create
-                }
-                event_infos.append(_NewEventInfo(event=e, auth_events=auth))
-                seen_ids.add(e.event_id)
-
-            logger.info(
-                "[%s %s] persisting newly-received auth/state events %s",
-                room_id,
-                event_id,
-                [e.event.event_id for e in event_infos],
-            )
-            await self._handle_new_events(origin, event_infos)
-
         try:
             context = await self._handle_new_event(origin, event, state=state)
         except AuthError as e:
@@ -803,8 +749,6 @@ class FederationHandler(BaseHandler):
         if dest == self.server_name:
             raise SynapseError(400, "Can't backfill from self.")
 
-        room_version = await self.store.get_room_version(room_id)
-
         events = await self.federation_client.backfill(
             dest, room_id, limit=limit, extremities=extremities
         )
@@ -833,6 +777,9 @@ class FederationHandler(BaseHandler):
 
         event_ids = set(e.event_id for e in events)
 
+        # build a list of events whose prev_events weren't in the batch.
+        # (XXX: this will include events whose prev_events we already have; that doesn't
+        # sound right?)
         edges = [ev.event_id for ev in events if set(ev.prev_event_ids()) - event_ids]
 
         logger.info("backfill: Got %d events with %d edges", len(events), len(edges))
@@ -861,95 +808,11 @@ class FederationHandler(BaseHandler):
         auth_events.update(
             {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map}
         )
-        missing_auth = required_auth - set(auth_events)
-        failed_to_fetch = set()
-
-        # Try and fetch any missing auth events from both DB and remote servers.
-        # We repeatedly do this until we stop finding new auth events.
-        while missing_auth - failed_to_fetch:
-            logger.info("Missing auth for backfill: %r", missing_auth)
-            ret_events = await self.store.get_events(missing_auth - failed_to_fetch)
-            auth_events.update(ret_events)
-
-            required_auth.update(
-                a_id for event in ret_events.values() for a_id in event.auth_event_ids()
-            )
-            missing_auth = required_auth - set(auth_events)
-
-            if missing_auth - failed_to_fetch:
-                logger.info(
-                    "Fetching missing auth for backfill: %r",
-                    missing_auth - failed_to_fetch,
-                )
-
-                results = await make_deferred_yieldable(
-                    defer.gatherResults(
-                        [
-                            run_in_background(
-                                self.federation_client.get_pdu,
-                                [dest],
-                                event_id,
-                                room_version=room_version,
-                                outlier=True,
-                                timeout=10000,
-                            )
-                            for event_id in missing_auth - failed_to_fetch
-                        ],
-                        consumeErrors=True,
-                    )
-                ).addErrback(unwrapFirstError)
-                auth_events.update({a.event_id: a for a in results if a})
-                required_auth.update(
-                    a_id
-                    for event in results
-                    if event
-                    for a_id in event.auth_event_ids()
-                )
-                missing_auth = required_auth - set(auth_events)
-
-                failed_to_fetch = missing_auth - set(auth_events)
-
-        seen_events = await self.store.have_seen_events(
-            set(auth_events.keys()) | set(state_events.keys())
-        )
-
-        # We now have a chunk of events plus associated state and auth chain to
-        # persist. We do the persistence in two steps:
-        #   1. Auth events and state get persisted as outliers, plus the
-        #      backward extremities get persisted (as non-outliers).
-        #   2. The rest of the events in the chunk get persisted one by one, as
-        #      each one depends on the previous event for its state.
-        #
-        # The important thing is that events in the chunk get persisted as
-        # non-outliers, including when those events are also in the state or
-        # auth chain. Caution must therefore be taken to ensure that they are
-        # not accidentally marked as outliers.
 
-        # Step 1a: persist auth events that *don't* appear in the chunk
         ev_infos = []
-        for a in auth_events.values():
-            # We only want to persist auth events as outliers that we haven't
-            # seen and aren't about to persist as part of the backfilled chunk.
-            if a.event_id in seen_events or a.event_id in event_map:
-                continue
 
-            a.internal_metadata.outlier = True
-            ev_infos.append(
-                _NewEventInfo(
-                    event=a,
-                    auth_events={
-                        (
-                            auth_events[a_id].type,
-                            auth_events[a_id].state_key,
-                        ): auth_events[a_id]
-                        for a_id in a.auth_event_ids()
-                        if a_id in auth_events
-                    },
-                )
-            )
-
-        # Step 1b: persist the events in the chunk we fetched state for (i.e.
-        # the backwards extremities) as non-outliers.
+        # Step 1: persist the events in the chunk we fetched state for (i.e.
+        # the backwards extremities), with custom auth events and state
         for e_id in events_to_state:
             # For paranoia we ensure that these events are marked as
             # non-outliers
@@ -1191,6 +1054,56 @@ class FederationHandler(BaseHandler):
 
         return False
 
+    async def _get_events_and_persist(
+        self, destination: str, room_id: str, events: Iterable[str]
+    ):
+        """Fetch the given events from a server, and persist them as outliers.
+
+        Logs a warning if we can't find the given event.
+        """
+
+        room_version = await self.store.get_room_version(room_id)
+
+        event_infos = []
+
+        async def get_event(event_id: str):
+            with nested_logging_context(event_id):
+                try:
+                    event = await self.federation_client.get_pdu(
+                        [destination], event_id, room_version, outlier=True,
+                    )
+                    if event is None:
+                        logger.warning(
+                            "Server %s didn't return event %s", destination, event_id,
+                        )
+                        return
+
+                    # recursively fetch the auth events for this event
+                    auth_events = await self._get_events_from_store_or_dest(
+                        destination, room_id, event.auth_event_ids()
+                    )
+                    auth = {}
+                    for auth_event_id in event.auth_event_ids():
+                        ae = auth_events.get(auth_event_id)
+                        if ae:
+                            auth[(ae.type, ae.state_key)] = ae
+
+                    event_infos.append(_NewEventInfo(event, None, auth))
+
+                except Exception as e:
+                    logger.warning(
+                        "Error fetching missing state/auth event %s: %s %s",
+                        event_id,
+                        type(e),
+                        e,
+                    )
+
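+        # Fetch up to five events from the destination at a time.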
+        await concurrently_execute(get_event, events, 5)
+
+        await self._handle_new_events(
+            destination, event_infos,
+        )
+
     def _sanity_check_event(self, ev):
         """
         Do some early sanity checks of a received event
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 5c4de2e69f..04b6abdc24 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -140,8 +140,8 @@ def concurrently_execute(func, args, limit):
 
     Args:
         func (func): Function to execute, should return a deferred or coroutine.
-        args (list): List of arguments to pass to func, each invocation of func
-            gets a signle argument.
+        args (Iterable): List of arguments to pass to func, each invocation of func
+            gets a single argument.
         limit (int): Maximum number of concurrent executions.
 
     Returns:
-- 
cgit 1.4.1


From 6920d88892e77aec787b6afc0e01e6e09dc36216 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 16 Dec 2019 13:14:37 +0000
Subject: Exclude rejected state events when calculating state at backwards
 extrems (#6527)

This fixes a weird bug where, if you were determined enough, you could end up with a rejected event forming part of the state at a backwards-extremity. Authing that backwards extrem would then lead to us trying to pull the rejected event from the db (with allow_rejected=False), which would fail with a 404.
---
 changelog.d/6527.bugfix        | 1 +
 synapse/handlers/federation.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6527.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6527.bugfix b/changelog.d/6527.bugfix
new file mode 100644
index 0000000000..53214b0748
--- /dev/null
+++ b/changelog.d/6527.bugfix
@@ -0,0 +1 @@
+Fix a bug which could cause the federation server to incorrectly return errors when handling certain obscure event graphs.
\ No newline at end of file
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 3f480f2056..3fccccfecd 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -605,7 +605,7 @@ class FederationHandler(BaseHandler):
             remote_event = event_map.get(event_id)
             if not remote_event:
                 raise Exception("Unable to get missing prev_event %s" % (event_id,))
-            if remote_event.is_state():
+            if remote_event.is_state() and remote_event.rejected_reason is None:
                 remote_state.append(remote_event)
 
         auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
-- 
cgit 1.4.1


From 4c7b1bb6cccd726ae9a9f91b3309554a7fe6d262 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 10 Dec 2019 16:22:00 +0000
Subject: Refactor get_events_from_store_or_dest to return a dict (#6501)

There was a bunch of unnecessary conversion back and forth between dict and
list going on here. We can simplify a bunch of the code.
---
 changelog.d/6501.misc                   |  1 +
 synapse/federation/federation_client.py | 44 +++++++++++----------------------
 2 files changed, 16 insertions(+), 29 deletions(-)
 create mode 100644 changelog.d/6501.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6501.misc b/changelog.d/6501.misc
new file mode 100644
index 0000000000..255f45a9c3
--- /dev/null
+++ b/changelog.d/6501.misc
@@ -0,0 +1 @@
+Refactor get_events_from_store_or_dest to return a dict.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 709449c9e3..73e1dda6a3 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -18,8 +18,6 @@ import copy
 import itertools
 import logging
 
-from six.moves import range
-
 from prometheus_client import Counter
 
 from twisted.internet import defer
@@ -41,7 +39,7 @@ from synapse.events import builder, room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.logging.utils import log_function
-from synapse.util import unwrapFirstError
+from synapse.util import batch_iter, unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.retryutils import NotRetryingDestination
 
@@ -331,10 +329,12 @@ class FederationClient(FederationBase):
         state_event_ids = result["pdu_ids"]
         auth_event_ids = result.get("auth_chain_ids", [])
 
-        fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest(
-            destination, room_id, set(state_event_ids + auth_event_ids)
+        desired_events = set(state_event_ids + auth_event_ids)
+        event_map = yield self.get_events_from_store_or_dest(
+            destination, room_id, desired_events
         )
 
+        failed_to_fetch = desired_events - event_map.keys()
         if failed_to_fetch:
             logger.warning(
                 "Failed to fetch missing state/auth events for %s: %s",
@@ -342,8 +342,6 @@ class FederationClient(FederationBase):
                 failed_to_fetch,
             )
 
-        event_map = {ev.event_id: ev for ev in fetched_events}
-
         pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
         auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
 
@@ -358,23 +356,18 @@ class FederationClient(FederationBase):
         Args:
             destination (str)
             room_id (str)
-            event_ids (list)
+            event_ids (Iterable[str])
 
         Returns:
-            Deferred: A deferred resolving to a 2-tuple where the first is a list of
-            events and the second is a list of event ids that we failed to fetch.
+            Deferred[dict[str, EventBase]]: A deferred resolving to a map
+            from event_id to event
         """
-        seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
-        signed_events = list(seen_events.values())
-
-        failed_to_fetch = set()
+        fetched_events = yield self.store.get_events(event_ids, allow_rejected=True)
 
-        missing_events = set(event_ids)
-        for k in seen_events:
-            missing_events.discard(k)
+        missing_events = set(event_ids) - fetched_events.keys()
 
         if not missing_events:
-            return signed_events, failed_to_fetch
+            return fetched_events
 
         logger.debug(
             "Fetching unknown state/auth events %s for room %s",
@@ -384,11 +377,8 @@ class FederationClient(FederationBase):
 
         room_version = yield self.store.get_room_version(room_id)
 
-        batch_size = 20
-        missing_events = list(missing_events)
-        for i in range(0, len(missing_events), batch_size):
-            batch = set(missing_events[i : i + batch_size])
-
+        # XXX 20 requests at once? really?
+        for batch in batch_iter(missing_events, 20):
             deferreds = [
                 run_in_background(
                     self.get_pdu,
@@ -404,13 +394,9 @@ class FederationClient(FederationBase):
             )
             for success, result in res:
                 if success and result:
-                    signed_events.append(result)
-                    batch.discard(result.event_id)
-
-            # We removed all events we successfully fetched from `batch`
-            failed_to_fetch.update(batch)
+                    fetched_events[result.event_id] = result
 
-        return signed_events, failed_to_fetch
+        return fetched_events
 
     @defer.inlineCallbacks
     @log_function
-- 
cgit 1.4.1


From be294d6fde1b8b37b9d557e56973deb92790ddb8 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 10 Dec 2019 17:42:46 +0000
Subject: Move get_state methods into FederationHandler (#6503)

This is a non-functional refactor as a precursor to some other work.
---
 changelog.d/6503.misc                   |   1 +
 synapse/federation/federation_client.py |  91 ++++------------------------
 synapse/handlers/federation.py          | 101 ++++++++++++++++++++++++++++++--
 3 files changed, 107 insertions(+), 86 deletions(-)
 create mode 100644 changelog.d/6503.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6503.misc b/changelog.d/6503.misc
new file mode 100644
index 0000000000..e4e9a5a3d4
--- /dev/null
+++ b/changelog.d/6503.misc
@@ -0,0 +1 @@
+Move get_state methods into FederationHandler.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 73e1dda6a3..d396e6564f 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -37,9 +37,9 @@ from synapse.api.room_versions import (
 )
 from synapse.events import builder, room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
-from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.logging.context import make_deferred_yieldable
 from synapse.logging.utils import log_function
-from synapse.util import batch_iter, unwrapFirstError
+from synapse.util import unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.retryutils import NotRetryingDestination
 
@@ -308,19 +308,12 @@ class FederationClient(FederationBase):
         return signed_pdu
 
     @defer.inlineCallbacks
-    @log_function
-    def get_state_for_room(self, destination, room_id, event_id):
-        """Requests all of the room state at a given event from a remote homeserver.
-
-        Args:
-            destination (str): The remote homeserver to query for the state.
-            room_id (str): The id of the room we're interested in.
-            event_id (str): The id of the event we want the state at.
+    def get_room_state_ids(self, destination: str, room_id: str, event_id: str):
+        """Calls the /state_ids endpoint to fetch the state at a particular point
+        in the room, and the auth events for the given event
 
         Returns:
-            Deferred[Tuple[List[EventBase], List[EventBase]]]:
-                A list of events in the state, and a list of events in the auth chain
-                for the given event.
+            Tuple[List[str], List[str]]:  a tuple of (state event_ids, auth event_ids)
         """
         result = yield self.transport_layer.get_room_state_ids(
             destination, room_id, event_id=event_id
@@ -329,74 +322,12 @@ class FederationClient(FederationBase):
         state_event_ids = result["pdu_ids"]
         auth_event_ids = result.get("auth_chain_ids", [])
 
-        desired_events = set(state_event_ids + auth_event_ids)
-        event_map = yield self.get_events_from_store_or_dest(
-            destination, room_id, desired_events
-        )
-
-        failed_to_fetch = desired_events - event_map.keys()
-        if failed_to_fetch:
-            logger.warning(
-                "Failed to fetch missing state/auth events for %s: %s",
-                room_id,
-                failed_to_fetch,
-            )
-
-        pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
-        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
-
-        auth_chain.sort(key=lambda e: e.depth)
-
-        return pdus, auth_chain
-
-    @defer.inlineCallbacks
-    def get_events_from_store_or_dest(self, destination, room_id, event_ids):
-        """Fetch events from a remote destination, checking if we already have them.
-
-        Args:
-            destination (str)
-            room_id (str)
-            event_ids (Iterable[str])
-
-        Returns:
-            Deferred[dict[str, EventBase]]: A deferred resolving to a map
-            from event_id to event
-        """
-        fetched_events = yield self.store.get_events(event_ids, allow_rejected=True)
-
-        missing_events = set(event_ids) - fetched_events.keys()
-
-        if not missing_events:
-            return fetched_events
-
-        logger.debug(
-            "Fetching unknown state/auth events %s for room %s",
-            missing_events,
-            event_ids,
-        )
-
-        room_version = yield self.store.get_room_version(room_id)
-
-        # XXX 20 requests at once? really?
-        for batch in batch_iter(missing_events, 20):
-            deferreds = [
-                run_in_background(
-                    self.get_pdu,
-                    destinations=[destination],
-                    event_id=e_id,
-                    room_version=room_version,
-                )
-                for e_id in batch
-            ]
-
-            res = yield make_deferred_yieldable(
-                defer.DeferredList(deferreds, consumeErrors=True)
-            )
-            for success, result in res:
-                if success and result:
-                    fetched_events[result.event_id] = result
+        if not isinstance(state_event_ids, list) or not isinstance(
+            auth_event_ids, list
+        ):
+            raise Exception("invalid response from /state_ids")
 
-        return fetched_events
+        return state_event_ids, auth_event_ids
 
     @defer.inlineCallbacks
     @log_function
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index bc26921768..c0dcf9abf8 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -64,7 +64,7 @@ from synapse.replication.http.federation import (
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
 from synapse.state import StateResolutionStore, resolve_events_with_store
 from synapse.types import UserID, get_domain_from_id
-from synapse.util import unwrapFirstError
+from synapse.util import batch_iter, unwrapFirstError
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_joined_room
 from synapse.util.retryutils import NotRetryingDestination
@@ -379,11 +379,9 @@ class FederationHandler(BaseHandler):
                             (
                                 remote_state,
                                 got_auth_chain,
-                            ) = yield self.federation_client.get_state_for_room(
-                                origin, room_id, p
-                            )
+                            ) = yield self._get_state_for_room(origin, room_id, p)
 
-                            # we want the state *after* p; get_state_for_room returns the
+                            # we want the state *after* p; _get_state_for_room returns the
                             # state *before* p.
                             remote_event = yield self.federation_client.get_pdu(
                                 [origin], p, room_version, outlier=True
@@ -583,6 +581,97 @@ class FederationHandler(BaseHandler):
                     else:
                         raise
 
+    @defer.inlineCallbacks
+    @log_function
+    def _get_state_for_room(self, destination, room_id, event_id):
+        """Requests all of the room state at a given event from a remote homeserver.
+
+        Args:
+            destination (str): The remote homeserver to query for the state.
+            room_id (str): The id of the room we're interested in.
+            event_id (str): The id of the event we want the state at.
+
+        Returns:
+            Deferred[Tuple[List[EventBase], List[EventBase]]]:
+                A list of events in the state, and a list of events in the auth chain
+                for the given event.
+        """
+        (
+            state_event_ids,
+            auth_event_ids,
+        ) = yield self.federation_client.get_room_state_ids(
+            destination, room_id, event_id=event_id
+        )
+
+        desired_events = set(state_event_ids + auth_event_ids)
+        event_map = yield self._get_events_from_store_or_dest(
+            destination, room_id, desired_events
+        )
+
+        failed_to_fetch = desired_events - event_map.keys()
+        if failed_to_fetch:
+            logger.warning(
+                "Failed to fetch missing state/auth events for %s: %s",
+                room_id,
+                failed_to_fetch,
+            )
+
+        pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
+        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
+
+        auth_chain.sort(key=lambda e: e.depth)
+
+        return pdus, auth_chain
+
+    @defer.inlineCallbacks
+    def _get_events_from_store_or_dest(self, destination, room_id, event_ids):
+        """Fetch events from a remote destination, checking if we already have them.
+
+        Args:
+            destination (str)
+            room_id (str)
+            event_ids (Iterable[str])
+
+        Returns:
+            Deferred[dict[str, EventBase]]: A deferred resolving to a map
+            from event_id to event
+        """
+        fetched_events = yield self.store.get_events(event_ids, allow_rejected=True)
+
+        missing_events = set(event_ids) - fetched_events.keys()
+
+        if not missing_events:
+            return fetched_events
+
+        logger.debug(
+            "Fetching unknown state/auth events %s for room %s",
+            missing_events,
+            event_ids,
+        )
+
+        room_version = yield self.store.get_room_version(room_id)
+
+        # XXX 20 requests at once? really?
+        for batch in batch_iter(missing_events, 20):
+            deferreds = [
+                run_in_background(
+                    self.federation_client.get_pdu,
+                    destinations=[destination],
+                    event_id=e_id,
+                    room_version=room_version,
+                )
+                for e_id in batch
+            ]
+
+            res = yield make_deferred_yieldable(
+                defer.DeferredList(deferreds, consumeErrors=True)
+            )
+            for success, result in res:
+                if success and result:
+                    fetched_events[result.event_id] = result
+
+        return fetched_events
+
     @defer.inlineCallbacks
     def _process_received_pdu(self, origin, event, state, auth_chain):
         """ Called when we have a new pdu. We need to do auth checks and put it
@@ -723,7 +812,7 @@ class FederationHandler(BaseHandler):
         state_events = {}
         events_to_state = {}
         for e_id in edges:
-            state, auth = yield self.federation_client.get_state_for_room(
+            state, auth = yield self._get_state_for_room(
                 destination=dest, room_id=room_id, event_id=e_id
             )
             auth_events.update({a.event_id: a for a in auth})
-- 
cgit 1.4.1


From 20d5ba16e626aa4217492c83dda9fabd36bd5d2b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 11 Dec 2019 16:37:51 +0000
Subject: Add `include_event_in_state` to _get_state_for_room (#6521)

Make it return the state *after* the requested event, rather than the one
before it. This is a bit easier and requires fewer calls to
get_events_from_store_or_dest.
---
 changelog.d/6521.misc          |  1 +
 synapse/handlers/federation.py | 39 +++++++++++++++++++++------------------
 2 files changed, 22 insertions(+), 18 deletions(-)
 create mode 100644 changelog.d/6521.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6521.misc b/changelog.d/6521.misc
new file mode 100644
index 0000000000..d9a44389b9
--- /dev/null
+++ b/changelog.d/6521.misc
@@ -0,0 +1 @@
+Refactor some code in the event authentication path for clarity.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index c0dcf9abf8..31c9132ae9 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -379,22 +379,10 @@ class FederationHandler(BaseHandler):
                             (
                                 remote_state,
                                 got_auth_chain,
-                            ) = yield self._get_state_for_room(origin, room_id, p)
-
-                            # we want the state *after* p; _get_state_for_room returns the
-                            # state *before* p.
-                            remote_event = yield self.federation_client.get_pdu(
-                                [origin], p, room_version, outlier=True
+                            ) = yield self._get_state_for_room(
+                                origin, room_id, p, include_event_in_state=True
                             )
 
-                            if remote_event is None:
-                                raise Exception(
-                                    "Unable to get missing prev_event %s" % (p,)
-                                )
-
-                            if remote_event.is_state():
-                                remote_state.append(remote_event)
-
                             # XXX hrm I'm not convinced that duplicate events will compare
                             # for equality, so I'm not sure this does what the author
                             # hoped.
@@ -583,13 +571,15 @@ class FederationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     @log_function
-    def _get_state_for_room(self, destination, room_id, event_id):
+    def _get_state_for_room(self, destination, room_id, event_id, include_event_in_state):
         """Requests all of the room state at a given event from a remote homeserver.
 
         Args:
             destination (str): The remote homeserver to query for the state.
             room_id (str): The id of the room we're interested in.
             event_id (str): The id of the event we want the state at.
+            include_event_in_state: if true, the event itself will be included in the
+                returned state event list.
 
         Returns:
             Deferred[Tuple[List[EventBase], List[EventBase]]]:
@@ -604,6 +594,10 @@ class FederationHandler(BaseHandler):
         )
 
         desired_events = set(state_event_ids + auth_event_ids)
+
+        if include_event_in_state:
+            desired_events.add(event_id)
+
         event_map = yield self._get_events_from_store_or_dest(
             destination, room_id, desired_events
         )
@@ -616,12 +610,21 @@ class FederationHandler(BaseHandler):
                 failed_to_fetch,
             )
 
-        pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
-        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
+        remote_state = [
+            event_map[e_id] for e_id in state_event_ids if e_id in event_map
+        ]
 
+        if include_event_in_state:
+            remote_event = event_map.get(event_id)
+            if not remote_event:
+                raise Exception("Unable to get missing prev_event %s" % (event_id,))
+            if remote_event.is_state():
+                remote_state.append(remote_event)
+
+        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
         auth_chain.sort(key=lambda e: e.depth)
 
-        return pdus, auth_chain
+        return remote_state, auth_chain
 
     @defer.inlineCallbacks
     def _get_events_from_store_or_dest(self, destination, room_id, event_ids):
-- 
cgit 1.4.1


From 35bbe4ca794d7b7b1c5b008211a377f54deecb5d Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 12 Dec 2019 12:57:45 +0000
Subject: Check the room_id of events when fetching room state/auth (#6524)

When we request the state/auth_events to populate a backwards extremity (on
backfill or in the case of missing events in a transaction push), we should
check that the returned events are in the right room rather than blindly using
them in the room state or auth chain.

Given that _get_events_from_store_or_dest takes a room_id, it seems clear that
it should be sanity-checking the room_id of the requested events, so let's do
it there.
---
 changelog.d/6524.misc          |  2 ++
 synapse/handlers/federation.py | 78 +++++++++++++++++++++++++++++-------------
 2 files changed, 56 insertions(+), 24 deletions(-)
 create mode 100644 changelog.d/6524.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6524.misc b/changelog.d/6524.misc
new file mode 100644
index 0000000000..f885597426
--- /dev/null
+++ b/changelog.d/6524.misc
@@ -0,0 +1,2 @@
+Improve sanity-checking when receiving events over federation.
+
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 31c9132ae9..ebeffbb768 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -571,7 +571,9 @@ class FederationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     @log_function
-    def _get_state_for_room(self, destination, room_id, event_id, include_event_in_state):
+    def _get_state_for_room(
+        self, destination, room_id, event_id, include_event_in_state
+    ):
         """Requests all of the room state at a given event from a remote homeserver.
 
         Args:
@@ -635,6 +637,10 @@ class FederationHandler(BaseHandler):
             room_id (str)
             event_ids (Iterable[str])
 
+        If we fail to fetch any of the events, a warning will be logged, and the event
+        will be omitted from the result. Likewise, any events which turn out not to
+        be in the given room.
+
         Returns:
             Deferred[dict[str, EventBase]]: A deferred resolving to a map
             from event_id to event
@@ -643,35 +649,59 @@ class FederationHandler(BaseHandler):
 
         missing_events = set(event_ids) - fetched_events.keys()
 
-        if not missing_events:
-            return fetched_events
+        if missing_events:
+            logger.debug(
+                "Fetching unknown state/auth events %s for room %s",
+                missing_events,
+                room_id,
+            )
 
-        logger.debug(
-            "Fetching unknown state/auth events %s for room %s",
-            missing_events,
-            event_ids,
-        )
+            room_version = yield self.store.get_room_version(room_id)
 
-        room_version = yield self.store.get_room_version(room_id)
+            # XXX 20 requests at once? really?
+            for batch in batch_iter(missing_events, 20):
+                deferreds = [
+                    run_in_background(
+                        self.federation_client.get_pdu,
+                        destinations=[destination],
+                        event_id=e_id,
+                        room_version=room_version,
+                    )
+                    for e_id in batch
+                ]
 
-        # XXX 20 requests at once? really?
-        for batch in batch_iter(missing_events, 20):
-            deferreds = [
-                run_in_background(
-                    self.federation_client.get_pdu,
-                    destinations=[destination],
-                    event_id=e_id,
-                    room_version=room_version,
+                res = yield make_deferred_yieldable(
+                    defer.DeferredList(deferreds, consumeErrors=True)
                 )
-                for e_id in batch
-            ]
 
-            res = yield make_deferred_yieldable(
-                defer.DeferredList(deferreds, consumeErrors=True)
+                for success, result in res:
+                    if success and result:
+                        fetched_events[result.event_id] = result
+
+        # check for events which were in the wrong room.
+        #
+        # this can happen if a remote server claims that the state or
+        # auth_events at an event in room A are actually events in room B
+
+        bad_events = list(
+            (event_id, event.room_id)
+            for event_id, event in fetched_events.items()
+            if event.room_id != room_id
+        )
+
+        for bad_event_id, bad_room_id in bad_events:
+            # This is a bogus situation, but since we may only discover it a long time
+            # after it happened, we try our best to carry on, by just omitting the
+            # bad events from the returned auth/state set.
+            logger.warning(
+                "Remote server %s claims event %s in room %s is an auth/state "
+                "event in room %s",
+                destination,
+                bad_event_id,
+                bad_room_id,
+                room_id,
             )
-            for success, result in res:
-                if success and result:
-                    fetched_events[result.event_id] = result
+            del fetched_events[bad_event_id]
 
         return fetched_events
 
-- 
cgit 1.4.1


From 6577f2d8877b89f7198f7fb03cf57f10a75728ca Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 13 Dec 2019 11:44:41 +0000
Subject: Sanity-check room ids in event auth (#6530)

When we do an event auth operation, check that all of the events involved are
in the right room.
---
 changelog.d/6530.misc |  2 ++
 synapse/event_auth.py | 12 ++++++++++++
 2 files changed, 14 insertions(+)
 create mode 100644 changelog.d/6530.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6530.misc b/changelog.d/6530.misc
new file mode 100644
index 0000000000..f885597426
--- /dev/null
+++ b/changelog.d/6530.misc
@@ -0,0 +1,2 @@
+Improve sanity-checking when receiving events over federation.
+
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index ec3243b27b..d184b0273b 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -48,6 +48,18 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
     if not hasattr(event, "room_id"):
         raise AuthError(500, "Event has no room_id: %s" % event)
 
+    room_id = event.room_id
+
+    # I'm not really expecting to get auth events in the wrong room, but let's
+    # sanity-check it
+    for auth_event in auth_events.values():
+        if auth_event.room_id != room_id:
+            raise Exception(
+                "During auth for event %s in room %s, found event %s in the state "
+                "which is in room %s"
+                % (event.event_id, room_id, auth_event.event_id, auth_event.room_id)
+            )
+
     if do_sig_check:
         sender_domain = get_domain_from_id(event.sender)
 
-- 
cgit 1.4.1


From 83895316d4e18d4a52c43524942d98f864bac6f9 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 13 Dec 2019 12:55:32 +0000
Subject: sanity-checking for events used in state res (#6531)

When we perform state resolution, check that all of the events involved are in
the right room.
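
Mechanically, the change threads a room_id argument through the resolution
entry points and helpers, and every event lookup now verifies the room before
the event is used. A simplified synchronous sketch of the guard added to
_get_event in synapse/state/v2.py (the real function is a Twisted
inlineCallbacks generator fetching from a StateResolutionStore; fetch_events
is an illustrative stand-in):

def get_event_checked(room_id, event_id, event_map, fetch_events):
    """Look up event_id in event_map, fetching it if necessary, and check
    that it belongs to room_id before returning it."""
    if event_id not in event_map:
        # fetch_events stands in for state_res_store.get_events and should
        # return a dict of event_id -> event
        event_map.update(fetch_events([event_id]))
    event = event_map[event_id]
    if event.room_id != room_id:
        raise Exception(
            "In state res for room %s, event %s is in %s"
            % (room_id, event_id, event.room_id)
        )
    return event
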
---
 changelog.d/6531.misc          |   1 +
 synapse/handlers/federation.py |   1 +
 synapse/state/__init__.py      |  32 ++++++++-----
 synapse/state/v1.py            |  34 +++++++++++---
 synapse/state/v2.py            | 100 ++++++++++++++++++++++++++++++-----------
 tests/state/test_v2.py         |   3 ++
 6 files changed, 128 insertions(+), 43 deletions(-)
 create mode 100644 changelog.d/6531.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6531.misc b/changelog.d/6531.misc
new file mode 100644
index 0000000000..598efb79fc
--- /dev/null
+++ b/changelog.d/6531.misc
@@ -0,0 +1 @@
+Improve sanity-checking when receiving events over federation.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index ebeffbb768..fd3f5ced55 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -397,6 +397,7 @@ class FederationHandler(BaseHandler):
                                 event_map[x.event_id] = x
 
                     state_map = yield resolve_events_with_store(
+                        room_id,
                         room_version,
                         state_maps,
                         event_map,
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 139beef8ed..0e75e94c6f 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -16,7 +16,7 @@
 
 import logging
 from collections import namedtuple
-from typing import Iterable, Optional
+from typing import Dict, Iterable, List, Optional, Tuple
 
 from six import iteritems, itervalues
 
@@ -416,6 +416,7 @@ class StateHandler(object):
 
         with Measure(self.clock, "state._resolve_events"):
             new_state = yield resolve_events_with_store(
+                event.room_id,
                 room_version,
                 state_set_ids,
                 event_map=state_map,
@@ -461,7 +462,7 @@ class StateResolutionHandler(object):
         not be called for a single state group
 
         Args:
-            room_id (str): room we are resolving for (used for logging)
+            room_id (str): room we are resolving for (used for logging and sanity checks)
             room_version (str): version of the room
             state_groups_ids (dict[int, dict[(str, str), str]]):
                  map from state group id to the state in that state group
@@ -517,6 +518,7 @@ class StateResolutionHandler(object):
                 logger.info("Resolving conflicted state for %r", room_id)
                 with Measure(self.clock, "state._resolve_events"):
                     new_state = yield resolve_events_with_store(
+                        room_id,
                         room_version,
                         list(itervalues(state_groups_ids)),
                         event_map=event_map,
@@ -588,36 +590,44 @@ def _make_state_cache_entry(new_state, state_groups_ids):
     )
 
 
-def resolve_events_with_store(room_version, state_sets, event_map, state_res_store):
+def resolve_events_with_store(
+    room_id: str,
+    room_version: str,
+    state_sets: List[Dict[Tuple[str, str], str]],
+    event_map: Optional[Dict[str, EventBase]],
+    state_res_store: "StateResolutionStore",
+):
     """
     Args:
-        room_version(str): Version of the room
+        room_id: the room we are working in
+
+        room_version: Version of the room
 
-        state_sets(list): List of dicts of (type, state_key) -> event_id,
+        state_sets: List of dicts of (type, state_key) -> event_id,
             which are the different state groups to resolve.
 
-        event_map(dict[str,FrozenEvent]|None):
+        event_map:
             a dict from event_id to event, for any events that we happen to
             have in flight (eg, those currently being persisted). This will be
             used as a starting point for finding the state we need; any missing
             events will be requested via state_map_factory.
 
-            If None, all events will be fetched via state_map_factory.
+            If None, all events will be fetched via state_res_store.
 
-        state_res_store (StateResolutionStore)
+        state_res_store: a place to fetch events from
 
-    Returns
+    Returns:
         Deferred[dict[(str, str), str]]:
             a map from (type, state_key) to event_id.
     """
     v = KNOWN_ROOM_VERSIONS[room_version]
     if v.state_res == StateResolutionVersions.V1:
         return v1.resolve_events_with_store(
-            state_sets, event_map, state_res_store.get_events
+            room_id, state_sets, event_map, state_res_store.get_events
         )
     else:
         return v2.resolve_events_with_store(
-            room_version, state_sets, event_map, state_res_store
+            room_id, room_version, state_sets, event_map, state_res_store
         )
 
 
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index a2f92d9ff9..b2f9865f39 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -15,6 +15,7 @@
 
 import hashlib
 import logging
+from typing import Callable, Dict, List, Optional, Tuple
 
 from six import iteritems, iterkeys, itervalues
 
@@ -24,6 +25,7 @@ from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
 from synapse.api.room_versions import RoomVersions
+from synapse.events import EventBase
 
 logger = logging.getLogger(__name__)
 
@@ -32,13 +34,20 @@ POWER_KEY = (EventTypes.PowerLevels, "")
 
 
 @defer.inlineCallbacks
-def resolve_events_with_store(state_sets, event_map, state_map_factory):
+def resolve_events_with_store(
+    room_id: str,
+    state_sets: List[Dict[Tuple[str, str], str]],
+    event_map: Optional[Dict[str, EventBase]],
+    state_map_factory: Callable,
+):
     """
     Args:
-        state_sets(list): List of dicts of (type, state_key) -> event_id,
+        room_id: the room we are working in
+
+        state_sets: List of dicts of (type, state_key) -> event_id,
             which are the different state groups to resolve.
 
-        event_map(dict[str,FrozenEvent]|None):
+        event_map:
             a dict from event_id to event, for any events that we happen to
             have in flight (eg, those currently being persisted). This will be
             used as a starting point for finding the state we need; any missing
@@ -46,11 +55,11 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory):
 
             If None, all events will be fetched via state_map_factory.
 
-        state_map_factory(func): will be called
+        state_map_factory: will be called
             with a list of event_ids that are needed, and should return with
             a Deferred of dict of event_id to event.
 
-    Returns
+    Returns:
         Deferred[dict[(str, str), str]]:
             a map from (type, state_key) to event_id.
     """
@@ -76,6 +85,14 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory):
     if event_map is not None:
         state_map.update(event_map)
 
+    # everything in the state map should be in the right room
+    for event in state_map.values():
+        if event.room_id != room_id:
+            raise Exception(
+                "Attempting to state-resolve for room %s with event %s which is in %s"
+                % (room_id, event.event_id, event.room_id,)
+            )
+
     # get the ids of the auth events which allow us to authenticate the
     # conflicted state, picking only from the unconflicting state.
     #
@@ -95,6 +112,13 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory):
     )
 
     state_map_new = yield state_map_factory(new_needed_events)
+    for event in state_map_new.values():
+        if event.room_id != room_id:
+            raise Exception(
+                "Attempting to state-resolve for room %s with event %s which is in %s"
+                % (room_id, event.event_id, event.room_id,)
+            )
+
     state_map.update(state_map_new)
 
     return _resolve_with_state(
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index b327c86f40..cb77ed5b78 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -16,29 +16,40 @@
 import heapq
 import itertools
 import logging
+from typing import Dict, List, Optional, Tuple
 
 from six import iteritems, itervalues
 
 from twisted.internet import defer
 
+import synapse.state
 from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
+from synapse.events import EventBase
 
 logger = logging.getLogger(__name__)
 
 
 @defer.inlineCallbacks
-def resolve_events_with_store(room_version, state_sets, event_map, state_res_store):
+def resolve_events_with_store(
+    room_id: str,
+    room_version: str,
+    state_sets: List[Dict[Tuple[str, str], str]],
+    event_map: Optional[Dict[str, EventBase]],
+    state_res_store: "synapse.state.StateResolutionStore",
+):
     """Resolves the state using the v2 state resolution algorithm
 
     Args:
-        room_version (str): The room version
+        room_id: the room we are working in
+
+        room_version: The room version
 
-        state_sets(list): List of dicts of (type, state_key) -> event_id,
+        state_sets: List of dicts of (type, state_key) -> event_id,
             which are the different state groups to resolve.
 
-        event_map(dict[str,FrozenEvent]|None):
+        event_map:
             a dict from event_id to event, for any events that we happen to
             have in flight (eg, those currently being persisted). This will be
             used as a starting point for finding the state we need; any missing
@@ -46,9 +57,9 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
 
             If None, all events will be fetched via state_res_store.
 
-        state_res_store (StateResolutionStore)
+        state_res_store: a place to fetch events from
 
-    Returns
+    Returns:
         Deferred[dict[(str, str), str]]:
             a map from (type, state_key) to event_id.
     """
@@ -84,6 +95,14 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
     )
     event_map.update(events)
 
+    # everything in the event map should be in the right room
+    for event in event_map.values():
+        if event.room_id != room_id:
+            raise Exception(
+                "Attempting to state-resolve for room %s with event %s which is in %s"
+                % (room_id, event.event_id, event.room_id,)
+            )
+
     full_conflicted_set = set(eid for eid in full_conflicted_set if eid in event_map)
 
     logger.debug("%d full_conflicted_set entries", len(full_conflicted_set))
@@ -94,13 +113,14 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
     )
 
     sorted_power_events = yield _reverse_topological_power_sort(
-        power_events, event_map, state_res_store, full_conflicted_set
+        room_id, power_events, event_map, state_res_store, full_conflicted_set
     )
 
     logger.debug("sorted %d power events", len(sorted_power_events))
 
     # Now sequentially auth each one
     resolved_state = yield _iterative_auth_checks(
+        room_id,
         room_version,
         sorted_power_events,
         unconflicted_state,
@@ -121,13 +141,18 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
 
     pl = resolved_state.get((EventTypes.PowerLevels, ""), None)
     leftover_events = yield _mainline_sort(
-        leftover_events, pl, event_map, state_res_store
+        room_id, leftover_events, pl, event_map, state_res_store
     )
 
     logger.debug("resolving remaining events")
 
     resolved_state = yield _iterative_auth_checks(
-        room_version, leftover_events, resolved_state, event_map, state_res_store
+        room_id,
+        room_version,
+        leftover_events,
+        resolved_state,
+        event_map,
+        state_res_store,
     )
 
     logger.debug("resolved")
@@ -141,11 +166,12 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
 
 
 @defer.inlineCallbacks
-def _get_power_level_for_sender(event_id, event_map, state_res_store):
+def _get_power_level_for_sender(room_id, event_id, event_map, state_res_store):
     """Return the power level of the sender of the given event according to
     their auth events.
 
     Args:
+        room_id (str)
         event_id (str)
         event_map (dict[str,FrozenEvent])
         state_res_store (StateResolutionStore)
@@ -153,11 +179,11 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store):
     Returns:
         Deferred[int]
     """
-    event = yield _get_event(event_id, event_map, state_res_store)
+    event = yield _get_event(room_id, event_id, event_map, state_res_store)
 
     pl = None
     for aid in event.auth_event_ids():
-        aev = yield _get_event(aid, event_map, state_res_store)
+        aev = yield _get_event(room_id, aid, event_map, state_res_store)
         if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
             pl = aev
             break
@@ -165,7 +191,7 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store):
     if pl is None:
         # Couldn't find power level. Check if they're the creator of the room
         for aid in event.auth_event_ids():
-            aev = yield _get_event(aid, event_map, state_res_store)
+            aev = yield _get_event(room_id, aid, event_map, state_res_store)
             if (aev.type, aev.state_key) == (EventTypes.Create, ""):
                 if aev.content.get("creator") == event.sender:
                     return 100
@@ -279,7 +305,7 @@ def _is_power_event(event):
 
 @defer.inlineCallbacks
 def _add_event_and_auth_chain_to_graph(
-    graph, event_id, event_map, state_res_store, auth_diff
+    graph, room_id, event_id, event_map, state_res_store, auth_diff
 ):
     """Helper function for _reverse_topological_power_sort that add the event
     and its auth chain (that is in the auth diff) to the graph
@@ -287,6 +313,7 @@ def _add_event_and_auth_chain_to_graph(
     Args:
         graph (dict[str, set[str]]): A map from event ID to the event's auth
             event IDs
+        room_id (str): the room we are working in
         event_id (str): Event to add to the graph
         event_map (dict[str,FrozenEvent])
         state_res_store (StateResolutionStore)
@@ -298,7 +325,7 @@ def _add_event_and_auth_chain_to_graph(
         eid = state.pop()
         graph.setdefault(eid, set())
 
-        event = yield _get_event(eid, event_map, state_res_store)
+        event = yield _get_event(room_id, eid, event_map, state_res_store)
         for aid in event.auth_event_ids():
             if aid in auth_diff:
                 if aid not in graph:
@@ -308,11 +335,14 @@ def _add_event_and_auth_chain_to_graph(
 
 
 @defer.inlineCallbacks
-def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_diff):
+def _reverse_topological_power_sort(
+    room_id, event_ids, event_map, state_res_store, auth_diff
+):
     """Returns a list of the event_ids sorted by reverse topological ordering,
     and then by power level and origin_server_ts
 
     Args:
+        room_id (str): the room we are working in
         event_ids (list[str]): The events to sort
         event_map (dict[str,FrozenEvent])
         state_res_store (StateResolutionStore)
@@ -325,12 +355,14 @@ def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_
     graph = {}
     for event_id in event_ids:
         yield _add_event_and_auth_chain_to_graph(
-            graph, event_id, event_map, state_res_store, auth_diff
+            graph, room_id, event_id, event_map, state_res_store, auth_diff
         )
 
     event_to_pl = {}
     for event_id in graph:
-        pl = yield _get_power_level_for_sender(event_id, event_map, state_res_store)
+        pl = yield _get_power_level_for_sender(
+            room_id, event_id, event_map, state_res_store
+        )
         event_to_pl[event_id] = pl
 
     def _get_power_order(event_id):
@@ -348,12 +380,13 @@ def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_
 
 @defer.inlineCallbacks
 def _iterative_auth_checks(
-    room_version, event_ids, base_state, event_map, state_res_store
+    room_id, room_version, event_ids, base_state, event_map, state_res_store
 ):
     """Sequentially apply auth checks to each event in given list, updating the
     state as it goes along.
 
     Args:
+        room_id (str)
         room_version (str)
         event_ids (list[str]): Ordered list of events to apply auth checks to
         base_state (dict[tuple[str, str], str]): The set of state to start with
@@ -370,7 +403,7 @@ def _iterative_auth_checks(
 
         auth_events = {}
         for aid in event.auth_event_ids():
-            ev = yield _get_event(aid, event_map, state_res_store)
+            ev = yield _get_event(room_id, aid, event_map, state_res_store)
 
             if ev.rejected_reason is None:
                 auth_events[(ev.type, ev.state_key)] = ev
@@ -378,7 +411,7 @@ def _iterative_auth_checks(
         for key in event_auth.auth_types_for_event(event):
             if key in resolved_state:
                 ev_id = resolved_state[key]
-                ev = yield _get_event(ev_id, event_map, state_res_store)
+                ev = yield _get_event(room_id, ev_id, event_map, state_res_store)
 
                 if ev.rejected_reason is None:
                     auth_events[key] = event_map[ev_id]
@@ -400,11 +433,14 @@ def _iterative_auth_checks(
 
 
 @defer.inlineCallbacks
-def _mainline_sort(event_ids, resolved_power_event_id, event_map, state_res_store):
+def _mainline_sort(
+    room_id, event_ids, resolved_power_event_id, event_map, state_res_store
+):
     """Returns a sorted list of event_ids sorted by mainline ordering based on
     the given event resolved_power_event_id
 
     Args:
+        room_id (str): room we're working in
         event_ids (list[str]): Events to sort
         resolved_power_event_id (str): The final resolved power level event ID
         event_map (dict[str,FrozenEvent])
@@ -417,11 +453,11 @@ def _mainline_sort(event_ids, resolved_power_event_id, event_map, state_res_stor
     pl = resolved_power_event_id
     while pl:
         mainline.append(pl)
-        pl_ev = yield _get_event(pl, event_map, state_res_store)
+        pl_ev = yield _get_event(room_id, pl, event_map, state_res_store)
         auth_events = pl_ev.auth_event_ids()
         pl = None
         for aid in auth_events:
-            ev = yield _get_event(aid, event_map, state_res_store)
+            ev = yield _get_event(room_id, aid, event_map, state_res_store)
             if (ev.type, ev.state_key) == (EventTypes.PowerLevels, ""):
                 pl = aid
                 break
@@ -457,6 +493,8 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
         Deferred[int]
     """
 
+    room_id = event.room_id
+
+    # We do an iterative search, replacing `event` with the power level in its
     # auth events (if any)
     while event:
@@ -468,7 +506,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
         event = None
 
         for aid in auth_events:
-            aev = yield _get_event(aid, event_map, state_res_store)
+            aev = yield _get_event(room_id, aid, event_map, state_res_store)
             if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
                 event = aev
                 break
@@ -478,11 +516,12 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
 
 
 @defer.inlineCallbacks
-def _get_event(event_id, event_map, state_res_store):
+def _get_event(room_id, event_id, event_map, state_res_store):
     """Helper function to look up event in event_map, falling back to looking
     it up in the store
 
     Args:
+        room_id (str)
         event_id (str)
         event_map (dict[str,FrozenEvent])
         state_res_store (StateResolutionStore)
@@ -493,7 +532,14 @@ def _get_event(event_id, event_map, state_res_store):
     if event_id not in event_map:
         events = yield state_res_store.get_events([event_id], allow_rejected=True)
         event_map.update(events)
-    return event_map[event_id]
+    event = event_map[event_id]
+    assert event is not None
+    if event.room_id != room_id:
+        raise Exception(
+            "In state res for room %s, event %s is in %s"
+            % (room_id, event_id, event.room_id)
+        )
+    return event
 
 
 def lexicographical_topological_sort(graph, key):
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 8d3845c870..0f341d3ac3 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -58,6 +58,7 @@ class FakeEvent(object):
         self.type = type
         self.state_key = state_key
         self.content = content
+        self.room_id = ROOM_ID
 
     def to_event(self, auth_events, prev_events):
         """Given the auth_events and prev_events, convert to a Frozen Event
@@ -418,6 +419,7 @@ class StateTestCase(unittest.TestCase):
                 state_before = dict(state_at_event[prev_events[0]])
             else:
                 state_d = resolve_events_with_store(
+                    ROOM_ID,
                     RoomVersions.V2.identifier,
                     [state_at_event[n] for n in prev_events],
                     event_map=event_map,
@@ -565,6 +567,7 @@ class SimpleParamStateTestCase(unittest.TestCase):
         # Test that we correctly handle passing `None` as the event_map
 
         state_d = resolve_events_with_store(
+            ROOM_ID,
             RoomVersions.V2.identifier,
             [self.state_at_bob, self.state_at_charlie],
             event_map=None,
-- 
cgit 1.4.1


From ff773ff7243fbbe88fabff952d6faded0241c64e Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 16 Dec 2019 12:26:28 +0000
Subject: Persist auth/state events at backwards extremities when we fetch them
 (#6526)

The main point here is to make sure that the state returned by _get_state_for_room has been authed before we try to use it as state in the room.
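
The new fetch path drops the hand-rolled 20-at-a-time batching in favour of
concurrently_execute, fetching each missing event (plus, recursively, its auth
events) with at most five requests in flight and persisting the results as
outliers. A rough asyncio sketch of that shape (Synapse itself uses Twisted
deferreds, and fetch_pdu is an illustrative stand-in for
federation_client.get_pdu):

import asyncio

async def concurrently_execute(func, args, limit):
    # Simplified analogue of synapse.util.async_helpers.concurrently_execute:
    # run func over args with at most `limit` calls in flight at once.
    sem = asyncio.Semaphore(limit)

    async def _run(arg):
        async with sem:
            await func(arg)

    await asyncio.gather(*(_run(arg) for arg in args))

async def fetch_events(event_ids, fetch_pdu):
    fetched = []

    async def get_event(event_id):
        try:
            event = await fetch_pdu(event_id)
            if event is None:
                print("server didn't return event %s" % (event_id,))
                return
            fetched.append(event)
        except Exception as e:
            # as in the diff: log the failure and carry on with the rest
            print("error fetching %s: %s" % (event_id, e))

    await concurrently_execute(get_event, event_ids, 5)
    return fetched
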
---
 changelog.d/6526.bugfix        |   1 +
 synapse/handlers/federation.py | 243 ++++++++++++++---------------------------
 synapse/util/async_helpers.py  |   4 +-
 3 files changed, 83 insertions(+), 165 deletions(-)
 create mode 100644 changelog.d/6526.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6526.bugfix b/changelog.d/6526.bugfix
new file mode 100644
index 0000000000..53214b0748
--- /dev/null
+++ b/changelog.d/6526.bugfix
@@ -0,0 +1 @@
+Fix a bug which could cause the federation server to incorrectly return errors when handling certain obscure event graphs.
\ No newline at end of file
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index fd3f5ced55..f4ac0bfbc8 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -64,8 +64,7 @@ from synapse.replication.http.federation import (
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
 from synapse.state import StateResolutionStore, resolve_events_with_store
 from synapse.types import UserID, get_domain_from_id
-from synapse.util import batch_iter, unwrapFirstError
-from synapse.util.async_helpers import Linearizer
+from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.distributor import user_joined_room
 from synapse.util.retryutils import NotRetryingDestination
 from synapse.visibility import filter_events_for_server
@@ -240,7 +239,6 @@ class FederationHandler(BaseHandler):
             return None
 
         state = None
-        auth_chain = []
 
         # Get missing pdus if necessary.
         if not pdu.internal_metadata.is_outlier():
@@ -346,7 +344,6 @@ class FederationHandler(BaseHandler):
 
                 # Calculate the state after each of the previous events, and
                 # resolve them to find the correct state at the current event.
-                auth_chains = set()
                 event_map = {event_id: pdu}
                 try:
                     # Get the state of the events we know about
@@ -370,24 +367,14 @@ class FederationHandler(BaseHandler):
                             p,
                         )
 
-                        room_version = yield self.store.get_room_version(room_id)
-
                         with nested_logging_context(p):
                             # note that if any of the missing prevs share missing state or
                             # auth events, the requests to fetch those events are deduped
                             # by the get_pdu_cache in federation_client.
-                            (
-                                remote_state,
-                                got_auth_chain,
-                            ) = yield self._get_state_for_room(
+                            (remote_state, _,) = yield self._get_state_for_room(
                                 origin, room_id, p, include_event_in_state=True
                             )
 
-                            # XXX hrm I'm not convinced that duplicate events will compare
-                            # for equality, so I'm not sure this does what the author
-                            # hoped.
-                            auth_chains.update(got_auth_chain)
-
                             remote_state_map = {
                                 (x.type, x.state_key): x.event_id for x in remote_state
                             }
@@ -396,6 +383,7 @@ class FederationHandler(BaseHandler):
                             for x in remote_state:
                                 event_map[x.event_id] = x
 
+                    room_version = yield self.store.get_room_version(room_id)
                     state_map = yield resolve_events_with_store(
                         room_id,
                         room_version,
@@ -417,7 +405,6 @@ class FederationHandler(BaseHandler):
                     event_map.update(evs)
 
                     state = [event_map[e] for e in six.itervalues(state_map)]
-                    auth_chain = list(auth_chains)
                 except Exception:
                     logger.warning(
                         "[%s %s] Error attempting to resolve state at missing "
@@ -433,9 +420,7 @@ class FederationHandler(BaseHandler):
                         affected=event_id,
                     )
 
-        yield self._process_received_pdu(
-            origin, pdu, state=state, auth_chain=auth_chain
-        )
+        yield self._process_received_pdu(origin, pdu, state=state)
 
     @defer.inlineCallbacks
     def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth):
@@ -638,6 +623,8 @@ class FederationHandler(BaseHandler):
             room_id (str)
             event_ids (Iterable[str])
 
+        Persists any events we don't already have as outliers.
+
         If we fail to fetch any of the events, a warning will be logged, and the event
         will be omitted from the result. Likewise, any events which turn out not to
         be in the given room.
@@ -657,27 +644,15 @@ class FederationHandler(BaseHandler):
                 room_id,
             )
 
-            room_version = yield self.store.get_room_version(room_id)
-
-            # XXX 20 requests at once? really?
-            for batch in batch_iter(missing_events, 20):
-                deferreds = [
-                    run_in_background(
-                        self.federation_client.get_pdu,
-                        destinations=[destination],
-                        event_id=e_id,
-                        room_version=room_version,
-                    )
-                    for e_id in batch
-                ]
-
-                res = yield make_deferred_yieldable(
-                    defer.DeferredList(deferreds, consumeErrors=True)
-                )
+            yield self._get_events_and_persist(
+                destination=destination, room_id=room_id, events=missing_events
+            )
 
-                for success, result in res:
-                    if success and result:
-                        fetched_events[result.event_id] = result
+            # we need to make sure we re-load from the database to get the rejected
+            # state correct.
+            fetched_events.update(
+                (yield self.store.get_events(missing_events, allow_rejected=True))
+            )
 
         # check for events which were in the wrong room.
         #
@@ -707,50 +682,24 @@ class FederationHandler(BaseHandler):
         return fetched_events
 
     @defer.inlineCallbacks
-    def _process_received_pdu(self, origin, event, state, auth_chain):
+    def _process_received_pdu(self, origin, event, state):
         """ Called when we have a new pdu. We need to do auth checks and put it
         through the StateHandler.
+
+        Args:
+            origin: server sending the event
+
+            event: event to be persisted
+
+            state: Normally None, but if we are handling a gap in the graph
+                (ie, we are missing one or more prev_events), the resolved state at the
+                event
         """
         room_id = event.room_id
         event_id = event.event_id
 
         logger.debug("[%s %s] Processing event: %s", room_id, event_id, event)
 
-        event_ids = set()
-        if state:
-            event_ids |= {e.event_id for e in state}
-        if auth_chain:
-            event_ids |= {e.event_id for e in auth_chain}
-
-        seen_ids = yield self.store.have_seen_events(event_ids)
-
-        if state and auth_chain is not None:
-            # If we have any state or auth_chain given to us by the replication
-            # layer, then we should handle them (if we haven't before.)
-
-            event_infos = []
-
-            for e in itertools.chain(auth_chain, state):
-                if e.event_id in seen_ids:
-                    continue
-                e.internal_metadata.outlier = True
-                auth_ids = e.auth_event_ids()
-                auth = {
-                    (e.type, e.state_key): e
-                    for e in auth_chain
-                    if e.event_id in auth_ids or e.type == EventTypes.Create
-                }
-                event_infos.append(_NewEventInfo(event=e, auth_events=auth))
-                seen_ids.add(e.event_id)
-
-            logger.info(
-                "[%s %s] persisting newly-received auth/state events %s",
-                room_id,
-                event_id,
-                [e.event.event_id for e in event_infos],
-            )
-            yield self._handle_new_events(origin, event_infos)
-
         try:
             context = yield self._handle_new_event(origin, event, state=state)
         except AuthError as e:
@@ -806,8 +755,6 @@ class FederationHandler(BaseHandler):
         if dest == self.server_name:
             raise SynapseError(400, "Can't backfill from self.")
 
-        room_version = yield self.store.get_room_version(room_id)
-
         events = yield self.federation_client.backfill(
             dest, room_id, limit=limit, extremities=extremities
         )
@@ -836,6 +783,9 @@ class FederationHandler(BaseHandler):
 
         event_ids = set(e.event_id for e in events)
 
+        # build a list of events whose prev_events weren't in the batch.
+        # (XXX: this will include events whose prev_events we already have; that doesn't
+        # sound right?)
         edges = [ev.event_id for ev in events if set(ev.prev_event_ids()) - event_ids]
 
         logger.info("backfill: Got %d events with %d edges", len(events), len(edges))
@@ -864,95 +814,11 @@ class FederationHandler(BaseHandler):
         auth_events.update(
             {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map}
         )
-        missing_auth = required_auth - set(auth_events)
-        failed_to_fetch = set()
-
-        # Try and fetch any missing auth events from both DB and remote servers.
-        # We repeatedly do this until we stop finding new auth events.
-        while missing_auth - failed_to_fetch:
-            logger.info("Missing auth for backfill: %r", missing_auth)
-            ret_events = yield self.store.get_events(missing_auth - failed_to_fetch)
-            auth_events.update(ret_events)
-
-            required_auth.update(
-                a_id for event in ret_events.values() for a_id in event.auth_event_ids()
-            )
-            missing_auth = required_auth - set(auth_events)
 
-            if missing_auth - failed_to_fetch:
-                logger.info(
-                    "Fetching missing auth for backfill: %r",
-                    missing_auth - failed_to_fetch,
-                )
-
-                results = yield make_deferred_yieldable(
-                    defer.gatherResults(
-                        [
-                            run_in_background(
-                                self.federation_client.get_pdu,
-                                [dest],
-                                event_id,
-                                room_version=room_version,
-                                outlier=True,
-                                timeout=10000,
-                            )
-                            for event_id in missing_auth - failed_to_fetch
-                        ],
-                        consumeErrors=True,
-                    )
-                ).addErrback(unwrapFirstError)
-                auth_events.update({a.event_id: a for a in results if a})
-                required_auth.update(
-                    a_id
-                    for event in results
-                    if event
-                    for a_id in event.auth_event_ids()
-                )
-                missing_auth = required_auth - set(auth_events)
-
-                failed_to_fetch = missing_auth - set(auth_events)
-
-        seen_events = yield self.store.have_seen_events(
-            set(auth_events.keys()) | set(state_events.keys())
-        )
-
-        # We now have a chunk of events plus associated state and auth chain to
-        # persist. We do the persistence in two steps:
-        #   1. Auth events and state get persisted as outliers, plus the
-        #      backward extremities get persisted (as non-outliers).
-        #   2. The rest of the events in the chunk get persisted one by one, as
-        #      each one depends on the previous event for its state.
-        #
-        # The important thing is that events in the chunk get persisted as
-        # non-outliers, including when those events are also in the state or
-        # auth chain. Caution must therefore be taken to ensure that they are
-        # not accidentally marked as outliers.
-
-        # Step 1a: persist auth events that *don't* appear in the chunk
         ev_infos = []
-        for a in auth_events.values():
-            # We only want to persist auth events as outliers that we haven't
-            # seen and aren't about to persist as part of the backfilled chunk.
-            if a.event_id in seen_events or a.event_id in event_map:
-                continue
 
-            a.internal_metadata.outlier = True
-            ev_infos.append(
-                _NewEventInfo(
-                    event=a,
-                    auth_events={
-                        (
-                            auth_events[a_id].type,
-                            auth_events[a_id].state_key,
-                        ): auth_events[a_id]
-                        for a_id in a.auth_event_ids()
-                        if a_id in auth_events
-                    },
-                )
-            )
-
-        # Step 1b: persist the events in the chunk we fetched state for (i.e.
-        # the backwards extremities) as non-outliers.
+        # Step 1: persist the events in the chunk we fetched state for (i.e.
+        # the backwards extremities), with custom auth events and state
         for e_id in events_to_state:
             # For paranoia we ensure that these events are marked as
             # non-outliers
@@ -1194,6 +1060,57 @@ class FederationHandler(BaseHandler):
 
         return False
 
+    @defer.inlineCallbacks
+    def _get_events_and_persist(
+        self, destination: str, room_id: str, events: Iterable[str]
+    ):
+        """Fetch the given events from a server, and persist them as outliers.
+
+        Logs a warning if we can't find the given event.
+        """
+
+        room_version = yield self.store.get_room_version(room_id)
+
+        event_infos = []
+
+        async def get_event(event_id: str):
+            with nested_logging_context(event_id):
+                try:
+                    event = await self.federation_client.get_pdu(
+                        [destination], event_id, room_version, outlier=True,
+                    )
+                    if event is None:
+                        logger.warning(
+                            "Server %s didn't return event %s", destination, event_id,
+                        )
+                        return
+
+                    # recursively fetch the auth events for this event
+                    auth_events = await self._get_events_from_store_or_dest(
+                        destination, room_id, event.auth_event_ids()
+                    )
+                    auth = {}
+                    for auth_event_id in event.auth_event_ids():
+                        ae = auth_events.get(auth_event_id)
+                        if ae:
+                            auth[(ae.type, ae.state_key)] = ae
+
+                    event_infos.append(_NewEventInfo(event, None, auth))
+
+                except Exception as e:
+                    logger.warning(
+                        "Error fetching missing state/auth event %s: %s %s",
+                        event_id,
+                        type(e),
+                        e,
+                    )
+
+        yield concurrently_execute(get_event, events, 5)
+
+        yield self._handle_new_events(
+            destination, event_infos,
+        )
+
     def _sanity_check_event(self, ev):
         """
         Do some early sanity checks of a received event
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 5c4de2e69f..04b6abdc24 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -140,8 +140,8 @@ def concurrently_execute(func, args, limit):
 
     Args:
         func (func): Function to execute, should return a deferred or coroutine.
-        args (list): List of arguments to pass to func, each invocation of func
-            gets a signle argument.
+        args (Iterable): List of arguments to pass to func, each invocation of func
+            gets a single argument.
         limit (int): Maximum number of concurrent executions.
 
     Returns:
-- 
cgit 1.4.1


From bbb75ff6eeda25e2f0eebd0a6639efd48b4dbb3c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 16 Dec 2019 13:14:37 +0000
Subject: Exclude rejected state events when calculating state at backwards
 extrems (#6527)

This fixes a weird bug where, if you were determined enough, you could end up with a rejected event forming part of the state at a backwards extremity. Authing that backwards extremity would then lead to us trying to pull the rejected event from the db (with allow_rejected=False), which would fail with a 404.
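
The fix itself is a one-line predicate change; in isolation it amounts to the
following (attribute names as on Synapse's EventBase):

def usable_state_events(events):
    """Keep only state events that event auth accepted; rejected events
    must not contribute to the state at a backwards extremity."""
    return [
        ev for ev in events
        if ev.is_state() and ev.rejected_reason is None
    ]
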
---
 changelog.d/6527.bugfix        | 1 +
 synapse/handlers/federation.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6527.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6527.bugfix b/changelog.d/6527.bugfix
new file mode 100644
index 0000000000..53214b0748
--- /dev/null
+++ b/changelog.d/6527.bugfix
@@ -0,0 +1 @@
+Fix a bug which could cause the federation server to incorrectly return errors when handling certain obscure event graphs.
\ No newline at end of file
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index f4ac0bfbc8..abe02907b9 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -606,7 +606,7 @@ class FederationHandler(BaseHandler):
             remote_event = event_map.get(event_id)
             if not remote_event:
                 raise Exception("Unable to get missing prev_event %s" % (event_id,))
-            if remote_event.is_state():
+            if remote_event.is_state() and remote_event.rejected_reason is None:
                 remote_state.append(remote_event)
 
         auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
-- 
cgit 1.4.1


From 284e690aa0e37c0d4d7516fc2f02b2b2fede4601 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 16 Dec 2019 14:56:05 +0000
Subject: Update changelog.d/6553.bugfix

Co-Authored-By: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
 changelog.d/6553.bugfix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6553.bugfix b/changelog.d/6553.bugfix
index e8f55e2a76..4fe576b873 100644
--- a/changelog.d/6553.bugfix
+++ b/changelog.d/6553.bugfix
@@ -1 +1 @@
-Fix a bug causing responses to the `/context` client endpoint to not use the pruned version of the event the request is for.
+Fix a bug causing responses to the `/context` client endpoint to not use the pruned version of the event.
-- 
cgit 1.4.1


From bfb95654c97a8d3aa164eff96ecc13755c1c326d Mon Sep 17 00:00:00 2001
From: Will Hunt 
Date: Mon, 16 Dec 2019 16:11:55 +0000
Subject: Add option to allow profile queries without sharing a room (#6523)
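
Condensed into a standalone sketch, the new gate in BaseProfileHandler works
roughly as follows (shared_room_exists is a hypothetical stand-in for the
handler's real room-membership check, and the real code denies the query with
a proper error response rather than a bare exception):

def check_profile_query_allowed(config, requester, target, shared_room_exists):
    # Option off, or no requester (e.g. building a membership event): allow.
    if not config.limit_profile_requests_to_users_who_share_rooms or not requester:
        return
    # Always allow users to query their own profile.
    if requester == target:
        return
    if not shared_room_exists(requester, target):
        raise Exception("Profile lookup denied: users do not share a room")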

---
 changelog.d/6523.feature             |  1 +
 docs/sample_config.yaml              |  7 +++++++
 synapse/config/server.py             | 13 +++++++++++++
 synapse/handlers/profile.py          |  6 +++++-
 tests/rest/client/v1/test_profile.py |  2 ++
 5 files changed, 28 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6523.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6523.feature b/changelog.d/6523.feature
new file mode 100644
index 0000000000..798fa143df
--- /dev/null
+++ b/changelog.d/6523.feature
@@ -0,0 +1 @@
+Add option `limit_profile_requests_to_users_who_share_rooms` to require a local user to share a room with another user in order to query their profile information.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 4d44e631d1..1787248f53 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -54,6 +54,13 @@ pid_file: DATADIR/homeserver.pid
 #
 #require_auth_for_profile_requests: true
 
+# Uncomment to require a user to share a room with another user in order
+# to retrieve their profile information. Only checked on Client-Server
+# requests. Profile requests from other servers should be checked by the
+# requesting server. Defaults to 'false'.
+#
+#limit_profile_requests_to_users_who_share_rooms: true
+
 # If set to 'true', removes the need for authentication to access the server's
 # public rooms directory through the client API, meaning that anyone can
 # query the room directory. Defaults to 'false'.
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 50af858c76..38f6ff9edc 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -102,6 +102,12 @@ class ServerConfig(Config):
             "require_auth_for_profile_requests", False
         )
 
+        # Whether to require sharing a room with a user to retrieve their
+        # profile data
+        self.limit_profile_requests_to_users_who_share_rooms = config.get(
+            "limit_profile_requests_to_users_who_share_rooms", False,
+        )
+
         if "restrict_public_rooms_to_local_users" in config and (
             "allow_public_rooms_without_auth" in config
             or "allow_public_rooms_over_federation" in config
@@ -621,6 +627,13 @@ class ServerConfig(Config):
         #
         #require_auth_for_profile_requests: true
 
+        # Uncomment to require a user to share a room with another user in order
+        # to retrieve their profile information. Only checked on Client-Server
+        # requests. Profile requests from other servers should be checked by the
+        # requesting server. Defaults to 'false'.
+        #
+        #limit_profile_requests_to_users_who_share_rooms: true
+
         # If set to 'true', removes the need for authentication to access the server's
         # public rooms directory through the client API, meaning that anyone can
         # query the room directory. Defaults to 'false'.
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 1e5a4613c9..f9579d69ee 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -295,12 +295,16 @@ class BaseProfileHandler(BaseHandler):
                 be found to be in any room the server is in, and therefore the query
                 is denied.
         """
+
         # Implementation of MSC1301: don't allow looking up profiles if the
         # requester isn't in the same room as the target. We expect requester to
         # be None when this function is called outside of a profile query, e.g.
         # when building a membership event. In this case, we must allow the
         # lookup.
-        if not self.hs.config.require_auth_for_profile_requests or not requester:
+        if (
+            not self.hs.config.limit_profile_requests_to_users_who_share_rooms
+            or not requester
+        ):
             return
 
         # Always allow the user to query their own profile.
diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py
index 12c5e95cb5..8df58b4a63 100644
--- a/tests/rest/client/v1/test_profile.py
+++ b/tests/rest/client/v1/test_profile.py
@@ -237,6 +237,7 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase):
 
         config = self.default_config()
         config["require_auth_for_profile_requests"] = True
+        config["limit_profile_requests_to_users_who_share_rooms"] = True
         self.hs = self.setup_test_homeserver(config=config)
 
         return self.hs
@@ -309,6 +310,7 @@ class OwnProfileUnrestrictedTestCase(unittest.HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
         config = self.default_config()
         config["require_auth_for_profile_requests"] = True
+        config["limit_profile_requests_to_users_who_share_rooms"] = True
         self.hs = self.setup_test_homeserver(config=config)
 
         return self.hs
-- 
cgit 1.4.1


From 3fbe5b7ec3abd2864d8a64893fa494e9651c430a Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 16 Dec 2019 16:59:32 +0000
Subject: Add auth events as per spec. (#6556)

Previously we tried to be clever and filter out some unnecessary event
IDs to keep the auth chain small, but that had some annoying
interactions with state res v2 so we stop doing that for now.
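
After this change the selection is purely mechanical: take whatever current
state entries match auth_types_for_event. A runnable sketch of that shape
(auth_types_for_event here is a trivial stand-in; the real one in
synapse.event_auth also handles membership-specific keys):

def auth_types_for_event(event_type, sender):
    # Hypothetical minimal stand-in for synapse.event_auth.auth_types_for_event.
    return [
        ("m.room.create", ""),
        ("m.room.power_levels", ""),
        ("m.room.member", sender),
    ]

def compute_auth_event_ids(event_type, sender, current_state_ids):
    if event_type == "m.room.create":
        return []
    return [
        current_state_ids[key]
        for key in auth_types_for_event(event_type, sender)
        if key in current_state_ids
    ]

state = {
    ("m.room.create", ""): "$create",
    ("m.room.power_levels", ""): "$pl",
    ("m.room.member", "@alice:example.org"): "$alice_join",
}
print(compute_auth_event_ids("m.room.message", "@alice:example.org", state))
# -> ['$create', '$pl', '$alice_join']
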
---
 changelog.d/6556.bugfix |   1 +
 synapse/api/auth.py     | 101 ++++++++++++++++--------------------------------
 2 files changed, 35 insertions(+), 67 deletions(-)
 create mode 100644 changelog.d/6556.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6556.bugfix b/changelog.d/6556.bugfix
new file mode 100644
index 0000000000..e75639f5b4
--- /dev/null
+++ b/changelog.d/6556.bugfix
@@ -0,0 +1 @@
+Fix a cause of state resets in room versions 2 onwards.
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 5d0b7d2801..9fd52a8c77 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Dict, Tuple
 
 from six import itervalues
 
@@ -25,13 +26,7 @@ from twisted.internet import defer
 import synapse.logging.opentracing as opentracing
 import synapse.types
 from synapse import event_auth
-from synapse.api.constants import (
-    EventTypes,
-    JoinRules,
-    LimitBlockingTypes,
-    Membership,
-    UserTypes,
-)
+from synapse.api.constants import EventTypes, LimitBlockingTypes, Membership, UserTypes
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -513,71 +508,43 @@ class Auth(object):
         """
         return self.store.is_server_admin(user)
 
-    @defer.inlineCallbacks
-    def compute_auth_events(self, event, current_state_ids, for_verification=False):
-        if event.type == EventTypes.Create:
-            return []
-
-        auth_ids = []
-
-        key = (EventTypes.PowerLevels, "")
-        power_level_event_id = current_state_ids.get(key)
-
-        if power_level_event_id:
-            auth_ids.append(power_level_event_id)
-
-        key = (EventTypes.JoinRules, "")
-        join_rule_event_id = current_state_ids.get(key)
+    def compute_auth_events(
+        self,
+        event,
+        current_state_ids: Dict[Tuple[str, str], str],
+        for_verification: bool = False,
+    ):
+        """Given an event and current state return the list of event IDs used
+        to auth an event.
 
-        key = (EventTypes.Member, event.sender)
-        member_event_id = current_state_ids.get(key)
+        If `for_verification` is False then only return auth events that
+        should be added to the event's `auth_events`.
 
-        key = (EventTypes.Create, "")
-        create_event_id = current_state_ids.get(key)
-        if create_event_id:
-            auth_ids.append(create_event_id)
+        Returns:
+            defer.Deferred(list[str]): List of event IDs.
+        """
 
-        if join_rule_event_id:
-            join_rule_event = yield self.store.get_event(join_rule_event_id)
-            join_rule = join_rule_event.content.get("join_rule")
-            is_public = join_rule == JoinRules.PUBLIC if join_rule else False
-        else:
-            is_public = False
+        if event.type == EventTypes.Create:
+            return defer.succeed([])
+
+        # Currently we ignore the `for_verification` flag even though there are
+        # some situations where we can drop particular auth events when adding
+        # to the event's `auth_events` (e.g. joins pointing to previous joins
+        # when the room is publicly joinable). Dropping event IDs has the
+        # advantage that the auth chain for the room grows slower, but we use
+        # the auth chain in state resolution v2 to order events, which means
+        # care must be taken if dropping events to ensure that it doesn't
+        # introduce undesirable "state reset" behaviour.
+        #
+        # All of which sounds a bit tricky so we don't bother for now.
 
-        if event.type == EventTypes.Member:
-            e_type = event.content["membership"]
-            if e_type in [Membership.JOIN, Membership.INVITE]:
-                if join_rule_event_id:
-                    auth_ids.append(join_rule_event_id)
+        auth_ids = []
+        for etype, state_key in event_auth.auth_types_for_event(event):
+            auth_ev_id = current_state_ids.get((etype, state_key))
+            if auth_ev_id:
+                auth_ids.append(auth_ev_id)
 
-            if e_type == Membership.JOIN:
-                if member_event_id and not is_public:
-                    auth_ids.append(member_event_id)
-            else:
-                if member_event_id:
-                    auth_ids.append(member_event_id)
-
-                if for_verification:
-                    key = (EventTypes.Member, event.state_key)
-                    existing_event_id = current_state_ids.get(key)
-                    if existing_event_id:
-                        auth_ids.append(existing_event_id)
-
-            if e_type == Membership.INVITE:
-                if "third_party_invite" in event.content:
-                    key = (
-                        EventTypes.ThirdPartyInvite,
-                        event.content["third_party_invite"]["signed"]["token"],
-                    )
-                    third_party_invite_id = current_state_ids.get(key)
-                    if third_party_invite_id:
-                        auth_ids.append(third_party_invite_id)
-        elif member_event_id:
-            member_event = yield self.store.get_event(member_event_id)
-            if member_event.content["membership"] == Membership.JOIN:
-                auth_ids.append(member_event.event_id)
-
-        return auth_ids
+        return defer.succeed(auth_ids)
 
     @defer.inlineCallbacks
     def check_can_change_room_list(self, room_id, user):
-- 
cgit 1.4.1


From 02553901ce94461c6f140efc804443069b97f401 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 17 Dec 2019 11:44:32 +0000
Subject: Remove unused `get_pagination_rows` methods. (#6557)

Remove unused get_pagination_rows methods
---
 changelog.d/6557.misc            |  1 +
 synapse/handlers/account_data.py |  3 ---
 synapse/handlers/room.py         | 12 ------------
 synapse/handlers/typing.py       |  3 ---
 4 files changed, 1 insertion(+), 18 deletions(-)
 create mode 100644 changelog.d/6557.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6557.misc b/changelog.d/6557.misc
new file mode 100644
index 0000000000..80e7eaedb8
--- /dev/null
+++ b/changelog.d/6557.misc
@@ -0,0 +1 @@
+Remove unused `get_pagination_rows` methods from `EventSource` classes.
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 20ec1ca01b..a8d3fbc6de 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -50,6 +50,3 @@ class AccountDataEventSource(object):
                 )
 
         return results, current_stream_id
-
-    async def get_pagination_rows(self, user, config, key):
-        return [], config.to_id
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 22768e97ff..2d7925547d 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -1008,15 +1008,3 @@ class RoomEventSource(object):
 
     def get_current_key_for_room(self, room_id):
         return self.store.get_room_events_max_id(room_id)
-
-    @defer.inlineCallbacks
-    def get_pagination_rows(self, user, config, key):
-        events, next_key = yield self.store.paginate_room_events(
-            room_id=key,
-            from_key=config.from_key,
-            to_key=config.to_key,
-            direction=config.direction,
-            limit=config.limit,
-        )
-
-        return (events, next_key)
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 6f78454322..b635c339ed 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -317,6 +317,3 @@ class TypingNotificationEventSource(object):
 
     def get_current_key(self):
         return self.get_typing_handler()._latest_room_serial
-
-    def get_pagination_rows(self, user, pagination_config, key):
-        return [], pagination_config.from_key
-- 
cgit 1.4.1


From 50294225300602ec91712963736a523738195a01 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 17 Dec 2019 15:06:08 +0000
Subject: Fix bug where we added duplicate event IDs as auth_events (#6560)
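
A minimal sketch of the bug: for a membership event where the sender is also
the state_key (the common case of a user changing their own membership), the
old list-based `auth_types_for_event` produced the same (type, state_key) pair
twice, so both copies ended up in `auth_events`. Building a set instead
deduplicates for free:

    sender = state_key = "@alice:example.org"

    # Old behaviour: the list accumulates a duplicate entry for self-membership.
    auth_types_list = [
        ("m.room.power_levels", ""),
        ("m.room.member", sender),
        ("m.room.create", ""),
        ("m.room.join_rules", ""),
        ("m.room.member", state_key),  # duplicate of the sender entry above
    ]
    assert len(auth_types_list) == 5

    # New behaviour: a set silently collapses the duplicate.
    auth_types = set(auth_types_list)
    assert len(auth_types) == 4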

---
 changelog.d/6560.bugfix |  1 +
 synapse/event_auth.py   | 15 ++++++++-------
 2 files changed, 9 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6560.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6560.bugfix b/changelog.d/6560.bugfix
new file mode 100644
index 0000000000..e75639f5b4
--- /dev/null
+++ b/changelog.d/6560.bugfix
@@ -0,0 +1 @@
+Fix a cause of state resets in room versions 2 onwards.
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index d184b0273b..350ed9351f 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Set, Tuple
 
 from canonicaljson import encode_canonical_json
 from signedjson.key import decode_verify_key_bytes
@@ -637,7 +638,7 @@ def get_public_keys(invite_event):
     return public_keys
 
 
-def auth_types_for_event(event):
+def auth_types_for_event(event) -> Set[Tuple[str, str]]:
     """Given an event, return a list of (EventType, StateKey) that may be
     needed to auth the event. The returned list may be a superset of what
     would actually be required depending on the full state of the room.
@@ -646,20 +647,20 @@ def auth_types_for_event(event):
     actually auth the event.
     """
     if event.type == EventTypes.Create:
-        return []
+        return set()
 
-    auth_types = [
+    auth_types = {
         (EventTypes.PowerLevels, ""),
         (EventTypes.Member, event.sender),
         (EventTypes.Create, ""),
-    ]
+    }
 
     if event.type == EventTypes.Member:
         membership = event.content["membership"]
         if membership in [Membership.JOIN, Membership.INVITE]:
-            auth_types.append((EventTypes.JoinRules, ""))
+            auth_types.add((EventTypes.JoinRules, ""))
 
-        auth_types.append((EventTypes.Member, event.state_key))
+        auth_types.add((EventTypes.Member, event.state_key))
 
         if membership == Membership.INVITE:
             if "third_party_invite" in event.content:
@@ -667,6 +668,6 @@ def auth_types_for_event(event):
                     EventTypes.ThirdPartyInvite,
                     event.content["third_party_invite"]["signed"]["token"],
                 )
-                auth_types.append(key)
+                auth_types.add(key)
 
     return auth_types
-- 
cgit 1.4.1


From 2284eb3a533a2df04784df08da28e67d6588a5ea Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 18 Dec 2019 10:45:12 +0000
Subject: Add database config class (#6513)

This encapsulates config for a given database and is the way to get new
connections.
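
For example, a caller might now obtain a connection like this (a sketch based
on the names introduced in this diff; the database path is illustrative):

    from synapse.config.database import DatabaseConnectionConfig
    from synapse.storage.database import make_conn
    from synapse.storage.engines import create_engine

    db_config = DatabaseConnectionConfig(
        "master",
        {"name": "sqlite3", "args": {"database": "/data/homeserver.db"}},
        data_stores=["main"],
    )
    engine = create_engine(db_config.config)
    # A plain PEP-249 connection; any cp_* pool args are stripped by make_conn.
    conn = make_conn(db_config, engine)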
---
 changelog.d/6513.misc                          |  1 +
 scripts-dev/update_database                    |  9 +--
 scripts/synapse_port_db                        | 58 ++++++++-----------
 synapse/config/database.py                     | 78 ++++++++++++++++++++------
 synapse/handlers/presence.py                   |  2 +-
 synapse/server.py                              | 41 ++------------
 synapse/storage/_base.py                       |  2 +-
 synapse/storage/data_stores/__init__.py        | 40 ++++++++++---
 synapse/storage/data_stores/main/client_ips.py |  2 +-
 synapse/storage/database.py                    | 45 ++++++++++++++-
 synapse/storage/engines/sqlite.py              | 16 +++++-
 synapse/storage/prepare_database.py            |  7 +--
 tests/handlers/test_typing.py                  | 39 ++++++-------
 tests/replication/slave/storage/_base.py       |  6 +-
 tests/server.py                                | 55 +++++++++---------
 tests/storage/test_appservice.py               | 37 ++++++++----
 tests/storage/test_base.py                     | 14 +++--
 tests/storage/test_registration.py             |  1 -
 tests/utils.py                                 | 43 +++++---------
 19 files changed, 287 insertions(+), 209 deletions(-)
 create mode 100644 changelog.d/6513.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6513.misc b/changelog.d/6513.misc
new file mode 100644
index 0000000000..36700f5657
--- /dev/null
+++ b/changelog.d/6513.misc
@@ -0,0 +1 @@
+Remove all assumptions of there being a single physical DB, apart from in `synapse.config`.
diff --git a/scripts-dev/update_database b/scripts-dev/update_database
index 23017c21f8..1d62f0403a 100755
--- a/scripts-dev/update_database
+++ b/scripts-dev/update_database
@@ -26,7 +26,6 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.server import HomeServer
 from synapse.storage import DataStore
-from synapse.storage.prepare_database import prepare_database
 
 logger = logging.getLogger("update_database")
 
@@ -77,12 +76,8 @@ if __name__ == "__main__":
     # Instantiate and initialise the homeserver object.
     hs = MockHomeserver(config)
 
-    db_conn = hs.get_db_conn()
-    # Update the database to the latest schema.
-    prepare_database(db_conn, hs.database_engine, config=config)
-    db_conn.commit()
-
-    # setup instantiates the store within the homeserver object.
+    # Setup instantiates the store within the homeserver object and updates the
+    # DB.
     hs.setup()
     store = hs.get_datastore()
 
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index e393a9b2f7..5b5368988c 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -30,6 +30,7 @@ import yaml
 from twisted.enterprise import adbapi
 from twisted.internet import defer, reactor
 
+from synapse.config.database import DatabaseConnectionConfig
 from synapse.config.homeserver import HomeServerConfig
 from synapse.logging.context import PreserveLoggingContext
 from synapse.storage._base import LoggingTransaction
@@ -55,7 +56,7 @@ from synapse.storage.data_stores.main.stats import StatsStore
 from synapse.storage.data_stores.main.user_directory import (
     UserDirectoryBackgroundUpdateStore,
 )
-from synapse.storage.database import Database
+from synapse.storage.database import Database, make_conn
 from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
 from synapse.util import Clock
@@ -165,23 +166,17 @@ class Store(
 
 
 class MockHomeserver:
-    def __init__(self, config, database_engine, db_conn, db_pool):
-        self.database_engine = database_engine
-        self.db_conn = db_conn
-        self.db_pool = db_pool
+    def __init__(self, config):
         self.clock = Clock(reactor)
         self.config = config
         self.hostname = config.server_name
 
-    def get_db_conn(self):
-        return self.db_conn
-
-    def get_db_pool(self):
-        return self.db_pool
-
     def get_clock(self):
         return self.clock
 
+    def get_reactor(self):
+        return reactor
+
 
 class Porter(object):
     def __init__(self, **kwargs):
@@ -445,45 +440,36 @@ class Porter(object):
             else:
                 return
 
-    def setup_db(self, db_config, database_engine):
-        db_conn = database_engine.module.connect(
-            **{
-                k: v
-                for k, v in db_config.get("args", {}).items()
-                if not k.startswith("cp_")
-            }
-        )
-
-        prepare_database(db_conn, database_engine, config=None)
+    def setup_db(self, db_config: DatabaseConnectionConfig, engine):
+        db_conn = make_conn(db_config, engine)
+        prepare_database(db_conn, engine, config=None)
 
         db_conn.commit()
 
         return db_conn
 
     @defer.inlineCallbacks
-    def build_db_store(self, config):
+    def build_db_store(self, db_config: DatabaseConnectionConfig):
         """Builds and returns a database store using the provided configuration.
 
         Args:
-            config: The database configuration, i.e. a dict following the structure of
-                the "database" section of Synapse's configuration file.
+            config: The database configuration
 
         Returns:
             The built Store object.
         """
-        engine = create_engine(config)
-
-        self.progress.set_state("Preparing %s" % config["name"])
-        conn = self.setup_db(config, engine)
+        self.progress.set_state("Preparing %s" % db_config.config["name"])
 
-        db_pool = adbapi.ConnectionPool(config["name"], **config["args"])
+        engine = create_engine(db_config.config)
+        conn = self.setup_db(db_config, engine)
 
-        hs = MockHomeserver(self.hs_config, engine, conn, db_pool)
+        hs = MockHomeserver(self.hs_config)
 
-        store = Store(Database(hs), conn, hs)
+        store = Store(Database(hs, db_config, engine), conn, hs)
 
         yield store.db.runInteraction(
-            "%s_engine.check_database" % config["name"], engine.check_database,
+            "%s_engine.check_database" % db_config.config["name"],
+            engine.check_database,
         )
 
         return store
@@ -509,7 +495,11 @@ class Porter(object):
     @defer.inlineCallbacks
     def run(self):
         try:
-            self.sqlite_store = yield self.build_db_store(self.sqlite_config)
+            self.sqlite_store = yield self.build_db_store(
+                DatabaseConnectionConfig(
+                    "master", self.sqlite_config, data_stores=["main"]
+                )
+            )
 
             # Check if all background updates are done, abort if not.
             updates_complete = (
@@ -524,7 +514,7 @@ class Porter(object):
                 defer.returnValue(None)
 
             self.postgres_store = yield self.build_db_store(
-                self.hs_config.database_config
+                self.hs_config.get_single_database()
             )
 
             yield self.run_background_updates_on_postgres()
diff --git a/synapse/config/database.py b/synapse/config/database.py
index 0e2509f0b1..5f2f3c7cfd 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -12,12 +12,43 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
 import os
 from textwrap import indent
+from typing import List
 
 import yaml
 
-from ._base import Config
+from synapse.config._base import Config, ConfigError
+
+logger = logging.getLogger(__name__)
+
+
+class DatabaseConnectionConfig:
+    """Contains the connection config for a particular database.
+
+    Args:
+        name: A label for the database, used for logging.
+        db_config: The config for a particular database, as per `database`
+            section of main config. Has two fields: `name` for database
+            module name, and `args` for the args to give to the database
+            connector.
+        data_stores: The list of data stores that should be provisioned on the
+            database.
+    """
+
+    def __init__(self, name: str, db_config: dict, data_stores: List[str]):
+        if db_config["name"] not in ("sqlite3", "psycopg2"):
+            raise ConfigError("Unsupported database type %r" % (db_config["name"],))
+
+        if db_config["name"] == "sqlite3":
+            db_config.setdefault("args", {}).update(
+                {"cp_min": 1, "cp_max": 1, "check_same_thread": False}
+            )
+
+        self.name = name
+        self.config = db_config
+        self.data_stores = data_stores
 
 
 class DatabaseConfig(Config):
@@ -26,20 +57,14 @@ class DatabaseConfig(Config):
     def read_config(self, config, **kwargs):
         self.event_cache_size = self.parse_size(config.get("event_cache_size", "10K"))
 
-        self.database_config = config.get("database")
+        database_config = config.get("database")
 
-        if self.database_config is None:
-            self.database_config = {"name": "sqlite3", "args": {}}
+        if database_config is None:
+            database_config = {"name": "sqlite3", "args": {}}
 
-        name = self.database_config.get("name", None)
-        if name == "psycopg2":
-            pass
-        elif name == "sqlite3":
-            self.database_config.setdefault("args", {}).update(
-                {"cp_min": 1, "cp_max": 1, "check_same_thread": False}
-            )
-        else:
-            raise RuntimeError("Unsupported database type '%s'" % (name,))
+        self.databases = [
+            DatabaseConnectionConfig("master", database_config, data_stores=["main"])
+        ]
 
         self.set_databasepath(config.get("database_path"))
 
@@ -76,11 +101,24 @@ class DatabaseConfig(Config):
         self.set_databasepath(args.database_path)
 
     def set_databasepath(self, database_path):
+        if database_path is None:
+            return
+
         if database_path != ":memory:":
             database_path = self.abspath(database_path)
-        if self.database_config.get("name", None) == "sqlite3":
-            if database_path is not None:
-                self.database_config["args"]["database"] = database_path
+
+        # We only support setting a database path if we have a single sqlite3
+        # database.
+        if len(self.databases) != 1:
+            raise ConfigError("Cannot specify 'database_path' with multiple databases")
+
+        database = self.get_single_database()
+        if database.config["name"] != "sqlite3":
+            # We don't raise here as we haven't done so before for this case.
+            logger.warn("Ignoring 'database_path' for non-sqlite3 database")
+            return
+
+        database.config["args"]["database"] = database_path
 
     @staticmethod
     def add_arguments(parser):
@@ -91,3 +129,11 @@ class DatabaseConfig(Config):
             metavar="SQLITE_DATABASE_PATH",
             help="The path to a sqlite database to use.",
         )
+
+    def get_single_database(self) -> DatabaseConnectionConfig:
+        """Returns the database if there is only one, useful for e.g. tests
+        """
+        if len(self.databases) != 1:
+            raise Exception("More than one database exists")
+
+        return self.databases[0]
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index eda15bc623..240c4add12 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -230,7 +230,7 @@ class PresenceHandler(object):
         is some spurious presence changes that will self-correct.
         """
         # If the DB pool has already terminated, don't try updating
-        if not self.hs.get_db_pool().running:
+        if not self.store.database.is_running():
             return
 
         logger.info(
diff --git a/synapse/server.py b/synapse/server.py
index 5021068ce0..7926867b77 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -25,7 +25,6 @@ import abc
 import logging
 import os
 
-from twisted.enterprise import adbapi
 from twisted.mail.smtp import sendmail
 from twisted.web.client import BrowserLikePolicyForHTTPS
 
@@ -98,7 +97,6 @@ from synapse.server_notices.worker_server_notices_sender import (
 )
 from synapse.state import StateHandler, StateResolutionHandler
 from synapse.storage import DataStores, Storage
-from synapse.storage.engines import create_engine
 from synapse.streams.events import EventSources
 from synapse.util import Clock
 from synapse.util.distributor import Distributor
@@ -134,7 +132,6 @@ class HomeServer(object):
 
     DEPENDENCIES = [
         "http_client",
-        "db_pool",
         "federation_client",
         "federation_server",
         "handlers",
@@ -233,12 +230,6 @@ class HomeServer(object):
         self.admin_redaction_ratelimiter = Ratelimiter()
         self.registration_ratelimiter = Ratelimiter()
 
-        self.database_engine = create_engine(config.database_config)
-        config.database_config.setdefault("args", {})[
-            "cp_openfun"
-        ] = self.database_engine.on_new_connection
-        self.db_config = config.database_config
-
         self.datastores = None
 
         # Other kwargs are explicit dependencies
@@ -247,10 +238,8 @@ class HomeServer(object):
 
     def setup(self):
         logger.info("Setting up.")
-        with self.get_db_conn() as conn:
-            self.datastores = DataStores(self.DATASTORE_CLASS, conn, self)
-            conn.commit()
         self.start_time = int(self.get_clock().time())
+        self.datastores = DataStores(self.DATASTORE_CLASS, self)
         logger.info("Finished setting up.")
 
     def setup_master(self):
@@ -284,6 +273,9 @@ class HomeServer(object):
     def get_datastore(self):
         return self.datastores.main
 
+    def get_datastores(self):
+        return self.datastores
+
     def get_config(self):
         return self.config
 
@@ -433,31 +425,6 @@ class HomeServer(object):
         )
         return MatrixFederationHttpClient(self, tls_client_options_factory)
 
-    def build_db_pool(self):
-        name = self.db_config["name"]
-
-        return adbapi.ConnectionPool(
-            name, cp_reactor=self.get_reactor(), **self.db_config.get("args", {})
-        )
-
-    def get_db_conn(self, run_new_connection=True):
-        """Makes a new connection to the database, skipping the db pool
-
-        Returns:
-            Connection: a connection object implementing the PEP-249 spec
-        """
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v
-            for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def build_media_repository_resource(self):
         # build the media repo resource. This indirects through the HomeServer
         # to ensure that we only have a single instance of
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index b7637b5dc0..88546ad614 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -40,7 +40,7 @@ class SQLBaseStore(object):
     def __init__(self, database: Database, db_conn, hs):
         self.hs = hs
         self._clock = hs.get_clock()
-        self.database_engine = hs.database_engine
+        self.database_engine = database.engine
         self.db = database
         self.rand = random.SystemRandom()
 
diff --git a/synapse/storage/data_stores/__init__.py b/synapse/storage/data_stores/__init__.py
index cafedd5c0d..0983e059c0 100644
--- a/synapse/storage/data_stores/__init__.py
+++ b/synapse/storage/data_stores/__init__.py
@@ -13,24 +13,50 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.database import Database
+import logging
+
+from synapse.storage.database import Database, make_conn
+from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
 
+logger = logging.getLogger(__name__)
+
 
 class DataStores(object):
     """The various data stores.
 
     These are low level interfaces to physical databases.
+
+    Attributes:
+        main (DataStore)
     """
 
-    def __init__(self, main_store_class, db_conn, hs):
+    def __init__(self, main_store_class, hs):
         # Note we pass in the main store class here as workers use a different main
         # store.
-        database = Database(hs)
 
-        # Check that db is correctly configured.
-        database.engine.check_database(db_conn.cursor())
+        self.databases = []
+
+        for database_config in hs.config.database.databases:
+            db_name = database_config.name
+            engine = create_engine(database_config.config)
+
+            with make_conn(database_config, engine) as db_conn:
+                logger.info("Preparing database %r...", db_name)
+
+                engine.check_database(db_conn.cursor())
+                prepare_database(
+                    db_conn, engine, hs.config, data_stores=database_config.data_stores,
+                )
+
+                database = Database(hs, database_config, engine)
+
+                if "main" in database_config.data_stores:
+                    logger.info("Starting 'main' data store")
+                    self.main = main_store_class(database, db_conn, hs)
+
+                db_conn.commit()
 
-        prepare_database(db_conn, database.engine, config=hs.config)
+                self.databases.append(database)
 
-        self.main = main_store_class(database, db_conn, hs)
+                logger.info("Database %r prepared", db_name)
diff --git a/synapse/storage/data_stores/main/client_ips.py b/synapse/storage/data_stores/main/client_ips.py
index add3037b69..13f4c9c72e 100644
--- a/synapse/storage/data_stores/main/client_ips.py
+++ b/synapse/storage/data_stores/main/client_ips.py
@@ -412,7 +412,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
     def _update_client_ips_batch(self):
 
         # If the DB pool has already terminated, don't try updating
-        if not self.hs.get_db_pool().running:
+        if not self.db.is_running():
             return
 
         to_update = self._batch_row_update
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index ec19ae1d9d..1003dd84a5 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -24,9 +24,11 @@ from six.moves import intern, range
 
 from prometheus_client import Histogram
 
+from twisted.enterprise import adbapi
 from twisted.internet import defer
 
 from synapse.api.errors import StoreError
+from synapse.config.database import DatabaseConnectionConfig
 from synapse.logging.context import LoggingContext, make_deferred_yieldable
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.background_updates import BackgroundUpdater
@@ -74,6 +76,37 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
 }
 
 
+def make_pool(
+    reactor, db_config: DatabaseConnectionConfig, engine
+) -> adbapi.ConnectionPool:
+    """Get the connection pool for the database.
+    """
+
+    return adbapi.ConnectionPool(
+        db_config.config["name"],
+        cp_reactor=reactor,
+        cp_openfun=engine.on_new_connection,
+        **db_config.config.get("args", {})
+    )
+
+
+def make_conn(db_config: DatabaseConnectionConfig, engine):
+    """Make a new connection to the database and return it.
+
+    Returns:
+        Connection
+    """
+
+    db_params = {
+        k: v
+        for k, v in db_config.config.get("args", {}).items()
+        if not k.startswith("cp_")
+    }
+    db_conn = engine.module.connect(**db_params)
+    engine.on_new_connection(db_conn)
+    return db_conn
+
+
 class LoggingTransaction(object):
     """An object that almost-transparently proxies for the 'txn' object
     passed to the constructor. Adds logging and metrics to the .execute()
@@ -218,10 +251,11 @@ class Database(object):
 
     _TXN_ID = 0
 
-    def __init__(self, hs):
+    def __init__(self, hs, database_config: DatabaseConnectionConfig, engine):
         self.hs = hs
         self._clock = hs.get_clock()
-        self._db_pool = hs.get_db_pool()
+        self._database_config = database_config
+        self._db_pool = make_pool(hs.get_reactor(), database_config, engine)
 
         self.updates = BackgroundUpdater(hs, self)
 
@@ -234,7 +268,7 @@ class Database(object):
         #   to watch it
         self._txn_perf_counters = PerformanceCounters()
 
-        self.engine = hs.database_engine
+        self.engine = engine
 
         # A set of tables that are not safe to use native upserts in.
         self._unsafe_to_upsert_tables = set(UNIQUE_INDEX_BACKGROUND_UPDATES.keys())
@@ -255,6 +289,11 @@ class Database(object):
                 self._check_safe_to_upsert,
             )
 
+    def is_running(self):
+        """Is the database pool currently running
+        """
+        return self._db_pool.running
+
     @defer.inlineCallbacks
     def _check_safe_to_upsert(self):
         """
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index ddad17dc5a..df039a072d 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -16,8 +16,6 @@
 import struct
 import threading
 
-from synapse.storage.prepare_database import prepare_database
-
 
 class Sqlite3Engine(object):
     single_threaded = True
@@ -25,6 +23,9 @@ class Sqlite3Engine(object):
     def __init__(self, database_module, database_config):
         self.module = database_module
 
+        database = database_config.get("args", {}).get("database")
+        self._is_in_memory = database in (None, ":memory:",)
+
         # The current max state_group, or None if we haven't looked
         # in the DB yet.
         self._current_state_group_id = None
@@ -59,7 +60,16 @@ class Sqlite3Engine(object):
         return sql
 
     def on_new_connection(self, db_conn):
-        prepare_database(db_conn, self, config=None)
+
+        # We need to import here to avoid an import loop.
+        from synapse.storage.prepare_database import prepare_database
+
+        if self._is_in_memory:
+            # In memory databases need to be rebuilt each time. Ideally we'd
+            # reuse the same connection as we do when starting up, but that
+            # would involve using adbapi before we have started the reactor.
+            prepare_database(db_conn, self, config=None)
+
         db_conn.create_function("rank", 1, _rank)
 
     def is_deadlock(self, error):
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 731e1c9d9c..b4194b44ee 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -41,7 +41,7 @@ class UpgradeDatabaseException(PrepareDatabaseException):
     pass
 
 
-def prepare_database(db_conn, database_engine, config):
+def prepare_database(db_conn, database_engine, config, data_stores=["main"]):
     """Prepares a database for usage. Will either create all necessary tables
     or upgrade from an older schema version.
 
@@ -54,11 +54,10 @@ def prepare_database(db_conn, database_engine, config):
         config (synapse.config.homeserver.HomeServerConfig|None):
             application config, or None if we are connecting to an existing
             database which we expect to be configured already
+        data_stores (list[str]): The names of the data stores that will be
+            used with this database. Defaults to ["main"].
     """
 
-    # For now we only have the one datastore.
-    data_stores = ["main"]
-
     try:
         cur = db_conn.cursor()
         version_info = _get_or_create_schema_state(cur, database_engine)
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 92b8726093..596ddc6970 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -64,28 +64,29 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         mock_federation_client = Mock(spec=["put_json"])
         mock_federation_client.put_json.return_value = defer.succeed((200, "OK"))
 
+        datastores = Mock()
+        datastores.main = Mock(
+            spec=[
+                # Bits that Federation needs
+                "prep_send_transaction",
+                "delivered_txn",
+                "get_received_txn_response",
+                "set_received_txn_response",
+                "get_destination_retry_timings",
+                "get_devices_by_remote",
+                # Bits that user_directory needs
+                "get_user_directory_stream_pos",
+                "get_current_state_deltas",
+                "get_device_updates_by_remote",
+            ]
+        )
+
         hs = self.setup_test_homeserver(
-            datastore=(
-                Mock(
-                    spec=[
-                        # Bits that Federation needs
-                        "prep_send_transaction",
-                        "delivered_txn",
-                        "get_received_txn_response",
-                        "set_received_txn_response",
-                        "get_destination_retry_timings",
-                        "get_device_updates_by_remote",
-                        # Bits that user_directory needs
-                        "get_user_directory_stream_pos",
-                        "get_current_state_deltas",
-                    ]
-                )
-            ),
-            notifier=Mock(),
-            http_client=mock_federation_client,
-            keyring=mock_keyring,
+            notifier=Mock(), http_client=mock_federation_client, keyring=mock_keyring
         )
 
+        hs.datastores = datastores
+
         return hs
 
     def prepare(self, reactor, clock, hs):
diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py
index 3dae83c543..2a1e7c7166 100644
--- a/tests/replication/slave/storage/_base.py
+++ b/tests/replication/slave/storage/_base.py
@@ -20,7 +20,7 @@ from synapse.replication.tcp.client import (
     ReplicationClientHandler,
 )
 from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
-from synapse.storage.database import Database
+from synapse.storage.database import make_conn
 
 from tests import unittest
 from tests.server import FakeTransport
@@ -41,10 +41,12 @@ class BaseSlavedStoreTestCase(unittest.HomeserverTestCase):
 
     def prepare(self, reactor, clock, hs):
 
+        db_config = hs.config.database.get_single_database()
         self.master_store = self.hs.get_datastore()
         self.storage = hs.get_storage()
+        database = hs.get_datastores().databases[0]
         self.slaved_store = self.STORE_TYPE(
-            Database(hs), self.hs.get_db_conn(), self.hs
+            database, make_conn(db_config, database.engine), self.hs
         )
         self.event_id = 0
 
diff --git a/tests/server.py b/tests/server.py
index 2b7cf4242e..a554dfdd57 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -302,41 +302,42 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs):
     Set up a synchronous test server, driven by the reactor used by
     the homeserver.
     """
-    d = _sth(cleanup_func, *args, **kwargs).result
+    server = _sth(cleanup_func, *args, **kwargs)
 
-    if isinstance(d, Failure):
-        d.raiseException()
+    database = server.config.database.get_single_database()
 
     # Make the thread pool synchronous.
-    clock = d.get_clock()
-    pool = d.get_db_pool()
-
-    def runWithConnection(func, *args, **kwargs):
-        return threads.deferToThreadPool(
-            pool._reactor,
-            pool.threadpool,
-            pool._runWithConnection,
-            func,
-            *args,
-            **kwargs
-        )
-
-    def runInteraction(interaction, *args, **kwargs):
-        return threads.deferToThreadPool(
-            pool._reactor,
-            pool.threadpool,
-            pool._runInteraction,
-            interaction,
-            *args,
-            **kwargs
-        )
+    clock = server.get_clock()
+
+    for database in server.get_datastores().databases:
+        pool = database._db_pool
+
+        # Bind `pool` as a default argument so the closure captures this
+        # iteration's pool rather than the loop variable's final value.
+        def runWithConnection(func, *args, pool=pool, **kwargs):
+            return threads.deferToThreadPool(
+                pool._reactor,
+                pool.threadpool,
+                pool._runWithConnection,
+                func,
+                *args,
+                **kwargs
+            )
+
+        # `pool` is bound as a default argument for the same reason as above.
+        def runInteraction(interaction, *args, pool=pool, **kwargs):
+            return threads.deferToThreadPool(
+                pool._reactor,
+                pool.threadpool,
+                pool._runInteraction,
+                interaction,
+                *args,
+                **kwargs
+            )
 
-    if pool:
         pool.runWithConnection = runWithConnection
         pool.runInteraction = runInteraction
         pool.threadpool = ThreadPool(clock._reactor)
         pool.running = True
-    return d
+
+    return server
 
 
 def get_clock():
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index 2e521e9ab7..fd52512696 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -28,7 +28,7 @@ from synapse.storage.data_stores.main.appservice import (
     ApplicationServiceStore,
     ApplicationServiceTransactionStore,
 )
-from synapse.storage.database import Database
+from synapse.storage.database import Database, make_conn
 
 from tests import unittest
 from tests.utils import setup_test_homeserver
@@ -55,8 +55,10 @@ class ApplicationServiceStoreTestCase(unittest.TestCase):
         self._add_appservice("token2", "as2", "some_url", "some_hs_token", "bob")
         self._add_appservice("token3", "as3", "some_url", "some_hs_token", "bob")
         # must be done after inserts
-        database = Database(hs)
-        self.store = ApplicationServiceStore(database, hs.get_db_conn(), hs)
+        database = hs.get_datastores().databases[0]
+        self.store = ApplicationServiceStore(
+            database, make_conn(database._database_config, database.engine), hs
+        )
 
     def tearDown(self):
         # TODO: suboptimal that we need to create files for tests!
@@ -111,9 +113,6 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         hs.config.event_cache_size = 1
         hs.config.password_providers = []
 
-        self.db_pool = hs.get_db_pool()
-        self.engine = hs.database_engine
-
         self.as_list = [
             {"token": "token1", "url": "https://matrix-as.org", "id": "id_1"},
             {"token": "alpha_tok", "url": "https://alpha.com", "id": "id_alpha"},
@@ -125,8 +124,15 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
 
         self.as_yaml_files = []
 
-        database = Database(hs)
-        self.store = TestTransactionStore(database, hs.get_db_conn(), hs)
+        # We assume there is only one database in these tests
+        database = hs.get_datastores().databases[0]
+        self.db_pool = database._db_pool
+        self.engine = database.engine
+
+        db_config = hs.config.get_single_database()
+        self.store = TestTransactionStore(
+            database, make_conn(db_config, self.engine), hs
+        )
 
     def _add_service(self, url, as_token, id):
         as_yaml = dict(
@@ -419,7 +425,10 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase):
         hs.config.event_cache_size = 1
         hs.config.password_providers = []
 
-        ApplicationServiceStore(Database(hs), hs.get_db_conn(), hs)
+        database = hs.get_datastores().databases[0]
+        ApplicationServiceStore(
+            database, make_conn(database._database_config, database.engine), hs
+        )
 
     @defer.inlineCallbacks
     def test_duplicate_ids(self):
@@ -435,7 +444,10 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase):
         hs.config.password_providers = []
 
         with self.assertRaises(ConfigError) as cm:
-            ApplicationServiceStore(Database(hs), hs.get_db_conn(), hs)
+            database = hs.get_datastores().databases[0]
+            ApplicationServiceStore(
+                database, make_conn(database._database_config, database.engine), hs
+            )
 
         e = cm.exception
         self.assertIn(f1, str(e))
@@ -456,7 +468,10 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase):
         hs.config.password_providers = []
 
         with self.assertRaises(ConfigError) as cm:
-            ApplicationServiceStore(Database(hs), hs.get_db_conn(), hs)
+            database = hs.get_datastores().databases[0]
+            ApplicationServiceStore(
+                database, make_conn(database._database_config, database.engine), hs
+            )
 
         e = cm.exception
         self.assertIn(f1, str(e))
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index 537cfe9f64..cdee0a9e60 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -52,15 +52,17 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         config = Mock()
         config._disable_native_upserts = True
         config.event_cache_size = 1
-        config.database_config = {"name": "sqlite3"}
-        engine = create_engine(config.database_config)
+        hs = TestHomeServer("test", config=config)
+
+        sqlite_config = {"name": "sqlite3"}
+        engine = create_engine(sqlite_config)
         fake_engine = Mock(wraps=engine)
         fake_engine.can_native_upsert = False
-        hs = TestHomeServer(
-            "test", db_pool=self.db_pool, config=config, database_engine=fake_engine
-        )
 
-        self.datastore = SQLBaseStore(Database(hs), None, hs)
+        db = Database(Mock(), Mock(config=sqlite_config), fake_engine)
+        db._db_pool = self.db_pool
+
+        self.datastore = SQLBaseStore(db, None, hs)
 
     @defer.inlineCallbacks
     def test_insert_1col(self):
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index 4578cc3b60..ed5786865a 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -26,7 +26,6 @@ class RegistrationStoreTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def setUp(self):
         hs = yield setup_test_homeserver(self.addCleanup)
-        self.db_pool = hs.get_db_pool()
 
         self.store = hs.get_datastore()
 
diff --git a/tests/utils.py b/tests/utils.py
index 585f305b9a..9f5bf40b4b 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -30,6 +30,7 @@ from twisted.internet import defer, reactor
 from synapse.api.constants import EventTypes
 from synapse.api.errors import CodeMessageException, cs_error
 from synapse.api.room_versions import RoomVersions
+from synapse.config.database import DatabaseConnectionConfig
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.server import DEFAULT_ROOM_VERSION
 from synapse.federation.transport import server as federation_server
@@ -177,7 +178,6 @@ class TestHomeServer(HomeServer):
     DATASTORE_CLASS = DataStore
 
 
-@defer.inlineCallbacks
 def setup_test_homeserver(
     cleanup_func,
     name="test",
@@ -214,7 +214,7 @@ def setup_test_homeserver(
     if USE_POSTGRES_FOR_TESTS:
         test_db = "synapse_test_%s" % uuid.uuid4().hex
 
-        config.database_config = {
+        database_config = {
             "name": "psycopg2",
             "args": {
                 "database": test_db,
@@ -226,12 +226,15 @@ def setup_test_homeserver(
             },
         }
     else:
-        config.database_config = {
+        database_config = {
             "name": "sqlite3",
             "args": {"database": ":memory:", "cp_min": 1, "cp_max": 1},
         }
 
-    db_engine = create_engine(config.database_config)
+    database = DatabaseConnectionConfig("master", database_config, ["main"])
+    config.database.databases = [database]
+
+    db_engine = create_engine(database.config)
 
     # Create the database before we actually try and connect to it, based off
     # the template database we generate in setupdb()
@@ -251,11 +254,6 @@ def setup_test_homeserver(
         cur.close()
         db_conn.close()
 
-    # we need to configure the connection pool to run the on_new_connection
-    # function, so that we can test code that uses custom sqlite functions
-    # (like rank).
-    config.database_config["args"]["cp_openfun"] = db_engine.on_new_connection
-
     if datastore is None:
         hs = homeserverToUse(
             name,
@@ -267,21 +265,19 @@ def setup_test_homeserver(
             **kargs
         )
 
-        # Prepare the DB on SQLite -- PostgreSQL is a copy of an already up to
-        # date db
-        if not isinstance(db_engine, PostgresEngine):
-            db_conn = hs.get_db_conn()
-            yield prepare_database(db_conn, db_engine, config)
-            db_conn.commit()
-            db_conn.close()
+        hs.setup()
+        if homeserverToUse.__name__ == "TestHomeServer":
+            hs.setup_master()
+
+        if isinstance(db_engine, PostgresEngine):
+            database = hs.get_datastores().databases[0]
 
-        else:
             # We need to do cleanup on PostgreSQL
             def cleanup():
                 import psycopg2
 
                 # Close all the db pools
-                hs.get_db_pool().close()
+                database._db_pool.close()
 
                 dropped = False
 
@@ -320,23 +316,12 @@ def setup_test_homeserver(
                 # Register the cleanup hook
                 cleanup_func(cleanup)
 
-        hs.setup()
-        if homeserverToUse.__name__ == "TestHomeServer":
-            hs.setup_master()
     else:
-        # If we have been given an explicit datastore we probably want to mock
-        # out the DataStores somehow too. This all feels a bit wrong, but then
-        # mocking the stores feels wrong too.
-        datastores = Mock(datastore=datastore)
-
         hs = homeserverToUse(
             name,
-            db_pool=None,
             datastore=datastore,
-            datastores=datastores,
             config=config,
             version_string="Synapse/tests",
-            database_engine=db_engine,
             tls_server_context_factory=Mock(),
             tls_client_options_factory=Mock(),
             reactor=reactor,
-- 
cgit 1.4.1


From 7963ca83cbefb782a94c47fd65ad6e94d05dc5d1 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 18 Dec 2019 11:13:33 +0000
Subject: Add delta file to fix missing default table data (#6555)
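
The delta added below uses the `INSERT ... SELECT ... WHERE NOT EXISTS`
pattern, so the default row is only inserted when the table is empty, making
the migration safe to run on both old and freshly created databases. A quick
standalone demonstration (the table name matches the delta; everything else is
illustrative):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE device_max_stream_id (stream_id BIGINT NOT NULL)")

    insert_if_missing = (
        "INSERT INTO device_max_stream_id (stream_id) "
        "SELECT 0 WHERE NOT EXISTS (SELECT * FROM device_max_stream_id)"
    )

    # The first execution inserts the default row; re-running it is a no-op.
    conn.execute(insert_if_missing)
    conn.execute(insert_if_missing)
    assert conn.execute("SELECT * FROM device_max_stream_id").fetchall() == [(0,)]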

---
 changelog.d/6555.bugfix                              |  1 +
 synapse/storage/data_stores/main/deviceinbox.py      | 17 ++---------------
 .../main/schema/delta/56/device_stream_id_insert.sql | 20 ++++++++++++++++++++
 .../main/schema/full_schemas/54/stream_positions.sql |  1 +
 4 files changed, 24 insertions(+), 15 deletions(-)
 create mode 100644 changelog.d/6555.bugfix
 create mode 100644 synapse/storage/data_stores/main/schema/delta/56/device_stream_id_insert.sql

(limited to 'changelog.d')

diff --git a/changelog.d/6555.bugfix b/changelog.d/6555.bugfix
new file mode 100644
index 0000000000..86a5a56cf6
--- /dev/null
+++ b/changelog.d/6555.bugfix
@@ -0,0 +1 @@
+Fix a missing row in device_max_stream_id that could cause "unable to decrypt" errors after server restart.
\ No newline at end of file
diff --git a/synapse/storage/data_stores/main/deviceinbox.py b/synapse/storage/data_stores/main/deviceinbox.py
index 85cfa16850..0613b49f4a 100644
--- a/synapse/storage/data_stores/main/deviceinbox.py
+++ b/synapse/storage/data_stores/main/deviceinbox.py
@@ -358,21 +358,8 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
     def _add_messages_to_local_device_inbox_txn(
         self, txn, stream_id, messages_by_user_then_device
     ):
-        # Compatible method of performing an upsert
-        sql = "SELECT stream_id FROM device_max_stream_id"
-
-        txn.execute(sql)
-        rows = txn.fetchone()
-        if rows:
-            db_stream_id = rows[0]
-            if db_stream_id < stream_id:
-                # Insert the new stream_id
-                sql = "UPDATE device_max_stream_id SET stream_id = ?"
-        else:
-            # No rows, perform an insert
-            sql = "INSERT INTO device_max_stream_id (stream_id) VALUES (?)"
-
-        txn.execute(sql, (stream_id,))
+        sql = "UPDATE device_max_stream_id" " SET stream_id = ?" " WHERE stream_id < ?"
+        txn.execute(sql, (stream_id, stream_id))
 
         local_by_user_then_device = {}
         for user_id, messages_by_device in messages_by_user_then_device.items():
diff --git a/synapse/storage/data_stores/main/schema/delta/56/device_stream_id_insert.sql b/synapse/storage/data_stores/main/schema/delta/56/device_stream_id_insert.sql
new file mode 100644
index 0000000000..c2f557fde9
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/56/device_stream_id_insert.sql
@@ -0,0 +1,20 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- This line already existed in deltas/35/device_stream_id but was not included in the
+-- 54 full schema SQL. Add some SQL here to insert the missing row if it does not exist
+INSERT INTO device_max_stream_id (stream_id) SELECT 0 WHERE NOT EXISTS (
+    SELECT * from device_max_stream_id
+);
\ No newline at end of file
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/54/stream_positions.sql b/synapse/storage/data_stores/main/schema/full_schemas/54/stream_positions.sql
index c265fd20e2..91d21b2921 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/54/stream_positions.sql
+++ b/synapse/storage/data_stores/main/schema/full_schemas/54/stream_positions.sql
@@ -5,3 +5,4 @@ INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coales
 INSERT INTO user_directory_stream_pos (stream_id) VALUES (0);
 INSERT INTO stats_stream_pos (stream_id) VALUES (0);
 INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0);
+-- device_max_stream_id is handled separately in 56/device_stream_id_insert.sql
\ No newline at end of file
-- 
cgit 1.4.1


From d6752ce5da38d35857fe324800d76a86ee1e64f1 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 18 Dec 2019 14:26:58 +0000
Subject: Clean up startup for the pusher (#6558)

* Remove redundant Python 2 support code

`str.decode()` doesn't exist on Python 3, so presumably this code was doing
nothing.

* Filter out pushers with corrupt data

When we get a row with unparsable JSON, drop the row rather than returning a
row with null `data`, which would then cause an explosion later on.

* Improve logging when we can't start a pusher

Log the pusher's ID to help us understand the problem.

* Make email pusher setup more robust

We know we'll have a `data` member, since that comes from the database. What we
*don't* know is whether that member is a dict, whether it has a `brand` entry,
and whether that entry is a string.
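
The corrupt-data filtering boils down to a generator that JSON-decodes each
row and skips anything it can't parse (a standalone sketch of the pattern, not
the exact store code):

    import json
    import logging
    from typing import Iterable, Iterator

    logger = logging.getLogger(__name__)

    def decode_pusher_rows(rows: Iterable[dict]) -> Iterator[dict]:
        """Yield rows with `data` JSON-decoded, dropping undecodable rows
        instead of yielding them with `data` set to None."""
        for row in rows:
            try:
                row["data"] = json.loads(row["data"])
            except Exception:
                logger.warning("Invalid JSON in pusher row %s; dropping", row.get("id"))
                continue
            yield row

    rows = [{"id": 1, "data": '{"brand": "Example"}'}, {"id": 2, "data": "{oops"}]
    assert [r["id"] for r in decode_pusher_rows(rows)] == [1]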
---
 changelog.d/6558.misc                      |  1 +
 synapse/push/pusher.py                     | 12 ++++++-----
 synapse/push/pusherpool.py                 | 10 +++++----
 synapse/rest/client/v1/pusher.py           | 33 +++++++++++++++---------------
 synapse/storage/data_stores/main/pusher.py | 25 ++++++++--------------
 tests/push/test_email.py                   |  3 +++
 tests/push/test_http.py                    |  4 ++++
 7 files changed, 45 insertions(+), 43 deletions(-)
 create mode 100644 changelog.d/6558.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6558.misc b/changelog.d/6558.misc
new file mode 100644
index 0000000000..a7572f1a85
--- /dev/null
+++ b/changelog.d/6558.misc
@@ -0,0 +1 @@
+Clean up logs from the push notifier at startup.
\ No newline at end of file
diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py
index f277aeb131..8ad0bf5936 100644
--- a/synapse/push/pusher.py
+++ b/synapse/push/pusher.py
@@ -80,9 +80,11 @@ class PusherFactory(object):
         return EmailPusher(self.hs, pusherdict, mailer)
 
     def _app_name_from_pusherdict(self, pusherdict):
-        if "data" in pusherdict and "brand" in pusherdict["data"]:
-            app_name = pusherdict["data"]["brand"]
-        else:
-            app_name = self.config.email_app_name
+        data = pusherdict["data"]
 
-        return app_name
+        if isinstance(data, dict):
+            brand = data.get("brand")
+            if isinstance(brand, str):
+                return brand
+
+        return self.config.email_app_name
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 0f6992202d..b9dca5bc63 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -232,7 +232,6 @@ class PusherPool:
             Deferred
         """
         pushers = yield self.store.get_all_pushers()
-        logger.info("Starting %d pushers", len(pushers))
 
         # Stagger starting up the pushers so we don't completely drown the
         # process on start up.
@@ -245,7 +244,7 @@ class PusherPool:
         """Start the given pusher
 
         Args:
-            pusherdict (dict):
+            pusherdict (dict): dict with the values pulled from the db table
 
         Returns:
             Deferred[EmailPusher|HttpPusher]
@@ -254,7 +253,8 @@ class PusherPool:
             p = self.pusher_factory.create_pusher(pusherdict)
         except PusherConfigException as e:
             logger.warning(
-                "Pusher incorrectly configured user=%s, appid=%s, pushkey=%s: %s",
+                "Pusher incorrectly configured id=%i, user=%s, appid=%s, pushkey=%s: %s",
+                pusherdict["id"],
                 pusherdict.get("user_name"),
                 pusherdict.get("app_id"),
                 pusherdict.get("pushkey"),
@@ -262,7 +262,9 @@ class PusherPool:
             )
             return
         except Exception:
-            logger.exception("Couldn't start a pusher: caught Exception")
+            logger.exception(
+                "Couldn't start pusher id %i: caught Exception", pusherdict["id"],
+            )
             return
 
         if not p:
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index 0791866f55..6f6b7aed6e 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -28,6 +28,17 @@ from synapse.rest.client.v2_alpha._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
+ALLOWED_KEYS = {
+    "app_display_name",
+    "app_id",
+    "data",
+    "device_display_name",
+    "kind",
+    "lang",
+    "profile_tag",
+    "pushkey",
+}
+
 
 class PushersRestServlet(RestServlet):
     PATTERNS = client_patterns("/pushers$", v1=True)
@@ -43,23 +54,11 @@ class PushersRestServlet(RestServlet):
 
         pushers = await self.hs.get_datastore().get_pushers_by_user_id(user.to_string())
 
-        allowed_keys = [
-            "app_display_name",
-            "app_id",
-            "data",
-            "device_display_name",
-            "kind",
-            "lang",
-            "profile_tag",
-            "pushkey",
-        ]
-
-        for p in pushers:
-            for k, v in list(p.items()):
-                if k not in allowed_keys:
-                    del p[k]
-
-        return 200, {"pushers": pushers}
+        filtered_pushers = list(
+            {k: v for k, v in p.items() if k in ALLOWED_KEYS} for p in pushers
+        )
+
+        return 200, {"pushers": filtered_pushers}
 
     def on_OPTIONS(self, _):
         return 200, {}
diff --git a/synapse/storage/data_stores/main/pusher.py b/synapse/storage/data_stores/main/pusher.py
index f07309ef09..6b03233262 100644
--- a/synapse/storage/data_stores/main/pusher.py
+++ b/synapse/storage/data_stores/main/pusher.py
@@ -15,8 +15,7 @@
 # limitations under the License.
 
 import logging
-
-import six
+from typing import Iterable, Iterator
 
 from canonicaljson import encode_canonical_json, json
 
@@ -27,21 +26,16 @@ from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
 
 logger = logging.getLogger(__name__)
 
-if six.PY2:
-    db_binary_type = six.moves.builtins.buffer
-else:
-    db_binary_type = memoryview
-
 
 class PusherWorkerStore(SQLBaseStore):
-    def _decode_pushers_rows(self, rows):
+    def _decode_pushers_rows(self, rows: Iterable[dict]) -> Iterator[dict]:
+        """JSON-decode the data in the rows returned from the `pushers` table
+
+        Drops any rows whose data cannot be decoded
+        """
         for r in rows:
             dataJson = r["data"]
-            r["data"] = None
             try:
-                if isinstance(dataJson, db_binary_type):
-                    dataJson = str(dataJson).decode("UTF8")
-
                 r["data"] = json.loads(dataJson)
             except Exception as e:
                 logger.warning(
@@ -50,12 +44,9 @@ class PusherWorkerStore(SQLBaseStore):
                     dataJson,
                     e.args[0],
                 )
-                pass
-
-            if isinstance(r["pushkey"], db_binary_type):
-                r["pushkey"] = str(r["pushkey"]).decode("UTF8")
+                continue
 
-        return rows
+            yield r
 
     @defer.inlineCallbacks
     def user_has_pusher(self, user_id):
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index 358b593cd4..80187406bc 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -165,6 +165,7 @@ class EmailPusherTests(HomeserverTestCase):
         pushers = self.get_success(
             self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
         )
+        pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
         last_stream_ordering = pushers[0]["last_stream_ordering"]
 
@@ -175,6 +176,7 @@ class EmailPusherTests(HomeserverTestCase):
         pushers = self.get_success(
             self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
         )
+        pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
         self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"])
 
@@ -192,5 +194,6 @@ class EmailPusherTests(HomeserverTestCase):
         pushers = self.get_success(
             self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
         )
+        pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
         self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index af2327fb66..fe3441f081 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -104,6 +104,7 @@ class HTTPPusherTests(HomeserverTestCase):
         pushers = self.get_success(
             self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
         )
+        pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
         last_stream_ordering = pushers[0]["last_stream_ordering"]
 
@@ -114,6 +115,7 @@ class HTTPPusherTests(HomeserverTestCase):
         pushers = self.get_success(
             self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
         )
+        pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
         self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"])
 
@@ -132,6 +134,7 @@ class HTTPPusherTests(HomeserverTestCase):
         pushers = self.get_success(
             self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
         )
+        pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
         self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
         last_stream_ordering = pushers[0]["last_stream_ordering"]
@@ -151,5 +154,6 @@ class HTTPPusherTests(HomeserverTestCase):
         pushers = self.get_success(
             self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
         )
+        pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
         self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
-- 
cgit 1.4.1
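
Note that `_decode_pushers_rows` is now a generator, so `get_pushers_by`
yields an iterator rather than a list -- which is why the tests above
materialise the result with `list(pushers)` before calling `len()` on it.
A minimal, self-contained sketch of the same pattern (the `rows` payload
here is hypothetical):

    import json

    def decode_rows(rows):
        # JSON-decode each row's "data" column, silently dropping rows
        # whose data cannot be parsed -- mirroring the store logic above.
        for r in rows:
            try:
                r["data"] = json.loads(r["data"])
            except Exception:
                continue
            yield r

    rows = [
        {"id": 1, "data": '{"url": "http://example.com/_matrix/push"}'},
        {"id": 2, "data": "not-json"},
    ]
    decoded = list(decode_rows(rows))  # generators must be materialised first
    assert len(decoded) == 1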


From b95b762560441b28f06e6458da796327e394953e Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 19 Dec 2019 11:11:14 +0000
Subject: Add an export_signing_key script (#6546)

I want to do some key rotation, and it is silly that we don't have a way to do
this.
---
 changelog.d/6546.feature   |  1 +
 docs/code_style.md         | 13 ++++---
 docs/sample_config.yaml    | 21 +++++++----
 scripts/export_signing_key | 94 ++++++++++++++++++++++++++++++++++++++++++++++
 synapse/config/key.py      | 23 ++++++++----
 5 files changed, 130 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/6546.feature
 create mode 100755 scripts/export_signing_key

(limited to 'changelog.d')

diff --git a/changelog.d/6546.feature b/changelog.d/6546.feature
new file mode 100644
index 0000000000..954aacb0d0
--- /dev/null
+++ b/changelog.d/6546.feature
@@ -0,0 +1 @@
+Add an export_signing_key script to extract the public part of signing keys when rotating them.
diff --git a/docs/code_style.md b/docs/code_style.md
index f983f72d6c..71aecd41f7 100644
--- a/docs/code_style.md
+++ b/docs/code_style.md
@@ -137,6 +137,7 @@ Some guidelines follow:
     correctly handles the top-level option being set to `None` (as it
     will be if no sub-options are enabled).
 -   Lines should be wrapped at 80 characters.
+-   Use two-space indents.
 
 Example:
 
@@ -155,13 +156,13 @@ Example:
     # Settings for the frobber
     #
     frobber:
-       # frobbing speed. Defaults to 1.
-       #
-       #speed: 10
+      # frobbing speed. Defaults to 1.
+      #
+      #speed: 10
 
-       # frobbing distance. Defaults to 1000.
-       #
-       #distance: 100
+      # frobbing distance. Defaults to 1000.
+      #
+      #distance: 100
 
 Note that the sample configuration is generated from the synapse code
 and is maintained by a script, `scripts-dev/generate_sample_config`.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 1787248f53..e3b05423b8 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1122,14 +1122,19 @@ metrics_flags:
 signing_key_path: "CONFDIR/SERVERNAME.signing.key"
 
 # The keys that the server used to sign messages with but won't use
-# to sign new messages. E.g. it has lost its private key
-#
-#old_signing_keys:
-#  "ed25519:auto":
-#    # Base64 encoded public key
-#    key: "The public part of your old signing key."
-#    # Millisecond POSIX timestamp when the key expired.
-#    expired_ts: 123456789123
+# to sign new messages.
+#
+old_signing_keys:
+  # For each key, `key` should be the base64-encoded public key, and
+  # `expired_ts` should be the time (in milliseconds since the unix epoch) that
+  # it was last used.
+  #
+  # It is possible to build an entry from an old signing.key file using the
+  # `export_signing_key` script which is provided with synapse.
+  #
+  # For example:
+  #
+  #"ed25519:id": { key: "base64string", expired_ts: 123456789123 }
 
 # How long key response published by this server is valid for.
 # Used to set the valid_until_ts in /key/v2 APIs.
diff --git a/scripts/export_signing_key b/scripts/export_signing_key
new file mode 100755
index 0000000000..8aec9d802b
--- /dev/null
+++ b/scripts/export_signing_key
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import sys
+import time
+from typing import Optional
+
+import nacl.signing
+from signedjson.key import encode_verify_key_base64, get_verify_key, read_signing_keys
+
+
+def exit(status: int = 0, message: Optional[str] = None):
+    if message:
+        print(message, file=sys.stderr)
+    sys.exit(status)
+
+
+def format_plain(public_key: nacl.signing.VerifyKey):
+    print(
+        "%s:%s %s"
+        % (public_key.alg, public_key.version, encode_verify_key_base64(public_key),)
+    )
+
+
+def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int):
+    print(
+        '  "%s:%s": { key: "%s", expired_ts: %i }'
+        % (
+            public_key.alg,
+            public_key.version,
+            encode_verify_key_base64(public_key),
+            expiry_ts,
+        )
+    )
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "key_file", nargs="+", type=argparse.FileType("r"), help="The key file to read",
+    )
+
+    parser.add_argument(
+        "-x",
+        action="store_true",
+        dest="for_config",
+        help="format the output for inclusion in the old_signing_keys config setting",
+    )
+
+    parser.add_argument(
+        "--expiry-ts",
+        type=int,
+        default=int(time.time() * 1000) + 6 * 3600000,
+        help=(
+            "The expiry time to use for -x, in milliseconds since 1970. The default "
+            "is (now+6h)."
+        ),
+    )
+
+    args = parser.parse_args()
+
+    formatter = (
+        (lambda k: format_for_config(k, args.expiry_ts))
+        if args.for_config
+        else format_plain
+    )
+
+    keys = []
+    for file in args.key_file:
+        try:
+            res = read_signing_keys(file)
+        except Exception as e:
+            exit(
+                status=1,
+                message="Error reading key from file %s: %s %s"
+                % (file.name, type(e), e),
+            )
+            res = []
+        for key in res:
+            formatter(get_verify_key(key))
diff --git a/synapse/config/key.py b/synapse/config/key.py
index 52ff1b2621..066e7838c3 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -108,7 +108,7 @@ class KeyConfig(Config):
             self.signing_key = self.read_signing_keys(signing_key_path, "signing_key")
 
         self.old_signing_keys = self.read_old_signing_keys(
-            config.get("old_signing_keys", {})
+            config.get("old_signing_keys")
         )
         self.key_refresh_interval = self.parse_duration(
             config.get("key_refresh_interval", "1d")
@@ -199,14 +199,19 @@ class KeyConfig(Config):
         signing_key_path: "%(base_key_name)s.signing.key"
 
         # The keys that the server used to sign messages with but won't use
-        # to sign new messages. E.g. it has lost its private key
+        # to sign new messages.
         #
-        #old_signing_keys:
-        #  "ed25519:auto":
-        #    # Base64 encoded public key
-        #    key: "The public part of your old signing key."
-        #    # Millisecond POSIX timestamp when the key expired.
-        #    expired_ts: 123456789123
+        old_signing_keys:
+          # For each key, `key` should be the base64-encoded public key, and
+          # `expired_ts` should be the time (in milliseconds since the unix epoch) that
+          # it was last used.
+          #
+          # It is possible to build an entry from an old signing.key file using the
+          # `export_signing_key` script which is provided with synapse.
+          #
+          # For example:
+          #
+          #"ed25519:id": { key: "base64string", expired_ts: 123456789123 }
 
         # How long key response published by this server is valid for.
         # Used to set the valid_until_ts in /key/v2 APIs.
@@ -290,6 +295,8 @@ class KeyConfig(Config):
             raise ConfigError("Error reading %s: %s" % (name, str(e)))
 
     def read_old_signing_keys(self, old_signing_keys):
+        if old_signing_keys is None:
+            return {}
         keys = {}
         for key_id, key_data in old_signing_keys.items():
             if is_signing_algorithm_supported(key_id):
-- 
cgit 1.4.1
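
Going by the argument parser above, a typical invocation for building an
`old_signing_keys` entry might look like the following (the key path, key
ID and output values are purely illustrative):

    $ ./scripts/export_signing_key -x /path/to/homeserver.signing.key
      "ed25519:a_key_id": { key: "<base64-encoded public key>", expired_ts: 1576821600000 }

Without `-x`, the script prints the bare `algorithm:version base64key`
form instead.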


From 0b794cbd7b232b42a2d726e6ab6c698d4bf35093 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 19 Dec 2019 14:52:52 +0000
Subject: Fix sdnotify with acme enabled (#6571)

If acme was enabled, the sdnotify startup hook would never be run because we
would try to add it to a hook which had already fired.

There's no need to delay it: we can sdnotify as soon as we've started the
listeners.
---
 changelog.d/6571.bugfix |  1 +
 synapse/app/_base.py    | 10 +++++++---
 2 files changed, 8 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6571.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6571.bugfix b/changelog.d/6571.bugfix
new file mode 100644
index 0000000000..e38ea7b4f7
--- /dev/null
+++ b/changelog.d/6571.bugfix
@@ -0,0 +1 @@
+Fix a bug which meant that we did not send systemd notifications on startup if acme was enabled.
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 9c96816096..0e8b467a3e 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -237,6 +237,12 @@ def start(hs, listeners=None):
     """
     Start a Synapse server or worker.
 
+    Should be called once the reactor is running and (if we're using ACME) the
+    TLS certificates are in place.
+
+    Will start the main HTTP listeners and do some other startup tasks, and then
+    notify systemd.
+
     Args:
         hs (synapse.server.HomeServer)
         listeners (list[dict]): Listener configuration ('listeners' in homeserver.yaml)
@@ -311,9 +317,7 @@ def setup_sdnotify(hs):
 
     # Tell systemd our state, if we're using it. This will silently fail if
     # we're not using systemd.
-    hs.get_reactor().addSystemEventTrigger(
-        "after", "startup", sdnotify, b"READY=1\nMAINPID=%i" % (os.getpid(),)
-    )
+    sdnotify(b"READY=1\nMAINPID=%i" % (os.getpid(),))
 
     hs.get_reactor().addSystemEventTrigger(
         "before", "shutdown", sdnotify, b"STOPPING=1"
-- 
cgit 1.4.1
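
For context, `sdnotify` implements systemd's sd_notify protocol: it writes
a datagram to the unix socket named in the NOTIFY_SOCKET environment
variable, and is a no-op when that variable is unset. A minimal sketch of
that protocol (not Synapse's actual helper, just the standard contract):

    import os
    import socket

    def sdnotify_sketch(state: bytes) -> None:
        addr = os.environ.get("NOTIFY_SOCKET")
        if addr is None:
            return  # not started by systemd: silently do nothing
        if addr.startswith("@"):
            addr = "\0" + addr[1:]  # abstract-namespace socket
        with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) as sock:
            sock.connect(addr)
            sock.sendall(state)

    sdnotify_sketch(b"READY=1\nMAINPID=%i" % (os.getpid(),))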


From bca30cefee3849813565dd71e571172818629d85 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 19 Dec 2019 14:53:15 +0000
Subject: Improve diagnostics on database upgrade failure (#6570)

`Failed to upgrade database` is not helpful, and it's unlikely that UPGRADE.rst
has anything useful.
---
 changelog.d/6570.misc               | 1 +
 synapse/app/homeserver.py           | 9 ++-------
 synapse/storage/prepare_database.py | 5 ++++-
 3 files changed, 7 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/6570.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6570.misc b/changelog.d/6570.misc
new file mode 100644
index 0000000000..e89955a51e
--- /dev/null
+++ b/changelog.d/6570.misc
@@ -0,0 +1 @@
+Improve diagnostics on database upgrade failure.
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index b8661457e2..0e9bf7f53a 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -342,13 +342,8 @@ def setup(config_options):
         hs.setup()
     except IncorrectDatabaseSetup as e:
         quit_with_error(str(e))
-    except UpgradeDatabaseException:
-        sys.stderr.write(
-            "\nFailed to upgrade database.\n"
-            "Have you checked for version specific instructions in"
-            " UPGRADES.rst?\n"
-        )
-        sys.exit(1)
+    except UpgradeDatabaseException as e:
+        quit_with_error("Failed to upgrade database: %s" % (e,))
 
     hs.setup_master()
 
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index b4194b44ee..0195edf4ac 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -69,7 +69,10 @@ def prepare_database(db_conn, database_engine, config, data_stores=["main"]):
                 if user_version != SCHEMA_VERSION:
                     # If we don't pass in a config file then we are expecting to
                     # have already upgraded the DB.
-                    raise UpgradeDatabaseException("Database needs to be upgraded")
+                    raise UpgradeDatabaseException(
+                        "Expected database schema version %i but got %i"
+                        % (SCHEMA_VERSION, user_version)
+                    )
             else:
                 _upgrade_existing_database(
                     cur,
-- 
cgit 1.4.1
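
The practical effect is that a schema mismatch now reports both versions
instead of pointing at UPGRADE.rst. For example (version numbers
hypothetical):

    >>> SCHEMA_VERSION, user_version = 56, 55
    >>> "Expected database schema version %i but got %i" % (SCHEMA_VERSION, user_version)
    'Expected database schema version 56 but got 55'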


From 3d46124ad01990d37fa54c1599c28314dc5f5d30 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 19 Dec 2019 15:07:28 +0000
Subject: Port some admin handlers to async/await (#6559)

---
 changelog.d/6559.misc                  |  1 +
 synapse/app/admin_cmd.py               |  6 ++--
 synapse/handlers/admin.py              | 41 +++++++++++---------------
 synapse/handlers/deactivate_account.py | 54 +++++++++++++++-------------------
 4 files changed, 46 insertions(+), 56 deletions(-)
 create mode 100644 changelog.d/6559.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6559.misc b/changelog.d/6559.misc
new file mode 100644
index 0000000000..8bca37457d
--- /dev/null
+++ b/changelog.d/6559.misc
@@ -0,0 +1 @@
+Port `synapse.handlers.admin` and `synapse.handlers.deactivate_account` to async/await.
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 51a909419f..8e36bc57d3 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -104,8 +104,10 @@ def export_data_command(hs, args):
     user_id = args.user_id
     directory = args.output_directory
 
-    res = yield hs.get_handlers().admin_handler.export_user_data(
-        user_id, FileExfiltrationWriter(user_id, directory=directory)
+    res = yield defer.ensureDeferred(
+        hs.get_handlers().admin_handler.export_user_data(
+            user_id, FileExfiltrationWriter(user_id, directory=directory)
+        )
     )
     print(res)
 
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 14449b9a1e..1a4ba12385 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -15,8 +15,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.api.constants import Membership
 from synapse.types import RoomStreamToken
 from synapse.visibility import filter_events_for_client
@@ -33,11 +31,10 @@ class AdminHandler(BaseHandler):
         self.storage = hs.get_storage()
         self.state_store = self.storage.state
 
-    @defer.inlineCallbacks
-    def get_whois(self, user):
+    async def get_whois(self, user):
         connections = []
 
-        sessions = yield self.store.get_user_ip_and_agents(user)
+        sessions = await self.store.get_user_ip_and_agents(user)
         for session in sessions:
             connections.append(
                 {
@@ -54,20 +51,18 @@ class AdminHandler(BaseHandler):
 
         return ret
 
-    @defer.inlineCallbacks
-    def get_users(self):
+    async def get_users(self):
         """Function to retrieve a list of users in users table.
 
         Args:
         Returns:
             defer.Deferred: resolves to list[dict[str, Any]]
         """
-        ret = yield self.store.get_users()
+        ret = await self.store.get_users()
 
         return ret
 
-    @defer.inlineCallbacks
-    def get_users_paginate(self, start, limit, name, guests, deactivated):
+    async def get_users_paginate(self, start, limit, name, guests, deactivated):
         """Function to retrieve a paginated list of users from
         users list. This will return a json list of users.
 
@@ -80,14 +75,13 @@ class AdminHandler(BaseHandler):
         Returns:
             defer.Deferred: resolves to json list[dict[str, Any]]
         """
-        ret = yield self.store.get_users_paginate(
+        ret = await self.store.get_users_paginate(
             start, limit, name, guests, deactivated
         )
 
         return ret
 
-    @defer.inlineCallbacks
-    def search_users(self, term):
+    async def search_users(self, term):
         """Function to search users list for one or more users with
         the matched term.
 
@@ -96,7 +90,7 @@ class AdminHandler(BaseHandler):
         Returns:
             defer.Deferred: resolves to list[dict[str, Any]]
         """
-        ret = yield self.store.search_users(term)
+        ret = await self.store.search_users(term)
 
         return ret
 
@@ -119,8 +113,7 @@ class AdminHandler(BaseHandler):
         """
         return self.store.set_server_admin(user, admin)
 
-    @defer.inlineCallbacks
-    def export_user_data(self, user_id, writer):
+    async def export_user_data(self, user_id, writer):
         """Write all data we have on the user to the given writer.
 
         Args:
@@ -132,7 +125,7 @@ class AdminHandler(BaseHandler):
             The returned value is that returned by `writer.finished()`.
         """
         # Get all rooms the user is in or has been in
-        rooms = yield self.store.get_rooms_for_user_where_membership_is(
+        rooms = await self.store.get_rooms_for_user_where_membership_is(
             user_id,
             membership_list=(
                 Membership.JOIN,
@@ -145,7 +138,7 @@ class AdminHandler(BaseHandler):
         # We only try and fetch events for rooms the user has been in. If
         # they've been e.g. invited to a room without joining then we handle
         # those separately.
-        rooms_user_has_been_in = yield self.store.get_rooms_user_has_been_in(user_id)
+        rooms_user_has_been_in = await self.store.get_rooms_user_has_been_in(user_id)
 
         for index, room in enumerate(rooms):
             room_id = room.room_id
@@ -154,7 +147,7 @@ class AdminHandler(BaseHandler):
                 "[%s] Handling room %s, %d/%d", user_id, room_id, index + 1, len(rooms)
             )
 
-            forgotten = yield self.store.did_forget(user_id, room_id)
+            forgotten = await self.store.did_forget(user_id, room_id)
             if forgotten:
                 logger.info("[%s] User forgot room %d, ignoring", user_id, room_id)
                 continue
@@ -166,7 +159,7 @@ class AdminHandler(BaseHandler):
 
                 if room.membership == Membership.INVITE:
                     event_id = room.event_id
-                    invite = yield self.store.get_event(event_id, allow_none=True)
+                    invite = await self.store.get_event(event_id, allow_none=True)
                     if invite:
                         invited_state = invite.unsigned["invite_room_state"]
                         writer.write_invite(room_id, invite, invited_state)
@@ -177,7 +170,7 @@ class AdminHandler(BaseHandler):
             # were joined. We estimate that point by looking at the
             # stream_ordering of the last membership if it wasn't a join.
             if room.membership == Membership.JOIN:
-                stream_ordering = yield self.store.get_room_max_stream_ordering()
+                stream_ordering = self.store.get_room_max_stream_ordering()
             else:
                 stream_ordering = room.stream_ordering
 
@@ -203,7 +196,7 @@ class AdminHandler(BaseHandler):
             # events that we have and then filtering, this isn't the most
             # efficient method perhaps but it does guarantee we get everything.
             while True:
-                events, _ = yield self.store.paginate_room_events(
+                events, _ = await self.store.paginate_room_events(
                     room_id, from_key, to_key, limit=100, direction="f"
                 )
                 if not events:
@@ -211,7 +204,7 @@ class AdminHandler(BaseHandler):
 
                 from_key = events[-1].internal_metadata.after
 
-                events = yield filter_events_for_client(self.storage, user_id, events)
+                events = await filter_events_for_client(self.storage, user_id, events)
 
                 writer.write_events(room_id, events)
 
@@ -247,7 +240,7 @@ class AdminHandler(BaseHandler):
             for event_id in extremities:
                 if not event_to_unseen_prevs[event_id]:
                     continue
-                state = yield self.state_store.get_state_for_event(event_id)
+                state = await self.state_store.get_state_for_event(event_id)
                 writer.write_state(room_id, event_id, state)
 
         return writer.finished()
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 6dedaaff8d..4426967f88 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -15,8 +15,6 @@
 # limitations under the License.
 import logging
 
-from twisted.internet import defer
-
 from synapse.api.errors import SynapseError
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import UserID, create_requester
@@ -46,8 +44,7 @@ class DeactivateAccountHandler(BaseHandler):
 
         self._account_validity_enabled = hs.config.account_validity.enabled
 
-    @defer.inlineCallbacks
-    def deactivate_account(self, user_id, erase_data, id_server=None):
+    async def deactivate_account(self, user_id, erase_data, id_server=None):
         """Deactivate a user's account
 
         Args:
@@ -74,11 +71,11 @@ class DeactivateAccountHandler(BaseHandler):
         identity_server_supports_unbinding = True
 
         # Retrieve the 3PIDs this user has bound to an identity server
-        threepids = yield self.store.user_get_bound_threepids(user_id)
+        threepids = await self.store.user_get_bound_threepids(user_id)
 
         for threepid in threepids:
             try:
-                result = yield self._identity_handler.try_unbind_threepid(
+                result = await self._identity_handler.try_unbind_threepid(
                     user_id,
                     {
                         "medium": threepid["medium"],
@@ -91,33 +88,33 @@ class DeactivateAccountHandler(BaseHandler):
                 # Do we want this to be a fatal error or should we carry on?
                 logger.exception("Failed to remove threepid from ID server")
                 raise SynapseError(400, "Failed to remove threepid from ID server")
-            yield self.store.user_delete_threepid(
+            await self.store.user_delete_threepid(
                 user_id, threepid["medium"], threepid["address"]
             )
 
         # Remove all 3PIDs this user has bound to the homeserver
-        yield self.store.user_delete_threepids(user_id)
+        await self.store.user_delete_threepids(user_id)
 
         # delete any devices belonging to the user, which will also
         # delete corresponding access tokens.
-        yield self._device_handler.delete_all_devices_for_user(user_id)
+        await self._device_handler.delete_all_devices_for_user(user_id)
         # then delete any remaining access tokens which weren't associated with
         # a device.
-        yield self._auth_handler.delete_access_tokens_for_user(user_id)
+        await self._auth_handler.delete_access_tokens_for_user(user_id)
 
-        yield self.store.user_set_password_hash(user_id, None)
+        await self.store.user_set_password_hash(user_id, None)
 
         # Add the user to a table of users pending deactivation (ie.
         # removal from all the rooms they're a member of)
-        yield self.store.add_user_pending_deactivation(user_id)
+        await self.store.add_user_pending_deactivation(user_id)
 
         # delete from user directory
-        yield self.user_directory_handler.handle_user_deactivated(user_id)
+        await self.user_directory_handler.handle_user_deactivated(user_id)
 
         # Mark the user as erased, if they asked for that
         if erase_data:
             logger.info("Marking %s as erased", user_id)
-            yield self.store.mark_user_erased(user_id)
+            await self.store.mark_user_erased(user_id)
 
         # Now start the process that goes through that list and
         # parts users from rooms (if it isn't already running)
@@ -125,30 +122,29 @@ class DeactivateAccountHandler(BaseHandler):
 
         # Reject all pending invites for the user, so that the user doesn't show up in the
         # "invited" section of rooms' members list.
-        yield self._reject_pending_invites_for_user(user_id)
+        await self._reject_pending_invites_for_user(user_id)
 
         # Remove all information on the user from the account_validity table.
         if self._account_validity_enabled:
-            yield self.store.delete_account_validity_for_user(user_id)
+            await self.store.delete_account_validity_for_user(user_id)
 
         # Mark the user as deactivated.
-        yield self.store.set_user_deactivated_status(user_id, True)
+        await self.store.set_user_deactivated_status(user_id, True)
 
         return identity_server_supports_unbinding
 
-    @defer.inlineCallbacks
-    def _reject_pending_invites_for_user(self, user_id):
+    async def _reject_pending_invites_for_user(self, user_id):
         """Reject pending invites addressed to a given user ID.
 
         Args:
             user_id (str): The user ID to reject pending invites for.
         """
         user = UserID.from_string(user_id)
-        pending_invites = yield self.store.get_invited_rooms_for_user(user_id)
+        pending_invites = await self.store.get_invited_rooms_for_user(user_id)
 
         for room in pending_invites:
             try:
-                yield self._room_member_handler.update_membership(
+                await self._room_member_handler.update_membership(
                     create_requester(user),
                     user,
                     room.room_id,
@@ -180,8 +176,7 @@ class DeactivateAccountHandler(BaseHandler):
         if not self._user_parter_running:
             run_as_background_process("user_parter_loop", self._user_parter_loop)
 
-    @defer.inlineCallbacks
-    def _user_parter_loop(self):
+    async def _user_parter_loop(self):
         """Loop that parts deactivated users from rooms
 
         Returns:
@@ -191,19 +186,18 @@ class DeactivateAccountHandler(BaseHandler):
         logger.info("Starting user parter")
         try:
             while True:
-                user_id = yield self.store.get_user_pending_deactivation()
+                user_id = await self.store.get_user_pending_deactivation()
                 if user_id is None:
                     break
                 logger.info("User parter parting %r", user_id)
-                yield self._part_user(user_id)
-                yield self.store.del_user_pending_deactivation(user_id)
+                await self._part_user(user_id)
+                await self.store.del_user_pending_deactivation(user_id)
                 logger.info("User parter finished parting %r", user_id)
             logger.info("User parter finished: stopping")
         finally:
             self._user_parter_running = False
 
-    @defer.inlineCallbacks
-    def _part_user(self, user_id):
+    async def _part_user(self, user_id):
         """Causes the given user_id to leave all the rooms they're joined to
 
         Returns:
@@ -211,11 +205,11 @@ class DeactivateAccountHandler(BaseHandler):
         """
         user = UserID.from_string(user_id)
 
-        rooms_for_user = yield self.store.get_rooms_for_user(user_id)
+        rooms_for_user = await self.store.get_rooms_for_user(user_id)
         for room_id in rooms_for_user:
             logger.info("User parter parting %r from %r", user_id, room_id)
             try:
-                yield self._room_member_handler.update_membership(
+                await self._room_member_handler.update_membership(
                     create_requester(user),
                     user,
                     room_id,
-- 
cgit 1.4.1
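
The porting pattern here is mechanical: each `@defer.inlineCallbacks`
generator becomes a native coroutine, and any caller that is still an
inlineCallbacks generator (such as `export_data_command` above) wraps the
coroutine in `defer.ensureDeferred` so that it can be yielded. A
self-contained sketch of both sides (`FakeStore` is a stand-in, not a
Synapse class):

    from twisted.internet import defer

    async def get_users(store):
        # formerly: @defer.inlineCallbacks plus `ret = yield store.get_users()`
        return await store.get_users()

    @defer.inlineCallbacks
    def legacy_caller(store):
        # old-style generators can only yield Deferreds, so wrap the coroutine
        users = yield defer.ensureDeferred(get_users(store))
        return users

    class FakeStore:
        def get_users(self):
            return defer.succeed(["@alice:example.com"])

    legacy_caller(FakeStore()).addCallback(print)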


From 0b5dbadd9607714c471cbf317a64a96d935898a2 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 19 Dec 2019 15:07:37 +0000
Subject: Explode on duplicate delta file names. (#6565)

---
 changelog.d/6565.misc               |  1 +
 synapse/storage/prepare_database.py | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+)
 create mode 100644 changelog.d/6565.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6565.misc b/changelog.d/6565.misc
new file mode 100644
index 0000000000..e83f245bf0
--- /dev/null
+++ b/changelog.d/6565.misc
@@ -0,0 +1 @@
+Add assertion that schema delta file names are unique.
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 0195edf4ac..403848ad03 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -18,6 +18,7 @@ import imp
 import logging
 import os
 import re
+from collections import Counter
 
 import attr
 
@@ -315,6 +316,9 @@ def _upgrade_existing_database(
                 )
             )
 
+        # Used to check if we have any duplicate file names
+        file_name_counter = Counter()
+
         # Now find which directories have anything of interest.
         directory_entries = []
         for directory in directories:
@@ -325,6 +329,9 @@ def _upgrade_existing_database(
                     _DirectoryListing(file_name, os.path.join(directory, file_name))
                     for file_name in file_names
                 )
+
+                for file_name in file_names:
+                    file_name_counter[file_name] += 1
             except FileNotFoundError:
                 # Data stores can have empty entries for a given version delta.
                 pass
@@ -333,6 +340,17 @@ def _upgrade_existing_database(
                     "Could not open delta dir for version %d: %s" % (v, directory)
                 )
 
+        duplicates = set(
+            file_name for file_name, count in file_name_counter.items() if count > 1
+        )
+        if duplicates:
+            # We don't support using the same file name in the same delta version.
+            raise PrepareDatabaseException(
+                "Found multiple delta files with the same name in v%d: %s",
+                v,
+                duplicates,
+            )
+
         # We sort to ensure that we apply the delta files in a consistent
         # order (to avoid bugs caused by inconsistent directory listing order)
         directory_entries.sort()
-- 
cgit 1.4.1
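
The detection itself is just a `Counter` over every delta file name seen
for a given schema version across all data stores' delta directories. In
isolation (directory contents hypothetical):

    from collections import Counter

    file_name_counter = Counter()
    # e.g. the "main" and "state" data stores' delta dirs for one version
    for file_names in (["01foo.sql", "02bar.sql"], ["01foo.sql"]):
        for file_name in file_names:
            file_name_counter[file_name] += 1

    duplicates = {name for name, count in file_name_counter.items() if count > 1}
    assert duplicates == {"01foo.sql"}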


From fa780e9721c940479a72eed9877ccad4fef78160 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 20 Dec 2019 10:32:02 +0000
Subject: Change EventContext to use the Storage class (#6564)

---
 changelog.d/6564.misc                          |  1 +
 synapse/api/auth.py                            |  2 +-
 synapse/events/snapshot.py                     | 36 +++++++++++++++-----------
 synapse/events/third_party_rules.py            |  2 +-
 synapse/handlers/_base.py                      |  2 +-
 synapse/handlers/federation.py                 | 14 +++++-----
 synapse/handlers/message.py                    | 10 +++----
 synapse/handlers/room.py                       |  2 +-
 synapse/handlers/room_member.py                |  4 +--
 synapse/push/bulk_push_rule_evaluator.py       |  4 +--
 synapse/replication/http/federation.py         |  5 +++-
 synapse/replication/http/send_event.py         |  3 ++-
 synapse/storage/data_stores/main/push_rule.py  |  2 +-
 synapse/storage/data_stores/main/roommember.py |  2 +-
 tests/test_state.py                            | 28 ++++++++++----------
 15 files changed, 64 insertions(+), 53 deletions(-)
 create mode 100644 changelog.d/6564.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6564.misc b/changelog.d/6564.misc
new file mode 100644
index 0000000000..f644f5868b
--- /dev/null
+++ b/changelog.d/6564.misc
@@ -0,0 +1 @@
+Change `EventContext` to use the `Storage` class, in preparation for moving state database queries to a separate data store.
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 9fd52a8c77..abbc7079a3 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -79,7 +79,7 @@ class Auth(object):
 
     @defer.inlineCallbacks
     def check_from_context(self, room_version, event, context, do_sig_check=True):
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
         auth_events_ids = yield self.compute_auth_events(
             event, prev_state_ids, for_verification=True
         )
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 64e898f40c..a44baea365 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -149,7 +149,7 @@ class EventContext:
         # the prev_state_ids, so if we're a state event we include the event
         # id that we replaced in the state.
         if event.is_state():
-            prev_state_ids = yield self.get_prev_state_ids(store)
+            prev_state_ids = yield self.get_prev_state_ids()
             prev_state_id = prev_state_ids.get((event.type, event.state_key))
         else:
             prev_state_id = None
@@ -167,12 +167,13 @@ class EventContext:
         }
 
     @staticmethod
-    def deserialize(store, input):
+    def deserialize(storage, input):
         """Converts a dict that was produced by `serialize` back into a
         EventContext.
 
         Args:
-            store (DataStore): Used to convert AS ID to AS object
+            storage (Storage): Used to convert AS ID to AS object and fetch
+                state.
             input (dict): A dict produced by `serialize`
 
         Returns:
@@ -181,6 +182,7 @@ class EventContext:
         context = _AsyncEventContextImpl(
             # We use the state_group and prev_state_id stuff to pull the
             # current_state_ids out of the DB and construct prev_state_ids.
+            storage=storage,
             prev_state_id=input["prev_state_id"],
             event_type=input["event_type"],
             event_state_key=input["event_state_key"],
@@ -193,7 +195,7 @@ class EventContext:
 
         app_service_id = input["app_service_id"]
         if app_service_id:
-            context.app_service = store.get_app_service_by_id(app_service_id)
+            context.app_service = storage.main.get_app_service_by_id(app_service_id)
 
         return context
 
@@ -216,7 +218,7 @@ class EventContext:
         return self._state_group
 
     @defer.inlineCallbacks
-    def get_current_state_ids(self, store):
+    def get_current_state_ids(self):
         """
         Gets the room state map, including this event - ie, the state in ``state_group``
 
@@ -234,11 +236,11 @@ class EventContext:
         if self.rejected:
             raise RuntimeError("Attempt to access state_ids of rejected event")
 
-        yield self._ensure_fetched(store)
+        yield self._ensure_fetched()
         return self._current_state_ids
 
     @defer.inlineCallbacks
-    def get_prev_state_ids(self, store):
+    def get_prev_state_ids(self):
         """
         Gets the room state map, excluding this event.
 
@@ -250,7 +252,7 @@ class EventContext:
                 Maps a (type, state_key) to the event ID of the state event matching
                 this tuple.
         """
-        yield self._ensure_fetched(store)
+        yield self._ensure_fetched()
         return self._prev_state_ids
 
     def get_cached_current_state_ids(self):
@@ -270,7 +272,7 @@ class EventContext:
 
         return self._current_state_ids
 
-    def _ensure_fetched(self, store):
+    def _ensure_fetched(self):
         return defer.succeed(None)
 
 
@@ -282,6 +284,8 @@ class _AsyncEventContextImpl(EventContext):
 
     Attributes:
 
+        _storage (Storage)
+
         _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
             been calculated. None if we haven't started calculating yet
 
@@ -295,28 +299,30 @@ class _AsyncEventContextImpl(EventContext):
             that was replaced.
     """
 
+    # This needs to have a default as we're inheriting
+    _storage = attr.ib(default=None)
     _prev_state_id = attr.ib(default=None)
     _event_type = attr.ib(default=None)
     _event_state_key = attr.ib(default=None)
     _fetching_state_deferred = attr.ib(default=None)
 
-    def _ensure_fetched(self, store):
+    def _ensure_fetched(self):
         if not self._fetching_state_deferred:
-            self._fetching_state_deferred = run_in_background(
-                self._fill_out_state, store
-            )
+            self._fetching_state_deferred = run_in_background(self._fill_out_state)
 
         return make_deferred_yieldable(self._fetching_state_deferred)
 
     @defer.inlineCallbacks
-    def _fill_out_state(self, store):
+    def _fill_out_state(self):
         """Called to populate the _current_state_ids and _prev_state_ids
         attributes by loading from the database.
         """
         if self.state_group is None:
             return
 
-        self._current_state_ids = yield store.get_state_ids_for_group(self.state_group)
+        self._current_state_ids = yield self._storage.state.get_state_ids_for_group(
+            self.state_group
+        )
         if self._prev_state_id and self._event_state_key is not None:
             self._prev_state_ids = dict(self._current_state_ids)
 
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 714a9b1579..86f7e5f8aa 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -53,7 +53,7 @@ class ThirdPartyEventRules(object):
         if self.third_party_rules is None:
             return True
 
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
 
         # Retrieve the state events from the database.
         state_events = {}
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index d15c6282fb..51413d910e 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -134,7 +134,7 @@ class BaseHandler(object):
             guest_access = event.content.get("guest_access", "forbidden")
             if guest_access != "can_join":
                 if context:
-                    current_state_ids = yield context.get_current_state_ids(self.store)
+                    current_state_ids = yield context.get_current_state_ids()
                     current_state = yield self.store.get_events(
                         list(current_state_ids.values())
                     )
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 60bb00fc6a..05ae40dde7 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -718,7 +718,7 @@ class FederationHandler(BaseHandler):
                 # changing their profile info.
                 newly_joined = True
 
-                prev_state_ids = await context.get_prev_state_ids(self.store)
+                prev_state_ids = await context.get_prev_state_ids()
 
                 prev_state_id = prev_state_ids.get((event.type, event.state_key))
                 if prev_state_id:
@@ -1418,7 +1418,7 @@ class FederationHandler(BaseHandler):
                 user = UserID.from_string(event.state_key)
                 yield self.user_joined_room(user, event.room_id)
 
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
 
         state_ids = list(prev_state_ids.values())
         auth_chain = yield self.store.get_auth_chain(state_ids)
@@ -1927,7 +1927,7 @@ class FederationHandler(BaseHandler):
         context = yield self.state_handler.compute_event_context(event, old_state=state)
 
         if not auth_events:
-            prev_state_ids = yield context.get_prev_state_ids(self.store)
+            prev_state_ids = yield context.get_prev_state_ids()
             auth_events_ids = yield self.auth.compute_auth_events(
                 event, prev_state_ids, for_verification=True
             )
@@ -2336,12 +2336,12 @@ class FederationHandler(BaseHandler):
             k: a.event_id for k, a in iteritems(auth_events) if k != event_key
         }
 
-        current_state_ids = yield context.get_current_state_ids(self.store)
+        current_state_ids = yield context.get_current_state_ids()
         current_state_ids = dict(current_state_ids)
 
         current_state_ids.update(state_updates)
 
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
         prev_state_ids = dict(prev_state_ids)
 
         prev_state_ids.update({k: a.event_id for k, a in iteritems(auth_events)})
@@ -2625,7 +2625,7 @@ class FederationHandler(BaseHandler):
             event.content["third_party_invite"]["signed"]["token"],
         )
         original_invite = None
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
         original_invite_id = prev_state_ids.get(key)
         if original_invite_id:
             original_invite = yield self.store.get_event(
@@ -2673,7 +2673,7 @@ class FederationHandler(BaseHandler):
         signed = event.content["third_party_invite"]["signed"]
         token = signed["token"]
 
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
         invite_event_id = prev_state_ids.get((EventTypes.ThirdPartyInvite, token))
 
         invite_event = None
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index bf9add7fe2..4ad752205f 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -515,7 +515,7 @@ class EventCreationHandler(object):
             # federation as well as those created locally. As of room v3, aliases events
             # can be created by users that are not in the room, therefore we have to
             # tolerate them in event_auth.check().
-            prev_state_ids = yield context.get_prev_state_ids(self.store)
+            prev_state_ids = yield context.get_prev_state_ids()
             prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
             prev_event = (
                 yield self.store.get_event(prev_event_id, allow_none=True)
@@ -665,7 +665,7 @@ class EventCreationHandler(object):
         If so, returns the version of the event in context.
         Otherwise, returns None.
         """
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
         prev_event_id = prev_state_ids.get((event.type, event.state_key))
         if not prev_event_id:
             return
@@ -914,7 +914,7 @@ class EventCreationHandler(object):
                 def is_inviter_member_event(e):
                     return e.type == EventTypes.Member and e.sender == event.sender
 
-                current_state_ids = yield context.get_current_state_ids(self.store)
+                current_state_ids = yield context.get_current_state_ids()
 
                 state_to_include_ids = [
                     e_id
@@ -967,7 +967,7 @@ class EventCreationHandler(object):
                 if original_event.room_id != event.room_id:
                     raise SynapseError(400, "Cannot redact event from a different room")
 
-            prev_state_ids = yield context.get_prev_state_ids(self.store)
+            prev_state_ids = yield context.get_prev_state_ids()
             auth_events_ids = yield self.auth.compute_auth_events(
                 event, prev_state_ids, for_verification=True
             )
@@ -989,7 +989,7 @@ class EventCreationHandler(object):
                 event.internal_metadata.recheck_redaction = False
 
         if event.type == EventTypes.Create:
-            prev_state_ids = yield context.get_prev_state_ids(self.store)
+            prev_state_ids = yield context.get_prev_state_ids()
             if prev_state_ids:
                 raise AuthError(403, "Changing the room create event is forbidden")
 
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index d3a1a7b4a6..89c9118b26 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -184,7 +184,7 @@ class RoomCreationHandler(BaseHandler):
             requester, tombstone_event, tombstone_context
         )
 
-        old_room_state = yield tombstone_context.get_current_state_ids(self.store)
+        old_room_state = yield tombstone_context.get_current_state_ids()
 
         # update any aliases
         yield self._move_aliases_to_new_room(
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 7b7270fc61..44c5e3239c 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -193,7 +193,7 @@ class RoomMemberHandler(object):
             requester, event, context, extra_users=[target], ratelimit=ratelimit
         )
 
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
 
         prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
 
@@ -601,7 +601,7 @@ class RoomMemberHandler(object):
         if prev_event is not None:
             return
 
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
         if event.membership == Membership.JOIN:
             if requester.is_guest:
                 guest_can_join = yield self._can_guest_join(prev_state_ids)
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 7881780760..7d9f5a38d9 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -116,7 +116,7 @@ class BulkPushRuleEvaluator(object):
 
     @defer.inlineCallbacks
     def _get_power_levels_and_sender_level(self, event, context):
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
         pl_event_id = prev_state_ids.get(POWER_KEY)
         if pl_event_id:
             # fastpath: if there's a power level event, that's all we need, and
@@ -304,7 +304,7 @@ class RulesForRoom(object):
 
                 push_rules_delta_state_cache_metric.inc_hits()
             else:
-                current_state_ids = yield context.get_current_state_ids(self.store)
+                current_state_ids = yield context.get_current_state_ids()
                 push_rules_delta_state_cache_metric.inc_misses()
 
             push_rules_state_size_counter.inc(len(current_state_ids))
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index 9af4e7e173..49a3251372 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -51,6 +51,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
         super(ReplicationFederationSendEventsRestServlet, self).__init__(hs)
 
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
         self.clock = hs.get_clock()
         self.federation_handler = hs.get_handlers().federation_handler
 
@@ -100,7 +101,9 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
                 EventType = event_type_from_format_version(format_ver)
                 event = EventType(event_dict, internal_metadata, rejected_reason)
 
-                context = EventContext.deserialize(self.store, event_payload["context"])
+                context = EventContext.deserialize(
+                    self.storage, event_payload["context"]
+                )
 
                 event_and_contexts.append((event, context))
 
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index 9bafd60b14..84b92f16ad 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -54,6 +54,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
 
         self.event_creation_handler = hs.get_event_creation_handler()
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
         self.clock = hs.get_clock()
 
     @staticmethod
@@ -100,7 +101,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
             event = EventType(event_dict, internal_metadata, rejected_reason)
 
             requester = Requester.deserialize(self.store, content["requester"])
-            context = EventContext.deserialize(self.store, content["context"])
+            context = EventContext.deserialize(self.storage, content["context"])
 
             ratelimit = content["ratelimit"]
             extra_users = [UserID.from_string(u) for u in content["extra_users"]]
diff --git a/synapse/storage/data_stores/main/push_rule.py b/synapse/storage/data_stores/main/push_rule.py
index 5ba13aa973..e2673ae073 100644
--- a/synapse/storage/data_stores/main/push_rule.py
+++ b/synapse/storage/data_stores/main/push_rule.py
@@ -244,7 +244,7 @@ class PushRulesWorkerStore(
             # To do this we set the state_group to a new object as object() != object()
             state_group = object()
 
-        current_state_ids = yield context.get_current_state_ids(self)
+        current_state_ids = yield context.get_current_state_ids()
         result = yield self._bulk_get_push_rules_for_room(
             event.room_id, state_group, current_state_ids, event=event
         )
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index 92e3b9c512..70ff5751b6 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -477,7 +477,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             # To do this we set the state_group to a new object as object() != object()
             state_group = object()
 
-        current_state_ids = yield context.get_current_state_ids(self)
+        current_state_ids = yield context.get_current_state_ids()
         result = yield self._get_joined_users_from_context(
             event.room_id, state_group, current_state_ids, event=event, context=context
         )
diff --git a/tests/test_state.py b/tests/test_state.py
index 176535947a..e0aae06be4 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -209,7 +209,7 @@ class StateTestCase(unittest.TestCase):
         ctx_c = context_store["C"]
         ctx_d = context_store["D"]
 
-        prev_state_ids = yield ctx_d.get_prev_state_ids(self.store)
+        prev_state_ids = yield ctx_d.get_prev_state_ids()
         self.assertEqual(2, len(prev_state_ids))
 
         self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event)
@@ -253,7 +253,7 @@ class StateTestCase(unittest.TestCase):
         ctx_c = context_store["C"]
         ctx_d = context_store["D"]
 
-        prev_state_ids = yield ctx_d.get_prev_state_ids(self.store)
+        prev_state_ids = yield ctx_d.get_prev_state_ids()
         self.assertSetEqual(
             {"START", "A", "C"}, {e_id for e_id in prev_state_ids.values()}
         )
@@ -312,7 +312,7 @@ class StateTestCase(unittest.TestCase):
         ctx_c = context_store["C"]
         ctx_e = context_store["E"]
 
-        prev_state_ids = yield ctx_e.get_prev_state_ids(self.store)
+        prev_state_ids = yield ctx_e.get_prev_state_ids()
         self.assertSetEqual(
             {"START", "A", "B", "C"}, {e for e in prev_state_ids.values()}
         )
@@ -387,7 +387,7 @@ class StateTestCase(unittest.TestCase):
         ctx_b = context_store["B"]
         ctx_d = context_store["D"]
 
-        prev_state_ids = yield ctx_d.get_prev_state_ids(self.store)
+        prev_state_ids = yield ctx_d.get_prev_state_ids()
         self.assertSetEqual(
             {"A1", "A2", "A3", "A5", "B"}, {e for e in prev_state_ids.values()}
         )
@@ -419,10 +419,10 @@ class StateTestCase(unittest.TestCase):
 
         context = yield self.state.compute_event_context(event, old_state=old_state)
 
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
         self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values())
 
-        current_state_ids = yield context.get_current_state_ids(self.store)
+        current_state_ids = yield context.get_current_state_ids()
         self.assertCountEqual(
             (e.event_id for e in old_state), current_state_ids.values()
         )
@@ -442,10 +442,10 @@ class StateTestCase(unittest.TestCase):
 
         context = yield self.state.compute_event_context(event, old_state=old_state)
 
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
         self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values())
 
-        current_state_ids = yield context.get_current_state_ids(self.store)
+        current_state_ids = yield context.get_current_state_ids()
         self.assertCountEqual(
             (e.event_id for e in old_state + [event]), current_state_ids.values()
         )
@@ -479,7 +479,7 @@ class StateTestCase(unittest.TestCase):
 
         context = yield self.state.compute_event_context(event)
 
-        current_state_ids = yield context.get_current_state_ids(self.store)
+        current_state_ids = yield context.get_current_state_ids()
 
         self.assertEqual(
             set([e.event_id for e in old_state]), set(current_state_ids.values())
@@ -511,7 +511,7 @@ class StateTestCase(unittest.TestCase):
 
         context = yield self.state.compute_event_context(event)
 
-        prev_state_ids = yield context.get_prev_state_ids(self.store)
+        prev_state_ids = yield context.get_prev_state_ids()
 
         self.assertEqual(
             set([e.event_id for e in old_state]), set(prev_state_ids.values())
@@ -552,7 +552,7 @@ class StateTestCase(unittest.TestCase):
             event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        current_state_ids = yield context.get_current_state_ids(self.store)
+        current_state_ids = yield context.get_current_state_ids()
 
         self.assertEqual(len(current_state_ids), 6)
 
@@ -594,7 +594,7 @@ class StateTestCase(unittest.TestCase):
             event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        current_state_ids = yield context.get_current_state_ids(self.store)
+        current_state_ids = yield context.get_current_state_ids()
 
         self.assertEqual(len(current_state_ids), 6)
 
@@ -649,7 +649,7 @@ class StateTestCase(unittest.TestCase):
             event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        current_state_ids = yield context.get_current_state_ids(self.store)
+        current_state_ids = yield context.get_current_state_ids()
 
         self.assertEqual(old_state_2[3].event_id, current_state_ids[("test1", "1")])
 
@@ -677,7 +677,7 @@ class StateTestCase(unittest.TestCase):
             event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        current_state_ids = yield context.get_current_state_ids(self.store)
+        current_state_ids = yield context.get_current_state_ids()
 
         self.assertEqual(old_state_1[3].event_id, current_state_ids[("test1", "1")])
 
-- 
cgit 1.4.1


From 75d8f26ac85efd3816d454927f40b6e4c3032df1 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 20 Dec 2019 10:48:24 +0000
Subject: Split state groups into a separate data store (#6296)

---
 changelog.d/6245.misc                              |   1 +
 scripts/synapse_port_db                            |   8 +-
 synapse/config/database.py                         |  10 +-
 synapse/storage/data_stores/__init__.py            |   5 +
 synapse/storage/data_stores/main/events.py         | 157 ----
 .../main/schema/delta/23/drop_state_index.sql      |  16 -
 .../main/schema/delta/30/state_stream.sql          |  33 -
 .../main/schema/delta/32/remove_indices.sql        |   1 -
 .../main/schema/delta/35/add_state_index.sql       |  17 -
 .../data_stores/main/schema/delta/35/state.sql     |  22 -
 .../main/schema/delta/35/state_dedupe.sql          |  17 -
 .../main/schema/delta/47/state_group_seq.py        |  34 -
 .../main/schema/full_schemas/54/full.sql.postgres  |  52 --
 .../main/schema/full_schemas/54/full.sql.sqlite    |   6 -
 synapse/storage/data_stores/main/state.py          | 937 +--------------------
 synapse/storage/data_stores/state/__init__.py      |  16 +
 synapse/storage/data_stores/state/bg_updates.py    | 374 ++++++++
 .../state/schema/delta/23/drop_state_index.sql     |  16 +
 .../state/schema/delta/30/state_stream.sql         |  33 +
 .../state/schema/delta/32/remove_state_indices.sql |  19 +
 .../state/schema/delta/35/add_state_index.sql      |  17 +
 .../data_stores/state/schema/delta/35/state.sql    |  22 +
 .../state/schema/delta/35/state_dedupe.sql         |  17 +
 .../state/schema/delta/47/state_group_seq.py       |  34 +
 .../state/schema/delta/56/state_group_room_idx.sql |  17 +
 .../state/schema/full_schemas/54/full.sql          |  37 +
 .../schema/full_schemas/54/sequence.sql.postgres   |  21 +
 synapse/storage/data_stores/state/store.py         | 640 ++++++++++++++
 synapse/storage/persist_events.py                  |   2 +-
 synapse/storage/prepare_database.py                |   2 +-
 synapse/storage/purge_events.py                    |   4 +-
 synapse/storage/state.py                           |  14 +-
 tests/storage/test_state.py                        |   2 +-
 tests/utils.py                                     |   2 +-
 34 files changed, 1298 insertions(+), 1307 deletions(-)
 create mode 100644 changelog.d/6245.misc
 delete mode 100644 synapse/storage/data_stores/main/schema/delta/23/drop_state_index.sql
 delete mode 100644 synapse/storage/data_stores/main/schema/delta/30/state_stream.sql
 delete mode 100644 synapse/storage/data_stores/main/schema/delta/35/add_state_index.sql
 delete mode 100644 synapse/storage/data_stores/main/schema/delta/35/state.sql
 delete mode 100644 synapse/storage/data_stores/main/schema/delta/35/state_dedupe.sql
 delete mode 100644 synapse/storage/data_stores/main/schema/delta/47/state_group_seq.py
 create mode 100644 synapse/storage/data_stores/state/__init__.py
 create mode 100644 synapse/storage/data_stores/state/bg_updates.py
 create mode 100644 synapse/storage/data_stores/state/schema/delta/23/drop_state_index.sql
 create mode 100644 synapse/storage/data_stores/state/schema/delta/30/state_stream.sql
 create mode 100644 synapse/storage/data_stores/state/schema/delta/32/remove_state_indices.sql
 create mode 100644 synapse/storage/data_stores/state/schema/delta/35/add_state_index.sql
 create mode 100644 synapse/storage/data_stores/state/schema/delta/35/state.sql
 create mode 100644 synapse/storage/data_stores/state/schema/delta/35/state_dedupe.sql
 create mode 100644 synapse/storage/data_stores/state/schema/delta/47/state_group_seq.py
 create mode 100644 synapse/storage/data_stores/state/schema/delta/56/state_group_room_idx.sql
 create mode 100644 synapse/storage/data_stores/state/schema/full_schemas/54/full.sql
 create mode 100644 synapse/storage/data_stores/state/schema/full_schemas/54/sequence.sql.postgres
 create mode 100644 synapse/storage/data_stores/state/store.py

(limited to 'changelog.d')

diff --git a/changelog.d/6245.misc b/changelog.d/6245.misc
new file mode 100644
index 0000000000..a3e6b8296e
--- /dev/null
+++ b/changelog.d/6245.misc
@@ -0,0 +1 @@
+Split out state storage into a separate data store.
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 5b5368988c..eb927f2094 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -51,11 +51,12 @@ from synapse.storage.data_stores.main.registration import (
 from synapse.storage.data_stores.main.room import RoomBackgroundUpdateStore
 from synapse.storage.data_stores.main.roommember import RoomMemberBackgroundUpdateStore
 from synapse.storage.data_stores.main.search import SearchBackgroundUpdateStore
-from synapse.storage.data_stores.main.state import StateBackgroundUpdateStore
+from synapse.storage.data_stores.main.state import MainStateBackgroundUpdateStore
 from synapse.storage.data_stores.main.stats import StatsStore
 from synapse.storage.data_stores.main.user_directory import (
     UserDirectoryBackgroundUpdateStore,
 )
+from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
 from synapse.storage.database import Database, make_conn
 from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
@@ -138,6 +139,7 @@ class Store(
     RoomMemberBackgroundUpdateStore,
     SearchBackgroundUpdateStore,
     StateBackgroundUpdateStore,
+    MainStateBackgroundUpdateStore,
     UserDirectoryBackgroundUpdateStore,
     StatsStore,
 ):
@@ -496,9 +498,7 @@ class Porter(object):
     def run(self):
         try:
             self.sqlite_store = yield self.build_db_store(
-                DatabaseConnectionConfig(
-                    "master", self.sqlite_config, data_stores=["main"]
-                )
+                DatabaseConnectionConfig("master-sqlite", self.sqlite_config)
             )
 
             # Check if all background updates are done, abort if not.
diff --git a/synapse/config/database.py b/synapse/config/database.py
index 5f2f3c7cfd..134824789c 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -34,10 +34,12 @@ class DatabaseConnectionConfig:
             module name, and `args` for the args to give to the database
             connector.
         data_stores: The list of data stores that should be provisioned on the
-            database.
+            database. Defaults to all data stores.
     """
 
-    def __init__(self, name: str, db_config: dict, data_stores: List[str]):
+    def __init__(
+        self, name: str, db_config: dict, data_stores: List[str] = ["main", "state"]
+    ):
         if db_config["name"] not in ("sqlite3", "psycopg2"):
             raise ConfigError("Unsupported database type %r" % (db_config["name"],))
 
@@ -62,9 +64,7 @@ class DatabaseConfig(Config):
         if database_config is None:
             database_config = {"name": "sqlite3", "args": {}}
 
-        self.databases = [
-            DatabaseConnectionConfig("master", database_config, data_stores=["main"])
-        ]
+        self.databases = [DatabaseConnectionConfig("master", database_config)]
 
         self.set_databasepath(config.get("database_path"))
 
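
With the new default, an unchanged single-database config now provisions both data stores; only callers that genuinely want a subset (such as the port script's source database) pass data_stores explicitly. A sketch of the two forms, assuming a Synapse checkout at this revision for the import:

    from synapse.config.database import DatabaseConnectionConfig

    db_config = {"name": "sqlite3", "args": {"database": ":memory:"}}

    # Default form: every data store ("main" and "state") lives on this database.
    master = DatabaseConnectionConfig("master", db_config)
    assert master.data_stores == ["main", "state"]

    # Explicit form: provision only the main store.
    main_only = DatabaseConnectionConfig("main-only", db_config, data_stores=["main"])
    assert main_only.data_stores == ["main"]

Note that the mutable list default is shared across calls; that is safe only as long as nothing mutates the shared list, which the constructor here does not.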
diff --git a/synapse/storage/data_stores/__init__.py b/synapse/storage/data_stores/__init__.py
index 0983e059c0..d20df5f076 100644
--- a/synapse/storage/data_stores/__init__.py
+++ b/synapse/storage/data_stores/__init__.py
@@ -15,6 +15,7 @@
 
 import logging
 
+from synapse.storage.data_stores.state import StateGroupDataStore
 from synapse.storage.database import Database, make_conn
 from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
@@ -55,6 +56,10 @@ class DataStores(object):
                     logger.info("Starting 'main' data store")
                     self.main = main_store_class(database, db_conn, hs)
 
+                if "state" in database_config.data_stores:
+                    logger.info("Starting 'state' data store")
+                    self.state = StateGroupDataStore(database, db_conn, hs)
+
                 db_conn.commit()
 
                 self.databases.append(database)
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 998bba1aad..58f35d7f56 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -1757,163 +1757,6 @@ class EventsStore(
 
         return state_groups
 
-    def purge_unreferenced_state_groups(
-        self, room_id: str, state_groups_to_delete
-    ) -> defer.Deferred:
-        """Deletes no longer referenced state groups and de-deltas any state
-        groups that reference them.
-
-        Args:
-            room_id: The room the state groups belong to (must all be in the
-                same room).
-            state_groups_to_delete (Collection[int]): Set of all state groups
-                to delete.
-        """
-
-        return self.db.runInteraction(
-            "purge_unreferenced_state_groups",
-            self._purge_unreferenced_state_groups,
-            room_id,
-            state_groups_to_delete,
-        )
-
-    def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete):
-        logger.info(
-            "[purge] found %i state groups to delete", len(state_groups_to_delete)
-        )
-
-        rows = self.db.simple_select_many_txn(
-            txn,
-            table="state_group_edges",
-            column="prev_state_group",
-            iterable=state_groups_to_delete,
-            keyvalues={},
-            retcols=("state_group",),
-        )
-
-        remaining_state_groups = set(
-            row["state_group"]
-            for row in rows
-            if row["state_group"] not in state_groups_to_delete
-        )
-
-        logger.info(
-            "[purge] de-delta-ing %i remaining state groups",
-            len(remaining_state_groups),
-        )
-
-        # Now we turn the state groups that reference to-be-deleted state
-        # groups into non-delta versions.
-        for sg in remaining_state_groups:
-            logger.info("[purge] de-delta-ing remaining state group %s", sg)
-            curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
-            curr_state = curr_state[sg]
-
-            self.db.simple_delete_txn(
-                txn, table="state_groups_state", keyvalues={"state_group": sg}
-            )
-
-            self.db.simple_delete_txn(
-                txn, table="state_group_edges", keyvalues={"state_group": sg}
-            )
-
-            self.db.simple_insert_many_txn(
-                txn,
-                table="state_groups_state",
-                values=[
-                    {
-                        "state_group": sg,
-                        "room_id": room_id,
-                        "type": key[0],
-                        "state_key": key[1],
-                        "event_id": state_id,
-                    }
-                    for key, state_id in iteritems(curr_state)
-                ],
-            )
-
-        logger.info("[purge] removing redundant state groups")
-        txn.executemany(
-            "DELETE FROM state_groups_state WHERE state_group = ?",
-            ((sg,) for sg in state_groups_to_delete),
-        )
-        txn.executemany(
-            "DELETE FROM state_groups WHERE id = ?",
-            ((sg,) for sg in state_groups_to_delete),
-        )
-
-    @defer.inlineCallbacks
-    def get_previous_state_groups(self, state_groups):
-        """Fetch the previous groups of the given state groups.
-
-        Args:
-            state_groups (Iterable[int])
-
-        Returns:
-            Deferred[dict[int, int]]: mapping from state group to previous
-            state group.
-        """
-
-        rows = yield self.db.simple_select_many_batch(
-            table="state_group_edges",
-            column="prev_state_group",
-            iterable=state_groups,
-            keyvalues={},
-            retcols=("prev_state_group", "state_group"),
-            desc="get_previous_state_groups",
-        )
-
-        return {row["state_group"]: row["prev_state_group"] for row in rows}
-
-    def purge_room_state(self, room_id, state_groups_to_delete):
-        """Deletes all record of a room from state tables
-
-        Args:
-            room_id (str):
-            state_groups_to_delete (list[int]): State groups to delete
-        """
-
-        return self.db.runInteraction(
-            "purge_room_state",
-            self._purge_room_state_txn,
-            room_id,
-            state_groups_to_delete,
-        )
-
-    def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete):
-        # first we have to delete the state groups states
-        logger.info("[purge] removing %s from state_groups_state", room_id)
-
-        self.db.simple_delete_many_txn(
-            txn,
-            table="state_groups_state",
-            column="state_group",
-            iterable=state_groups_to_delete,
-            keyvalues={},
-        )
-
-        # ... and the state group edges
-        logger.info("[purge] removing %s from state_group_edges", room_id)
-
-        self.db.simple_delete_many_txn(
-            txn,
-            table="state_group_edges",
-            column="state_group",
-            iterable=state_groups_to_delete,
-            keyvalues={},
-        )
-
-        # ... and the state groups
-        logger.info("[purge] removing %s from state_groups", room_id)
-
-        self.db.simple_delete_many_txn(
-            txn,
-            table="state_groups",
-            column="id",
-            iterable=state_groups_to_delete,
-            keyvalues={},
-        )
-
     async def is_event_after(self, event_id1, event_id2):
         """Returns True if event_id1 is after event_id2 in the stream
         """
diff --git a/synapse/storage/data_stores/main/schema/delta/23/drop_state_index.sql b/synapse/storage/data_stores/main/schema/delta/23/drop_state_index.sql
deleted file mode 100644
index ae09fa0065..0000000000
--- a/synapse/storage/data_stores/main/schema/delta/23/drop_state_index.sql
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Copyright 2015, 2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-DROP INDEX IF EXISTS state_groups_state_tuple;
diff --git a/synapse/storage/data_stores/main/schema/delta/30/state_stream.sql b/synapse/storage/data_stores/main/schema/delta/30/state_stream.sql
deleted file mode 100644
index e85699e82e..0000000000
--- a/synapse/storage/data_stores/main/schema/delta/30/state_stream.sql
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright 2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/* We used to create a table called current_state_resets, but this is no
- * longer used and is removed in delta 54.
- */
-
-/* The outlier events that have acquired a state group, typically through
- * backfill. This is tracked separately from the events table, as assigning a
- * state group changes the position of the existing event in the stream
- * ordering.
- * However since a stream_ordering is assigned in persist_event for the
- * (event, state) pair, we can use that stream_ordering to identify when
- * the new state was assigned for the event.
- */
-CREATE TABLE IF NOT EXISTS ex_outlier_stream(
-    event_stream_ordering BIGINT PRIMARY KEY NOT NULL,
-    event_id TEXT NOT NULL,
-    state_group BIGINT NOT NULL
-);
diff --git a/synapse/storage/data_stores/main/schema/delta/32/remove_indices.sql b/synapse/storage/data_stores/main/schema/delta/32/remove_indices.sql
index 4219cdd06a..2de50d408c 100644
--- a/synapse/storage/data_stores/main/schema/delta/32/remove_indices.sql
+++ b/synapse/storage/data_stores/main/schema/delta/32/remove_indices.sql
@@ -20,7 +20,6 @@ DROP INDEX IF EXISTS events_room_id; -- Prefix of events_room_stream
 DROP INDEX IF EXISTS events_order; -- Prefix of events_order_topo_stream_room
 DROP INDEX IF EXISTS events_topological_ordering; -- Prefix of events_order_topo_stream_room
 DROP INDEX IF EXISTS events_stream_ordering; -- Duplicate of PRIMARY KEY
-DROP INDEX IF EXISTS state_groups_id; -- Duplicate of PRIMARY KEY
 DROP INDEX IF EXISTS event_to_state_groups_id; -- Duplicate of PRIMARY KEY
 DROP INDEX IF EXISTS event_push_actions_room_id_event_id_user_id_profile_tag; -- Duplicate of UNIQUE CONSTRAINT
 
diff --git a/synapse/storage/data_stores/main/schema/delta/35/add_state_index.sql b/synapse/storage/data_stores/main/schema/delta/35/add_state_index.sql
deleted file mode 100644
index 33980d02f0..0000000000
--- a/synapse/storage/data_stores/main/schema/delta/35/add_state_index.sql
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Copyright 2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-INSERT into background_updates (update_name, progress_json, depends_on)
-    VALUES ('state_group_state_type_index', '{}', 'state_group_state_deduplication');
diff --git a/synapse/storage/data_stores/main/schema/delta/35/state.sql b/synapse/storage/data_stores/main/schema/delta/35/state.sql
deleted file mode 100644
index 0f1fa68a89..0000000000
--- a/synapse/storage/data_stores/main/schema/delta/35/state.sql
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright 2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-CREATE TABLE state_group_edges(
-    state_group BIGINT NOT NULL,
-    prev_state_group BIGINT NOT NULL
-);
-
-CREATE INDEX state_group_edges_idx ON state_group_edges(state_group);
-CREATE INDEX state_group_edges_prev_idx ON state_group_edges(prev_state_group);
diff --git a/synapse/storage/data_stores/main/schema/delta/35/state_dedupe.sql b/synapse/storage/data_stores/main/schema/delta/35/state_dedupe.sql
deleted file mode 100644
index 97e5067ef4..0000000000
--- a/synapse/storage/data_stores/main/schema/delta/35/state_dedupe.sql
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Copyright 2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-INSERT into background_updates (update_name, progress_json)
-    VALUES ('state_group_state_deduplication', '{}');
diff --git a/synapse/storage/data_stores/main/schema/delta/47/state_group_seq.py b/synapse/storage/data_stores/main/schema/delta/47/state_group_seq.py
deleted file mode 100644
index 9fd1ccf6f7..0000000000
--- a/synapse/storage/data_stores/main/schema/delta/47/state_group_seq.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from synapse.storage.engines import PostgresEngine
-
-
-def run_create(cur, database_engine, *args, **kwargs):
-    if isinstance(database_engine, PostgresEngine):
-        # if we already have some state groups, we want to start making new
-        # ones with a higher id.
-        cur.execute("SELECT max(id) FROM state_groups")
-        row = cur.fetchone()
-
-        if row[0] is None:
-            start_val = 1
-        else:
-            start_val = row[0] + 1
-
-        cur.execute("CREATE SEQUENCE state_group_id_seq START WITH %s", (start_val,))
-
-
-def run_upgrade(*args, **kwargs):
-    pass
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres
index 4ad2929f32..889a9a0ce4 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres
+++ b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres
@@ -975,40 +975,6 @@ CREATE TABLE state_events (
 
 
 
-CREATE TABLE state_group_edges (
-    state_group bigint NOT NULL,
-    prev_state_group bigint NOT NULL
-);
-
-
-
-CREATE SEQUENCE state_group_id_seq
-    START WITH 1
-    INCREMENT BY 1
-    NO MINVALUE
-    NO MAXVALUE
-    CACHE 1;
-
-
-
-CREATE TABLE state_groups (
-    id bigint NOT NULL,
-    room_id text NOT NULL,
-    event_id text NOT NULL
-);
-
-
-
-CREATE TABLE state_groups_state (
-    state_group bigint NOT NULL,
-    room_id text NOT NULL,
-    type text NOT NULL,
-    state_key text NOT NULL,
-    event_id text NOT NULL
-);
-
-
-
 CREATE TABLE stats_stream_pos (
     lock character(1) DEFAULT 'X'::bpchar NOT NULL,
     stream_id bigint,
@@ -1482,12 +1448,6 @@ ALTER TABLE ONLY state_events
     ADD CONSTRAINT state_events_event_id_key UNIQUE (event_id);
 
 
-
-ALTER TABLE ONLY state_groups
-    ADD CONSTRAINT state_groups_pkey PRIMARY KEY (id);
-
-
-
 ALTER TABLE ONLY stats_stream_pos
     ADD CONSTRAINT stats_stream_pos_lock_key UNIQUE (lock);
 
@@ -1928,18 +1888,6 @@ CREATE UNIQUE INDEX room_stats_room_ts ON room_stats USING btree (room_id, ts);
 
 
 
-CREATE INDEX state_group_edges_idx ON state_group_edges USING btree (state_group);
-
-
-
-CREATE INDEX state_group_edges_prev_idx ON state_group_edges USING btree (prev_state_group);
-
-
-
-CREATE INDEX state_groups_state_type_idx ON state_groups_state USING btree (state_group, type, state_key);
-
-
-
 CREATE INDEX stream_ordering_to_exterm_idx ON stream_ordering_to_exterm USING btree (stream_ordering);
 
 
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite
index bad33291e7..a0411ede7e 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite
+++ b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite
@@ -42,8 +42,6 @@ CREATE INDEX ev_edges_id ON event_edges(event_id);
 CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id);
 CREATE TABLE room_depth( room_id TEXT NOT NULL, min_depth INTEGER NOT NULL, UNIQUE (room_id) );
 CREATE INDEX room_depth_room ON room_depth(room_id);
-CREATE TABLE state_groups( id BIGINT PRIMARY KEY, room_id TEXT NOT NULL, event_id TEXT NOT NULL );
-CREATE TABLE state_groups_state( state_group BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT NOT NULL );
 CREATE TABLE event_to_state_groups( event_id TEXT NOT NULL, state_group BIGINT NOT NULL, UNIQUE (event_id) );
 CREATE TABLE local_media_repository ( media_id TEXT, media_type TEXT, media_length INTEGER, created_ts BIGINT, upload_name TEXT, user_id TEXT, quarantined_by TEXT, url_cache TEXT, last_access_ts BIGINT, UNIQUE (media_id) );
 CREATE TABLE local_media_repository_thumbnails ( media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_type TEXT, thumbnail_method TEXT, thumbnail_length INTEGER, UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type ) );
@@ -120,9 +118,6 @@ CREATE TABLE device_max_stream_id ( stream_id BIGINT NOT NULL );
 CREATE TABLE public_room_list_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, visibility BOOLEAN NOT NULL , appservice_id TEXT, network_id TEXT);
 CREATE INDEX public_room_list_stream_idx on public_room_list_stream( stream_id );
 CREATE INDEX public_room_list_stream_rm_idx on public_room_list_stream( room_id, stream_id );
-CREATE TABLE state_group_edges( state_group BIGINT NOT NULL, prev_state_group BIGINT NOT NULL );
-CREATE INDEX state_group_edges_idx ON state_group_edges(state_group);
-CREATE INDEX state_group_edges_prev_idx ON state_group_edges(prev_state_group);
 CREATE TABLE stream_ordering_to_exterm ( stream_ordering BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL );
 CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm( stream_ordering );
 CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm( room_id, stream_ordering );
@@ -254,6 +249,5 @@ CREATE INDEX user_ips_last_seen_only ON user_ips (last_seen);
 CREATE INDEX users_creation_ts ON users (creation_ts);
 CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups (state_group);
 CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache (user_id, device_id);
-CREATE INDEX state_groups_state_type_idx ON state_groups_state(state_group, type, state_key);
 CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties (user_id);
 CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips (user_id, access_token, ip);
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index dcc6b43cdf..0dc39f139c 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -17,8 +17,7 @@ import logging
 from collections import namedtuple
 from typing import Iterable, Tuple
 
-from six import iteritems, itervalues
-from six.moves import range
+from six import iteritems
 
 from twisted.internet import defer
 
@@ -29,11 +28,9 @@ from synapse.events.snapshot import EventContext
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.database import Database
-from synapse.storage.engines import PostgresEngine
 from synapse.storage.state import StateFilter
-from synapse.util.caches import get_cache_factor_for, intern_string
+from synapse.util.caches import intern_string
 from synapse.util.caches.descriptors import cached, cachedList
-from synapse.util.caches.dictionary_cache import DictionaryCache
 from synapse.util.stringutils import to_ascii
 
 logger = logging.getLogger(__name__)
@@ -55,207 +52,14 @@ class _GetStateGroupDelta(
         return len(self.delta_ids) if self.delta_ids else 0
 
 
-class StateGroupBackgroundUpdateStore(SQLBaseStore):
-    """Defines functions related to state groups needed to run the state backgroud
-    updates.
-    """
-
-    def _count_state_group_hops_txn(self, txn, state_group):
-        """Given a state group, count how many hops there are in the tree.
-
-        This is used to ensure the delta chains don't get too long.
-        """
-        if isinstance(self.database_engine, PostgresEngine):
-            sql = """
-                WITH RECURSIVE state(state_group) AS (
-                    VALUES(?::bigint)
-                    UNION ALL
-                    SELECT prev_state_group FROM state_group_edges e, state s
-                    WHERE s.state_group = e.state_group
-                )
-                SELECT count(*) FROM state;
-            """
-
-            txn.execute(sql, (state_group,))
-            row = txn.fetchone()
-            if row and row[0]:
-                return row[0]
-            else:
-                return 0
-        else:
-            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
-            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
-            next_group = state_group
-            count = 0
-
-            while next_group:
-                next_group = self.db.simple_select_one_onecol_txn(
-                    txn,
-                    table="state_group_edges",
-                    keyvalues={"state_group": next_group},
-                    retcol="prev_state_group",
-                    allow_none=True,
-                )
-                if next_group:
-                    count += 1
-
-            return count
-
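
_count_state_group_hops_txn exists to bound the length of a delta chain: store_state_group (further down this diff) only persists a new group as a delta when the chain is shorter than MAX_STATE_DELTA_HOPS. Postgres counts the chain with a recursive CTE; the SQLite fallback does the same walk one row at a time. A standalone sketch of that walk, with the edge table as a plain dict (illustrative, not the real txn API):

    def count_state_group_hops(edges, state_group):
        """Count edges between a state group and the root of its delta chain.

        edges maps state_group -> prev_state_group; roots have no entry.
        """
        count = 0
        next_group = edges.get(state_group)
        while next_group is not None:
            count += 1
            next_group = edges.get(next_group)
        return count

    # A chain 4 -> 3 -> 2 -> 1 has three hops:
    assert count_state_group_hops({4: 3, 3: 2, 2: 1}, 4) == 3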
-    def _get_state_groups_from_groups_txn(
-        self, txn, groups, state_filter=StateFilter.all()
-    ):
-        results = {group: {} for group in groups}
-
-        where_clause, where_args = state_filter.make_sql_filter_clause()
-
-        # Unless the filter clause is empty, we're going to append it after an
-        # existing where clause
-        if where_clause:
-            where_clause = " AND (%s)" % (where_clause,)
-
-        if isinstance(self.database_engine, PostgresEngine):
-            # Temporarily disable sequential scans in this transaction. This is
-            # a temporary hack until we can add the right indices in
-            txn.execute("SET LOCAL enable_seqscan=off")
-
-            # The below query walks the state_group tree so that the "state"
-            # table includes all state_groups in the tree. It then joins
-            # against `state_groups_state` to fetch the latest state.
-            # It assumes that previous state groups are always numerically
-            # smaller.
-            # The PARTITION is used to get the event_id in the greatest state
-            # group for the given type, state_key.
-            # This may return multiple rows per (type, state_key), but last_value
-            # should be the same.
-            sql = """
-                WITH RECURSIVE state(state_group) AS (
-                    VALUES(?::bigint)
-                    UNION ALL
-                    SELECT prev_state_group FROM state_group_edges e, state s
-                    WHERE s.state_group = e.state_group
-                )
-                SELECT DISTINCT type, state_key, last_value(event_id) OVER (
-                    PARTITION BY type, state_key ORDER BY state_group ASC
-                    ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
-                ) AS event_id FROM state_groups_state
-                WHERE state_group IN (
-                    SELECT state_group FROM state
-                )
-            """
-
-            for group in groups:
-                args = [group]
-                args.extend(where_args)
-
-                txn.execute(sql + where_clause, args)
-                for row in txn:
-                    typ, state_key, event_id = row
-                    key = (typ, state_key)
-                    results[group][key] = event_id
-        else:
-            max_entries_returned = state_filter.max_entries_returned()
-
-            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
-            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
-            for group in groups:
-                next_group = group
-
-                while next_group:
-                    # We did this before by getting the list of group ids, and
-                    # then passing that list to sqlite to get latest event for
-                    # each (type, state_key). However, that was terribly slow
-                    # without the right indices (which we can't add until
-                    # after we finish deduping state, which requires this func)
-                    args = [next_group]
-                    args.extend(where_args)
-
-                    txn.execute(
-                        "SELECT type, state_key, event_id FROM state_groups_state"
-                        " WHERE state_group = ? " + where_clause,
-                        args,
-                    )
-                    results[group].update(
-                        ((typ, state_key), event_id)
-                        for typ, state_key, event_id in txn
-                        if (typ, state_key) not in results[group]
-                    )
-
-                    # If the number of entries in the (type,state_key)->event_id dict
-                    # matches the number of (type,state_keys) types we were searching
-                    # for, then we must have found them all, so no need to go walk
-                    # further down the tree... UNLESS our types filter contained
-                    # wildcards (i.e. Nones) in which case we have to do an exhaustive
-                    # search
-                    if (
-                        max_entries_returned is not None
-                        and len(results[group]) == max_entries_returned
-                    ):
-                        break
-
-                    next_group = self.db.simple_select_one_onecol_txn(
-                        txn,
-                        table="state_group_edges",
-                        keyvalues={"state_group": next_group},
-                        retcol="prev_state_group",
-                        allow_none=True,
-                    )
-
-        return results
-
-
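
Both query paths above implement the same resolution rule: walk state_group_edges towards the root, and for each (type, state_key) keep the event from the nearest group that sets it (on Postgres the last_value window ordered by state_group expresses exactly this). The SQLite path additionally stops early once every concretely-filtered key has been found. A dict-based sketch of the rule (illustrative stand-ins for the tables):

    def state_for_group(edges, deltas, group, wanted=None):
        """Resolve the (type, state_key) -> event_id map for one state group.

        wanted: optional set of concrete keys; once all are found we can
        stop walking, mirroring the sqlite fast path. None means
        "everything", which forces an exhaustive walk (the wildcard case).
        """
        result = {}
        next_group = group
        while next_group is not None:
            for key, event_id in deltas.get(next_group, {}).items():
                if wanted is not None and key not in wanted:
                    continue
                # Nearer groups win, so never overwrite a key already seen.
                result.setdefault(key, event_id)
            if wanted is not None and len(result) == len(wanted):
                break
            next_group = edges.get(next_group)
        return result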
 # this inherits from EventsWorkerStore because it calls self.get_events
-class StateGroupWorkerStore(
-    EventsWorkerStore, StateGroupBackgroundUpdateStore, SQLBaseStore
-):
+class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
     """The parts of StateGroupStore that can be called from workers.
     """
 
-    STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
-    STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
-    CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
-
     def __init__(self, database: Database, db_conn, hs):
         super(StateGroupWorkerStore, self).__init__(database, db_conn, hs)
 
-        # Originally the state store used a single DictionaryCache to cache the
-        # event IDs for the state types in a given state group to avoid hammering
-        # on the state_group* tables.
-        #
-        # The point of using a DictionaryCache is that it can cache a subset
-        # of the state events for a given state group (i.e. a subset of the keys for a
-        # given dict which is an entry in the cache for a given state group ID).
-        #
-        # However, this poses problems when performing complicated queries
-        # on the store - for instance: "give me all the state for this group, but
-        # limit members to this subset of users", as DictionaryCache's API isn't
-        # rich enough to say "please cache any of these fields, apart from this subset".
-        # This is problematic when lazy loading members, which requires this behaviour,
-        # as without it the cache has no choice but to speculatively load all
-        # state events for the group, which negates the efficiency being sought.
-        #
-        # Rather than overcomplicating DictionaryCache's API, we instead split the
-        # state_group_cache into two halves - one for tracking non-member events,
-        # and the other for tracking member_events.  This means that lazy loading
-        # queries can be made in a cache-friendly manner by querying both caches
-        # separately and then merging the result.  So for the example above, you
-        # would query the members cache for a specific subset of state keys
-        # (which DictionaryCache will handle efficiently and fine) and the non-members
-        # cache for all state (which DictionaryCache will similarly handle fine)
-        # and then just merge the results together.
-        #
-        # We size the non-members cache to be smaller than the members cache as the
-        # vast majority of state in Matrix (today) is member events.
-
-        self._state_group_cache = DictionaryCache(
-            "*stateGroupCache*",
-            # TODO: this hasn't been tuned yet
-            50000 * get_cache_factor_for("stateGroupCache"),
-        )
-        self._state_group_members_cache = DictionaryCache(
-            "*stateGroupMembersCache*",
-            500000 * get_cache_factor_for("stateGroupMembersCache"),
-        )
-
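
The comment block above is the design rationale for the cache split; the mechanics are simply "split writes by event type, merge reads". A compact sketch with plain dicts in place of DictionaryCache (illustrative):

    MEMBER = "m.room.member"

    def split_for_caches(state_dict):
        """Split a (type, state_key) -> event_id map into the two cache halves."""
        members = {k: v for k, v in state_dict.items() if k[0] == MEMBER}
        non_members = {k: v for k, v in state_dict.items() if k[0] != MEMBER}
        return members, non_members

    def merge_from_caches(member_cache, non_member_cache, group):
        """Recombine the halves for one state group, as the read path does."""
        state = dict(non_member_cache.get(group, {}))
        state.update(member_cache.get(group, {}))
        return state

This is exactly the shape of _insert_into_cache and _get_state_for_groups further down this diff.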
     @defer.inlineCallbacks
     def get_room_version(self, room_id):
         """Get the room_version of a given room
@@ -431,229 +235,6 @@ class StateGroupWorkerStore(
 
         return event.content.get("canonical_alias")
 
-    @cached(max_entries=10000, iterable=True)
-    def get_state_group_delta(self, state_group):
-        """Given a state group try to return a previous group and a delta between
-        the old and the new.
-
-        Returns:
-            (prev_group, delta_ids), where both may be None.
-        """
-
-        def _get_state_group_delta_txn(txn):
-            prev_group = self.db.simple_select_one_onecol_txn(
-                txn,
-                table="state_group_edges",
-                keyvalues={"state_group": state_group},
-                retcol="prev_state_group",
-                allow_none=True,
-            )
-
-            if not prev_group:
-                return _GetStateGroupDelta(None, None)
-
-            delta_ids = self.db.simple_select_list_txn(
-                txn,
-                table="state_groups_state",
-                keyvalues={"state_group": state_group},
-                retcols=("type", "state_key", "event_id"),
-            )
-
-            return _GetStateGroupDelta(
-                prev_group,
-                {(row["type"], row["state_key"]): row["event_id"] for row in delta_ids},
-            )
-
-        return self.db.runInteraction(
-            "get_state_group_delta", _get_state_group_delta_txn
-        )
-
-    @defer.inlineCallbacks
-    def get_state_groups_ids(self, _room_id, event_ids):
-        """Get the event IDs of all the state for the state groups for the given events
-
-        Args:
-            _room_id (str): id of the room for these events
-            event_ids (iterable[str]): ids of the events
-
-        Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
-        """
-        if not event_ids:
-            return {}
-
-        event_to_groups = yield self._get_state_group_for_events(event_ids)
-
-        groups = set(itervalues(event_to_groups))
-        group_to_state = yield self._get_state_for_groups(groups)
-
-        return group_to_state
-
-    @defer.inlineCallbacks
-    def get_state_ids_for_group(self, state_group):
-        """Get the event IDs of all the state in the given state group
-
-        Args:
-            state_group (int)
-
-        Returns:
-            Deferred[dict]: Resolves to a map of (type, state_key) -> event_id
-        """
-        group_to_state = yield self._get_state_for_groups((state_group,))
-
-        return group_to_state[state_group]
-
-    @defer.inlineCallbacks
-    def get_state_groups(self, room_id, event_ids):
-        """ Get the state groups for the given list of event_ids
-
-        Returns:
-            Deferred[dict[int, list[EventBase]]]:
-                dict of state_group_id -> list of state events.
-        """
-        if not event_ids:
-            return {}
-
-        group_to_ids = yield self.get_state_groups_ids(room_id, event_ids)
-
-        state_event_map = yield self.get_events(
-            [
-                ev_id
-                for group_ids in itervalues(group_to_ids)
-                for ev_id in itervalues(group_ids)
-            ],
-            get_prev_content=False,
-        )
-
-        return {
-            group: [
-                state_event_map[v]
-                for v in itervalues(event_id_map)
-                if v in state_event_map
-            ]
-            for group, event_id_map in iteritems(group_to_ids)
-        }
-
-    @defer.inlineCallbacks
-    def _get_state_groups_from_groups(self, groups, state_filter):
-        """Returns the state groups for a given set of groups, filtering on
-        types of state events.
-
-        Args:
-            groups(list[int]): list of state group IDs to query
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-        Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
-        """
-        results = {}
-
-        chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)]
-        for chunk in chunks:
-            res = yield self.db.runInteraction(
-                "_get_state_groups_from_groups",
-                self._get_state_groups_from_groups_txn,
-                chunk,
-                state_filter,
-            )
-            results.update(res)
-
-        return results
-
-    @defer.inlineCallbacks
-    def get_state_for_events(self, event_ids, state_filter=StateFilter.all()):
-        """Given a list of event_ids and type tuples, return a list of state
-        dicts for each event.
-
-        Args:
-            event_ids (list[string])
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            deferred: A dict of (event_id) -> (type, state_key) -> [state_events]
-        """
-        event_to_groups = yield self._get_state_group_for_events(event_ids)
-
-        groups = set(itervalues(event_to_groups))
-        group_to_state = yield self._get_state_for_groups(groups, state_filter)
-
-        state_event_map = yield self.get_events(
-            [ev_id for sd in itervalues(group_to_state) for ev_id in itervalues(sd)],
-            get_prev_content=False,
-        )
-
-        event_to_state = {
-            event_id: {
-                k: state_event_map[v]
-                for k, v in iteritems(group_to_state[group])
-                if v in state_event_map
-            }
-            for event_id, group in iteritems(event_to_groups)
-        }
-
-        return {event: event_to_state[event] for event in event_ids}
-
-    @defer.inlineCallbacks
-    def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()):
-        """
-        Get the state dicts corresponding to a list of events, containing the event_ids
-        of the state events (as opposed to the events themselves)
-
-        Args:
-            event_ids(list(str)): events whose state should be returned
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            A deferred dict from event_id -> (type, state_key) -> event_id
-        """
-        event_to_groups = yield self._get_state_group_for_events(event_ids)
-
-        groups = set(itervalues(event_to_groups))
-        group_to_state = yield self._get_state_for_groups(groups, state_filter)
-
-        event_to_state = {
-            event_id: group_to_state[group]
-            for event_id, group in iteritems(event_to_groups)
-        }
-
-        return {event: event_to_state[event] for event in event_ids}
-
-    @defer.inlineCallbacks
-    def get_state_for_event(self, event_id, state_filter=StateFilter.all()):
-        """
-        Get the state dict corresponding to a particular event
-
-        Args:
-            event_id(str): event whose state should be returned
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            A deferred dict from (type, state_key) -> state_event
-        """
-        state_map = yield self.get_state_for_events([event_id], state_filter)
-        return state_map[event_id]
-
-    @defer.inlineCallbacks
-    def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()):
-        """
-        Get the state dict corresponding to a particular event
-
-        Args:
-            event_id(str): event whose state should be returned
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            A deferred dict from (type, state_key) -> state_event
-        """
-        state_map = yield self.get_state_ids_for_events([event_id], state_filter)
-        return state_map[event_id]
-
     @cached(max_entries=50000)
     def _get_state_group_for_event(self, event_id):
         return self.db.simple_select_one_onecol(
@@ -684,329 +265,6 @@ class StateGroupWorkerStore(
 
         return {row["event_id"]: row["state_group"] for row in rows}
 
-    def _get_state_for_group_using_cache(self, cache, group, state_filter):
-        """Checks if group is in cache. See `_get_state_for_groups`
-
-        Args:
-            cache(DictionaryCache): the state group cache to use
-            group(int): The state group to lookup
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns a 2-tuple (`state_dict`, `got_all`).
-        `got_all` is a bool indicating if we successfully retrieved all
-        requested state from the cache; if False we need to query the DB for the
-        missing state.
-        """
-        is_all, known_absent, state_dict_ids = cache.get(group)
-
-        if is_all or state_filter.is_full():
-            # Either we have everything or want everything, either way
-            # `is_all` tells us whether we've gotten everything.
-            return state_filter.filter_state(state_dict_ids), is_all
-
-        # tracks whether any of our requested types are missing from the cache
-        missing_types = False
-
-        if state_filter.has_wildcards():
-            # We don't know if we fetched all the state keys for the types in
-            # the filter that are wildcards, so we have to assume that we may
-            # have missed some.
-            missing_types = True
-        else:
-            # There aren't any wildcards, so `concrete_types()` returns the
-            # complete list of event types we're wanting.
-            for key in state_filter.concrete_types():
-                if key not in state_dict_ids and key not in known_absent:
-                    missing_types = True
-                    break
-
-        return state_filter.filter_state(state_dict_ids), not missing_types
-
-    @defer.inlineCallbacks
-    def _get_state_for_groups(self, groups, state_filter=StateFilter.all()):
-        """Gets the state at each of a list of state groups, optionally
-        filtering by type/state_key
-
-        Args:
-            groups (iterable[int]): list of state groups for which we want
-                to get the state.
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-        Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
-        """
-
-        member_filter, non_member_filter = state_filter.get_member_split()
-
-        # Now we look them up in the member and non-member caches
-        (
-            non_member_state,
-            incomplete_groups_nm,
-        ) = yield self._get_state_for_groups_using_cache(
-            groups, self._state_group_cache, state_filter=non_member_filter
-        )
-
-        (
-            member_state,
-            incomplete_groups_m,
-        ) = yield self._get_state_for_groups_using_cache(
-            groups, self._state_group_members_cache, state_filter=member_filter
-        )
-
-        state = dict(non_member_state)
-        for group in groups:
-            state[group].update(member_state[group])
-
-        # Now fetch any missing groups from the database
-
-        incomplete_groups = incomplete_groups_m | incomplete_groups_nm
-
-        if not incomplete_groups:
-            return state
-
-        cache_sequence_nm = self._state_group_cache.sequence
-        cache_sequence_m = self._state_group_members_cache.sequence
-
-        # Help the cache hit ratio by expanding the filter a bit
-        db_state_filter = state_filter.return_expanded()
-
-        group_to_state_dict = yield self._get_state_groups_from_groups(
-            list(incomplete_groups), state_filter=db_state_filter
-        )
-
-        # Now lets update the caches
-        self._insert_into_cache(
-            group_to_state_dict,
-            db_state_filter,
-            cache_seq_num_members=cache_sequence_m,
-            cache_seq_num_non_members=cache_sequence_nm,
-        )
-
-        # And finally update the result dict, by filtering out any extra
-        # stuff we pulled out of the database.
-        for group, group_state_dict in iteritems(group_to_state_dict):
-            # We just replace any existing entries, as we will have loaded
-            # everything we need from the database anyway.
-            state[group] = state_filter.filter_state(group_state_dict)
-
-        return state
-
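
As a caller-side sketch of the API this block (now relocated to the state data store) exposes: a component that only needs membership state passes a narrow StateFilter, and the cache/database split above does the rest. StateFilter.from_types is the real constructor in synapse.storage.state; the store object and the use of the private helper are illustrative:

    from twisted.internet import defer

    from synapse.storage.state import StateFilter

    @defer.inlineCallbacks
    def members_for_groups(store, groups):
        # A (type, None) entry is a wildcard over state_key, so the filter
        # is not concrete and the walk cannot terminate early.
        state_filter = StateFilter.from_types([("m.room.member", None)])
        group_to_state = yield store._get_state_for_groups(groups, state_filter)
        return group_to_state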
-    def _get_state_for_groups_using_cache(self, groups, cache, state_filter):
-        """Gets the state at each of a list of state groups, optionally
-        filtering by type/state_key, querying from a specific cache.
-
-        Args:
-            groups (iterable[int]): list of state groups for which we want
-                to get the state.
-            cache (DictionaryCache): the cache of group ids to state dicts which
-                we will pass through - either the normal state cache or the specific
-                members state cache.
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            tuple[dict[int, dict[tuple[str, str], str]], set[int]]: Tuple of
-            dict of state_group_id -> (dict of (type, state_key) -> event id)
-            of entries in the cache, and the state group ids either missing
-            from the cache or incomplete.
-        """
-        results = {}
-        incomplete_groups = set()
-        for group in set(groups):
-            state_dict_ids, got_all = self._get_state_for_group_using_cache(
-                cache, group, state_filter
-            )
-            results[group] = state_dict_ids
-
-            if not got_all:
-                incomplete_groups.add(group)
-
-        return results, incomplete_groups
-
-    def _insert_into_cache(
-        self,
-        group_to_state_dict,
-        state_filter,
-        cache_seq_num_members,
-        cache_seq_num_non_members,
-    ):
-        """Inserts results from querying the database into the relevant cache.
-
-        Args:
-            group_to_state_dict (dict): The new entries pulled from database.
-                Map from state group to state dict
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-            cache_seq_num_members (int): Sequence number of member cache since
-                last lookup in cache
-            cache_seq_num_non_members (int): Sequence number of non-member cache
-                since last lookup in cache
-        """
-
-        # We need to work out which types we've fetched from the DB for the
-        # member vs non-member caches. This should be as accurate as possible,
-        # but can be an underestimate (e.g. when we have wildcards)
-
-        member_filter, non_member_filter = state_filter.get_member_split()
-        if member_filter.is_full():
-            # We fetched all member events
-            member_types = None
-        else:
-            # `concrete_types()` will only return a subset when there are wild
-            # cards in the filter, but that's fine.
-            member_types = member_filter.concrete_types()
-
-        if non_member_filter.is_full():
-            # We fetched all non member events
-            non_member_types = None
-        else:
-            non_member_types = non_member_filter.concrete_types()
-
-        for group, group_state_dict in iteritems(group_to_state_dict):
-            state_dict_members = {}
-            state_dict_non_members = {}
-
-            for k, v in iteritems(group_state_dict):
-                if k[0] == EventTypes.Member:
-                    state_dict_members[k] = v
-                else:
-                    state_dict_non_members[k] = v
-
-            self._state_group_members_cache.update(
-                cache_seq_num_members,
-                key=group,
-                value=state_dict_members,
-                fetched_keys=member_types,
-            )
-
-            self._state_group_cache.update(
-                cache_seq_num_non_members,
-                key=group,
-                value=state_dict_non_members,
-                fetched_keys=non_member_types,
-            )
-
-    def store_state_group(
-        self, event_id, room_id, prev_group, delta_ids, current_state_ids
-    ):
-        """Store a new set of state, returning a newly assigned state group.
-
-        Args:
-            event_id (str): The event ID for which the state was calculated
-            room_id (str)
-            prev_group (int|None): A previous state group for the room, optional.
-            delta_ids (dict|None): The delta between state at `prev_group` and
-                `current_state_ids`, if `prev_group` was given. Same format as
-                `current_state_ids`.
-            current_state_ids (dict): The state to store. Map of (type, state_key)
-                to event_id.
-
-        Returns:
-            Deferred[int]: The state group ID
-        """
-
-        def _store_state_group_txn(txn):
-            if current_state_ids is None:
-                # AFAIK, this can never happen
-                raise Exception("current_state_ids cannot be None")
-
-            state_group = self.database_engine.get_next_state_group_id(txn)
-
-            self.db.simple_insert_txn(
-                txn,
-                table="state_groups",
-                values={"id": state_group, "room_id": room_id, "event_id": event_id},
-            )
-
-            # We persist as a delta if we can, while also ensuring the chain
-            # of deltas isn't tooo long, as otherwise read performance degrades.
-            if prev_group:
-                is_in_db = self.db.simple_select_one_onecol_txn(
-                    txn,
-                    table="state_groups",
-                    keyvalues={"id": prev_group},
-                    retcol="id",
-                    allow_none=True,
-                )
-                if not is_in_db:
-                    raise Exception(
-                        "Trying to persist state with unpersisted prev_group: %r"
-                        % (prev_group,)
-                    )
-
-                potential_hops = self._count_state_group_hops_txn(txn, prev_group)
-            if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
-                self.db.simple_insert_txn(
-                    txn,
-                    table="state_group_edges",
-                    values={"state_group": state_group, "prev_state_group": prev_group},
-                )
-
-                self.db.simple_insert_many_txn(
-                    txn,
-                    table="state_groups_state",
-                    values=[
-                        {
-                            "state_group": state_group,
-                            "room_id": room_id,
-                            "type": key[0],
-                            "state_key": key[1],
-                            "event_id": state_id,
-                        }
-                        for key, state_id in iteritems(delta_ids)
-                    ],
-                )
-            else:
-                self.db.simple_insert_many_txn(
-                    txn,
-                    table="state_groups_state",
-                    values=[
-                        {
-                            "state_group": state_group,
-                            "room_id": room_id,
-                            "type": key[0],
-                            "state_key": key[1],
-                            "event_id": state_id,
-                        }
-                        for key, state_id in iteritems(current_state_ids)
-                    ],
-                )
-
-            # Prefill the state group caches with this group.
-            # It's fine to use the sequence like this as the state group map
-            # is immutable. (If the map wasn't immutable then this prefill could
-            # race with another update)
-
-            current_member_state_ids = {
-                s: ev
-                for (s, ev) in iteritems(current_state_ids)
-                if s[0] == EventTypes.Member
-            }
-            txn.call_after(
-                self._state_group_members_cache.update,
-                self._state_group_members_cache.sequence,
-                key=state_group,
-                value=dict(current_member_state_ids),
-            )
-
-            current_non_member_state_ids = {
-                s: ev
-                for (s, ev) in iteritems(current_state_ids)
-                if s[0] != EventTypes.Member
-            }
-            txn.call_after(
-                self._state_group_cache.update,
-                self._state_group_cache.sequence,
-                key=state_group,
-                value=dict(current_non_member_state_ids),
-            )
-
-            return state_group
-
-        return self.db.runInteraction("store_state_group", _store_state_group_txn)
-
     @defer.inlineCallbacks
     def get_referenced_state_groups(self, state_groups):
         """Check if the state groups are referenced by events.
@@ -1031,22 +289,14 @@ class StateGroupWorkerStore(
         return set(row["state_group"] for row in rows)
 
 
-class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
+class MainStateBackgroundUpdateStore(SQLBaseStore):
 
-    STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
-    STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
     CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
     EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"
 
     def __init__(self, database: Database, db_conn, hs):
-        super(StateBackgroundUpdateStore, self).__init__(database, db_conn, hs)
-        self.db.updates.register_background_update_handler(
-            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
-            self._background_deduplicate_state,
-        )
-        self.db.updates.register_background_update_handler(
-            self.STATE_GROUP_INDEX_UPDATE_NAME, self._background_index_state
-        )
+        super(MainStateBackgroundUpdateStore, self).__init__(database, db_conn, hs)
+
         self.db.updates.register_background_index_update(
             self.CURRENT_STATE_INDEX_UPDATE_NAME,
             index_name="current_state_events_member_index",
@@ -1061,181 +311,8 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
             columns=["state_group"],
         )
 
-    @defer.inlineCallbacks
-    def _background_deduplicate_state(self, progress, batch_size):
-        """This background update will slowly deduplicate state by reencoding
-        them as deltas.
-        """
-        last_state_group = progress.get("last_state_group", 0)
-        rows_inserted = progress.get("rows_inserted", 0)
-        max_group = progress.get("max_group", None)
-
-        BATCH_SIZE_SCALE_FACTOR = 100
 
-        batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))
-
-        if max_group is None:
-            rows = yield self.db.execute(
-                "_background_deduplicate_state",
-                None,
-                "SELECT coalesce(max(id), 0) FROM state_groups",
-            )
-            max_group = rows[0][0]
-
-        def reindex_txn(txn):
-            new_last_state_group = last_state_group
-            for count in range(batch_size):
-                txn.execute(
-                    "SELECT id, room_id FROM state_groups"
-                    " WHERE ? < id AND id <= ?"
-                    " ORDER BY id ASC"
-                    " LIMIT 1",
-                    (new_last_state_group, max_group),
-                )
-                row = txn.fetchone()
-                if row:
-                    state_group, room_id = row
-
-                if not row or not state_group:
-                    return True, count
-
-                txn.execute(
-                    "SELECT state_group FROM state_group_edges"
-                    " WHERE state_group = ?",
-                    (state_group,),
-                )
-
-                # If we reach a point where we've already started inserting
-                # edges we should stop.
-                if txn.fetchall():
-                    return True, count
-
-                txn.execute(
-                    "SELECT coalesce(max(id), 0) FROM state_groups"
-                    " WHERE id < ? AND room_id = ?",
-                    (state_group, room_id),
-                )
-                (prev_group,) = txn.fetchone()
-                new_last_state_group = state_group
-
-                if prev_group:
-                    potential_hops = self._count_state_group_hops_txn(txn, prev_group)
-                    if potential_hops >= MAX_STATE_DELTA_HOPS:
-                        # We want to ensure chains are at most this long,#
-                        # otherwise read performance degrades.
-                        continue
-
-                    prev_state = self._get_state_groups_from_groups_txn(
-                        txn, [prev_group]
-                    )
-                    prev_state = prev_state[prev_group]
-
-                    curr_state = self._get_state_groups_from_groups_txn(
-                        txn, [state_group]
-                    )
-                    curr_state = curr_state[state_group]
-
-                    if not set(prev_state.keys()) - set(curr_state.keys()):
-                        # We can only do a delta if the current has a strict super set
-                        # of keys
-
-                        delta_state = {
-                            key: value
-                            for key, value in iteritems(curr_state)
-                            if prev_state.get(key, None) != value
-                        }
-
-                        self.db.simple_delete_txn(
-                            txn,
-                            table="state_group_edges",
-                            keyvalues={"state_group": state_group},
-                        )
-
-                        self.db.simple_insert_txn(
-                            txn,
-                            table="state_group_edges",
-                            values={
-                                "state_group": state_group,
-                                "prev_state_group": prev_group,
-                            },
-                        )
-
-                        self.db.simple_delete_txn(
-                            txn,
-                            table="state_groups_state",
-                            keyvalues={"state_group": state_group},
-                        )
-
-                        self.db.simple_insert_many_txn(
-                            txn,
-                            table="state_groups_state",
-                            values=[
-                                {
-                                    "state_group": state_group,
-                                    "room_id": room_id,
-                                    "type": key[0],
-                                    "state_key": key[1],
-                                    "event_id": state_id,
-                                }
-                                for key, state_id in iteritems(delta_state)
-                            ],
-                        )
-
-            progress = {
-                "last_state_group": state_group,
-                "rows_inserted": rows_inserted + batch_size,
-                "max_group": max_group,
-            }
-
-            self.db.updates._background_update_progress_txn(
-                txn, self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, progress
-            )
-
-            return False, batch_size
-
-        finished, result = yield self.db.runInteraction(
-            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, reindex_txn
-        )
-
-        if finished:
-            yield self.db.updates._end_background_update(
-                self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME
-            )
-
-        return result * BATCH_SIZE_SCALE_FACTOR
-
-    @defer.inlineCallbacks
-    def _background_index_state(self, progress, batch_size):
-        def reindex_txn(conn):
-            conn.rollback()
-            if isinstance(self.database_engine, PostgresEngine):
-                # postgres insists on autocommit for the index
-                conn.set_session(autocommit=True)
-                try:
-                    txn = conn.cursor()
-                    txn.execute(
-                        "CREATE INDEX CONCURRENTLY state_groups_state_type_idx"
-                        " ON state_groups_state(state_group, type, state_key)"
-                    )
-                    txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
-                finally:
-                    conn.set_session(autocommit=False)
-            else:
-                txn = conn.cursor()
-                txn.execute(
-                    "CREATE INDEX state_groups_state_type_idx"
-                    " ON state_groups_state(state_group, type, state_key)"
-                )
-                txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
-
-        yield self.db.runWithConnection(reindex_txn)
-
-        yield self.db.updates._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)
-
-        return 1
-
-
-class StateStore(StateGroupWorkerStore, StateBackgroundUpdateStore):
+class StateStore(StateGroupWorkerStore, MainStateBackgroundUpdateStore):
     """ Keeps track of the state at a given event.
 
     This is done by the concept of `state groups`. Every event is assigned
diff --git a/synapse/storage/data_stores/state/__init__.py b/synapse/storage/data_stores/state/__init__.py
new file mode 100644
index 0000000000..86e09f6229
--- /dev/null
+++ b/synapse/storage/data_stores/state/__init__.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.storage.data_stores.state.store import StateGroupDataStore  # noqa: F401
diff --git a/synapse/storage/data_stores/state/bg_updates.py b/synapse/storage/data_stores/state/bg_updates.py
new file mode 100644
index 0000000000..e8edaf9f7b
--- /dev/null
+++ b/synapse/storage/data_stores/state/bg_updates.py
@@ -0,0 +1,374 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from six import iteritems
+
+from twisted.internet import defer
+
+from synapse.storage._base import SQLBaseStore
+from synapse.storage.database import Database
+from synapse.storage.engines import PostgresEngine
+from synapse.storage.state import StateFilter
+
+logger = logging.getLogger(__name__)
+
+
+MAX_STATE_DELTA_HOPS = 100
+
+
+class StateGroupBackgroundUpdateStore(SQLBaseStore):
+    """Defines functions related to state groups needed to run the state backgroud
+    updates.
+    """
+
+    def _count_state_group_hops_txn(self, txn, state_group):
+        """Given a state group, count how many hops there are in the tree.
+
+        This is used to ensure the delta chains don't get too long.
+        """
+        if isinstance(self.database_engine, PostgresEngine):
+            sql = """
+                WITH RECURSIVE state(state_group) AS (
+                    VALUES(?::bigint)
+                    UNION ALL
+                    SELECT prev_state_group FROM state_group_edges e, state s
+                    WHERE s.state_group = e.state_group
+                )
+                SELECT count(*) FROM state;
+            """
+
+            txn.execute(sql, (state_group,))
+            row = txn.fetchone()
+            if row and row[0]:
+                return row[0]
+            else:
+                return 0
+        else:
+            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
+            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
+            next_group = state_group
+            count = 0
+
+            while next_group:
+                next_group = self.db.simple_select_one_onecol_txn(
+                    txn,
+                    table="state_group_edges",
+                    keyvalues={"state_group": next_group},
+                    retcol="prev_state_group",
+                    allow_none=True,
+                )
+                if next_group:
+                    count += 1
+
+            return count
+
+    def _get_state_groups_from_groups_txn(
+        self, txn, groups, state_filter=StateFilter.all()
+    ):
+        results = {group: {} for group in groups}
+
+        where_clause, where_args = state_filter.make_sql_filter_clause()
+
+        # Unless the filter clause is empty, we're going to append it after an
+        # existing where clause
+        if where_clause:
+            where_clause = " AND (%s)" % (where_clause,)
+
+        if isinstance(self.database_engine, PostgresEngine):
+            # Temporarily disable sequential scans in this transaction. This is
+            # a temporary hack until we can add the right indices.
+            txn.execute("SET LOCAL enable_seqscan=off")
+
+            # The below query walks the state_group tree so that the "state"
+            # table includes all state_groups in the tree. It then joins
+            # against `state_groups_state` to fetch the latest state.
+            # It assumes that previous state groups always have numerically
+            # smaller ids.
+            # The PARTITION is used to get the event_id in the greatest state
+            # group for the given type, state_key.
+            # This may return multiple rows per (type, state_key), but last_value
+            # should be the same.
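+            # For example (hypothetical data): if ancestor group 101 maps
+            # ("m.room.name", "") to $A and descendant group 103 overrides it
+            # with $B, last_value over the frame ordered by state_group ASC
+            # picks $B, the entry from the greatest state group.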
+            sql = """
+                WITH RECURSIVE state(state_group) AS (
+                    VALUES(?::bigint)
+                    UNION ALL
+                    SELECT prev_state_group FROM state_group_edges e, state s
+                    WHERE s.state_group = e.state_group
+                )
+                SELECT DISTINCT type, state_key, last_value(event_id) OVER (
+                    PARTITION BY type, state_key ORDER BY state_group ASC
+                    ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
+                ) AS event_id FROM state_groups_state
+                WHERE state_group IN (
+                    SELECT state_group FROM state
+                )
+            """
+
+            for group in groups:
+                args = [group]
+                args.extend(where_args)
+
+                txn.execute(sql + where_clause, args)
+                for row in txn:
+                    typ, state_key, event_id = row
+                    key = (typ, state_key)
+                    results[group][key] = event_id
+        else:
+            max_entries_returned = state_filter.max_entries_returned()
+
+            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
+            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
+            for group in groups:
+                next_group = group
+
+                while next_group:
+                    # We did this before by getting the list of group ids, and
+                    # then passing that list to sqlite to get latest event for
+                    # each (type, state_key). However, that was terribly slow
+                    # without the right indices (which we can't add until
+                    # after we finish deduping state, which requires this func)
+                    args = [next_group]
+                    args.extend(where_args)
+
+                    txn.execute(
+                        "SELECT type, state_key, event_id FROM state_groups_state"
+                        " WHERE state_group = ? " + where_clause,
+                        args,
+                    )
+                    results[group].update(
+                        ((typ, state_key), event_id)
+                        for typ, state_key, event_id in txn
+                        if (typ, state_key) not in results[group]
+                    )
+
+                    # If the number of entries in the (type,state_key)->event_id dict
+                    # matches the number of (type, state_key) pairs we were searching
+                    # for, then we must have found them all, so there is no need to
+                    # walk further down the tree... UNLESS our types filter contained
+                    # wildcards (i.e. Nones), in which case we have to do an
+                    # exhaustive search.
+                    if (
+                        max_entries_returned is not None
+                        and len(results[group]) == max_entries_returned
+                    ):
+                        break
+
+                    next_group = self.db.simple_select_one_onecol_txn(
+                        txn,
+                        table="state_group_edges",
+                        keyvalues={"state_group": next_group},
+                        retcol="prev_state_group",
+                        allow_none=True,
+                    )
+
+        return results
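+
+        # (Hypothetical usage: _get_state_groups_from_groups_txn(txn, [103])
+        # returns {103: {(type, state_key): event_id, ...}} with the ancestor
+        # chain already collapsed into the latest value per key.)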
+
+
+class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
+
+    STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
+    STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
+    STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx"
+
+    def __init__(self, database: Database, db_conn, hs):
+        super(StateBackgroundUpdateStore, self).__init__(database, db_conn, hs)
+        self.db.updates.register_background_update_handler(
+            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
+            self._background_deduplicate_state,
+        )
+        self.db.updates.register_background_update_handler(
+            self.STATE_GROUP_INDEX_UPDATE_NAME, self._background_index_state
+        )
+        self.db.updates.register_background_index_update(
+            self.STATE_GROUPS_ROOM_INDEX_UPDATE_NAME,
+            index_name="state_groups_room_id_idx",
+            table="state_groups",
+            columns=["room_id"],
+        )
+
+    @defer.inlineCallbacks
+    def _background_deduplicate_state(self, progress, batch_size):
+        """This background update will slowly deduplicate state by reencoding
+        them as deltas.
+        """
+        last_state_group = progress.get("last_state_group", 0)
+        rows_inserted = progress.get("rows_inserted", 0)
+        max_group = progress.get("max_group", None)
+
+        BATCH_SIZE_SCALE_FACTOR = 100
+
+        batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))
+
+        if max_group is None:
+            rows = yield self.db.execute(
+                "_background_deduplicate_state",
+                None,
+                "SELECT coalesce(max(id), 0) FROM state_groups",
+            )
+            max_group = rows[0][0]
+
+        def reindex_txn(txn):
+            new_last_state_group = last_state_group
+            for count in range(batch_size):
+                txn.execute(
+                    "SELECT id, room_id FROM state_groups"
+                    " WHERE ? < id AND id <= ?"
+                    " ORDER BY id ASC"
+                    " LIMIT 1",
+                    (new_last_state_group, max_group),
+                )
+                row = txn.fetchone()
+                if row:
+                    state_group, room_id = row
+
+                if not row or not state_group:
+                    return True, count
+
+                txn.execute(
+                    "SELECT state_group FROM state_group_edges"
+                    " WHERE state_group = ?",
+                    (state_group,),
+                )
+
+                # If we reach a point where we've already started inserting
+                # edges we should stop.
+                if txn.fetchall():
+                    return True, count
+
+                txn.execute(
+                    "SELECT coalesce(max(id), 0) FROM state_groups"
+                    " WHERE id < ? AND room_id = ?",
+                    (state_group, room_id),
+                )
+                (prev_group,) = txn.fetchone()
+                new_last_state_group = state_group
+
+                if prev_group:
+                    potential_hops = self._count_state_group_hops_txn(txn, prev_group)
+                    if potential_hops >= MAX_STATE_DELTA_HOPS:
+                        # We want to ensure chains are at most this long,
+                        # otherwise read performance degrades.
+                        continue
+
+                    prev_state = self._get_state_groups_from_groups_txn(
+                        txn, [prev_group]
+                    )
+                    prev_state = prev_state[prev_group]
+
+                    curr_state = self._get_state_groups_from_groups_txn(
+                        txn, [state_group]
+                    )
+                    curr_state = curr_state[state_group]
+
+                    if not set(prev_state.keys()) - set(curr_state.keys()):
+                        # We can only do a delta if the current state's keys
+                        # are a superset of the previous group's keys.
+
+                        delta_state = {
+                            key: value
+                            for key, value in iteritems(curr_state)
+                            if prev_state.get(key, None) != value
+                        }
+
+                        self.db.simple_delete_txn(
+                            txn,
+                            table="state_group_edges",
+                            keyvalues={"state_group": state_group},
+                        )
+
+                        self.db.simple_insert_txn(
+                            txn,
+                            table="state_group_edges",
+                            values={
+                                "state_group": state_group,
+                                "prev_state_group": prev_group,
+                            },
+                        )
+
+                        self.db.simple_delete_txn(
+                            txn,
+                            table="state_groups_state",
+                            keyvalues={"state_group": state_group},
+                        )
+
+                        self.db.simple_insert_many_txn(
+                            txn,
+                            table="state_groups_state",
+                            values=[
+                                {
+                                    "state_group": state_group,
+                                    "room_id": room_id,
+                                    "type": key[0],
+                                    "state_key": key[1],
+                                    "event_id": state_id,
+                                }
+                                for key, state_id in iteritems(delta_state)
+                            ],
+                        )
+
+            progress = {
+                "last_state_group": state_group,
+                "rows_inserted": rows_inserted + batch_size,
+                "max_group": max_group,
+            }
+
+            self.db.updates._background_update_progress_txn(
+                txn, self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, progress
+            )
+
+            return False, batch_size
+
+        finished, result = yield self.db.runInteraction(
+            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, reindex_txn
+        )
+
+        if finished:
+            yield self.db.updates._end_background_update(
+                self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME
+            )
+
+        return result * BATCH_SIZE_SCALE_FACTOR
+
+    @defer.inlineCallbacks
+    def _background_index_state(self, progress, batch_size):
+        def reindex_txn(conn):
+            conn.rollback()
+            if isinstance(self.database_engine, PostgresEngine):
+                # postgres insists on autocommit for the index
+                conn.set_session(autocommit=True)
+                try:
+                    txn = conn.cursor()
+                    txn.execute(
+                        "CREATE INDEX CONCURRENTLY state_groups_state_type_idx"
+                        " ON state_groups_state(state_group, type, state_key)"
+                    )
+                    txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
+                finally:
+                    conn.set_session(autocommit=False)
+            else:
+                txn = conn.cursor()
+                txn.execute(
+                    "CREATE INDEX state_groups_state_type_idx"
+                    " ON state_groups_state(state_group, type, state_key)"
+                )
+                txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
+
+        yield self.db.runWithConnection(reindex_txn)
+
+        yield self.db.updates._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)
+
+        return 1
diff --git a/synapse/storage/data_stores/state/schema/delta/23/drop_state_index.sql b/synapse/storage/data_stores/state/schema/delta/23/drop_state_index.sql
new file mode 100644
index 0000000000..ae09fa0065
--- /dev/null
+++ b/synapse/storage/data_stores/state/schema/delta/23/drop_state_index.sql
@@ -0,0 +1,16 @@
+/* Copyright 2015, 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+DROP INDEX IF EXISTS state_groups_state_tuple;
diff --git a/synapse/storage/data_stores/state/schema/delta/30/state_stream.sql b/synapse/storage/data_stores/state/schema/delta/30/state_stream.sql
new file mode 100644
index 0000000000..e85699e82e
--- /dev/null
+++ b/synapse/storage/data_stores/state/schema/delta/30/state_stream.sql
@@ -0,0 +1,33 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/* We used to create a table called current_state_resets, but this is no
+ * longer used and is removed in delta 54.
+ */
+
+/* The outlier events that have acquired a state group, typically through
+ * backfill. This is tracked separately to the events table, as assigning a
+ * state group changes the position of the existing event in the stream
+ * ordering.
+ * However since a stream_ordering is assigned in persist_event for the
+ * (event, state) pair, we can use that stream_ordering to identify when
+ * the new state was assigned for the event.
+ */
+CREATE TABLE IF NOT EXISTS ex_outlier_stream(
+    event_stream_ordering BIGINT PRIMARY KEY NOT NULL,
+    event_id TEXT NOT NULL,
+    state_group BIGINT NOT NULL
+);
diff --git a/synapse/storage/data_stores/state/schema/delta/32/remove_state_indices.sql b/synapse/storage/data_stores/state/schema/delta/32/remove_state_indices.sql
new file mode 100644
index 0000000000..1450313bfa
--- /dev/null
+++ b/synapse/storage/data_stores/state/schema/delta/32/remove_state_indices.sql
@@ -0,0 +1,19 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- The following indices are redundant, other indices are equivalent or
+-- supersets
+DROP INDEX IF EXISTS state_groups_id; -- Duplicate of PRIMARY KEY
diff --git a/synapse/storage/data_stores/state/schema/delta/35/add_state_index.sql b/synapse/storage/data_stores/state/schema/delta/35/add_state_index.sql
new file mode 100644
index 0000000000..33980d02f0
--- /dev/null
+++ b/synapse/storage/data_stores/state/schema/delta/35/add_state_index.sql
@@ -0,0 +1,17 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT into background_updates (update_name, progress_json, depends_on)
+    VALUES ('state_group_state_type_index', '{}', 'state_group_state_deduplication');
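+
+-- (The depends_on column above defers this index build until the
+-- state_group_state_deduplication update has completed.)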
diff --git a/synapse/storage/data_stores/state/schema/delta/35/state.sql b/synapse/storage/data_stores/state/schema/delta/35/state.sql
new file mode 100644
index 0000000000..0f1fa68a89
--- /dev/null
+++ b/synapse/storage/data_stores/state/schema/delta/35/state.sql
@@ -0,0 +1,22 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE state_group_edges(
+    state_group BIGINT NOT NULL,
+    prev_state_group BIGINT NOT NULL
+);
+
+CREATE INDEX state_group_edges_idx ON state_group_edges(state_group);
+CREATE INDEX state_group_edges_prev_idx ON state_group_edges(prev_state_group);
diff --git a/synapse/storage/data_stores/state/schema/delta/35/state_dedupe.sql b/synapse/storage/data_stores/state/schema/delta/35/state_dedupe.sql
new file mode 100644
index 0000000000..97e5067ef4
--- /dev/null
+++ b/synapse/storage/data_stores/state/schema/delta/35/state_dedupe.sql
@@ -0,0 +1,17 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT into background_updates (update_name, progress_json)
+    VALUES ('state_group_state_deduplication', '{}');
diff --git a/synapse/storage/data_stores/state/schema/delta/47/state_group_seq.py b/synapse/storage/data_stores/state/schema/delta/47/state_group_seq.py
new file mode 100644
index 0000000000..9fd1ccf6f7
--- /dev/null
+++ b/synapse/storage/data_stores/state/schema/delta/47/state_group_seq.py
@@ -0,0 +1,34 @@
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.storage.engines import PostgresEngine
+
+
+def run_create(cur, database_engine, *args, **kwargs):
+    if isinstance(database_engine, PostgresEngine):
+        # if we already have some state groups, we want to start making new
+        # ones with a higher id.
+        cur.execute("SELECT max(id) FROM state_groups")
+        row = cur.fetchone()
+
+        if row[0] is None:
+            start_val = 1
+        else:
+            start_val = row[0] + 1
+
+        cur.execute("CREATE SEQUENCE state_group_id_seq START WITH %s", (start_val,))
+
+
+def run_upgrade(*args, **kwargs):
+    pass
diff --git a/synapse/storage/data_stores/state/schema/delta/56/state_group_room_idx.sql b/synapse/storage/data_stores/state/schema/delta/56/state_group_room_idx.sql
new file mode 100644
index 0000000000..7916ef18b2
--- /dev/null
+++ b/synapse/storage/data_stores/state/schema/delta/56/state_group_room_idx.sql
@@ -0,0 +1,17 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+    ('state_groups_room_id_idx', '{}');
diff --git a/synapse/storage/data_stores/state/schema/full_schemas/54/full.sql b/synapse/storage/data_stores/state/schema/full_schemas/54/full.sql
new file mode 100644
index 0000000000..35f97d6b3d
--- /dev/null
+++ b/synapse/storage/data_stores/state/schema/full_schemas/54/full.sql
@@ -0,0 +1,37 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE state_groups (
+    id BIGINT PRIMARY KEY,
+    room_id TEXT NOT NULL,
+    event_id TEXT NOT NULL
+);
+
+CREATE TABLE state_groups_state (
+    state_group BIGINT NOT NULL,
+    room_id TEXT NOT NULL,
+    type TEXT NOT NULL,
+    state_key TEXT NOT NULL,
+    event_id TEXT NOT NULL
+);
+
+CREATE TABLE state_group_edges (
+    state_group BIGINT NOT NULL,
+    prev_state_group BIGINT NOT NULL
+);
+
+CREATE INDEX state_group_edges_idx ON state_group_edges (state_group);
+CREATE INDEX state_group_edges_prev_idx ON state_group_edges (prev_state_group);
+CREATE INDEX state_groups_state_type_idx ON state_groups_state (state_group, type, state_key);
diff --git a/synapse/storage/data_stores/state/schema/full_schemas/54/sequence.sql.postgres b/synapse/storage/data_stores/state/schema/full_schemas/54/sequence.sql.postgres
new file mode 100644
index 0000000000..fcd926c9fb
--- /dev/null
+++ b/synapse/storage/data_stores/state/schema/full_schemas/54/sequence.sql.postgres
@@ -0,0 +1,21 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE SEQUENCE state_group_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
diff --git a/synapse/storage/data_stores/state/store.py b/synapse/storage/data_stores/state/store.py
new file mode 100644
index 0000000000..d53695f238
--- /dev/null
+++ b/synapse/storage/data_stores/state/store.py
@@ -0,0 +1,640 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from collections import namedtuple
+
+from six import iteritems
+from six.moves import range
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes
+from synapse.storage._base import SQLBaseStore
+from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
+from synapse.storage.database import Database
+from synapse.storage.state import StateFilter
+from synapse.util.caches import get_cache_factor_for
+from synapse.util.caches.descriptors import cached
+from synapse.util.caches.dictionary_cache import DictionaryCache
+
+logger = logging.getLogger(__name__)
+
+
+MAX_STATE_DELTA_HOPS = 100
+
+
+class _GetStateGroupDelta(
+    namedtuple("_GetStateGroupDelta", ("prev_group", "delta_ids"))
+):
+    """Return type of get_state_group_delta that implements __len__, which lets
+    us use the iterable flag when caching.
+    """
+
+    __slots__ = []
+
+    def __len__(self):
+        return len(self.delta_ids) if self.delta_ids else 0
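+
+    # (With iterable=True, the cache sizes an entry by len(), so a delta of
+    # 50 ids counts as 50 towards max_entries rather than as a single entry.)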
+
+
+class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
+    """A data store for fetching/storing state groups.
+    """
+
+    def __init__(self, database: Database, db_conn, hs):
+        super(StateGroupDataStore, self).__init__(database, db_conn, hs)
+
+        # Originally the state store used a single DictionaryCache to cache the
+        # event IDs for the state types in a given state group to avoid hammering
+        # on the state_group* tables.
+        #
+        # The point of using a DictionaryCache is that it can cache a subset
+        # of the state events for a given state group (i.e. a subset of the keys for a
+        # given dict which is an entry in the cache for a given state group ID).
+        #
+        # However, this poses problems when performing complicated queries
+        # on the store - for instance: "give me all the state for this group, but
+        # limit members to this subset of users", as DictionaryCache's API isn't
+        # rich enough to say "please cache any of these fields, apart from this subset".
+        # This is problematic when lazy loading members, which requires this behaviour,
+        # as without it the cache has no choice but to speculatively load all
+        # state events for the group, which negates the efficiency being sought.
+        #
+        # Rather than overcomplicating DictionaryCache's API, we instead split the
+        # state_group_cache into two halves - one for tracking non-member events,
+        # and the other for tracking member_events.  This means that lazy loading
+        # queries can be made in a cache-friendly manner by querying both caches
+        # separately and then merging the result.  So for the example above, you
+        # would query the members cache for a specific subset of state keys
+        # (which DictionaryCache will handle efficiently and fine) and the non-members
+        # cache for all state (which DictionaryCache will similarly handle fine)
+        # and then just merge the results together.
+        #
+        # We size the non-members cache to be smaller than the members cache as the
+        # vast majority of state in Matrix (today) is member events.
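+        #
+        # Rough sketch of the intended lookup pattern (names assumed):
+        #
+        #   members     = members_cache.get(group)   # only the requested users
+        #   non_members = state_cache.get(group)     # all non-member state
+        #   state       = {**non_members, **members}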
+
+        self._state_group_cache = DictionaryCache(
+            "*stateGroupCache*",
+            # TODO: this hasn't been tuned yet
+            50000 * get_cache_factor_for("stateGroupCache"),
+        )
+        self._state_group_members_cache = DictionaryCache(
+            "*stateGroupMembersCache*",
+            500000 * get_cache_factor_for("stateGroupMembersCache"),
+        )
+
+    @cached(max_entries=10000, iterable=True)
+    def get_state_group_delta(self, state_group):
+        """Given a state group try to return a previous group and a delta between
+        the old and the new.
+
+        Returns:
+            (prev_group, delta_ids), where both may be None.
+        """
+
+        def _get_state_group_delta_txn(txn):
+            prev_group = self.db.simple_select_one_onecol_txn(
+                txn,
+                table="state_group_edges",
+                keyvalues={"state_group": state_group},
+                retcol="prev_state_group",
+                allow_none=True,
+            )
+
+            if not prev_group:
+                return _GetStateGroupDelta(None, None)
+
+            delta_ids = self.db.simple_select_list_txn(
+                txn,
+                table="state_groups_state",
+                keyvalues={"state_group": state_group},
+                retcols=("type", "state_key", "event_id"),
+            )
+
+            return _GetStateGroupDelta(
+                prev_group,
+                {(row["type"], row["state_key"]): row["event_id"] for row in delta_ids},
+            )
+
+        return self.db.runInteraction(
+            "get_state_group_delta", _get_state_group_delta_txn
+        )
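+
+    # (Illustratively: for a group stored as a delta this yields its parent
+    # group and the changed (type, state_key) -> event_id pairs; for a root
+    # group it yields _GetStateGroupDelta(None, None).)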
+
+    @defer.inlineCallbacks
+    def _get_state_groups_from_groups(self, groups, state_filter):
+        """Returns the state groups for a given set of groups, filtering on
+        types of state events.
+
+        Args:
+            groups(list[int]): list of state group IDs to query
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.
+        Returns:
+            Deferred[dict[int, dict[tuple[str, str], str]]]:
+                dict of state_group_id -> (dict of (type, state_key) -> event id)
+        """
+        results = {}
+
+        chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)]
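+        # (Chunking keeps each transaction bounded: e.g. 250 groups become
+        # chunks of 100, 100 and 50, each fetched in its own interaction.)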
+        for chunk in chunks:
+            res = yield self.db.runInteraction(
+                "_get_state_groups_from_groups",
+                self._get_state_groups_from_groups_txn,
+                chunk,
+                state_filter,
+            )
+            results.update(res)
+
+        return results
+
+    def _get_state_for_group_using_cache(self, cache, group, state_filter):
+        """Checks if group is in cache. See `_get_state_for_groups`
+
+        Args:
+            cache(DictionaryCache): the state group cache to use
+            group(int): The state group to lookup
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.
+
+        Returns 2-tuple (`state_dict`, `got_all`).
+        `got_all` is a bool indicating whether we successfully retrieved all of
+        the requested state from the cache; if False we need to query the DB
+        for the missing state.
+        """
+        is_all, known_absent, state_dict_ids = cache.get(group)
+
+        if is_all or state_filter.is_full():
+            # Either we have everything or want everything, either way
+            # `is_all` tells us whether we've gotten everything.
+            return state_filter.filter_state(state_dict_ids), is_all
+
+        # tracks whether any of our requested types are missing from the cache
+        missing_types = False
+
+        if state_filter.has_wildcards():
+            # We don't know if we fetched all the state keys for the types in
+            # the filter that are wildcards, so we have to assume that we may
+            # have missed some.
+            missing_types = True
+        else:
+            # There aren't any wild cards, so `concrete_types()` returns the
+            # complete list of event types we're wanting.
+            for key in state_filter.concrete_types():
+                if key not in state_dict_ids and key not in known_absent:
+                    missing_types = True
+                    break
+
+        return state_filter.filter_state(state_dict_ids), not missing_types
+
+    @defer.inlineCallbacks
+    def _get_state_for_groups(self, groups, state_filter=StateFilter.all()):
+        """Gets the state at each of a list of state groups, optionally
+        filtering by type/state_key
+
+        Args:
+            groups (iterable[int]): list of state groups for which we want
+                to get the state.
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.
+        Returns:
+            Deferred[dict[int, dict[tuple[str, str], str]]]:
+                dict of state_group_id -> (dict of (type, state_key) -> event id)
+        """
+
+        member_filter, non_member_filter = state_filter.get_member_split()
+
+        # Now we look them up in the member and non-member caches
+        (
+            non_member_state,
+            incomplete_groups_nm,
+        ) = yield self._get_state_for_groups_using_cache(
+            groups, self._state_group_cache, state_filter=non_member_filter
+        )
+
+        (
+            member_state,
+            incomplete_groups_m,
+        ) = yield self._get_state_for_groups_using_cache(
+            groups, self._state_group_members_cache, state_filter=member_filter
+        )
+
+        state = dict(non_member_state)
+        for group in groups:
+            state[group].update(member_state[group])
+
+        # Now fetch any missing groups from the database
+
+        incomplete_groups = incomplete_groups_m | incomplete_groups_nm
+
+        if not incomplete_groups:
+            return state
+
+        cache_sequence_nm = self._state_group_cache.sequence
+        cache_sequence_m = self._state_group_members_cache.sequence
+
+        # Help the cache hit ratio by expanding the filter a bit
+        db_state_filter = state_filter.return_expanded()
+
+        group_to_state_dict = yield self._get_state_groups_from_groups(
+            list(incomplete_groups), state_filter=db_state_filter
+        )
+
+        # Now lets update the caches
+        self._insert_into_cache(
+            group_to_state_dict,
+            db_state_filter,
+            cache_seq_num_members=cache_sequence_m,
+            cache_seq_num_non_members=cache_sequence_nm,
+        )
+
+        # And finally update the result dict, by filtering out any extra
+        # stuff we pulled out of the database.
+        for group, group_state_dict in iteritems(group_to_state_dict):
+            # We just replace any existing entries, as we will have loaded
+            # everything we need from the database anyway.
+            state[group] = state_filter.filter_state(group_state_dict)
+
+        return state
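+
+    # (A caller might, hypothetically, do:
+    #     state = yield self._get_state_for_groups([group], state_filter)
+    # and then read state[group][(EventTypes.Name, "")] to get the
+    # m.room.name event id.)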
+
+    def _get_state_for_groups_using_cache(self, groups, cache, state_filter):
+        """Gets the state at each of a list of state groups, optionally
+        filtering by type/state_key, querying from a specific cache.
+
+        Args:
+            groups (iterable[int]): list of state groups for which we want
+                to get the state.
+            cache (DictionaryCache): the cache of group ids to state dicts which
+                we will pass through - either the normal state cache or the specific
+                members state cache.
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.
+
+        Returns:
+            tuple[dict[int, dict[tuple[str, str], str]], set[int]]: Tuple of
+            dict of state_group_id -> (dict of (type, state_key) -> event id)
+            of entries in the cache, and the state group ids either missing
+            from the cache or incomplete.
+        """
+        results = {}
+        incomplete_groups = set()
+        for group in set(groups):
+            state_dict_ids, got_all = self._get_state_for_group_using_cache(
+                cache, group, state_filter
+            )
+            results[group] = state_dict_ids
+
+            if not got_all:
+                incomplete_groups.add(group)
+
+        return results, incomplete_groups
+
+    def _insert_into_cache(
+        self,
+        group_to_state_dict,
+        state_filter,
+        cache_seq_num_members,
+        cache_seq_num_non_members,
+    ):
+        """Inserts results from querying the database into the relevant cache.
+
+        Args:
+            group_to_state_dict (dict): The new entries pulled from database.
+                Map from state group to state dict
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.
+            cache_seq_num_members (int): Sequence number of the member cache
+                as of the last lookup
+            cache_seq_num_non_members (int): Sequence number of the non-member
+                cache as of the last lookup
+        """
+
+        # We need to work out which types we've fetched from the DB for the
+        # member vs non-member caches. This should be as accurate as possible,
+        # but can be an underestimate (e.g. when we have wild cards)
+
+        member_filter, non_member_filter = state_filter.get_member_split()
+        if member_filter.is_full():
+            # We fetched all member events
+            member_types = None
+        else:
+            # `concrete_types()` will only return a subset when there are wild
+            # cards in the filter, but that's fine.
+            member_types = member_filter.concrete_types()
+
+        if non_member_filter.is_full():
+            # We fetched all non member events
+            non_member_types = None
+        else:
+            non_member_types = non_member_filter.concrete_types()
+
+        for group, group_state_dict in iteritems(group_to_state_dict):
+            state_dict_members = {}
+            state_dict_non_members = {}
+
+            for k, v in iteritems(group_state_dict):
+                if k[0] == EventTypes.Member:
+                    state_dict_members[k] = v
+                else:
+                    state_dict_non_members[k] = v
+
+            self._state_group_members_cache.update(
+                cache_seq_num_members,
+                key=group,
+                value=state_dict_members,
+                fetched_keys=member_types,
+            )
+
+            self._state_group_cache.update(
+                cache_seq_num_non_members,
+                key=group,
+                value=state_dict_non_members,
+                fetched_keys=non_member_types,
+            )
+
+    def store_state_group(
+        self, event_id, room_id, prev_group, delta_ids, current_state_ids
+    ):
+        """Store a new set of state, returning a newly assigned state group.
+
+        Args:
+            event_id (str): The event ID for which the state was calculated
+            room_id (str)
+            prev_group (int|None): A previous state group for the room, optional.
+            delta_ids (dict|None): The delta between state at `prev_group` and
+                `current_state_ids`, if `prev_group` was given. Same format as
+                `current_state_ids`.
+            current_state_ids (dict): The state to store. Map of (type, state_key)
+                to event_id.
+
+        Returns:
+            Deferred[int]: The state group ID
+        """
+
+        def _store_state_group_txn(txn):
+            if current_state_ids is None:
+                # AFAIK, this can never happen
+                raise Exception("current_state_ids cannot be None")
+
+            state_group = self.database_engine.get_next_state_group_id(txn)
+
+            self.db.simple_insert_txn(
+                txn,
+                table="state_groups",
+                values={"id": state_group, "room_id": room_id, "event_id": event_id},
+            )
+
+            # We persist as a delta if we can, while also ensuring the chain
+            # of deltas isn't too long, as otherwise read performance degrades.
+            if prev_group:
+                is_in_db = self.db.simple_select_one_onecol_txn(
+                    txn,
+                    table="state_groups",
+                    keyvalues={"id": prev_group},
+                    retcol="id",
+                    allow_none=True,
+                )
+                if not is_in_db:
+                    raise Exception(
+                        "Trying to persist state with unpersisted prev_group: %r"
+                        % (prev_group,)
+                    )
+
+                potential_hops = self._count_state_group_hops_txn(txn, prev_group)
+            if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
+                self.db.simple_insert_txn(
+                    txn,
+                    table="state_group_edges",
+                    values={"state_group": state_group, "prev_state_group": prev_group},
+                )
+
+                self.db.simple_insert_many_txn(
+                    txn,
+                    table="state_groups_state",
+                    values=[
+                        {
+                            "state_group": state_group,
+                            "room_id": room_id,
+                            "type": key[0],
+                            "state_key": key[1],
+                            "event_id": state_id,
+                        }
+                        for key, state_id in iteritems(delta_ids)
+                    ],
+                )
+            else:
+                self.db.simple_insert_many_txn(
+                    txn,
+                    table="state_groups_state",
+                    values=[
+                        {
+                            "state_group": state_group,
+                            "room_id": room_id,
+                            "type": key[0],
+                            "state_key": key[1],
+                            "event_id": state_id,
+                        }
+                        for key, state_id in iteritems(current_state_ids)
+                    ],
+                )
+
+            # Prefill the state group caches with this group.
+            # It's fine to use the sequence like this as the state group map
+            # is immutable. (If the map wasn't immutable then this prefill could
+            # race with another update)
+
+            current_member_state_ids = {
+                s: ev
+                for (s, ev) in iteritems(current_state_ids)
+                if s[0] == EventTypes.Member
+            }
+            txn.call_after(
+                self._state_group_members_cache.update,
+                self._state_group_members_cache.sequence,
+                key=state_group,
+                value=dict(current_member_state_ids),
+            )
+
+            current_non_member_state_ids = {
+                s: ev
+                for (s, ev) in iteritems(current_state_ids)
+                if s[0] != EventTypes.Member
+            }
+            txn.call_after(
+                self._state_group_cache.update,
+                self._state_group_cache.sequence,
+                key=state_group,
+                value=dict(current_non_member_state_ids),
+            )
+
+            return state_group
+
+        return self.db.runInteraction("store_state_group", _store_state_group_txn)
+
+    def purge_unreferenced_state_groups(
+        self, room_id: str, state_groups_to_delete
+    ) -> defer.Deferred:
+        """Deletes no longer referenced state groups and de-deltas any state
+        groups that reference them.
+
+        Args:
+            room_id: The room the state groups belong to (must all be in the
+                same room).
+            state_groups_to_delete (Collection[int]): Set of all state groups
+                to delete.
+        """
+
+        return self.db.runInteraction(
+            "purge_unreferenced_state_groups",
+            self._purge_unreferenced_state_groups,
+            room_id,
+            state_groups_to_delete,
+        )
+
+    def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete):
+        logger.info(
+            "[purge] found %i state groups to delete", len(state_groups_to_delete)
+        )
+
+        rows = self.db.simple_select_many_txn(
+            txn,
+            table="state_group_edges",
+            column="prev_state_group",
+            iterable=state_groups_to_delete,
+            keyvalues={},
+            retcols=("state_group",),
+        )
+
+        remaining_state_groups = set(
+            row["state_group"]
+            for row in rows
+            if row["state_group"] not in state_groups_to_delete
+        )
+
+        logger.info(
+            "[purge] de-delta-ing %i remaining state groups",
+            len(remaining_state_groups),
+        )
+
+        # Now we turn the state groups that reference to-be-deleted state
+        # groups into non-delta versions.
+        for sg in remaining_state_groups:
+            logger.info("[purge] de-delta-ing remaining state group %s", sg)
+            curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
+            curr_state = curr_state[sg]
+
+            self.db.simple_delete_txn(
+                txn, table="state_groups_state", keyvalues={"state_group": sg}
+            )
+
+            self.db.simple_delete_txn(
+                txn, table="state_group_edges", keyvalues={"state_group": sg}
+            )
+
+            self.db.simple_insert_many_txn(
+                txn,
+                table="state_groups_state",
+                values=[
+                    {
+                        "state_group": sg,
+                        "room_id": room_id,
+                        "type": key[0],
+                        "state_key": key[1],
+                        "event_id": state_id,
+                    }
+                    for key, state_id in iteritems(curr_state)
+                ],
+            )
+
+        logger.info("[purge] removing redundant state groups")
+        txn.executemany(
+            "DELETE FROM state_groups_state WHERE state_group = ?",
+            ((sg,) for sg in state_groups_to_delete),
+        )
+        txn.executemany(
+            "DELETE FROM state_groups WHERE id = ?",
+            ((sg,) for sg in state_groups_to_delete),
+        )
+
+    @defer.inlineCallbacks
+    def get_previous_state_groups(self, state_groups):
+        """Fetch the previous groups of the given state groups.
+
+        Args:
+            state_groups (Iterable[int])
+
+        Returns:
+            Deferred[dict[int, int]]: mapping from state group to previous
+            state group.
+        """
+
+        rows = yield self.db.simple_select_many_batch(
+            table="state_group_edges",
+            column="prev_state_group",
+            iterable=state_groups,
+            keyvalues={},
+            retcols=("prev_state_group", "state_group"),
+            desc="get_previous_state_groups",
+        )
+
+        return {row["state_group"]: row["prev_state_group"] for row in rows}
+
+    def purge_room_state(self, room_id, state_groups_to_delete):
+        """Deletes all record of a room from state tables
+
+        Args:
+            room_id (str):
+            state_groups_to_delete (list[int]): State groups to delete
+        """
+
+        return self.db.runInteraction(
+            "purge_room_state",
+            self._purge_room_state_txn,
+            room_id,
+            state_groups_to_delete,
+        )
+
+    def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete):
+        # first we have to delete the state groups' state
+        logger.info("[purge] removing %s from state_groups_state", room_id)
+
+        self.db.simple_delete_many_txn(
+            txn,
+            table="state_groups_state",
+            column="state_group",
+            iterable=state_groups_to_delete,
+            keyvalues={},
+        )
+
+        # ... and the state group edges
+        logger.info("[purge] removing %s from state_group_edges", room_id)
+
+        self.db.simple_delete_many_txn(
+            txn,
+            table="state_group_edges",
+            column="state_group",
+            iterable=state_groups_to_delete,
+            keyvalues={},
+        )
+
+        # ... and the state groups
+        logger.info("[purge] removing %s from state_groups", room_id)
+
+        self.db.simple_delete_many_txn(
+            txn,
+            table="state_groups",
+            column="id",
+            iterable=state_groups_to_delete,
+            keyvalues={},
+        )
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index fa03ca9ff7..1ed44925fc 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -183,7 +183,7 @@ class EventsPersistenceStorage(object):
         # so we use separate variables here even though they point to the same
         # store for now.
         self.main_store = stores.main
-        self.state_store = stores.main
+        self.state_store = stores.state
 
         self._clock = hs.get_clock()
         self.is_mine_id = hs.is_mine_id
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 403848ad03..e70026b80a 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -42,7 +42,7 @@ class UpgradeDatabaseException(PrepareDatabaseException):
     pass
 
 
-def prepare_database(db_conn, database_engine, config, data_stores=["main"]):
+def prepare_database(db_conn, database_engine, config, data_stores=["main", "state"]):
     """Prepares a database for usage. Will either create all necessary tables
     or upgrade from an older schema version.
 
diff --git a/synapse/storage/purge_events.py b/synapse/storage/purge_events.py
index a368182034..d6a7bd7834 100644
--- a/synapse/storage/purge_events.py
+++ b/synapse/storage/purge_events.py
@@ -58,7 +58,7 @@ class PurgeEventsStorage(object):
 
         sg_to_delete = yield self._find_unreferenced_groups(state_groups)
 
-        yield self.stores.main.purge_unreferenced_state_groups(room_id, sg_to_delete)
+        yield self.stores.state.purge_unreferenced_state_groups(room_id, sg_to_delete)
 
     @defer.inlineCallbacks
     def _find_unreferenced_groups(self, state_groups):
@@ -102,7 +102,7 @@ class PurgeEventsStorage(object):
             # groups that are referenced.
             current_search -= referenced
 
-            edges = yield self.stores.main.get_previous_state_groups(current_search)
+            edges = yield self.stores.state.get_previous_state_groups(current_search)
 
             prevs = set(edges.values())
             # We don't bother re-handling groups we've already seen
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 3735846899..cbeb586014 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -342,7 +342,7 @@ class StateGroupStorage(object):
                 (prev_group, delta_ids)
         """
 
-        return self.stores.main.get_state_group_delta(state_group)
+        return self.stores.state.get_state_group_delta(state_group)
 
     @defer.inlineCallbacks
     def get_state_groups_ids(self, _room_id, event_ids):
@@ -362,7 +362,7 @@ class StateGroupStorage(object):
         event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)
 
         groups = set(itervalues(event_to_groups))
-        group_to_state = yield self.stores.main._get_state_for_groups(groups)
+        group_to_state = yield self.stores.state._get_state_for_groups(groups)
 
         return group_to_state
 
@@ -423,7 +423,7 @@ class StateGroupStorage(object):
                 dict of state_group_id -> (dict of (type, state_key) -> event id)
         """
 
-        return self.stores.main._get_state_groups_from_groups(groups, state_filter)
+        return self.stores.state._get_state_groups_from_groups(groups, state_filter)
 
     @defer.inlineCallbacks
     def get_state_for_events(self, event_ids, state_filter=StateFilter.all()):
@@ -439,7 +439,7 @@ class StateGroupStorage(object):
         event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)
 
         groups = set(itervalues(event_to_groups))
-        group_to_state = yield self.stores.main._get_state_for_groups(
+        group_to_state = yield self.stores.state._get_state_for_groups(
             groups, state_filter
         )
 
@@ -476,7 +476,7 @@ class StateGroupStorage(object):
         event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)
 
         groups = set(itervalues(event_to_groups))
-        group_to_state = yield self.stores.main._get_state_for_groups(
+        group_to_state = yield self.stores.state._get_state_for_groups(
             groups, state_filter
         )
 
@@ -532,7 +532,7 @@ class StateGroupStorage(object):
             Deferred[dict[int, dict[tuple[str, str], str]]]:
                 dict of state_group_id -> (dict of (type, state_key) -> event id)
         """
-        return self.stores.main._get_state_for_groups(groups, state_filter)
+        return self.stores.state._get_state_for_groups(groups, state_filter)
 
     def store_state_group(
         self, event_id, room_id, prev_group, delta_ids, current_state_ids
@@ -552,6 +552,6 @@ class StateGroupStorage(object):
         Returns:
             Deferred[int]: The state group ID
         """
-        return self.stores.main.store_state_group(
+        return self.stores.state.store_state_group(
             event_id, room_id, prev_group, delta_ids, current_state_ids
         )
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
index 43200654f1..d6ecf102f8 100644
--- a/tests/storage/test_state.py
+++ b/tests/storage/test_state.py
@@ -35,7 +35,7 @@ class StateStoreTestCase(tests.unittest.TestCase):
 
         self.store = hs.get_datastore()
         self.storage = hs.get_storage()
-        self.state_datastore = self.store
+        self.state_datastore = self.storage.state.stores.state
         self.event_builder_factory = hs.get_event_builder_factory()
         self.event_creation_handler = hs.get_event_creation_handler()
 
diff --git a/tests/utils.py b/tests/utils.py
index 9f5bf40b4b..e2e9cafd79 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -231,7 +231,7 @@ def setup_test_homeserver(
             "args": {"database": ":memory:", "cp_min": 1, "cp_max": 1},
         }
 
-    database = DatabaseConnectionConfig("master", database_config, ["main"])
+    database = DatabaseConnectionConfig("master", database_config)
     config.database.databases = [database]
 
     db_engine = create_engine(database.config)
-- 
cgit 1.4.1


From 32779b59fab0b4a64198f8fe617d7c495aeb1ede Mon Sep 17 00:00:00 2001
From: Aaron Raimist 
Date: Thu, 2 Jan 2020 04:28:20 -0600
Subject: Reword sections of federate.md that explained delegation at the time
 of the Synapse 1.0 transition (#6601)

* Remove sections of federate.md explaining delegation at the time of the Synapse 1.0 transition

Signed-off-by: Aaron Raimist 

* Add changelog

Signed-off-by: Aaron Raimist 
---
 changelog.d/6601.doc |  1 +
 docs/federate.md     | 24 +++---------------------
 2 files changed, 4 insertions(+), 21 deletions(-)
 create mode 100644 changelog.d/6601.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6601.doc b/changelog.d/6601.doc
new file mode 100644
index 0000000000..08c5b3d215
--- /dev/null
+++ b/changelog.d/6601.doc
@@ -0,0 +1 @@
+Reword sections of federate.md that explained delegation at the time of the Synapse 1.0 transition.
\ No newline at end of file
diff --git a/docs/federate.md b/docs/federate.md
index 193e2d2dfe..f9f17fcca5 100644
--- a/docs/federate.md
+++ b/docs/federate.md
@@ -66,10 +66,6 @@ therefore cannot gain access to the necessary certificate. With .well-known,
 federation servers will check for a valid TLS certificate for the delegated
 hostname (in our example: ``synapse.example.com``).
 
-.well-known support first appeared in Synapse v0.99.0. To federate with older
-servers you may need to additionally configure SRV delegation. Alternatively,
-encourage the server admin in question to upgrade :).
-
 ### DNS SRV delegation
 
 To use this delegation method, you need to have write access to your
@@ -111,29 +107,15 @@ giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is
 it would automatically generate a valid TLS certificate for you via Let's Encrypt
 and no SRV record or .well-known URI would be needed.
 
-This is the common case, although you can add an SRV record or
-`.well-known/matrix/server` URI for completeness if you wish.
-
 **However**, if your server does not listen on port 8448, or if your `server_name`
 does not point to the host that your homeserver runs on, you will need to let
 other servers know how to find it. The way to do this is via .well-known or an
 SRV record.
 
-#### I have created a .well-known URI. Do I still need an SRV record?
-
-As of Synapse 0.99, Synapse will first check for the existence of a .well-known
-URI and follow any delegation it suggests. It will only then check for the
-existence of an SRV record.
-
-That means that the SRV record will often be redundant. However, you should
-remember that there may still be older versions of Synapse in the federation
-which do not understand .well-known URIs, so if you removed your SRV record
-you would no longer be able to federate with them.
+#### I have created a .well-known URI. Do I also need an SRV record?
 
-It is therefore best to leave the SRV record in place for now. Synapse 0.34 and
-earlier will follow the SRV record (and not care about the invalid
-certificate). Synapse 0.99 and later will follow the .well-known URI, with the
-correct certificate chain.
+No. You can use either `.well-known` delegation or use an SRV record for delegation. You
+do not need to use both to delegate to the same location.
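+
+For reference, a minimal `/.well-known/matrix/server` response delegating
+to `synapse.example.com` would look like this (the hostname is illustrative):
+
+```json
+{
+    "m.server": "synapse.example.com:443"
+}
+```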
 
 #### Can I manage my own certificates rather than having Synapse renew certificates itself?
 
-- 
cgit 1.4.1


From 0495097a7f6c6f5230972e0a5f32c0c9c42ef61b Mon Sep 17 00:00:00 2001
From: ewaf1 <59422220+ewaf1@users.noreply.github.com>
Date: Thu, 2 Jan 2020 11:41:30 +0100
Subject: Added the section 'Configuration' in /docs/turn-howto.md (#6614)

Put the second part of the "source installation" section into a new section, because it also applies to Debian packages
---
 changelog.d/6614.doc | 1 +
 docs/turn-howto.md   | 2 ++
 2 files changed, 3 insertions(+)
 create mode 100644 changelog.d/6614.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6614.doc b/changelog.d/6614.doc
new file mode 100644
index 0000000000..38b962b062
--- /dev/null
+++ b/changelog.d/6614.doc
@@ -0,0 +1 @@
+Add a 'Configuration' section to /docs/turn-howto.md.
diff --git a/docs/turn-howto.md b/docs/turn-howto.md
index 4a983621e5..1bd3943f54 100644
--- a/docs/turn-howto.md
+++ b/docs/turn-howto.md
@@ -39,6 +39,8 @@ The TURN daemon `coturn` is available from a variety of sources such as native p
         make
         make install
 
+### Configuration
+
 1.  Create or edit the config file in `/etc/turnserver.conf`. The relevant
     lines, with example values, are:
 
-- 
cgit 1.4.1


From 9c59bc59c8daee1c2e98008e9929ac67cf08ff0b Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Fri, 3 Jan 2020 13:00:32 +0100
Subject: Changelog

---
 changelog.d/6621.doc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6621.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6621.doc b/changelog.d/6621.doc
new file mode 100644
index 0000000000..6722ccfda3
--- /dev/null
+++ b/changelog.d/6621.doc
@@ -0,0 +1 @@
+Fix a typo in the configuration example for purge jobs in the sample configuration file.
-- 
cgit 1.4.1


From 9279a2c4e4c1c63e87d43fc9f6ad2c495bf47e67 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Fri, 3 Jan 2020 13:43:55 +0100
Subject: Add complete documentation of the message retention policies
 support

---
 changelog.d/6623.doc               |   1 +
 docs/message_retention_policies.md | 191 +++++++++++++++++++++++++++++++++++++
 2 files changed, 192 insertions(+)
 create mode 100644 changelog.d/6623.doc
 create mode 100644 docs/message_retention_policies.md

(limited to 'changelog.d')

diff --git a/changelog.d/6623.doc b/changelog.d/6623.doc
new file mode 100644
index 0000000000..c8aade0974
--- /dev/null
+++ b/changelog.d/6623.doc
@@ -0,0 +1 @@
+Add complete documentation of the message retention policies support.
diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md
new file mode 100644
index 0000000000..78055b2f64
--- /dev/null
+++ b/docs/message_retention_policies.md
@@ -0,0 +1,191 @@
+# Message retention policies
+
+Synapse admins can enable support for message retention policies on
+their homeserver. Message retention policies exist at a room level,
+follow the semantics described in
+[MSC1763](https://github.com/matrix-org/matrix-doc/blob/matthew/msc1763/proposals/1763-configurable-retention-periods.md),
+and allow server and room admins to configure how long messages should
+be kept in a homeserver's database before being purged from it.
+
+A message retention policy is mainly defined by its `max_lifetime`
+parameter, which defines how long a message can be kept around after
+it's been sent in the room. If a room doesn't have a message retention
+policy, and there's no default one for a given server, then no message
+sent in that room is ever purged on that server.
+
+MSC1763 also specifies semantics for a `min_lifetime` parameter which
+defines the amount of time after which an event _can_ get purged (after
+it's been sent to the room), but Synapse doesn't currently support it
+beyond registering it.
+
+Both `max_lifetime` and `min_lifetime` are optional parameters.
+
+Note that message retention policies don't apply to state events.
+
+Once an event reaches its expiry date (defined as the time it was sent
+plus the value for `max_lifetime` in the room), two things happen:
+
+* Synapse stops serving the event to clients via any endpoint.
+* The message gets picked up by the next purge job (see the "Purge jobs"
+  section) and is removed from Synapse's database.
+
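+For example, with a `max_lifetime` of one day (86400000 ms), an event
+sent at noon on Monday reaches its expiry date at noon on Tuesday: from
+then on clients can no longer retrieve it, and the next purge job to run
+will delete it from the database.
+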
+Since purge jobs don't run continuously, this means that an event might
+stay in a server's database for longer than the value for `max_lifetime`
+in the room would allow, though hidden from clients.
+
+Similarly, if a server (with support for message retention policies
+enabled) receives from another server an event that should have been
+purged according to its room's policy, then the receiving server will
+process and store that event until it's picked up by the next purge job,
+though it will always hide it from clients.
+
+
+## Room configuration
+
+To configure a room's message retention policy, a room's admin or
+moderator needs to send a state event in that room with the type
+`m.room.retention` and the following content:
+
+```json
+{
+    "max_lifetime": ...
+}
+```
+
+In this event's content, the `max_lifetime` parameter has the same
+meaning as previously described, and needs to be expressed in
+milliseconds. The event's content can also include a `min_lifetime`
+parameter, which has the same meaning and limited support as previously
+described.
+
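+For example, the following content would make events in the room expire
+a week after they are sent (the value is illustrative; 604800000
+milliseconds is 7 days):
+
+```json
+{
+    "max_lifetime": 604800000
+}
+```
+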
+Note that, of all the servers in a room, only those with support for
+message retention policies will actually remove expired events. While
+we plan to eventually enable this support by default in Synapse, this
+isn't currently the case.
+
+
+## Server configuration
+
+Support for this feature can be enabled and configured in the
+`retention` section of the Synapse configuration file (see the
+[sample file](https://github.com/matrix-org/synapse/blob/v1.7.3/docs/sample_config.yaml#L332-L393)).
+
+To enable support for message retention policies, set the setting
+`enabled` in this section to `true`.
+
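+For example, the minimal configuration to turn the feature on would look
+like this:
+
+```yaml
+retention:
+  enabled: true
+```
+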
+
+### Default policy
+
+A default message retention policy is a policy defined in Synapse's
+configuration that is used by Synapse for every room that doesn't have a
+message retention policy configured in its state. This allows server
+admins to ensure that messages are never kept indefinitely in a server's
+database. 
+
+A default policy can be defined as such, in the `retention` section of
+the configuration file:
+
+```yaml
+  default_policy:
+    min_lifetime: 1d
+    max_lifetime: 1y
+```
+
+Here, `min_lifetime` and `max_lifetime` have the same meaning and level
+of support as previously described. They can be expressed either as a
+duration (using the units `s` (seconds), `m` (minutes), `h` (hours),
+`d` (days), `w` (weeks) and `y` (years)) or as a number of milliseconds.
+
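+For example, `max_lifetime: 1h` and `max_lifetime: 3600000` express the
+same duration, since an hour is 3600 seconds, i.e. 3600000 milliseconds.
+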
+
+### Purge jobs
+
+Purge jobs are the jobs that Synapse runs in the background to purge
+expired events from the database. They are only run if support for
+message retention policies is enabled in the server's configuration. If
+the server admin hasn't configured any purge jobs, Synapse will run one
+daily job that handles every room with a message retention policy (or,
+if the server has a default policy configured, every room it knows
+about), which should be enough in most cases.
+
+Some server admins might want finer control over when events are removed,
+depending on a room's policy. This can be done by setting the
+`purge_jobs` sub-section in the `retention` section of the configuration
+file. An example of such a configuration:
+
+```yaml
+  purge_jobs:
+    - longest_max_lifetime: 3d
+      interval: 12h
+    - shortest_max_lifetime: 3d
+      longest_max_lifetime: 1w
+      interval: 1d
+    - shortest_max_lifetime: 1w
+      interval: 2d
+```
+
+In this example, we define three jobs:
+
+* one that runs twice a day (every 12 hours) and purges events in rooms
+  whose policy's `max_lifetime` is lower than or equal to 3 days.
+* one that runs once a day and purges events in rooms whose policy's
+  `max_lifetime` is between 3 days and a week.
+* one that runs once every 2 days and purges events in rooms whose
+  policy's `max_lifetime` is greater than a week.
+
+Note that this example is tailored to show different configurations and
+features slightly more jobs than is probably necessary (in practice, a
+server admin would probably consider it better to replace the last two
+jobs with one that runs once a day and handles rooms whose
+policy's `max_lifetime` is greater than 3 days).
+
+Keep in mind, when configuring these jobs, that a purge job can become
+quite heavy on the server if it targets many rooms, so prefer several
+jobs with a low interval that each target a limited set of rooms. Also
+make sure to include a job with no minimum and one with no maximum so
+that your configuration handles every policy.
+
+As previously mentioned, while a purge job that runs, for example, once
+a day means that an expired event might stay in the database for up to
+a day after its expiry, Synapse hides expired events
+from clients as soon as they expire, so the event is not visible to
+local users between its expiry date and the moment it gets purged from
+the server's database.
+
+
+### Lifetime limits
+
+**Note: this feature is mainly useful within a closed federation or on
+servers that don't federate, because there currently is no way to
+enforce these limits in an open federation.**
+
+Server admins can restrict the values their local users are allowed to
+use for both `min_lifetime` and `max_lifetime`. These limits can be
+defined as such in the `retention` section of the configuration file:
+
+```yaml
+  allowed_lifetime_min: 1d
+  allowed_lifetime_max: 1y
+```
+
+Here, `allowed_lifetime_min` is the lowest value a local user can set
+for both `min_lifetime` and `max_lifetime`, and `allowed_lifetime_max`
+is the highest value. Both parameters are optional (e.g. setting
+`allowed_lifetime_min` but not `allowed_lifetime_max` only enforces a
+minimum and no maximum).
+
+Like other settings in this section, these parameters can be expressed
+either as a duration or as a number of milliseconds.
+
+
+## Note on reclaiming disk space
+
+While purge jobs actually delete data from the database, the disk space
+used by the database might not decrease immediately on the database's
+host. However, even though the database engine won't free up the disk
+space, it will start writing new data into the space freed by the purge.
+
+If you want to reclaim the freed disk space anyway and return it to the
+operating system, run `VACUUM FULL;` on the
+database (see the related
+[PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-vacuum.html)).
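+
+For example, assuming the database is named `synapse` (adjust to your
+setup), this could be done with `psql synapse -c 'VACUUM FULL;'`. Note
+that `VACUUM FULL` takes an exclusive lock on each table while it
+rewrites it, so it is best run while Synapse is stopped.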
+
-- 
cgit 1.4.1


From 51b8a21f0c3f52c26c63c196f5ed11b8be2394af Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Fri, 3 Jan 2020 13:49:12 +0100
Subject: Rename changelog

---
 changelog.d/6623.doc | 1 -
 changelog.d/6624.doc | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)
 delete mode 100644 changelog.d/6623.doc
 create mode 100644 changelog.d/6624.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6623.doc b/changelog.d/6623.doc
deleted file mode 100644
index c8aade0974..0000000000
--- a/changelog.d/6623.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add complete documentation of the message retention policies support.
diff --git a/changelog.d/6624.doc b/changelog.d/6624.doc
new file mode 100644
index 0000000000..c8aade0974
--- /dev/null
+++ b/changelog.d/6624.doc
@@ -0,0 +1 @@
+Add complete documentation of the message retention policies support.
-- 
cgit 1.4.1


From 6964ea095bbd474b5c2b9dfe99c817cd370987bf Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 3 Jan 2020 14:19:09 +0000
Subject: Reduce the reconnect time when replication fails. (#6617)

---
 changelog.d/6617.misc             | 1 +
 synapse/replication/tcp/client.py | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6617.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6617.misc b/changelog.d/6617.misc
new file mode 100644
index 0000000000..94aa271d38
--- /dev/null
+++ b/changelog.d/6617.misc
@@ -0,0 +1 @@
+Reduce the reconnect time when worker replication fails, to make it easier to catch up.
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index fead78388c..bbcb84646c 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -46,7 +46,8 @@ class ReplicationClientFactory(ReconnectingClientFactory):
     is required.
     """
 
-    maxDelay = 30  # Try at least once every N seconds
+    initialDelay = 0.1
+    maxDelay = 1  # Try at least once every N seconds
 
     def __init__(self, hs, client_name, handler: AbstractReplicationClientHandler):
         self.client_name = client_name
-- 
cgit 1.4.1


From b6b57ecb4e845490fc26a537ff57df8cae1587b9 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 3 Jan 2020 14:19:48 +0000
Subject: Kill off redundant SynapseRequestFactory (#6619)

We already get the Site via the Channel, so there's no need for a dedicated
RequestFactory: we can just use the right constructor.
---
 changelog.d/6619.misc |  1 +
 synapse/http/site.py  | 18 +++---------------
 tests/server.py       |  6 ++++--
 3 files changed, 8 insertions(+), 17 deletions(-)
 create mode 100644 changelog.d/6619.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6619.misc b/changelog.d/6619.misc
new file mode 100644
index 0000000000..b608133219
--- /dev/null
+++ b/changelog.d/6619.misc
@@ -0,0 +1 @@
+Simplify http handling by removing redundant SynapseRequestFactory.
diff --git a/synapse/http/site.py b/synapse/http/site.py
index ff8184a3d0..9f2d035fa0 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -47,9 +47,9 @@ class SynapseRequest(Request):
         logcontext(LoggingContext) : the log context for this request
     """
 
-    def __init__(self, site, channel, *args, **kw):
+    def __init__(self, channel, *args, **kw):
         Request.__init__(self, channel, *args, **kw)
-        self.site = site
+        self.site = channel.site
         self._channel = channel  # this is used by the tests
         self.authenticated_entity = None
         self.start_time = 0
@@ -331,18 +331,6 @@ class XForwardedForRequest(SynapseRequest):
         )
 
 
-class SynapseRequestFactory(object):
-    def __init__(self, site, x_forwarded_for):
-        self.site = site
-        self.x_forwarded_for = x_forwarded_for
-
-    def __call__(self, *args, **kwargs):
-        if self.x_forwarded_for:
-            return XForwardedForRequest(self.site, *args, **kwargs)
-        else:
-            return SynapseRequest(self.site, *args, **kwargs)
-
-
 class SynapseSite(Site):
     """
     Subclass of a twisted http Site that does access logging with python's
@@ -364,7 +352,7 @@ class SynapseSite(Site):
         self.site_tag = site_tag
 
         proxied = config.get("x_forwarded", False)
-        self.requestFactory = SynapseRequestFactory(self, proxied)
+        self.requestFactory = XForwardedForRequest if proxied else SynapseRequest
         self.access_logger = logging.getLogger(logger_name)
         self.server_version_string = server_version_string.encode("ascii")
 
diff --git a/tests/server.py b/tests/server.py
index a554dfdd57..1644710aa0 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -20,6 +20,7 @@ from twisted.python.failure import Failure
 from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
 from twisted.web.http import unquote
 from twisted.web.http_headers import Headers
+from twisted.web.server import Site
 
 from synapse.http.site import SynapseRequest
 from synapse.util import Clock
@@ -42,6 +43,7 @@ class FakeChannel(object):
     wire).
     """
 
+    site = attr.ib(type=Site)
     _reactor = attr.ib()
     result = attr.ib(default=attr.Factory(dict))
     _producer = None
@@ -176,9 +178,9 @@ def make_request(
         content = content.encode("utf8")
 
     site = FakeSite()
-    channel = FakeChannel(reactor)
+    channel = FakeChannel(site, reactor)
 
-    req = request(site, channel)
+    req = request(channel)
     req.process = lambda: b""
     req.content = BytesIO(content)
     req.postpath = list(map(unquote, path[1:].split(b"/")))
-- 
cgit 1.4.1


From 98247c4a0e169ee5f201fe5f0e404604d6628566 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 3 Jan 2020 17:10:52 +0000
Subject: Remove unused, undocumented "content repo" resource (#6628)

This looks like it got half-killed back in #888.

Fixes #6567.
---
 changelog.d/6628.removal                    |   1 +
 docs/sample_config.yaml                     |   4 --
 synapse/api/urls.py                         |   1 -
 synapse/app/homeserver.py                   |  10 +--
 synapse/app/media_repository.py             |   6 +-
 synapse/config/repository.py                |   5 --
 synapse/rest/media/v0/__init__.py           |   0
 synapse/rest/media/v0/content_repository.py | 103 ----------------------------
 tox.ini                                     |   1 -
 9 files changed, 3 insertions(+), 128 deletions(-)
 create mode 100644 changelog.d/6628.removal
 delete mode 100644 synapse/rest/media/v0/__init__.py
 delete mode 100644 synapse/rest/media/v0/content_repository.py

(limited to 'changelog.d')

diff --git a/changelog.d/6628.removal b/changelog.d/6628.removal
new file mode 100644
index 0000000000..66cd6aeca4
--- /dev/null
+++ b/changelog.d/6628.removal
@@ -0,0 +1 @@
+Remove unused, undocumented /_matrix/content API.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index e3b05423b8..fad5f968b5 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -692,10 +692,6 @@ media_store_path: "DATADIR/media_store"
 #    config:
 #       directory: /mnt/some/other/directory
 
-# Directory where in-progress uploads are stored.
-#
-uploads_path: "DATADIR/uploads"
-
 # The largest allowed upload size in bytes
 #
 #max_upload_size: 10M
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index ff1f39e86c..f34434bd67 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -29,7 +29,6 @@ FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
 FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
 STATIC_PREFIX = "/_matrix/static"
 WEB_CLIENT_PREFIX = "/_matrix/client"
-CONTENT_REPO_PREFIX = "/_matrix/content"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
 MEDIA_PREFIX = "/_matrix/media/r0"
 LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 0e9bf7f53a..6208deb646 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -39,7 +39,6 @@ import synapse
 import synapse.config.logger
 from synapse import events
 from synapse.api.urls import (
-    CONTENT_REPO_PREFIX,
     FEDERATION_PREFIX,
     LEGACY_MEDIA_PREFIX,
     MEDIA_PREFIX,
@@ -65,7 +64,6 @@ from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
 from synapse.rest import ClientRestResource
 from synapse.rest.admin import AdminRestResource
 from synapse.rest.key.v2 import KeyApiV2Resource
-from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.rest.well_known import WellKnownResource
 from synapse.server import HomeServer
 from synapse.storage import DataStore
@@ -223,13 +221,7 @@ class SynapseHomeServer(HomeServer):
             if self.get_config().enable_media_repo:
                 media_repo = self.get_media_repository_resource()
                 resources.update(
-                    {
-                        MEDIA_PREFIX: media_repo,
-                        LEGACY_MEDIA_PREFIX: media_repo,
-                        CONTENT_REPO_PREFIX: ContentRepoResource(
-                            self, self.config.uploads_path
-                        ),
-                    }
+                    {MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo}
                 )
             elif name == "media":
                 raise ConfigError(
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index 4c80f257e2..a63c53dc44 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -21,7 +21,7 @@ from twisted.web.resource import NoResource
 
 import synapse
 from synapse import events
-from synapse.api.urls import CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
+from synapse.api.urls import LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
 from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
@@ -37,7 +37,6 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.admin import register_servlets_for_media_repo
-from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.server import HomeServer
 from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
 from synapse.util.httpresourcetree import create_resource_tree
@@ -82,9 +81,6 @@ class MediaRepositoryServer(HomeServer):
                         {
                             MEDIA_PREFIX: media_repo,
                             LEGACY_MEDIA_PREFIX: media_repo,
-                            CONTENT_REPO_PREFIX: ContentRepoResource(
-                                self, self.config.uploads_path
-                            ),
                             "/_synapse/admin": admin_resource,
                         }
                     )
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index d0205e14b9..7d2dd27fd0 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -156,7 +156,6 @@ class ContentRepositoryConfig(Config):
                 (provider_class, parsed_config, wrapper_config)
             )
 
-        self.uploads_path = self.ensure_directory(config.get("uploads_path", "uploads"))
         self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
         self.thumbnail_requirements = parse_thumbnail_requirements(
             config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES)
@@ -231,10 +230,6 @@ class ContentRepositoryConfig(Config):
         #    config:
         #       directory: /mnt/some/other/directory
 
-        # Directory where in-progress uploads are stored.
-        #
-        uploads_path: "%(uploads_path)s"
-
         # The largest allowed upload size in bytes
         #
         #max_upload_size: 10M
diff --git a/synapse/rest/media/v0/__init__.py b/synapse/rest/media/v0/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py
deleted file mode 100644
index 86884c0ef4..0000000000
--- a/synapse/rest/media/v0/content_repository.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import base64
-import logging
-import os
-import re
-
-from canonicaljson import json
-
-from twisted.protocols.basic import FileSender
-from twisted.web import resource, server
-
-from synapse.api.errors import Codes, cs_error
-from synapse.http.server import finish_request, respond_with_json_bytes
-
-logger = logging.getLogger(__name__)
-
-
-class ContentRepoResource(resource.Resource):
-    """Provides file uploading and downloading.
-
-    Uploads are POSTed to wherever this Resource is linked to. This resource
-    returns a "content token" which can be used to GET this content again. The
-    token is typically a path, but it may not be. Tokens can expire, be
-    one-time uses, etc.
-
-    In this case, the token is a path to the file and contains 3 interesting
-    sections:
-        - User ID base64d (for namespacing content to each user)
-        - random 24 char string
-        - Content type base64d (so we can return it when clients GET it)
-
-    """
-
-    isLeaf = True
-
-    def __init__(self, hs, directory):
-        resource.Resource.__init__(self)
-        self.hs = hs
-        self.directory = directory
-
-    def render_GET(self, request):
-        # no auth here on purpose, to allow anyone to view, even across home
-        # servers.
-
-        # TODO: A little crude here, we could do this better.
-        filename = request.path.decode("ascii").split("/")[-1]
-        # be paranoid
-        filename = re.sub("[^0-9A-z.-_]", "", filename)
-
-        file_path = self.directory + "/" + filename
-
-        logger.debug("Searching for %s", file_path)
-
-        if os.path.isfile(file_path):
-            # filename has the content type
-            base64_contentype = filename.split(".")[1]
-            content_type = base64.urlsafe_b64decode(base64_contentype)
-            logger.info("Sending file %s", file_path)
-            f = open(file_path, "rb")
-            request.setHeader("Content-Type", content_type)
-
-            # cache for at least a day.
-            # XXX: we might want to turn this off for data we don't want to
-            # recommend caching as it's sensitive or private - or at least
-            # select private. don't bother setting Expires as all our matrix
-            # clients are smart enough to be happy with Cache-Control (right?)
-            request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
-
-            d = FileSender().beginFileTransfer(f, request)
-
-            # after the file has been sent, clean up and finish the request
-            def cbFinished(ignored):
-                f.close()
-                finish_request(request)
-
-            d.addCallback(cbFinished)
-        else:
-            respond_with_json_bytes(
-                request,
-                404,
-                json.dumps(cs_error("Not found", code=Codes.NOT_FOUND)),
-                send_cors=True,
-            )
-
-        return server.NOT_DONE_YET
-
-    def render_OPTIONS(self, request):
-        respond_with_json_bytes(request, 200, {}, send_cors=True)
-        return server.NOT_DONE_YET
diff --git a/tox.ini b/tox.ini
index 1d6428f64f..0ab6d5666b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -182,7 +182,6 @@ commands = mypy \
             synapse/logging/ \
             synapse/module_api \
             synapse/rest/consent \
-            synapse/rest/media/v0 \
             synapse/rest/saml2 \
             synapse/spam_checker_api \
             synapse/storage/engines \
-- 
cgit 1.4.1


From e484101306787988adacf6d6de4fcd565368dec4 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 3 Jan 2020 17:11:29 +0000
Subject: Raise an error if someone tries to use the log_file config option
 (#6626)

This has caused some confusion for people who didn't notice it going away.
---
 changelog.d/6626.feature  |  1 +
 synapse/app/homeserver.py |  2 +-
 synapse/config/logger.py  | 17 +++++++++++++++--
 3 files changed, 17 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6626.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6626.feature b/changelog.d/6626.feature
new file mode 100644
index 0000000000..15798fa59b
--- /dev/null
+++ b/changelog.d/6626.feature
@@ -0,0 +1 @@
+Raise an error if someone tries to use the log_file config option.
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 6208deb646..e5b44a5eed 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -310,7 +310,7 @@ def setup(config_options):
             "Synapse Homeserver", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
+        sys.stderr.write("\nERROR: %s\n" % (e,))
         sys.exit(1)
 
     if not config:
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 75bb904718..3c455610d9 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import argparse
 import logging
 import logging.config
 import os
@@ -37,7 +37,7 @@ from synapse.logging._structured import (
 from synapse.logging.context import LoggingContextFilter
 from synapse.util.versionstring import get_version_string
 
-from ._base import Config
+from ._base import Config, ConfigError
 
 DEFAULT_LOG_CONFIG = Template(
     """
@@ -81,11 +81,18 @@ disable_existing_loggers: false
 """
 )
 
+LOG_FILE_ERROR = """\
+Support for the log_file configuration option and --log-file command-line option was
+removed in Synapse 1.3.0. You should instead set up a separate log configuration file.
+"""
+
 
 class LoggingConfig(Config):
     section = "logging"
 
     def read_config(self, config, **kwargs):
+        if config.get("log_file"):
+            raise ConfigError(LOG_FILE_ERROR)
         self.log_config = self.abspath(config.get("log_config"))
         self.no_redirect_stdio = config.get("no_redirect_stdio", False)
 
@@ -106,6 +113,8 @@ class LoggingConfig(Config):
     def read_arguments(self, args):
         if args.no_redirect_stdio is not None:
             self.no_redirect_stdio = args.no_redirect_stdio
+        if args.log_file is not None:
+            raise ConfigError(LOG_FILE_ERROR)
 
     @staticmethod
     def add_arguments(parser):
@@ -118,6 +127,10 @@ class LoggingConfig(Config):
             help="Do not redirect stdout/stderr to the log",
         )
 
+        logging_group.add_argument(
+            "-f", "--log-file", dest="log_file", help=argparse.SUPPRESS,
+        )
+
     def generate_files(self, config, config_dir_path):
         log_config = config.get("log_config")
         if log_config and not os.path.exists(log_config):
-- 
cgit 1.4.1


From 08815566bca79d001ad1bf58b2b082e435b6e5df Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 3 Jan 2020 17:14:00 +0000
Subject: Automate generation of the sample and debian log configs (#6627)

---
 changelog.d/6627.misc              |  1 +
 debian/build_virtualenv            |  3 +++
 debian/changelog                   |  6 ++++++
 debian/install                     |  1 -
 debian/log.yaml                    | 36 -------------------------------
 docs/sample_log_config.yaml        |  4 ++--
 scripts-dev/generate_sample_config | 10 +++++++++
 scripts/generate_log_config        | 43 ++++++++++++++++++++++++++++++++++++++
 synapse/config/logger.py           |  9 +++++++-
 9 files changed, 73 insertions(+), 40 deletions(-)
 create mode 100644 changelog.d/6627.misc
 delete mode 100644 debian/log.yaml
 create mode 100755 scripts/generate_log_config

(limited to 'changelog.d')

diff --git a/changelog.d/6627.misc b/changelog.d/6627.misc
new file mode 100644
index 0000000000..702f067070
--- /dev/null
+++ b/changelog.d/6627.misc
@@ -0,0 +1 @@
+Automate generation of the sample log config.
diff --git a/debian/build_virtualenv b/debian/build_virtualenv
index 2791896052..d892fd5c9d 100755
--- a/debian/build_virtualenv
+++ b/debian/build_virtualenv
@@ -85,6 +85,9 @@ PYTHONPATH="$tmpdir" \
 
 ' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
 
+# build the log config file
+"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_log_config" \
+        --output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
 
 # add a dependency on the right version of python to substvars.
 PYPKG=`basename $SNAKE`
diff --git a/debian/changelog b/debian/changelog
index 31791c127c..75fe89fa97 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.7.3ubuntu1) UNRELEASED; urgency=medium
+
+  * Automate generation of the default log configuration file.
+
+ -- Richard van der Hoff   Fri, 03 Jan 2020 13:55:38 +0000
+
 matrix-synapse-py3 (1.7.3) stable; urgency=medium
 
   * New synapse release 1.7.3.
diff --git a/debian/install b/debian/install
index 43dc8c6904..da8b726a2b 100644
--- a/debian/install
+++ b/debian/install
@@ -1,2 +1 @@
-debian/log.yaml etc/matrix-synapse
 debian/manage_debconf.pl /opt/venvs/matrix-synapse/lib/
diff --git a/debian/log.yaml b/debian/log.yaml
deleted file mode 100644
index 95b655dd35..0000000000
--- a/debian/log.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-
-version: 1
-
-formatters:
-  precise:
-   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
-
-filters:
-  context:
-    (): synapse.logging.context.LoggingContextFilter
-    request: ""
-
-handlers:
-  file:
-    class: logging.handlers.RotatingFileHandler
-    formatter: precise
-    filename: /var/log/matrix-synapse/homeserver.log
-    maxBytes: 104857600
-    backupCount: 10
-    filters: [context]
-    encoding: utf8
-  console:
-    class: logging.StreamHandler
-    formatter: precise
-    level: WARN
-
-loggers:
-    synapse:
-        level: INFO
-
-    synapse.storage.SQL:
-        level: INFO
-
-root:
-    level: INFO
-    handlers: [file, console]
diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml
index 11e8f35f41..1a2739455e 100644
--- a/docs/sample_log_config.yaml
+++ b/docs/sample_log_config.yaml
@@ -1,4 +1,4 @@
-# Example log config file for synapse.
+# Log configuration for Synapse.
 #
 # This is a YAML file containing a standard Python logging configuration
 # dictionary. See [1] for details on the valid settings.
@@ -20,7 +20,7 @@ handlers:
     file:
         class: logging.handlers.RotatingFileHandler
         formatter: precise
-        filename: /home/rav/work/synapse/homeserver.log
+        filename: /var/log/matrix-synapse/homeserver.log
         maxBytes: 104857600
         backupCount: 10
         filters: [context]
diff --git a/scripts-dev/generate_sample_config b/scripts-dev/generate_sample_config
index 5e33b9b549..9cb4630a5c 100755
--- a/scripts-dev/generate_sample_config
+++ b/scripts-dev/generate_sample_config
@@ -7,12 +7,22 @@ set -e
 cd `dirname $0`/..
 
 SAMPLE_CONFIG="docs/sample_config.yaml"
+SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml"
+
+check() {
+    diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || return 1
+}
 
 if [ "$1" == "--check" ]; then
     diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || {
         echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
         exit 1
     }
+    diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || {
+        echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
+        exit 1
+    }
 else
     ./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG"
+    ./scripts/generate_log_config -o "$SAMPLE_LOG_CONFIG"
 fi
diff --git a/scripts/generate_log_config b/scripts/generate_log_config
new file mode 100755
index 0000000000..b6957f48a3
--- /dev/null
+++ b/scripts/generate_log_config
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import sys
+
+from synapse.config.logger import DEFAULT_LOG_CONFIG
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "-o",
+        "--output-file",
+        type=argparse.FileType("w"),
+        default=sys.stdout,
+        help="File to write the configuration to. Default: stdout",
+    )
+
+    parser.add_argument(
+        "-f",
+        "--log-file",
+        type=str,
+        default="/var/log/matrix-synapse/homeserver.log",
+        help="name of the log file",
+    )
+
+    args = parser.parse_args()
+    args.output_file.write(DEFAULT_LOG_CONFIG.substitute(log_file=args.log_file))
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 3c455610d9..a25c70e928 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -40,7 +40,14 @@ from synapse.util.versionstring import get_version_string
 from ._base import Config, ConfigError
 
 DEFAULT_LOG_CONFIG = Template(
-    """
+    """\
+# Log configuration for Synapse.
+#
+# This is a YAML file containing a standard Python logging configuration
+# dictionary. See [1] for details on the valid settings.
+#
+# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+
 version: 1
 
 formatters:
-- 
cgit 1.4.1


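The generator script added above works by substituting the log file path into a
Python string.Template held in synapse.config.logger. A minimal sketch of that
mechanism, with a one-line stand-in for the real template:

    import sys
    from string import Template

    # Illustrative stand-in for synapse.config.logger.DEFAULT_LOG_CONFIG;
    # the real template is the full YAML logging config shown in this patch.
    LOG_CONFIG_TEMPLATE = Template("filename: ${log_file}\n")

    # substitute() raises KeyError for an unknown placeholder, so a typo in
    # the template fails loudly at generation time rather than at runtime.
    sys.stdout.write(
        LOG_CONFIG_TEMPLATE.substitute(log_file="/var/log/matrix-synapse/homeserver.log")
    )
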
From 01c3c6c9298d0bbdbbc6e829e9c9f1e1a52e8332 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 6 Jan 2020 04:53:07 -0500
Subject: Fix power levels being incorrectly set in old and new rooms after a
 room upgrade (#6633)

Modify a copy of an upgraded room's PL before sending to the new room
---
 changelog.d/6633.bugfix  |  1 +
 synapse/handlers/room.py | 17 ++++++++++-------
 2 files changed, 11 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6633.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6633.bugfix b/changelog.d/6633.bugfix
new file mode 100644
index 0000000000..4bacf26021
--- /dev/null
+++ b/changelog.d/6633.bugfix
@@ -0,0 +1 @@
+Fix a bug where a moderator who upgraded a room would incorrectly become an admin in the new room.
\ No newline at end of file
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 89c9118b26..4f489762fc 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -16,6 +16,7 @@
 # limitations under the License.
 
 """Contains functions for performing events on rooms."""
+import copy
 import itertools
 import logging
 import math
@@ -271,7 +272,7 @@ class RoomCreationHandler(BaseHandler):
             except AuthError as e:
                 logger.warning("Unable to update PLs in old room: %s", e)
 
-        logger.info("Setting correct PLs in new room")
+        logger.info("Setting correct PLs in new room to %s", old_room_pl_state.content)
         yield self.event_creation_handler.create_and_send_nonmember_event(
             requester,
             {
@@ -365,13 +366,15 @@ class RoomCreationHandler(BaseHandler):
         needed_power_level = max(state_default, ban, max(event_power_levels.values()))
 
         # Raise the requester's power level in the new room if necessary
-        current_power_level = power_levels["users"][requester.user.to_string()]
+        current_power_level = power_levels["users"][user_id]
         if current_power_level < needed_power_level:
-            # Assign this power level to the requester
-            power_levels["users"][requester.user.to_string()] = needed_power_level
+            # Perform a deepcopy in order to not modify the original power levels in a
+            # room, as its contents are preserved as the state for the old room later on
+            new_power_levels = copy.deepcopy(power_levels)
+            initial_state[(EventTypes.PowerLevels, "")] = new_power_levels
 
-        # Set the power levels to the modified state
-        initial_state[(EventTypes.PowerLevels, "")] = power_levels
+            # Assign this power level to the requester
+            new_power_levels["users"][user_id] = needed_power_level
 
         yield self._send_events_for_new_room(
             requester,
@@ -733,7 +736,7 @@ class RoomCreationHandler(BaseHandler):
         initial_state,
         creation_content,
         room_alias=None,
-        power_level_content_override=None,
+        power_level_content_override=None,  # Ignored when the initial state already contains a power-levels event
         creator_join_profile=None,
     ):
         def create(etype, content, **kwargs):
-- 
cgit 1.4.1


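The essence of the fix above is copy-before-mutate: the power-levels dict is
reused later as the old room's state, so it must not be modified in place. A
condensed sketch of the pattern (a simplified signature, not the handler's
real one):

    import copy

    def raise_power_level(power_levels: dict, user_id: str, needed: int) -> dict:
        """Return power levels in which user_id has at least `needed`.

        Deep-copies the input so the caller's dict, which is preserved as
        the old room's state, is never mutated in place.
        """
        current = power_levels["users"].get(user_id, 0)
        if current >= needed:
            return power_levels
        new_power_levels = copy.deepcopy(power_levels)
        new_power_levels["users"][user_id] = needed
        return new_power_levels
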
From 18674eebb1fa5d7445952d7e201afe33bd040523 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 6 Jan 2020 12:28:58 +0000
Subject: Workaround for error when fetching notary's own key (#6620)

* Kill off redundant SynapseRequestFactory

We already get the Site via the Channel, so there's no need for a dedicated
RequestFactory: we can just use the right constructor.

* Workaround for error when fetching notary's own key

As a notary server, when we return our own keys, include all of our signing
keys in verify_keys.

This is a workaround for #6596.
---
 changelog.d/6620.misc                         |   1 +
 synapse/rest/key/v2/remote_key_resource.py    |  30 ++++--
 tests/rest/key/v2/test_remote_key_resource.py | 130 ++++++++++++++++++++++++++
 tests/unittest.py                             |  11 ++-
 4 files changed, 163 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/6620.misc
 create mode 100644 tests/rest/key/v2/test_remote_key_resource.py

(limited to 'changelog.d')

diff --git a/changelog.d/6620.misc b/changelog.d/6620.misc
new file mode 100644
index 0000000000..8bfb78fb20
--- /dev/null
+++ b/changelog.d/6620.misc
@@ -0,0 +1 @@
+Add a workaround for Synapse raising exceptions when fetching the notary's own key from the notary.
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index e7fc3f0431..bf5e0eb844 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -15,6 +15,7 @@
 import logging
 
 from canonicaljson import encode_canonical_json, json
+from signedjson.key import encode_verify_key_base64
 from signedjson.sign import sign_json
 
 from twisted.internet import defer
@@ -216,15 +217,28 @@ class RemoteKey(DirectServeResource):
         if cache_misses and query_remote_on_cache_miss:
             yield self.fetcher.get_keys(cache_misses)
             yield self.query_keys(request, query, query_remote_on_cache_miss=False)
-        else:
-            signed_keys = []
-            for key_json in json_results:
-                key_json = json.loads(key_json)
+            return
+
+        signed_keys = []
+        for key_json in json_results:
+            key_json = json.loads(key_json)
+
+            # backwards-compatibility hack for #6596: if the requested key belongs
+            # to us, make sure that all of the signing keys appear in the
+            # "verify_keys" section.
+            if key_json["server_name"] == self.config.server_name:
+                verify_keys = key_json["verify_keys"]
                 for signing_key in self.config.key_server_signing_keys:
-                    key_json = sign_json(key_json, self.config.server_name, signing_key)
+                    key_id = "%s:%s" % (signing_key.alg, signing_key.version)
+                    verify_keys[key_id] = {
+                        "key": encode_verify_key_base64(signing_key.verify_key)
+                    }
+
+            for signing_key in self.config.key_server_signing_keys:
+                key_json = sign_json(key_json, self.config.server_name, signing_key)
 
-                signed_keys.append(key_json)
+            signed_keys.append(key_json)
 
-            results = {"server_keys": signed_keys}
+        results = {"server_keys": signed_keys}
 
-            respond_with_json_bytes(request, 200, encode_canonical_json(results))
+        respond_with_json_bytes(request, 200, encode_canonical_json(results))
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
new file mode 100644
index 0000000000..d8246b4e78
--- /dev/null
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import urllib.parse
+from io import BytesIO
+
+from mock import Mock
+
+import signedjson.key
+from nacl.signing import SigningKey
+from signedjson.sign import sign_json
+
+from twisted.web.resource import NoResource
+
+from synapse.http.site import SynapseRequest
+from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.util.httpresourcetree import create_resource_tree
+
+from tests import unittest
+from tests.server import FakeChannel, wait_until_result
+
+
+class RemoteKeyResourceTestCase(unittest.HomeserverTestCase):
+    def make_homeserver(self, reactor, clock):
+        self.http_client = Mock()
+        return self.setup_test_homeserver(http_client=self.http_client)
+
+    def create_test_json_resource(self):
+        return create_resource_tree(
+            {"/_matrix/key/v2": KeyApiV2Resource(self.hs)}, root_resource=NoResource()
+        )
+
+    def expect_outgoing_key_request(
+        self, server_name: str, signing_key: SigningKey
+    ) -> None:
+        """
+        Tell the mock http client to expect an outgoing GET request for the given key
+        """
+
+        def get_json(destination, path, ignore_backoff=False, **kwargs):
+            self.assertTrue(ignore_backoff)
+            self.assertEqual(destination, server_name)
+            key_id = "%s:%s" % (signing_key.alg, signing_key.version)
+            self.assertEqual(
+                path, "/_matrix/key/v2/server/%s" % (urllib.parse.quote(key_id),)
+            )
+
+            response = {
+                "server_name": server_name,
+                "old_verify_keys": {},
+                "valid_until_ts": 200 * 1000,
+                "verify_keys": {
+                    key_id: {
+                        "key": signedjson.key.encode_verify_key_base64(
+                            signing_key.verify_key
+                        )
+                    }
+                },
+            }
+            sign_json(response, server_name, signing_key)
+            return response
+
+        self.http_client.get_json.side_effect = get_json
+
+    def make_notary_request(self, server_name: str, key_id: str) -> dict:
+        """Send a GET request to the test server requesting the given key.
+
+        Checks that the response is a 200 and returns the decoded json body.
+        """
+        channel = FakeChannel(self.site, self.reactor)
+        req = SynapseRequest(channel)
+        req.content = BytesIO(b"")
+        req.requestReceived(
+            b"GET",
+            b"/_matrix/key/v2/query/%s/%s"
+            % (server_name.encode("utf-8"), key_id.encode("utf-8")),
+            b"1.1",
+        )
+        wait_until_result(self.reactor, req)
+        self.assertEqual(channel.code, 200)
+        resp = channel.json_body
+        return resp
+
+    def test_get_key(self):
+        """Fetch a remote key"""
+        SERVER_NAME = "remote.server"
+        testkey = signedjson.key.generate_signing_key("ver1")
+        self.expect_outgoing_key_request(SERVER_NAME, testkey)
+
+        resp = self.make_notary_request(SERVER_NAME, "ed25519:ver1")
+        keys = resp["server_keys"]
+        self.assertEqual(len(keys), 1)
+
+        self.assertIn("ed25519:ver1", keys[0]["verify_keys"])
+        self.assertEqual(len(keys[0]["verify_keys"]), 1)
+
+        # it should be signed by both the origin server and the notary
+        self.assertIn(SERVER_NAME, keys[0]["signatures"])
+        self.assertIn(self.hs.hostname, keys[0]["signatures"])
+
+    def test_get_own_key(self):
+        """Fetch our own key"""
+        testkey = signedjson.key.generate_signing_key("ver1")
+        self.expect_outgoing_key_request(self.hs.hostname, testkey)
+
+        resp = self.make_notary_request(self.hs.hostname, "ed25519:ver1")
+        keys = resp["server_keys"]
+        self.assertEqual(len(keys), 1)
+
+        # it should be signed by both itself, and the notary signing key
+        sigs = keys[0]["signatures"]
+        self.assertEqual(len(sigs), 1)
+        self.assertIn(self.hs.hostname, sigs)
+        oursigs = sigs[self.hs.hostname]
+        self.assertEqual(len(oursigs), 2)
+
+        # and both keys should be present in the verify_keys section
+        self.assertIn("ed25519:ver1", keys[0]["verify_keys"])
+        self.assertIn("ed25519:a_lPym", keys[0]["verify_keys"])
diff --git a/tests/unittest.py b/tests/unittest.py
index b30b7d1718..cbda237278 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -36,7 +36,7 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.ratelimiting import FederationRateLimitConfig
 from synapse.federation.transport import server as federation_server
 from synapse.http.server import JsonResource
-from synapse.http.site import SynapseRequest
+from synapse.http.site import SynapseRequest, SynapseSite
 from synapse.logging.context import LoggingContext
 from synapse.server import HomeServer
 from synapse.types import Requester, UserID, create_requester
@@ -210,6 +210,15 @@ class HomeserverTestCase(TestCase):
         # Register the resources
         self.resource = self.create_test_json_resource()
 
+        # create a site to wrap the resource.
+        self.site = SynapseSite(
+            logger_name="synapse.access.http.fake",
+            site_tag="test",
+            config={},
+            resource=self.resource,
+            server_version_string="1",
+        )
+
         from tests.rest.client.v1.utils import RestHelper
 
         self.helper = RestHelper(self.hs, self.resource, getattr(self, "user_id", None))
-- 
cgit 1.4.1


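The workaround rests on three signedjson primitives used throughout this
patch: generating a signing key, publishing its base64-encoded verify key
under an "alg:version" key id, and signing the response. A small sketch of
those calls (the server name is a placeholder):

    import signedjson.key
    from signedjson.sign import sign_json

    signing_key = signedjson.key.generate_signing_key("ver1")
    key_id = "%s:%s" % (signing_key.alg, signing_key.version)  # "ed25519:ver1"

    response = {
        "server_name": "notary.example.com",
        "verify_keys": {
            key_id: {
                "key": signedjson.key.encode_verify_key_base64(signing_key.verify_key)
            }
        },
    }
    # sign_json() adds a "signatures" section keyed by server name and key id.
    response = sign_json(response, "notary.example.com", signing_key)
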
From 4b36b482e0cc1a63db27534c4ea5d9608cdb6a79 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 6 Jan 2020 12:33:56 +0000
Subject: Fix exception when fetching notary server's old keys (#6625)

Lift the restriction that *all* the keys used for signing v2 key responses be
present in verify_keys.

Fixes #6596.
---
 changelog.d/6625.bugfix      |   1 +
 synapse/crypto/keyring.py    |  13 ++--
 tests/crypto/test_keyring.py | 139 +++++++++++++++++++++++++++++--------------
 3 files changed, 103 insertions(+), 50 deletions(-)
 create mode 100644 changelog.d/6625.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6625.bugfix b/changelog.d/6625.bugfix
new file mode 100644
index 0000000000..a8dc5587dc
--- /dev/null
+++ b/changelog.d/6625.bugfix
@@ -0,0 +1 @@
+Fix exception when fetching the `matrix.org:ed25519:auto` key.
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 7cfad192e8..6fe5a6a26a 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -511,17 +511,18 @@ class BaseV2KeyFetcher(object):
         server_name = response_json["server_name"]
         verified = False
         for key_id in response_json["signatures"].get(server_name, {}):
-            # each of the keys used for the signature must be present in the response
-            # json.
             key = verify_keys.get(key_id)
             if not key:
-                raise KeyLookupError(
-                    "Key response is signed by key id %s:%s but that key is not "
-                    "present in the response" % (server_name, key_id)
-                )
+                # the key may not be present in verify_keys if:
+                #  * we got the key from the notary server, and:
+                #  * the key belongs to the notary server, and:
+                #  * the notary server is using a different key to sign notary
+                #    responses.
+                continue
 
             verify_signed_json(response_json, server_name, key.verify_key)
             verified = True
+            break
 
         if not verified:
             raise KeyLookupError(
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index 8efd39c7f7..34d5895f18 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -19,6 +19,7 @@ from mock import Mock
 import canonicaljson
 import signedjson.key
 import signedjson.sign
+from nacl.signing import SigningKey
 from signedjson.key import encode_verify_key_base64, get_verify_key
 
 from twisted.internet import defer
@@ -412,34 +413,37 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
             handlers=None, http_client=self.http_client, config=config
         )
 
-    def test_get_keys_from_perspectives(self):
-        # arbitrarily advance the clock a bit
-        self.reactor.advance(100)
-
-        fetcher = PerspectivesKeyFetcher(self.hs)
-
-        SERVER_NAME = "server2"
-        testkey = signedjson.key.generate_signing_key("ver1")
-        testverifykey = signedjson.key.get_verify_key(testkey)
-        testverifykey_id = "ed25519:ver1"
-        VALID_UNTIL_TS = 200 * 1000
+    def build_perspectives_response(
+        self, server_name: str, signing_key: SigningKey, valid_until_ts: int,
+    ) -> dict:
+        """
+        Build a valid perspectives server response to a request for the given key
+        """
+        verify_key = signedjson.key.get_verify_key(signing_key)
+        verifykey_id = "%s:%s" % (verify_key.alg, verify_key.version)
 
-        # valid response
         response = {
-            "server_name": SERVER_NAME,
+            "server_name": server_name,
             "old_verify_keys": {},
-            "valid_until_ts": VALID_UNTIL_TS,
+            "valid_until_ts": valid_until_ts,
             "verify_keys": {
-                testverifykey_id: {
-                    "key": signedjson.key.encode_verify_key_base64(testverifykey)
+                verifykey_id: {
+                    "key": signedjson.key.encode_verify_key_base64(verify_key)
                 }
             },
         }
-
         # the response must be signed by both the origin server and the perspectives
         # server.
-        signedjson.sign.sign_json(response, SERVER_NAME, testkey)
+        signedjson.sign.sign_json(response, server_name, signing_key)
         self.mock_perspective_server.sign_response(response)
+        return response
+
+    def expect_outgoing_key_query(
+        self, expected_server_name: str, expected_key_id: str, response: dict
+    ) -> None:
+        """
+        Tell the mock http client to expect a perspectives-server key query
+        """
 
         def post_json(destination, path, data, **kwargs):
             self.assertEqual(destination, self.mock_perspective_server.server_name)
@@ -447,11 +451,79 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
 
             # check that the request is for the expected key
             q = data["server_keys"]
-            self.assertEqual(list(q[SERVER_NAME].keys()), ["key1"])
+            self.assertEqual(list(q[expected_server_name].keys()), [expected_key_id])
             return {"server_keys": [response]}
 
         self.http_client.post_json.side_effect = post_json
 
+    def test_get_keys_from_perspectives(self):
+        # arbitrarily advance the clock a bit
+        self.reactor.advance(100)
+
+        fetcher = PerspectivesKeyFetcher(self.hs)
+
+        SERVER_NAME = "server2"
+        testkey = signedjson.key.generate_signing_key("ver1")
+        testverifykey = signedjson.key.get_verify_key(testkey)
+        testverifykey_id = "ed25519:ver1"
+        VALID_UNTIL_TS = 200 * 1000
+
+        response = self.build_perspectives_response(
+            SERVER_NAME, testkey, VALID_UNTIL_TS,
+        )
+
+        self.expect_outgoing_key_query(SERVER_NAME, "key1", response)
+
+        keys_to_fetch = {SERVER_NAME: {"key1": 0}}
+        keys = self.get_success(fetcher.get_keys(keys_to_fetch))
+        self.assertIn(SERVER_NAME, keys)
+        k = keys[SERVER_NAME][testverifykey_id]
+        self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS)
+        self.assertEqual(k.verify_key, testverifykey)
+        self.assertEqual(k.verify_key.alg, "ed25519")
+        self.assertEqual(k.verify_key.version, "ver1")
+
+        # check that the perspectives store is correctly updated
+        lookup_triplet = (SERVER_NAME, testverifykey_id, None)
+        key_json = self.get_success(
+            self.hs.get_datastore().get_server_keys_json([lookup_triplet])
+        )
+        res = key_json[lookup_triplet]
+        self.assertEqual(len(res), 1)
+        res = res[0]
+        self.assertEqual(res["key_id"], testverifykey_id)
+        self.assertEqual(res["from_server"], self.mock_perspective_server.server_name)
+        self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000)
+        self.assertEqual(res["ts_valid_until_ms"], VALID_UNTIL_TS)
+
+        self.assertEqual(
+            bytes(res["key_json"]), canonicaljson.encode_canonical_json(response)
+        )
+
+    def test_get_perspectives_own_key(self):
+        """Check that we can get the perspectives server's own keys
+
+        This is slightly complicated by the fact that the perspectives server may
+        use different keys for signing notary responses.
+        """
+
+        # arbitrarily advance the clock a bit
+        self.reactor.advance(100)
+
+        fetcher = PerspectivesKeyFetcher(self.hs)
+
+        SERVER_NAME = self.mock_perspective_server.server_name
+        testkey = signedjson.key.generate_signing_key("ver1")
+        testverifykey = signedjson.key.get_verify_key(testkey)
+        testverifykey_id = "ed25519:ver1"
+        VALID_UNTIL_TS = 200 * 1000
+
+        response = self.build_perspectives_response(
+            SERVER_NAME, testkey, VALID_UNTIL_TS
+        )
+
+        self.expect_outgoing_key_query(SERVER_NAME, "key1", response)
+
         keys_to_fetch = {SERVER_NAME: {"key1": 0}}
         keys = self.get_success(fetcher.get_keys(keys_to_fetch))
         self.assertIn(SERVER_NAME, keys)
@@ -490,35 +562,14 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
         VALID_UNTIL_TS = 200 * 1000
 
         def build_response():
-            # valid response
-            response = {
-                "server_name": SERVER_NAME,
-                "old_verify_keys": {},
-                "valid_until_ts": VALID_UNTIL_TS,
-                "verify_keys": {
-                    testverifykey_id: {
-                        "key": signedjson.key.encode_verify_key_base64(testverifykey)
-                    }
-                },
-            }
-
-            # the response must be signed by both the origin server and the perspectives
-            # server.
-            signedjson.sign.sign_json(response, SERVER_NAME, testkey)
-            self.mock_perspective_server.sign_response(response)
-            return response
+            return self.build_perspectives_response(
+                SERVER_NAME, testkey, VALID_UNTIL_TS
+            )
 
         def get_key_from_perspectives(response):
             fetcher = PerspectivesKeyFetcher(self.hs)
             keys_to_fetch = {SERVER_NAME: {"key1": 0}}
-
-            def post_json(destination, path, data, **kwargs):
-                self.assertEqual(destination, self.mock_perspective_server.server_name)
-                self.assertEqual(path, "/_matrix/key/v2/query")
-                return {"server_keys": [response]}
-
-            self.http_client.post_json.side_effect = post_json
-
+            self.expect_outgoing_key_query(SERVER_NAME, "key1", response)
             return self.get_success(fetcher.get_keys(keys_to_fetch))
 
         # start with a valid response so we can check we are testing the right thing
-- 
cgit 1.4.1


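The keyring change relaxes the rule from "every signature's key must appear in
verify_keys" to "at least one known key must verify". A sketch of the relaxed
check, using a generic exception in place of Synapse's KeyLookupError and
assuming verify_keys maps key ids to objects with a verify_key attribute:

    from signedjson.sign import verify_signed_json

    def check_response_signature(response_json: dict, verify_keys: dict) -> None:
        """Accept the response if any known key produced a valid signature."""
        server_name = response_json["server_name"]
        for key_id in response_json["signatures"].get(server_name, {}):
            key = verify_keys.get(key_id)
            if not key:
                # e.g. the notary signed its own keys with a separate notary key
                continue
            # raises SignatureVerifyException on a bad signature
            verify_signed_json(response_json, server_name, key.verify_key)
            return  # one good signature is enough
        raise ValueError("response not signed by any known key")
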
From 550b2946d8beb9c3808972e730790d6dda86d953 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Fri, 3 Jan 2020 16:54:32 +0000
Subject: changelog

---
 changelog.d/6629.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6629.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6629.misc b/changelog.d/6629.misc
new file mode 100644
index 0000000000..68f77af05b
--- /dev/null
+++ b/changelog.d/6629.misc
@@ -0,0 +1 @@
+Simplify event creation code by removing redundant queries on the event_reference_hashes table.
\ No newline at end of file
-- 
cgit 1.4.1


From ab4b4ee6a7e15d1d6e83c4b826051da7df7f83e3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 6 Jan 2020 14:34:02 +0000
Subject: Fix an error which was thrown by the PresenceHandler _on_shutdown
 handler. (#6640)

---
 changelog.d/6640.bugfix      | 1 +
 synapse/handlers/presence.py | 9 ++-------
 2 files changed, 3 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6640.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6640.bugfix b/changelog.d/6640.bugfix
new file mode 100644
index 0000000000..8c2a129933
--- /dev/null
+++ b/changelog.d/6640.bugfix
@@ -0,0 +1 @@
+Fix an error which was thrown by the PresenceHandler _on_shutdown handler.
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 240c4add12..202aa9294f 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -95,12 +95,7 @@ assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
 
 
 class PresenceHandler(object):
-    def __init__(self, hs):
-        """
-
-        Args:
-            hs (synapse.server.HomeServer):
-        """
+    def __init__(self, hs: "synapse.server.HomeServer"):
         self.hs = hs
         self.is_mine = hs.is_mine
         self.is_mine_id = hs.is_mine_id
@@ -230,7 +225,7 @@ class PresenceHandler(object):
         is some spurious presence changes that will self-correct.
         """
         # If the DB pool has already terminated, don't try updating
-        if not self.store.database.is_running():
+        if not self.store.db.is_running():
             return
 
         logger.info(
-- 
cgit 1.4.1


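Beyond the type annotation, the substantive fix above is the corrected
attribute name in the shutdown guard: check that the connection pool is still
running before attempting a final write. Schematically (the final-write method
here is an assumption, not Synapse's exact API):

    async def on_shutdown(store) -> None:
        # If the DB pool has already terminated, skip the final write;
        # the resulting spurious presence state self-corrects on restart.
        if not store.db.is_running():
            return
        await store.persist_presence_on_shutdown()  # hypothetical final write
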
From 9f6c1befbbb0279dca261b105148e633c3d45453 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 6 Jan 2020 14:44:01 +0000
Subject: Add experimental 'databases' config (#6580)

---
 changelog.d/6580.feature                |  1 +
 synapse/config/database.py              | 55 +++++++++++++++++++++++++--------
 synapse/storage/data_stores/__init__.py | 21 +++++++++++++
 3 files changed, 64 insertions(+), 13 deletions(-)
 create mode 100644 changelog.d/6580.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6580.feature b/changelog.d/6580.feature
new file mode 100644
index 0000000000..233c589c66
--- /dev/null
+++ b/changelog.d/6580.feature
@@ -0,0 +1 @@
+Add experimental config option to specify multiple databases.
diff --git a/synapse/config/database.py b/synapse/config/database.py
index 134824789c..219b32f670 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -15,7 +15,6 @@
 import logging
 import os
 from textwrap import indent
-from typing import List
 
 import yaml
 
@@ -30,16 +29,13 @@ class DatabaseConnectionConfig:
     Args:
         name: A label for the database, used for logging.
         db_config: The config for a particular database, as per `database`
-            section of main config. Has two fields: `name` for database
-            module name, and `args` for the args to give to the database
-            connector.
-        data_stores: The list of data stores that should be provisioned on the
-            database. Defaults to all data stores.
+            section of main config. Has three fields: `name` for database
+            module name, `args` for the args to give to the database
+            connector, and optional `data_stores` that is a list of stores to
+            provision on this database (defaulting to all).
     """
 
-    def __init__(
-        self, name: str, db_config: dict, data_stores: List[str] = ["main", "state"]
-    ):
+    def __init__(self, name: str, db_config: dict):
         if db_config["name"] not in ("sqlite3", "psycopg2"):
             raise ConfigError("Unsupported database type %r" % (db_config["name"],))
 
@@ -48,6 +44,10 @@ class DatabaseConnectionConfig:
                 {"cp_min": 1, "cp_max": 1, "check_same_thread": False}
             )
 
+        data_stores = db_config.get("data_stores")
+        if data_stores is None:
+            data_stores = ["main", "state"]
+
         self.name = name
         self.config = db_config
         self.data_stores = data_stores
@@ -59,14 +59,43 @@ class DatabaseConfig(Config):
     def read_config(self, config, **kwargs):
         self.event_cache_size = self.parse_size(config.get("event_cache_size", "10K"))
 
+        # We *experimentally* support specifying multiple databases via the
+        # `databases` key. This is a map from a label to database config in the
+        # same format as the `database` config option, plus an extra
+        # `data_stores` key to specify which data store goes where. For example:
+        #
+        #   databases:
+        #       master:
+        #           name: psycopg2
+        #           data_stores: ["main"]
+        #           args: {}
+        #       state:
+        #           name: psycopg2
+        #           data_stores: ["state"]
+        #           args: {}
+
+        multi_database_config = config.get("databases")
         database_config = config.get("database")
 
-        if database_config is None:
-            database_config = {"name": "sqlite3", "args": {}}
+        if multi_database_config and database_config:
+            raise ConfigError("Can't specify both 'database' and 'datbases' in config")
+
+        if multi_database_config:
+            if config.get("database_path"):
+                raise ConfigError("Can't specify 'database_path' with 'databases'")
+
+            self.databases = [
+                DatabaseConnectionConfig(name, db_conf)
+                for name, db_conf in multi_database_config.items()
+            ]
+
+        else:
+            if database_config is None:
+                database_config = {"name": "sqlite3", "args": {}}
 
-        self.databases = [DatabaseConnectionConfig("master", database_config)]
+            self.databases = [DatabaseConnectionConfig("master", database_config)]
 
-        self.set_databasepath(config.get("database_path"))
+            self.set_databasepath(config.get("database_path"))
 
     def generate_config_section(self, data_dir_path, database_conf, **kwargs):
         if not database_conf:
diff --git a/synapse/storage/data_stores/__init__.py b/synapse/storage/data_stores/__init__.py
index d20df5f076..092e803799 100644
--- a/synapse/storage/data_stores/__init__.py
+++ b/synapse/storage/data_stores/__init__.py
@@ -37,6 +37,8 @@ class DataStores(object):
         # store.
 
         self.databases = []
+        self.main = None
+        self.state = None
 
         for database_config in hs.config.database.databases:
             db_name = database_config.name
@@ -54,10 +56,22 @@ class DataStores(object):
 
                 if "main" in database_config.data_stores:
                     logger.info("Starting 'main' data store")
+
+                    # Sanity check we don't try and configure the main store on
+                    # multiple databases.
+                    if self.main:
+                        raise Exception("'main' data store already configured")
+
                     self.main = main_store_class(database, db_conn, hs)
 
                 if "state" in database_config.data_stores:
                     logger.info("Starting 'state' data store")
+
+                    # Sanity check we don't try and configure the state store on
+                    # multiple databases.
+                    if self.state:
+                        raise Exception("'state' data store already configured")
+
                     self.state = StateGroupDataStore(database, db_conn, hs)
 
                 db_conn.commit()
@@ -65,3 +79,10 @@ class DataStores(object):
                 self.databases.append(database)
 
                 logger.info("Database %r prepared", db_name)
+
+        # Sanity check that we have actually configured all the required stores.
+        if not self.main:
+            raise Exception("No 'main' data store configured")
+
+        if not self.state:
+            raise Exception("No 'main' data store configured")
-- 
cgit 1.4.1


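The precedence implemented above: the experimental `databases` map wins, is
mutually exclusive with both `database` and `database_path`, and an absent
`database` falls back to SQLite. A condensed sketch of that decision tree:

    def resolve_database_configs(config: dict) -> list:
        """Sketch of the precedence rules in synapse.config.database."""
        multi = config.get("databases")
        single = config.get("database")

        if multi and single:
            raise ValueError("Can't specify both 'database' and 'databases'")

        if multi:
            if config.get("database_path"):
                raise ValueError("Can't specify 'database_path' with 'databases'")
            return [(name, db_conf) for name, db_conf in multi.items()]

        if single is None:
            single = {"name": "sqlite3", "args": {}}  # default: SQLite
        return [("master", single)]
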
From ba897a75903129a453d4fb853190dd31f7d1193b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 6 Jan 2020 15:22:46 +0000
Subject: Fix some test failures when frozen_dicts are enabled (#6642)

Fixes #4026
---
 changelog.d/6642.misc                     |  1 +
 synapse/crypto/event_signing.py           |  9 ++++++---
 synapse/handlers/room.py                  | 15 +++++++++------
 synapse/handlers/room_member.py           |  2 ++
 synapse/storage/data_stores/main/state.py |  4 ++--
 5 files changed, 20 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/6642.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6642.misc b/changelog.d/6642.misc
new file mode 100644
index 0000000000..a480bbd134
--- /dev/null
+++ b/changelog.d/6642.misc
@@ -0,0 +1 @@
+Fix errors when frozen_dicts are enabled.
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index ccaa8a9920..e65bd61d97 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
+import collections.abc
 import hashlib
 import logging
 
@@ -40,8 +40,11 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
     # some malformed events lack a 'hashes'. Protect against it being missing
     # or a weird type by basically treating it the same as an unhashed event.
     hashes = event.get("hashes")
-    if not isinstance(hashes, dict):
-        raise SynapseError(400, "Malformed 'hashes'", Codes.UNAUTHORIZED)
+    # nb it might be a frozendict or a dict
+    if not isinstance(hashes, collections.abc.Mapping):
+        raise SynapseError(
+            400, "Malformed 'hashes': %s" % (type(hashes),), Codes.UNAUTHORIZED
+        )
 
     if name not in hashes:
         raise SynapseError(
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 4f489762fc..9cab2adbfb 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -16,7 +16,7 @@
 # limitations under the License.
 
 """Contains functions for performing events on rooms."""
-import copy
+
 import itertools
 import logging
 import math
@@ -368,13 +368,16 @@ class RoomCreationHandler(BaseHandler):
         # Raise the requester's power level in the new room if necessary
         current_power_level = power_levels["users"][user_id]
         if current_power_level < needed_power_level:
-            # Perform a deepcopy in order to not modify the original power levels in a
-            # room, as its contents are preserved as the state for the old room later on
-            new_power_levels = copy.deepcopy(power_levels)
-            initial_state[(EventTypes.PowerLevels, "")] = new_power_levels
+            # make sure we copy the event content rather than overwriting it.
+            # note that if frozen_dicts are enabled, `power_levels` will be a frozen
+            # dict so we can't just copy.deepcopy it.
 
-            # Assign this power level to the requester
+            new_power_levels = {k: v for k, v in power_levels.items() if k != "users"}
+            new_power_levels["users"] = {
+                k: v for k, v in power_levels.get("users", {}).items() if k != user_id
+            }
             new_power_levels["users"][user_id] = needed_power_level
+            initial_state[(EventTypes.PowerLevels, "")] = new_power_levels
 
         yield self._send_events_for_new_room(
             requester,
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 44c5e3239c..dbb0c3dda2 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -507,6 +507,8 @@ class RoomMemberHandler(object):
         Returns:
             Deferred
         """
+        logger.info("Transferring room state from %s to %s", old_room_id, room_id)
+
         # Find all local users that were in the old room and copy over each user's state
         users = yield self.store.get_users_in_room(old_room_id)
         yield self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index 0dc39f139c..d07440e3ed 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import collections.abc
 import logging
 from collections import namedtuple
 from typing import Iterable, Tuple
@@ -107,7 +107,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         predecessor = create_event.content.get("predecessor", None)
 
         # Ensure the key is a dictionary
-        if not isinstance(predecessor, dict):
+        if not isinstance(predecessor, collections.abc.Mapping):
             return None
 
         return predecessor
-- 
cgit 1.4.1


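Two ideas recur in the patch above: type-check against
collections.abc.Mapping so that both plain dicts and frozendicts pass, and
copy nested mappings with comprehensions rather than copy.deepcopy, which
cannot produce a mutable copy of a frozendict. A combined sketch:

    import collections.abc

    def copy_power_levels(power_levels, user_id: str, level: int) -> dict:
        """Return a plain, mutable copy with user_id's level overridden.

        Accepts any Mapping, so it works whether or not frozen_dicts
        (immutable event contents) are enabled.
        """
        if not isinstance(power_levels, collections.abc.Mapping):
            raise TypeError("expected a mapping, got %s" % (type(power_levels),))
        new = {k: v for k, v in power_levels.items() if k != "users"}
        new["users"] = dict(power_levels.get("users", {}))
        new["users"][user_id] = level
        return new
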
From 055e6fbaa2a4f2aceb82677c7d2480982fd76c9c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Mon, 6 Jan 2020 17:17:40 +0000
Subject: changelog

---
 changelog.d/6645.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6645.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6645.bugfix b/changelog.d/6645.bugfix
new file mode 100644
index 0000000000..f648df3fc0
--- /dev/null
+++ b/changelog.d/6645.bugfix
@@ -0,0 +1 @@
+Fix exceptions in the synchrotron worker log when events are rejected.
-- 
cgit 1.4.1


From 1f2a5923d4e2339e214a2ed0affadf36d0af9662 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 7 Jan 2020 13:12:17 +0000
Subject: Changelog

---
 changelog.d/6652.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6652.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6652.bugfix b/changelog.d/6652.bugfix
new file mode 100644
index 0000000000..7e9781d652
--- /dev/null
+++ b/changelog.d/6652.bugfix
@@ -0,0 +1 @@
+Fix a bug causing Synapse not to fetch missing events when it believes it has every event in the room.
-- 
cgit 1.4.1


From d20c3465441cd64ba3a1e84ee399bbadc0997bdf Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 7 Jan 2020 14:09:07 +0000
Subject: Port BackgroundUpdateTestCase to HomeserverTestCase (#6653)

---
 changelog.d/6653.misc                   |  1 +
 tests/storage/test_background_update.py | 72 +++++++++++++++++----------------
 2 files changed, 38 insertions(+), 35 deletions(-)
 create mode 100644 changelog.d/6653.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6653.misc b/changelog.d/6653.misc
new file mode 100644
index 0000000000..fbe7c0e7db
--- /dev/null
+++ b/changelog.d/6653.misc
@@ -0,0 +1 @@
+Port core background update routines to async/await.
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
index aec76f4ab1..ae14fb407d 100644
--- a/tests/storage/test_background_update.py
+++ b/tests/storage/test_background_update.py
@@ -2,44 +2,37 @@ from mock import Mock
 
 from twisted.internet import defer
 
+from synapse.storage.background_updates import BackgroundUpdater
+
 from tests import unittest
-from tests.utils import setup_test_homeserver
 
 
-class BackgroundUpdateTestCase(unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs = yield setup_test_homeserver(self.addCleanup)
-        self.store = hs.get_datastore()
-        self.clock = hs.get_clock()
+class BackgroundUpdateTestCase(unittest.HomeserverTestCase):
+    def prepare(self, reactor, clock, homeserver):
+        self.updates = self.hs.get_datastore().db.updates  # type: BackgroundUpdater
+        # the base test class should have run the real bg updates for us
+        self.assertTrue(self.updates.has_completed_background_updates())
 
         self.update_handler = Mock()
-
-        yield self.store.db.updates.register_background_update_handler(
+        self.updates.register_background_update_handler(
             "test_update", self.update_handler
         )
 
-        # run the real background updates, to get them out the way
-        # (perhaps we should run them as part of the test HS setup, since we
-        # run all of the other schema setup stuff there?)
-        while True:
-            res = yield self.store.db.updates.do_next_background_update(1000)
-            if res is None:
-                break
-
-    @defer.inlineCallbacks
     def test_do_background_update(self):
-        desired_count = 1000
+        # the time we claim each update takes
         duration_ms = 42
 
+        # the target runtime for each bg update
+        target_background_update_duration_ms = 50000
+
         # first step: make a bit of progress
         @defer.inlineCallbacks
         def update(progress, count):
-            self.clock.advance_time_msec(count * duration_ms)
+            yield self.clock.sleep((count * duration_ms) / 1000)
             progress = {"my_key": progress["my_key"] + 1}
-            yield self.store.db.runInteraction(
+            yield self.hs.get_datastore().db.runInteraction(
                 "update_progress",
-                self.store.db.updates._background_update_progress_txn,
+                self.updates._background_update_progress_txn,
                 "test_update",
                 progress,
             )
@@ -47,37 +40,46 @@ class BackgroundUpdateTestCase(unittest.TestCase):
 
         self.update_handler.side_effect = update
 
-        yield self.store.db.updates.start_background_update(
-            "test_update", {"my_key": 1}
+        self.get_success(
+            self.updates.start_background_update("test_update", {"my_key": 1})
         )
-
         self.update_handler.reset_mock()
-        result = yield self.store.db.updates.do_next_background_update(
-            duration_ms * desired_count
+        res = self.get_success(
+            self.updates.do_next_background_update(
+                target_background_update_duration_ms
+            ),
+            by=0.1,
         )
-        self.assertIsNotNone(result)
+        self.assertIsNotNone(res)
+
+        # on the first call, we should get run with the default background update size
         self.update_handler.assert_called_once_with(
-            {"my_key": 1}, self.store.db.updates.DEFAULT_BACKGROUND_BATCH_SIZE
+            {"my_key": 1}, self.updates.DEFAULT_BACKGROUND_BATCH_SIZE
         )
 
         # second step: complete the update
+        # we should now get run with a much bigger number of items to update
         @defer.inlineCallbacks
         def update(progress, count):
-            yield self.store.db.updates._end_background_update("test_update")
+            self.assertEqual(progress, {"my_key": 2})
+            self.assertAlmostEqual(
+                count, target_background_update_duration_ms / duration_ms, places=0,
+            )
+            yield self.updates._end_background_update("test_update")
             return count
 
         self.update_handler.side_effect = update
         self.update_handler.reset_mock()
-        result = yield self.store.db.updates.do_next_background_update(
-            duration_ms * desired_count
+        result = self.get_success(
+            self.updates.do_next_background_update(target_background_update_duration_ms)
         )
         self.assertIsNotNone(result)
-        self.update_handler.assert_called_once_with({"my_key": 2}, desired_count)
+        self.update_handler.assert_called_once()
 
         # third step: we don't expect to be called any more
         self.update_handler.reset_mock()
-        result = yield self.store.db.updates.do_next_background_update(
-            duration_ms * desired_count
+        result = self.get_success(
+            self.updates.do_next_background_update(target_background_update_duration_ms)
         )
         self.assertIsNone(result)
         self.assertFalse(self.update_handler.called)
-- 
cgit 1.4.1


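The assertAlmostEqual in the rewritten test encodes the batch-size feedback
loop: if each item in the previous batch took duration_ms, the next batch is
sized to fill the target duration, i.e. count ≈ 50000 / 42 ≈ 1190. As plain
arithmetic:

    target_ms = 50_000   # desired runtime for one background-update step
    per_item_ms = 42     # measured cost of one item in the previous batch

    next_batch_size = target_ms / per_item_ms  # 1190.47...
    assert round(next_batch_size) == 1190      # matches the test, to 0 places
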
From 9824a39d807d2d13424095743761930b853fb08f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 7 Jan 2020 14:12:42 +0000
Subject: Async/await for background updates (#6647)

so that bg update routines can be async
---
 changelog.d/6647.misc                 |  1 +
 synapse/storage/background_updates.py | 36 +++++++++++++++++++----------------
 2 files changed, 21 insertions(+), 16 deletions(-)
 create mode 100644 changelog.d/6647.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6647.misc b/changelog.d/6647.misc
new file mode 100644
index 0000000000..fbe7c0e7db
--- /dev/null
+++ b/changelog.d/6647.misc
@@ -0,0 +1 @@
+Port core background update routines to async/await.
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 4f97fd5ab6..b4825acc7b 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Optional
 
 from canonicaljson import json
 
@@ -97,15 +98,14 @@ class BackgroundUpdater(object):
     def start_doing_background_updates(self):
         run_as_background_process("background_updates", self.run_background_updates)
 
-    @defer.inlineCallbacks
-    def run_background_updates(self, sleep=True):
+    async def run_background_updates(self, sleep=True):
         logger.info("Starting background schema updates")
         while True:
             if sleep:
-                yield self._clock.sleep(self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0)
+                await self._clock.sleep(self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0)
 
             try:
-                result = yield self.do_next_background_update(
+                result = await self.do_next_background_update(
                     self.BACKGROUND_UPDATE_DURATION_MS
                 )
             except Exception:
@@ -170,20 +170,21 @@ class BackgroundUpdater(object):
 
         return not update_exists
 
-    @defer.inlineCallbacks
-    def do_next_background_update(self, desired_duration_ms):
+    async def do_next_background_update(
+        self, desired_duration_ms: float
+    ) -> Optional[int]:
         """Does some amount of work on the next queued background update
 
+        Returns once some amount of work is done.
+
         Args:
             desired_duration_ms(float): How long we want to spend
                 updating.
         Returns:
-            A deferred that completes once some amount of work is done.
-            The deferred will have a value of None if there is currently
-            no more work to do.
+            None if there is no more work to do, otherwise an int
         """
         if not self._background_update_queue:
-            updates = yield self.db.simple_select_list(
+            updates = await self.db.simple_select_list(
                 "background_updates",
                 keyvalues=None,
                 retcols=("update_name", "depends_on"),
@@ -201,11 +202,12 @@ class BackgroundUpdater(object):
         update_name = self._background_update_queue.pop(0)
         self._background_update_queue.append(update_name)
 
-        res = yield self._do_background_update(update_name, desired_duration_ms)
+        res = await self._do_background_update(update_name, desired_duration_ms)
         return res
 
-    @defer.inlineCallbacks
-    def _do_background_update(self, update_name, desired_duration_ms):
+    async def _do_background_update(
+        self, update_name: str, desired_duration_ms: float
+    ) -> int:
         logger.info("Starting update batch on background update '%s'", update_name)
 
         update_handler = self._background_update_handlers[update_name]
@@ -225,7 +227,7 @@ class BackgroundUpdater(object):
         else:
             batch_size = self.DEFAULT_BACKGROUND_BATCH_SIZE
 
-        progress_json = yield self.db.simple_select_one_onecol(
+        progress_json = await self.db.simple_select_one_onecol(
             "background_updates",
             keyvalues={"update_name": update_name},
             retcol="progress_json",
@@ -234,7 +236,7 @@ class BackgroundUpdater(object):
         progress = json.loads(progress_json)
 
         time_start = self._clock.time_msec()
-        items_updated = yield update_handler(progress, batch_size)
+        items_updated = await update_handler(progress, batch_size)
         time_stop = self._clock.time_msec()
 
         duration_ms = time_stop - time_start
@@ -263,7 +265,9 @@ class BackgroundUpdater(object):
         * A dict of the current progress
         * An integer count of the number of items to update in this batch.
 
-        The handler should return a deferred integer count of items updated.
+        The handler should return a deferred or coroutine which returns an integer count
+        of items updated.
+
         The handler is responsible for updating the progress of the update.
 
         Args:
-- 
cgit 1.4.1


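The mechanical shape of the conversion above: drop @defer.inlineCallbacks,
mark the function async, and turn each yield of a Deferred into an await
(this works because Twisted Deferreds implement __await__). A before/after
sketch with an illustrative store method:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def fetch_updates_old(store):
        # Before: a Twisted inlineCallbacks generator
        rows = yield store.simple_select_list("background_updates")
        return rows

    async def fetch_updates_new(store):
        # After: a native coroutine awaiting the same Deferred
        rows = await store.simple_select_list("background_updates")
        return rows
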
From 85db7f73be15cc088f5e378980021e335001ce87 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 7 Jan 2020 14:18:43 +0000
Subject: Add a background update to clear tombstoned rooms from the directory
 (#6648)

* Add a background update to clear tombstoned rooms from the directory

* use the ABC metaclass
---
 changelog.d/6648.bugfix                            |  1 +
 scripts/synapse_port_db                            |  5 ++
 synapse/storage/_base.py                           |  4 +-
 synapse/storage/background_updates.py              | 15 +++++
 synapse/storage/data_stores/main/room.py           | 64 ++++++++++++++++++++++
 .../56/remove_tombstoned_rooms_from_directory.sql  | 18 ++++++
 6 files changed, 106 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6648.bugfix
 create mode 100644 synapse/storage/data_stores/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql

(limited to 'changelog.d')

diff --git a/changelog.d/6648.bugfix b/changelog.d/6648.bugfix
new file mode 100644
index 0000000000..39916de437
--- /dev/null
+++ b/changelog.d/6648.bugfix
@@ -0,0 +1 @@
+Ensure that upgraded rooms are removed from the directory.
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index eb927f2094..cb77314f1e 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -166,6 +166,11 @@ class Store(
             logger.exception("Failed to insert: %s", table)
             raise
 
+    def set_room_is_public(self, room_id, is_public):
+        raise Exception(
+            "Attempt to set room_is_public during port_db: database not empty?"
+        )
+
 
 class MockHomeserver:
     def __init__(self, config):
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 88546ad614..3bb9381663 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -16,6 +16,7 @@
 # limitations under the License.
 import logging
 import random
+from abc import ABCMeta
 
 from six import PY2
 from six.moves import builtins
@@ -30,7 +31,8 @@ from synapse.types import get_domain_from_id
 logger = logging.getLogger(__name__)
 
 
-class SQLBaseStore(object):
+# some of our subclasses have abstract methods, so we use the ABCMeta metaclass.
+class SQLBaseStore(metaclass=ABCMeta):
     """Base class for data stores that holds helper functions.
 
     Note that multiple instances of this class will exist as there will be one
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index b4825acc7b..bd547f35cf 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -436,6 +436,21 @@ class BackgroundUpdater(object):
             "background_updates", keyvalues={"update_name": update_name}
         )
 
+    def _background_update_progress(self, update_name: str, progress: dict):
+        """Update the progress of a background update
+
+        Args:
+            update_name: The name of the background update task
+            progress: The progress of the update.
+        """
+
+        return self.db.runInteraction(
+            "background_update_progress",
+            self._background_update_progress_txn,
+            update_name,
+            progress,
+        )
+
     def _background_update_progress_txn(self, txn, update_name, progress):
         """Update the progress of a background update
 
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index aa476d0fbf..79cfd39194 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -17,6 +17,7 @@
 import collections
 import logging
 import re
+from abc import abstractmethod
 from typing import Optional, Tuple
 
 from six import integer_types
@@ -367,6 +368,8 @@ class RoomWorkerStore(SQLBaseStore):
 
 
 class RoomBackgroundUpdateStore(SQLBaseStore):
+    REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
+
     def __init__(self, database: Database, db_conn, hs):
         super(RoomBackgroundUpdateStore, self).__init__(database, db_conn, hs)
 
@@ -376,6 +379,11 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
             "insert_room_retention", self._background_insert_retention,
         )
 
+        self.db.updates.register_background_update_handler(
+            self.REMOVE_TOMESTONED_ROOMS_BG_UPDATE,
+            self._remove_tombstoned_rooms_from_directory,
+        )
+
     @defer.inlineCallbacks
     def _background_insert_retention(self, progress, batch_size):
         """Retrieves a list of all rooms within a range and inserts an entry for each of
@@ -444,6 +452,62 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
 
         defer.returnValue(batch_size)
 
+    async def _remove_tombstoned_rooms_from_directory(
+        self, progress, batch_size
+    ) -> int:
+        """Removes any rooms with tombstone events from the room directory
+
+        Nowadays this is handled by the room upgrade handler, but we may have some
+        that got left behind.
+        """
+
+        last_room = progress.get("room_id", "")
+
+        def _get_rooms(txn):
+            txn.execute(
+                """
+                SELECT room_id
+                FROM rooms r
+                INNER JOIN current_state_events cse USING (room_id)
+                WHERE room_id > ? AND r.is_public
+                AND cse.type = '%s' AND cse.state_key = ''
+                ORDER BY room_id ASC
+                LIMIT ?;
+                """
+                % EventTypes.Tombstone,
+                (last_room, batch_size),
+            )
+
+            return [row[0] for row in txn]
+
+        rooms = await self.db.runInteraction(
+            "get_tombstoned_directory_rooms", _get_rooms
+        )
+
+        if not rooms:
+            await self.db.updates._end_background_update(
+                self.REMOVE_TOMESTONED_ROOMS_BG_UPDATE
+            )
+            return 0
+
+        for room_id in rooms:
+            logger.info("Removing tombstoned room %s from the directory", room_id)
+            await self.set_room_is_public(room_id, False)
+
+        await self.db.updates._background_update_progress(
+            self.REMOVE_TOMESTONED_ROOMS_BG_UPDATE, {"room_id": rooms[-1]}
+        )
+
+        return len(rooms)
+
+    @abstractmethod
+    def set_room_is_public(self, room_id, is_public):
+        # this will need to be implemented if a background update is performed with
+        # existing (tombstoned, public) rooms in the database.
+        #
+        # It's overridden by RoomStore for the synapse master.
+        raise NotImplementedError()
+
 
 class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
     def __init__(self, database: Database, db_conn, hs):
diff --git a/synapse/storage/data_stores/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql b/synapse/storage/data_stores/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql
new file mode 100644
index 0000000000..aeb17813d3
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql
@@ -0,0 +1,18 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Now that #6232 is a thing, we can remove old rooms from the directory.
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('remove_tombstoned_rooms_from_directory', '{}');
-- 
cgit 1.4.1
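
A minimal, self-contained sketch of the batched background-update pattern the
commit above uses: read a progress marker, process up to `batch_size` rows,
persist the new marker, and stop when a batch comes back empty. The in-memory
helpers below are stand-ins for the real `self.db.updates` machinery, not
Synapse APIs:

    import asyncio
    from typing import Dict, List

    # Toy stand-ins (assumptions, not Synapse APIs): public rooms carrying
    # an m.room.tombstone state event, plus a progress store.
    PUBLIC_TOMBSTONED = ["!abc:example.org", "!def:example.org"]
    PROGRESS: Dict[str, str] = {}

    async def fetch_batch(after: str, limit: int) -> List[str]:
        # Mirrors the SQL above: rooms ordered by room_id, strictly after
        # the last room processed, capped at the batch size.
        return [r for r in sorted(PUBLIC_TOMBSTONED) if r > after][:limit]

    async def set_room_is_public(room_id: str, is_public: bool) -> None:
        print("Removing tombstoned room %s from the directory" % room_id)

    async def run_update(batch_size: int) -> int:
        last_room = PROGRESS.get("room_id", "")
        rooms = await fetch_batch(after=last_room, limit=batch_size)
        if not rooms:
            return 0  # nothing left; the real code ends the update here
        for room_id in rooms:
            await set_room_is_public(room_id, False)
        PROGRESS["room_id"] = rooms[-1]  # resume point after a restart
        return len(rooms)

    async def main() -> None:
        while await run_update(batch_size=1):
            pass

    asyncio.run(main())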


From 7f0e706ebf256cc8561b1cdb2efec1b91738694f Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 7 Jan 2020 14:31:13 +0000
Subject: 1.8.0rc1

---
 CHANGES.md               | 80 ++++++++++++++++++++++++++++++++++++++++++++++++
 changelog.d/6245.misc    |  1 -
 changelog.d/6349.feature |  1 -
 changelog.d/6377.bugfix  |  1 -
 changelog.d/6385.bugfix  |  1 -
 changelog.d/6394.feature |  1 -
 changelog.d/6411.feature |  1 -
 changelog.d/6453.feature |  1 -
 changelog.d/6486.bugfix  |  1 -
 changelog.d/6496.misc    |  1 -
 changelog.d/6502.removal |  1 -
 changelog.d/6504.misc    |  1 -
 changelog.d/6505.misc    |  1 -
 changelog.d/6506.misc    |  1 -
 changelog.d/6510.misc    |  1 -
 changelog.d/6511.misc    |  1 -
 changelog.d/6512.misc    |  1 -
 changelog.d/6513.misc    |  1 -
 changelog.d/6514.bugfix  |  1 -
 changelog.d/6515.misc    |  1 -
 changelog.d/6517.misc    |  1 -
 changelog.d/6522.bugfix  |  1 -
 changelog.d/6523.feature |  1 -
 changelog.d/6534.misc    |  1 -
 changelog.d/6537.misc    |  1 -
 changelog.d/6538.misc    |  1 -
 changelog.d/6541.doc     |  1 -
 changelog.d/6546.feature |  1 -
 changelog.d/6555.bugfix  |  1 -
 changelog.d/6557.misc    |  1 -
 changelog.d/6558.misc    |  1 -
 changelog.d/6559.misc    |  1 -
 changelog.d/6564.misc    |  1 -
 changelog.d/6565.misc    |  1 -
 changelog.d/6570.misc    |  1 -
 changelog.d/6571.bugfix  |  1 -
 changelog.d/6580.feature |  1 -
 changelog.d/6601.doc     |  1 -
 changelog.d/6614.doc     |  1 -
 changelog.d/6617.misc    |  1 -
 changelog.d/6619.misc    |  1 -
 changelog.d/6620.misc    |  1 -
 changelog.d/6625.bugfix  |  1 -
 changelog.d/6626.feature |  1 -
 changelog.d/6627.misc    |  1 -
 changelog.d/6628.removal |  1 -
 changelog.d/6629.misc    |  1 -
 changelog.d/6633.bugfix  |  1 -
 changelog.d/6640.bugfix  |  1 -
 changelog.d/6642.misc    |  1 -
 changelog.d/6645.bugfix  |  1 -
 changelog.d/6647.misc    |  1 -
 changelog.d/6648.bugfix  |  1 -
 changelog.d/6652.bugfix  |  1 -
 changelog.d/6653.misc    |  1 -
 synapse/__init__.py      |  2 +-
 56 files changed, 81 insertions(+), 55 deletions(-)
 delete mode 100644 changelog.d/6245.misc
 delete mode 100644 changelog.d/6349.feature
 delete mode 100644 changelog.d/6377.bugfix
 delete mode 100644 changelog.d/6385.bugfix
 delete mode 100644 changelog.d/6394.feature
 delete mode 100644 changelog.d/6411.feature
 delete mode 100644 changelog.d/6453.feature
 delete mode 100644 changelog.d/6486.bugfix
 delete mode 100644 changelog.d/6496.misc
 delete mode 100644 changelog.d/6502.removal
 delete mode 100644 changelog.d/6504.misc
 delete mode 100644 changelog.d/6505.misc
 delete mode 100644 changelog.d/6506.misc
 delete mode 100644 changelog.d/6510.misc
 delete mode 100644 changelog.d/6511.misc
 delete mode 100644 changelog.d/6512.misc
 delete mode 100644 changelog.d/6513.misc
 delete mode 100644 changelog.d/6514.bugfix
 delete mode 100644 changelog.d/6515.misc
 delete mode 100644 changelog.d/6517.misc
 delete mode 100644 changelog.d/6522.bugfix
 delete mode 100644 changelog.d/6523.feature
 delete mode 100644 changelog.d/6534.misc
 delete mode 100644 changelog.d/6537.misc
 delete mode 100644 changelog.d/6538.misc
 delete mode 100644 changelog.d/6541.doc
 delete mode 100644 changelog.d/6546.feature
 delete mode 100644 changelog.d/6555.bugfix
 delete mode 100644 changelog.d/6557.misc
 delete mode 100644 changelog.d/6558.misc
 delete mode 100644 changelog.d/6559.misc
 delete mode 100644 changelog.d/6564.misc
 delete mode 100644 changelog.d/6565.misc
 delete mode 100644 changelog.d/6570.misc
 delete mode 100644 changelog.d/6571.bugfix
 delete mode 100644 changelog.d/6580.feature
 delete mode 100644 changelog.d/6601.doc
 delete mode 100644 changelog.d/6614.doc
 delete mode 100644 changelog.d/6617.misc
 delete mode 100644 changelog.d/6619.misc
 delete mode 100644 changelog.d/6620.misc
 delete mode 100644 changelog.d/6625.bugfix
 delete mode 100644 changelog.d/6626.feature
 delete mode 100644 changelog.d/6627.misc
 delete mode 100644 changelog.d/6628.removal
 delete mode 100644 changelog.d/6629.misc
 delete mode 100644 changelog.d/6633.bugfix
 delete mode 100644 changelog.d/6640.bugfix
 delete mode 100644 changelog.d/6642.misc
 delete mode 100644 changelog.d/6645.bugfix
 delete mode 100644 changelog.d/6647.misc
 delete mode 100644 changelog.d/6648.bugfix
 delete mode 100644 changelog.d/6652.bugfix
 delete mode 100644 changelog.d/6653.misc

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index 361fd1fc6c..2f1cd87e1a 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,83 @@
+Synapse 1.8.0rc1 (2020-01-07)
+=============================
+
+Features
+--------
+
+- Implement v2 APIs for the `send_join` and `send_leave` federation endpoints (as described in [MSC1802](https://github.com/matrix-org/matrix-doc/pull/1802)). ([\#6349](https://github.com/matrix-org/synapse/issues/6349))
+- Add a develop script to generate full SQL schemas. ([\#6394](https://github.com/matrix-org/synapse/issues/6394))
+- Allow custom SAML username mapping functionality through an external provider plugin. ([\#6411](https://github.com/matrix-org/synapse/issues/6411))
+- Automatically delete empty groups/communities. ([\#6453](https://github.com/matrix-org/synapse/issues/6453))
+- Add option `limit_profile_requests_to_users_who_share_rooms` to require that a user share a room with another user in order to query their profile information. ([\#6523](https://github.com/matrix-org/synapse/issues/6523))
+- Add an export_signing_key script to extract the public part of signing keys when rotating them. ([\#6546](https://github.com/matrix-org/synapse/issues/6546))
+- Add experimental config option to specify multiple databases. ([\#6580](https://github.com/matrix-org/synapse/issues/6580))
+- Raise an error if someone tries to use the log_file config option. ([\#6626](https://github.com/matrix-org/synapse/issues/6626))
+
+
+Bugfixes
+--------
+
+- Prevent redacted events from being returned during message search. ([\#6377](https://github.com/matrix-org/synapse/issues/6377), [\#6522](https://github.com/matrix-org/synapse/issues/6522))
+- Prevent an error when trying to search an upgraded room when the server is not in the predecessor room. ([\#6385](https://github.com/matrix-org/synapse/issues/6385))
+- Improve performance of looking up cross-signing keys. ([\#6486](https://github.com/matrix-org/synapse/issues/6486))
+- Fix race which occasionally caused deleted devices to reappear. ([\#6514](https://github.com/matrix-org/synapse/issues/6514))
+- Fix missing row in `device_max_stream_id` that could cause "unable to decrypt" errors after server restart. ([\#6555](https://github.com/matrix-org/synapse/issues/6555))
+- Fix a bug which meant that we did not send systemd notifications on startup if acme was enabled. ([\#6571](https://github.com/matrix-org/synapse/issues/6571))
+- Fix exception when fetching the `matrix.org:ed25519:auto` key. ([\#6625](https://github.com/matrix-org/synapse/issues/6625))
+- Fix bug where a moderator upgraded a room and became an admin in the new room. ([\#6633](https://github.com/matrix-org/synapse/issues/6633))
+- Fix an error which was thrown by the PresenceHandler _on_shutdown handler. ([\#6640](https://github.com/matrix-org/synapse/issues/6640))
+- Fix exceptions in the synchrotron worker log when events are rejected. ([\#6645](https://github.com/matrix-org/synapse/issues/6645))
+- Ensure that upgraded rooms are removed from the directory. ([\#6648](https://github.com/matrix-org/synapse/issues/6648))
+- Fix a bug causing Synapse not to fetch missing events when it believes it has every event in the room. ([\#6652](https://github.com/matrix-org/synapse/issues/6652))
+
+
+Improved Documentation
+----------------------
+
+- Document the Room Shutdown Admin API. ([\#6541](https://github.com/matrix-org/synapse/issues/6541))
+- Reword sections of federate.md that explained delegation at the time of the Synapse 1.0 transition. ([\#6601](https://github.com/matrix-org/synapse/issues/6601))
+- Add a 'Configuration' section to docs/turn-howto.md. ([\#6614](https://github.com/matrix-org/synapse/issues/6614))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove redundant code from event authorisation implementation. ([\#6502](https://github.com/matrix-org/synapse/issues/6502))
+- Remove unused, undocumented /_matrix/content API. ([\#6628](https://github.com/matrix-org/synapse/issues/6628))
+
+
+Internal Changes
+----------------
+
+- Split out state storage into separate data store. ([\#6245](https://github.com/matrix-org/synapse/issues/6245))
+- Port synapse.handlers.initial_sync to async/await. ([\#6496](https://github.com/matrix-org/synapse/issues/6496))
+- Port handlers.account_data and handlers.account_validity to async/await. ([\#6504](https://github.com/matrix-org/synapse/issues/6504))
+- Make `make_deferred_yieldable` work with async/await. ([\#6505](https://github.com/matrix-org/synapse/issues/6505))
+- Remove `SnapshotCache` in favour of `ResponseCache`. ([\#6506](https://github.com/matrix-org/synapse/issues/6506))
+- Change phone home stats to not assume there is a single database and report information about the database used by the main data store. ([\#6510](https://github.com/matrix-org/synapse/issues/6510))
+- Move database config from apps into HomeServer object. ([\#6511](https://github.com/matrix-org/synapse/issues/6511))
+- Silence mypy errors for files outside those specified. ([\#6512](https://github.com/matrix-org/synapse/issues/6512))
+- Remove all assumptions of there being a single physical DB apart from the `synapse.config`. ([\#6513](https://github.com/matrix-org/synapse/issues/6513))
+- Clean up some logging when handling incoming events over federation. ([\#6515](https://github.com/matrix-org/synapse/issues/6515))
+- Port some of FederationHandler to async/await. ([\#6517](https://github.com/matrix-org/synapse/issues/6517))
+- Test more folders against mypy. ([\#6534](https://github.com/matrix-org/synapse/issues/6534))
+- Update `mypy` to new version. ([\#6537](https://github.com/matrix-org/synapse/issues/6537))
+- Adjust the sytest blacklist for worker mode. ([\#6538](https://github.com/matrix-org/synapse/issues/6538))
+- Remove unused `get_pagination_rows` methods from `EventSource` classes. ([\#6557](https://github.com/matrix-org/synapse/issues/6557))
+- Clean up logs from the push notifier at startup. ([\#6558](https://github.com/matrix-org/synapse/issues/6558))
+- Port `synapse.handlers.admin` and `synapse.handlers.deactivate_account` to async/await. ([\#6559](https://github.com/matrix-org/synapse/issues/6559))
+- Change `EventContext` to use the `Storage` class, in preparation for moving state database queries to a separate data store. ([\#6564](https://github.com/matrix-org/synapse/issues/6564))
+- Add assertion that schema delta file names are unique. ([\#6565](https://github.com/matrix-org/synapse/issues/6565))
+- Improve diagnostics on database upgrade failure. ([\#6570](https://github.com/matrix-org/synapse/issues/6570))
+- Reduce the reconnect time when worker replication fails, to make it easier to catch up. ([\#6617](https://github.com/matrix-org/synapse/issues/6617))
+- Simplify http handling by removing redundant SynapseRequestFactory. ([\#6619](https://github.com/matrix-org/synapse/issues/6619))
+- Add a workaround for synapse raising exceptions when fetching the notary's own key from the notary. ([\#6620](https://github.com/matrix-org/synapse/issues/6620))
+- Automate generation of the sample log config. ([\#6627](https://github.com/matrix-org/synapse/issues/6627))
+- Simplify event creation code by removing redundant queries on the event_reference_hashes table. ([\#6629](https://github.com/matrix-org/synapse/issues/6629))
+- Fix errors when frozen_dicts are enabled. ([\#6642](https://github.com/matrix-org/synapse/issues/6642))
+- Port core background update routines to async/await. ([\#6647](https://github.com/matrix-org/synapse/issues/6647), [\#6653](https://github.com/matrix-org/synapse/issues/6653))
+
+
 Synapse 1.7.3 (2019-12-31)
 ==========================
 
diff --git a/changelog.d/6245.misc b/changelog.d/6245.misc
deleted file mode 100644
index a3e6b8296e..0000000000
--- a/changelog.d/6245.misc
+++ /dev/null
@@ -1 +0,0 @@
-Split out state storage into separate data store.
diff --git a/changelog.d/6349.feature b/changelog.d/6349.feature
deleted file mode 100644
index 56c4fbf78e..0000000000
--- a/changelog.d/6349.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement v2 APIs for the `send_join` and `send_leave` federation endpoints (as described in [MSC1802](https://github.com/matrix-org/matrix-doc/pull/1802)).
diff --git a/changelog.d/6377.bugfix b/changelog.d/6377.bugfix
deleted file mode 100644
index ccda96962f..0000000000
--- a/changelog.d/6377.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent redacted events from being returned during message search.
\ No newline at end of file
diff --git a/changelog.d/6385.bugfix b/changelog.d/6385.bugfix
deleted file mode 100644
index 7a2bc02170..0000000000
--- a/changelog.d/6385.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent an error when trying to search an upgraded room when the server is not in the predecessor room.
\ No newline at end of file
diff --git a/changelog.d/6394.feature b/changelog.d/6394.feature
deleted file mode 100644
index 1a0e8845ad..0000000000
--- a/changelog.d/6394.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a develop script to generate full SQL schemas.
\ No newline at end of file
diff --git a/changelog.d/6411.feature b/changelog.d/6411.feature
deleted file mode 100644
index ebea4a208d..0000000000
--- a/changelog.d/6411.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow custom SAML username mapping functionality through an external provider plugin.
\ No newline at end of file
diff --git a/changelog.d/6453.feature b/changelog.d/6453.feature
deleted file mode 100644
index e7bb801c6a..0000000000
--- a/changelog.d/6453.feature
+++ /dev/null
@@ -1 +0,0 @@
-Automatically delete empty groups/communities.
diff --git a/changelog.d/6486.bugfix b/changelog.d/6486.bugfix
deleted file mode 100644
index b98c5a9ae5..0000000000
--- a/changelog.d/6486.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance of looking up cross-signing keys.
diff --git a/changelog.d/6496.misc b/changelog.d/6496.misc
deleted file mode 100644
index 19c6e926b8..0000000000
--- a/changelog.d/6496.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port synapse.handlers.initial_sync to async/await.
diff --git a/changelog.d/6502.removal b/changelog.d/6502.removal
deleted file mode 100644
index 0b72261d58..0000000000
--- a/changelog.d/6502.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove redundant code from event authorisation implementation.
diff --git a/changelog.d/6504.misc b/changelog.d/6504.misc
deleted file mode 100644
index 7c873459af..0000000000
--- a/changelog.d/6504.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port handlers.account_data and handlers.account_validity to async/await.
diff --git a/changelog.d/6505.misc b/changelog.d/6505.misc
deleted file mode 100644
index 3a75b2d9dd..0000000000
--- a/changelog.d/6505.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make `make_deferred_yieldable` work with async/await.
diff --git a/changelog.d/6506.misc b/changelog.d/6506.misc
deleted file mode 100644
index 99d7a70bcf..0000000000
--- a/changelog.d/6506.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove `SnapshotCache` in favour of `ResponseCache`.
diff --git a/changelog.d/6510.misc b/changelog.d/6510.misc
deleted file mode 100644
index 214f06539b..0000000000
--- a/changelog.d/6510.misc
+++ /dev/null
@@ -1 +0,0 @@
-Change phone home stats to not assume there is a single database and report information about the database used by the main data store.
diff --git a/changelog.d/6511.misc b/changelog.d/6511.misc
deleted file mode 100644
index 19ce435e68..0000000000
--- a/changelog.d/6511.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move database config from apps into HomeServer object.
diff --git a/changelog.d/6512.misc b/changelog.d/6512.misc
deleted file mode 100644
index 37a8099eec..0000000000
--- a/changelog.d/6512.misc
+++ /dev/null
@@ -1 +0,0 @@
-Silence mypy errors for files outside those specified.
diff --git a/changelog.d/6513.misc b/changelog.d/6513.misc
deleted file mode 100644
index 36700f5657..0000000000
--- a/changelog.d/6513.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove all assumptions of there being a single physical DB apart from the `synapse.config`.
diff --git a/changelog.d/6514.bugfix b/changelog.d/6514.bugfix
deleted file mode 100644
index 6dc1985c24..0000000000
--- a/changelog.d/6514.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix race which occasionally caused deleted devices to reappear.
diff --git a/changelog.d/6515.misc b/changelog.d/6515.misc
deleted file mode 100644
index a9c303ed1c..0000000000
--- a/changelog.d/6515.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up some logging when handling incoming events over federation.
diff --git a/changelog.d/6517.misc b/changelog.d/6517.misc
deleted file mode 100644
index c6ffed9952..0000000000
--- a/changelog.d/6517.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port some of FederationHandler to async/await.
\ No newline at end of file
diff --git a/changelog.d/6522.bugfix b/changelog.d/6522.bugfix
deleted file mode 100644
index ccda96962f..0000000000
--- a/changelog.d/6522.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent redacted events from being returned during message search.
\ No newline at end of file
diff --git a/changelog.d/6523.feature b/changelog.d/6523.feature
deleted file mode 100644
index 798fa143df..0000000000
--- a/changelog.d/6523.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add option `limit_profile_requests_to_users_who_share_rooms` to require that a user share a room with another user in order to query their profile information.
diff --git a/changelog.d/6534.misc b/changelog.d/6534.misc
deleted file mode 100644
index 7df6bb442a..0000000000
--- a/changelog.d/6534.misc
+++ /dev/null
@@ -1 +0,0 @@
-Test more folders against mypy.
diff --git a/changelog.d/6537.misc b/changelog.d/6537.misc
deleted file mode 100644
index 3543153584..0000000000
--- a/changelog.d/6537.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update `mypy` to new version.
diff --git a/changelog.d/6538.misc b/changelog.d/6538.misc
deleted file mode 100644
index cb4fd56948..0000000000
--- a/changelog.d/6538.misc
+++ /dev/null
@@ -1 +0,0 @@
-Adjust the sytest blacklist for worker mode.
diff --git a/changelog.d/6541.doc b/changelog.d/6541.doc
deleted file mode 100644
index c20029edc0..0000000000
--- a/changelog.d/6541.doc
+++ /dev/null
@@ -1 +0,0 @@
-Document the Room Shutdown Admin API.
\ No newline at end of file
diff --git a/changelog.d/6546.feature b/changelog.d/6546.feature
deleted file mode 100644
index 954aacb0d0..0000000000
--- a/changelog.d/6546.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add an export_signing_key script to extract the public part of signing keys when rotating them.
diff --git a/changelog.d/6555.bugfix b/changelog.d/6555.bugfix
deleted file mode 100644
index 86a5a56cf6..0000000000
--- a/changelog.d/6555.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix missing row in `device_max_stream_id` that could cause "unable to decrypt" errors after server restart.
\ No newline at end of file
diff --git a/changelog.d/6557.misc b/changelog.d/6557.misc
deleted file mode 100644
index 80e7eaedb8..0000000000
--- a/changelog.d/6557.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove unused `get_pagination_rows` methods from `EventSource` classes.
diff --git a/changelog.d/6558.misc b/changelog.d/6558.misc
deleted file mode 100644
index a7572f1a85..0000000000
--- a/changelog.d/6558.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up logs from the push notifier at startup.
\ No newline at end of file
diff --git a/changelog.d/6559.misc b/changelog.d/6559.misc
deleted file mode 100644
index 8bca37457d..0000000000
--- a/changelog.d/6559.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port `synapse.handlers.admin` and `synapse.handlers.deactivate_account` to async/await.
diff --git a/changelog.d/6564.misc b/changelog.d/6564.misc
deleted file mode 100644
index f644f5868b..0000000000
--- a/changelog.d/6564.misc
+++ /dev/null
@@ -1 +0,0 @@
-Change `EventContext` to use the `Storage` class, in preparation for moving state database queries to a separate data store.
diff --git a/changelog.d/6565.misc b/changelog.d/6565.misc
deleted file mode 100644
index e83f245bf0..0000000000
--- a/changelog.d/6565.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add assertion that schema delta file names are unique.
diff --git a/changelog.d/6570.misc b/changelog.d/6570.misc
deleted file mode 100644
index e89955a51e..0000000000
--- a/changelog.d/6570.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve diagnostics on database upgrade failure.
diff --git a/changelog.d/6571.bugfix b/changelog.d/6571.bugfix
deleted file mode 100644
index e38ea7b4f7..0000000000
--- a/changelog.d/6571.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug which meant that we did not send systemd notifications on startup if acme was enabled.
diff --git a/changelog.d/6580.feature b/changelog.d/6580.feature
deleted file mode 100644
index 233c589c66..0000000000
--- a/changelog.d/6580.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental config option to specify multiple databases.
diff --git a/changelog.d/6601.doc b/changelog.d/6601.doc
deleted file mode 100644
index 08c5b3d215..0000000000
--- a/changelog.d/6601.doc
+++ /dev/null
@@ -1 +0,0 @@
-Reword sections of federate.md that explained delegation at the time of the Synapse 1.0 transition.
\ No newline at end of file
diff --git a/changelog.d/6614.doc b/changelog.d/6614.doc
deleted file mode 100644
index 38b962b062..0000000000
--- a/changelog.d/6614.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add a 'Configuration' section to docs/turn-howto.md.
diff --git a/changelog.d/6617.misc b/changelog.d/6617.misc
deleted file mode 100644
index 94aa271d38..0000000000
--- a/changelog.d/6617.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce the reconnect time when worker replication fails, to make it easier to catch up.
diff --git a/changelog.d/6619.misc b/changelog.d/6619.misc
deleted file mode 100644
index b608133219..0000000000
--- a/changelog.d/6619.misc
+++ /dev/null
@@ -1 +0,0 @@
-Simplify http handling by removing redundant SynapseRequestFactory.
diff --git a/changelog.d/6620.misc b/changelog.d/6620.misc
deleted file mode 100644
index 8bfb78fb20..0000000000
--- a/changelog.d/6620.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a workaround for synapse raising exceptions when fetching the notary's own key from the notary.
diff --git a/changelog.d/6625.bugfix b/changelog.d/6625.bugfix
deleted file mode 100644
index a8dc5587dc..0000000000
--- a/changelog.d/6625.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix exception when fetching the `matrix.org:ed25519:auto` key.
diff --git a/changelog.d/6626.feature b/changelog.d/6626.feature
deleted file mode 100644
index 15798fa59b..0000000000
--- a/changelog.d/6626.feature
+++ /dev/null
@@ -1 +0,0 @@
-Raise an error if someone tries to use the log_file config option.
diff --git a/changelog.d/6627.misc b/changelog.d/6627.misc
deleted file mode 100644
index 702f067070..0000000000
--- a/changelog.d/6627.misc
+++ /dev/null
@@ -1 +0,0 @@
-Automate generation of the sample log config.
diff --git a/changelog.d/6628.removal b/changelog.d/6628.removal
deleted file mode 100644
index 66cd6aeca4..0000000000
--- a/changelog.d/6628.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove unused, undocumented /_matrix/content API.
diff --git a/changelog.d/6629.misc b/changelog.d/6629.misc
deleted file mode 100644
index 68f77af05b..0000000000
--- a/changelog.d/6629.misc
+++ /dev/null
@@ -1 +0,0 @@
-Simplify event creation code by removing redundant queries on the event_reference_hashes table.
\ No newline at end of file
diff --git a/changelog.d/6633.bugfix b/changelog.d/6633.bugfix
deleted file mode 100644
index 4bacf26021..0000000000
--- a/changelog.d/6633.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where a moderator upgraded a room and became an admin in the new room.
\ No newline at end of file
diff --git a/changelog.d/6640.bugfix b/changelog.d/6640.bugfix
deleted file mode 100644
index 8c2a129933..0000000000
--- a/changelog.d/6640.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix an error which was thrown by the PresenceHandler _on_shutdown handler.
diff --git a/changelog.d/6642.misc b/changelog.d/6642.misc
deleted file mode 100644
index a480bbd134..0000000000
--- a/changelog.d/6642.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix errors when frozen_dicts are enabled.
diff --git a/changelog.d/6645.bugfix b/changelog.d/6645.bugfix
deleted file mode 100644
index f648df3fc0..0000000000
--- a/changelog.d/6645.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix exceptions in the synchrotron worker log when events are rejected.
diff --git a/changelog.d/6647.misc b/changelog.d/6647.misc
deleted file mode 100644
index fbe7c0e7db..0000000000
--- a/changelog.d/6647.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port core background update routines to async/await.
diff --git a/changelog.d/6648.bugfix b/changelog.d/6648.bugfix
deleted file mode 100644
index 39916de437..0000000000
--- a/changelog.d/6648.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Ensure that upgraded rooms are removed from the directory.
diff --git a/changelog.d/6652.bugfix b/changelog.d/6652.bugfix
deleted file mode 100644
index 7e9781d652..0000000000
--- a/changelog.d/6652.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug causing Synapse not to fetch missing events when it believes it has every event in the room.
diff --git a/changelog.d/6653.misc b/changelog.d/6653.misc
deleted file mode 100644
index fbe7c0e7db..0000000000
--- a/changelog.d/6653.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port core background update routines to async/await.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 71cb611820..a3bd855045 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.7.3"
+__version__ = "1.8.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 03edfc58500197fee40c808680551ea55d1560e8 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 7 Jan 2020 15:59:05 +0100
Subject: Update changelog.d/6624.doc

Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/6624.doc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6624.doc b/changelog.d/6624.doc
index c8aade0974..bc9a022db2 100644
--- a/changelog.d/6624.doc
+++ b/changelog.d/6624.doc
@@ -1 +1 @@
-Add a complete documentation of the message retention policies support.
+Add complete documentation of the message retention policies support.
-- 
cgit 1.4.1


From be29ed7ad86a150f603722d7dc307b71f7e98726 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 7 Jan 2020 15:36:41 +0000
Subject: Correctly proxy remote group HTTP errors. (#6654)

e.g. if the remote server returns a 404, that shouldn't be treated as a local error
but should be proxied through with its original status code.
---
 changelog.d/6654.bugfix          |  1 +
 synapse/handlers/groups_local.py | 16 ++++++++++++++++
 2 files changed, 17 insertions(+)
 create mode 100644 changelog.d/6654.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6654.bugfix b/changelog.d/6654.bugfix
new file mode 100644
index 0000000000..fed35252db
--- /dev/null
+++ b/changelog.d/6654.bugfix
@@ -0,0 +1 @@
+Correctly proxy HTTP errors from API calls to remote group servers.
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 92fecbfc44..319565510f 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -130,6 +130,8 @@ class GroupsLocalHandler(object):
                 res = yield self.transport_client.get_group_summary(
                     get_domain_from_id(group_id), group_id, requester_user_id
                 )
+            except HttpResponseException as e:
+                raise e.to_synapse_error()
             except RequestSendFailed:
                 raise SynapseError(502, "Failed to contact group server")
 
@@ -190,6 +192,8 @@ class GroupsLocalHandler(object):
                 res = yield self.transport_client.create_group(
                     get_domain_from_id(group_id), group_id, user_id, content
                 )
+            except HttpResponseException as e:
+                raise e.to_synapse_error()
             except RequestSendFailed:
                 raise SynapseError(502, "Failed to contact group server")
 
@@ -231,6 +235,8 @@ class GroupsLocalHandler(object):
             res = yield self.transport_client.get_users_in_group(
                 get_domain_from_id(group_id), group_id, requester_user_id
             )
+        except HttpResponseException as e:
+            raise e.to_synapse_error()
         except RequestSendFailed:
             raise SynapseError(502, "Failed to contact group server")
 
@@ -271,6 +277,8 @@ class GroupsLocalHandler(object):
                 res = yield self.transport_client.join_group(
                     get_domain_from_id(group_id), group_id, user_id, content
                 )
+            except HttpResponseException as e:
+                raise e.to_synapse_error()
             except RequestSendFailed:
                 raise SynapseError(502, "Failed to contact group server")
 
@@ -315,6 +323,8 @@ class GroupsLocalHandler(object):
                 res = yield self.transport_client.accept_group_invite(
                     get_domain_from_id(group_id), group_id, user_id, content
                 )
+            except HttpResponseException as e:
+                raise e.to_synapse_error()
             except RequestSendFailed:
                 raise SynapseError(502, "Failed to contact group server")
 
@@ -361,6 +371,8 @@ class GroupsLocalHandler(object):
                     requester_user_id,
                     content,
                 )
+            except HttpResponseException as e:
+                raise e.to_synapse_error()
             except RequestSendFailed:
                 raise SynapseError(502, "Failed to contact group server")
 
@@ -424,6 +436,8 @@ class GroupsLocalHandler(object):
                     user_id,
                     content,
                 )
+            except HttpResponseException as e:
+                raise e.to_synapse_error()
             except RequestSendFailed:
                 raise SynapseError(502, "Failed to contact group server")
 
@@ -460,6 +474,8 @@ class GroupsLocalHandler(object):
                 bulk_result = yield self.transport_client.bulk_get_publicised_groups(
                     get_domain_from_id(user_id), [user_id]
                 )
+            except HttpResponseException as e:
+                raise e.to_synapse_error()
             except RequestSendFailed:
                 raise SynapseError(502, "Failed to contact group server")
 
-- 
cgit 1.4.1
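
The same two-clause `except` block is added at every call site above. Read in
isolation, the pattern is: re-raise a remote HTTP error (such as a 404 for an
unknown group) as the equivalent synapse error so its status code is proxied
through, and map a transport failure to a 502. A hedged sketch of that logic
factored into a reusable helper (`proxy_group_request` is a made-up name;
`HttpResponseException.to_synapse_error` is the real method used in the diff):

    from synapse.api.errors import (
        HttpResponseException,
        RequestSendFailed,
        SynapseError,
    )

    async def proxy_group_request(coro):
        """Await `coro`, mapping remote group-server failures onto
        synapse errors instead of swallowing them."""
        try:
            return await coro
        except HttpResponseException as e:
            # A remote 4xx/5xx keeps its original status code.
            raise e.to_synapse_error()
        except RequestSendFailed:
            # Failing to reach the server at all becomes a 502.
            raise SynapseError(502, "Failed to contact group server")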


From dd57715de2b9a0742b38aaab63893e2495b68841 Mon Sep 17 00:00:00 2001
From: Fabian Meyer 
Date: Wed, 8 Jan 2020 08:25:05 +0100
Subject:  contrib/docker-compose: fixing mount that overrides containers' /etc
 (#6656)

The mount in the form of ./matrix-config:/etc overwrites the contents of the container's /etc folder. Since all valid CA certificates are stored in /etc, synapse.push.httppusher, for example, cannot validate the certificate from matrix.org.
---
 changelog.d/6656.doc              | 1 +
 contrib/docker/docker-compose.yml | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6656.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6656.doc b/changelog.d/6656.doc
new file mode 100644
index 0000000000..9f32da1a88
--- /dev/null
+++ b/changelog.d/6656.doc
@@ -0,0 +1 @@
+Fix the example docker-compose.yml so that it no longer overrides the container's entire /etc folder. Contributed by Fabian Meyer.
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index 72c87054e5..2b044baf78 100644
--- a/contrib/docker/docker-compose.yml
+++ b/contrib/docker/docker-compose.yml
@@ -18,7 +18,7 @@ services:
       - SYNAPSE_CONFIG_PATH=/etc/homeserver.yaml
     volumes:
       # You may either store all the files in a local folder
-      - ./matrix-config:/etc
+      - ./matrix-config/homeserver.yaml:/etc/homeserver.yaml
       - ./files:/data
       # .. or you may split this between different storage points
       # - ./files:/data
-- 
cgit 1.4.1
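
The failure mode described above is easy to reproduce outside of Synapse:
OpenSSL loads its trust roots from paths under /etc, so shadowing /etc with a
near-empty bind mount leaves the certificate store empty and every TLS
verification fails. A small standard-library probe (a sketch, assuming a
Debian-style image where the roots live under /etc/ssl):

    import ssl

    # Where OpenSSL expects the system trust roots to live.
    print(ssl.get_default_verify_paths())

    # With /etc shadowed by the bad mount, the default context loads zero
    # CA certificates ('x509_ca' is 0), so certificate validation (for
    # example by synapse.push.httppusher) cannot succeed.
    ctx = ssl.create_default_context()
    print(ctx.cert_store_stats())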


From 573fee759cbd76fca93bf90783cd013a11b9b4e5 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 8 Jan 2020 13:24:10 +0000
Subject: Back out ill-advised notary server hackery (#6657)

This was ill-advised. We can't modify verify_keys here, because the response
object has already been signed by the requested key.

Furthermore, it's somewhat unnecessary because existing versions of Synapse
(which get upset that the notary key isn't present in verify_keys) will fall
back to a direct fetch via `/key/v2/server`.

Also: add more tests for fetching keys via perspectives; it would be nice if we actually tested whether our fetcher can talk to our notary impl.
---
 changelog.d/6657.bugfix                       |   1 +
 synapse/rest/key/v2/remote_key_resource.py    |  30 ++----
 tests/rest/key/__init__.py                    |   0
 tests/rest/key/v2/__init__.py                 |   0
 tests/rest/key/v2/test_remote_key_resource.py | 135 +++++++++++++++++++++++++-
 5 files changed, 140 insertions(+), 26 deletions(-)
 create mode 100644 changelog.d/6657.bugfix
 create mode 100644 tests/rest/key/__init__.py
 create mode 100644 tests/rest/key/v2/__init__.py

(limited to 'changelog.d')

diff --git a/changelog.d/6657.bugfix b/changelog.d/6657.bugfix
new file mode 100644
index 0000000000..94e51a9896
--- /dev/null
+++ b/changelog.d/6657.bugfix
@@ -0,0 +1 @@
+Fix incorrect signing of responses from the key server implementation.
\ No newline at end of file
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index bf5e0eb844..e7fc3f0431 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -15,7 +15,6 @@
 import logging
 
 from canonicaljson import encode_canonical_json, json
-from signedjson.key import encode_verify_key_base64
 from signedjson.sign import sign_json
 
 from twisted.internet import defer
@@ -217,28 +216,15 @@ class RemoteKey(DirectServeResource):
         if cache_misses and query_remote_on_cache_miss:
             yield self.fetcher.get_keys(cache_misses)
             yield self.query_keys(request, query, query_remote_on_cache_miss=False)
-            return
-
-        signed_keys = []
-        for key_json in json_results:
-            key_json = json.loads(key_json)
-
-            # backwards-compatibility hack for #6596: if the requested key belongs
-            # to us, make sure that all of the signing keys appear in the
-            # "verify_keys" section.
-            if key_json["server_name"] == self.config.server_name:
-                verify_keys = key_json["verify_keys"]
+        else:
+            signed_keys = []
+            for key_json in json_results:
+                key_json = json.loads(key_json)
                 for signing_key in self.config.key_server_signing_keys:
-                    key_id = "%s:%s" % (signing_key.alg, signing_key.version)
-                    verify_keys[key_id] = {
-                        "key": encode_verify_key_base64(signing_key.verify_key)
-                    }
-
-            for signing_key in self.config.key_server_signing_keys:
-                key_json = sign_json(key_json, self.config.server_name, signing_key)
+                    key_json = sign_json(key_json, self.config.server_name, signing_key)
 
-            signed_keys.append(key_json)
+                signed_keys.append(key_json)
 
-        results = {"server_keys": signed_keys}
+            results = {"server_keys": signed_keys}
 
-        respond_with_json_bytes(request, 200, encode_canonical_json(results))
+            respond_with_json_bytes(request, 200, encode_canonical_json(results))
diff --git a/tests/rest/key/__init__.py b/tests/rest/key/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/rest/key/v2/__init__.py b/tests/rest/key/v2/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index d8246b4e78..6776a56cad 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -13,25 +13,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import urllib.parse
-from io import BytesIO
+from io import BytesIO, StringIO
 
 from mock import Mock
 
 import signedjson.key
+from canonicaljson import encode_canonical_json
 from nacl.signing import SigningKey
 from signedjson.sign import sign_json
 
 from twisted.web.resource import NoResource
 
+from synapse.crypto.keyring import PerspectivesKeyFetcher
 from synapse.http.site import SynapseRequest
 from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.storage.keys import FetchKeyResult
 from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.stringutils import random_string
 
 from tests import unittest
 from tests.server import FakeChannel, wait_until_result
+from tests.utils import default_config
 
 
-class RemoteKeyResourceTestCase(unittest.HomeserverTestCase):
+class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
         self.http_client = Mock()
         return self.setup_test_homeserver(http_client=self.http_client)
@@ -73,6 +78,8 @@ class RemoteKeyResourceTestCase(unittest.HomeserverTestCase):
 
         self.http_client.get_json.side_effect = get_json
 
+
+class RemoteKeyResourceTestCase(BaseRemoteKeyResourceTestCase):
     def make_notary_request(self, server_name: str, key_id: str) -> dict:
         """Send a GET request to the test server requesting the given key.
 
@@ -125,6 +132,126 @@ class RemoteKeyResourceTestCase(unittest.HomeserverTestCase):
         oursigs = sigs[self.hs.hostname]
         self.assertEqual(len(oursigs), 2)
 
-        # and both keys should be present in the verify_keys section
+        # the requested key should be present in the verify_keys section
         self.assertIn("ed25519:ver1", keys[0]["verify_keys"])
-        self.assertIn("ed25519:a_lPym", keys[0]["verify_keys"])
+
+
+class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
+    """End-to-end tests of the perspectives fetch case
+
+    The idea here is to actually wire up a PerspectivesKeyFetcher to the notary
+    endpoint, to check that the two implementations are compatible.
+    """
+
+    def default_config(self, *args, **kwargs):
+        config = super().default_config(*args, **kwargs)
+
+        # replace the signing key with our own
+        self.hs_signing_key = signedjson.key.generate_signing_key("kssk")
+        strm = StringIO()
+        signedjson.key.write_signing_keys(strm, [self.hs_signing_key])
+        config["signing_key"] = strm.getvalue()
+
+        return config
+
+    def prepare(self, reactor, clock, homeserver):
+        # make a second homeserver, configured to use the first one as a key notary
+        self.http_client2 = Mock()
+        config = default_config(name="keyclient")
+        config["trusted_key_servers"] = [
+            {
+                "server_name": self.hs.hostname,
+                "verify_keys": {
+                    "ed25519:%s"
+                    % (
+                        self.hs_signing_key.version,
+                    ): signedjson.key.encode_verify_key_base64(
+                        self.hs_signing_key.verify_key
+                    )
+                },
+            }
+        ]
+        self.hs2 = self.setup_test_homeserver(
+            http_client=self.http_client2, config=config
+        )
+
+        # wire up outbound POST /key/v2/query requests from hs2 so that they
+        # will be forwarded to hs1
+        def post_json(destination, path, data):
+            self.assertEqual(destination, self.hs.hostname)
+            self.assertEqual(
+                path, "/_matrix/key/v2/query",
+            )
+
+            channel = FakeChannel(self.site, self.reactor)
+            req = SynapseRequest(channel)
+            req.content = BytesIO(encode_canonical_json(data))
+
+            req.requestReceived(
+                b"POST", path.encode("utf-8"), b"1.1",
+            )
+            wait_until_result(self.reactor, req)
+            self.assertEqual(channel.code, 200)
+            resp = channel.json_body
+            return resp
+
+        self.http_client2.post_json.side_effect = post_json
+
+    def test_get_key(self):
+        """Fetch a key belonging to a random server"""
+        # make up a key to be fetched.
+        testkey = signedjson.key.generate_signing_key("abc")
+
+        # we expect hs1 to make a regular key request to the target server
+        self.expect_outgoing_key_request("targetserver", testkey)
+        keyid = "ed25519:%s" % (testkey.version,)
+
+        fetcher = PerspectivesKeyFetcher(self.hs2)
+        d = fetcher.get_keys({"targetserver": {keyid: 1000}})
+        res = self.get_success(d)
+        self.assertIn("targetserver", res)
+        keyres = res["targetserver"][keyid]
+        assert isinstance(keyres, FetchKeyResult)
+        self.assertEqual(
+            signedjson.key.encode_verify_key_base64(keyres.verify_key),
+            signedjson.key.encode_verify_key_base64(testkey.verify_key),
+        )
+
+    def test_get_notary_key(self):
+        """Fetch a key belonging to the notary server"""
+        # make up a key to be fetched. We randomise the keyid to try to get it to
+        # appear before the key server signing key sometimes (otherwise we bail out
+        # before fetching its signature)
+        testkey = signedjson.key.generate_signing_key(random_string(5))
+
+        # we expect hs1 to make a regular key request to itself
+        self.expect_outgoing_key_request(self.hs.hostname, testkey)
+        keyid = "ed25519:%s" % (testkey.version,)
+
+        fetcher = PerspectivesKeyFetcher(self.hs2)
+        d = fetcher.get_keys({self.hs.hostname: {keyid: 1000}})
+        res = self.get_success(d)
+        self.assertIn(self.hs.hostname, res)
+        keyres = res[self.hs.hostname][keyid]
+        assert isinstance(keyres, FetchKeyResult)
+        self.assertEqual(
+            signedjson.key.encode_verify_key_base64(keyres.verify_key),
+            signedjson.key.encode_verify_key_base64(testkey.verify_key),
+        )
+
+    def test_get_notary_keyserver_key(self):
+        """Fetch the notary's keyserver key"""
+        # we expect hs1 to make a regular key request to itself
+        self.expect_outgoing_key_request(self.hs.hostname, self.hs_signing_key)
+        keyid = "ed25519:%s" % (self.hs_signing_key.version,)
+
+        fetcher = PerspectivesKeyFetcher(self.hs2)
+        d = fetcher.get_keys({self.hs.hostname: {keyid: 1000}})
+        res = self.get_success(d)
+        self.assertIn(self.hs.hostname, res)
+        keyres = res[self.hs.hostname][keyid]
+        assert isinstance(keyres, FetchKeyResult)
+        self.assertEqual(
+            signedjson.key.encode_verify_key_base64(keyres.verify_key),
+            signedjson.key.encode_verify_key_base64(self.hs_signing_key.verify_key),
+        )
-- 
cgit 1.4.1
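
The first paragraph of the commit message is the crux: a signature covers the
canonical JSON of the whole object, so adding entries to `verify_keys` after
the response has been signed invalidates any signature already attached. This
can be demonstrated directly with the `signedjson` library the resource uses
(the server name and key version below are made up):

    import signedjson.key
    from signedjson.sign import (
        SignatureVerifyException,
        sign_json,
        verify_signed_json,
    )

    signing_key = signedjson.key.generate_signing_key("ver1")
    verify_key = signedjson.key.get_verify_key(signing_key)

    # Sign a response, then mutate it the way the backed-out hack did.
    response = sign_json(
        {"server_name": "origin.example"}, "origin.example", signing_key
    )
    response["verify_keys"] = {"ed25519:extra": "AAAA"}

    try:
        verify_signed_json(response, "origin.example", verify_key)
    except SignatureVerifyException:
        print("signature no longer matches the mutated object")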


From 7caaa29daab7bde331dcec0bb760a8fe5870f18e Mon Sep 17 00:00:00 2001
From: Manuel Stahl <37705355+awesome-manuel@users.noreply.github.com>
Date: Wed, 8 Jan 2020 14:26:40 +0100
Subject: Fix GET request on /_synapse/admin/v2/users endpoint (#6563)

Fixes #6552
---
 changelog.d/6563.bugfix                      |  1 +
 synapse/storage/data_stores/main/__init__.py |  4 +--
 tests/rest/admin/test_admin.py               | 41 ++++++++++++++++++++++++++++
 3 files changed, 44 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6563.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6563.bugfix b/changelog.d/6563.bugfix
new file mode 100644
index 0000000000..3325fb1dcf
--- /dev/null
+++ b/changelog.d/6563.bugfix
@@ -0,0 +1 @@
+Fix GET request on /_synapse/admin/v2/users endpoint. Contributed by Awesome Technologies Innovationslabor GmbH.
\ No newline at end of file
diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py
index c577c0df5f..2700cca822 100644
--- a/synapse/storage/data_stores/main/__init__.py
+++ b/synapse/storage/data_stores/main/__init__.py
@@ -526,9 +526,9 @@ class DataStore(
 
         attr_filter = {}
         if not guests:
-            attr_filter["is_guest"] = False
+            attr_filter["is_guest"] = 0
         if not deactivated:
-            attr_filter["deactivated"] = False
+            attr_filter["deactivated"] = 0
 
         return self.db.simple_select_list_paginate(
             desc="get_users_paginate",
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 0ed2594381..325bd6a608 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -341,6 +341,47 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         self.assertEqual("Invalid user type", channel.json_body["error"])
 
 
+class UsersListTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+    ]
+    url = "/_synapse/admin/v2/users"
+
+    def prepare(self, reactor, clock, hs):
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.register_user("user1", "pass1", admin=False)
+        self.register_user("user2", "pass2", admin=False)
+
+    def test_no_auth(self):
+        """
+        Try to list users without authentication.
+        """
+        request, channel = self.make_request("GET", self.url, b"{}")
+        self.render(request)
+
+        self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("M_MISSING_TOKEN", channel.json_body["errcode"])
+
+    def test_all_users(self):
+        """
+        List all users, including deactivated users.
+        """
+        request, channel = self.make_request(
+            "GET",
+            self.url + "?deactivated=true",
+            b"{}",
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(3, len(channel.json_body["users"]))
+
+
 class ShutdownRoomTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
-- 
cgit 1.4.1
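
The fix above is a type fix rather than a logic change: `is_guest` and
`deactivated` are stored as integer columns, and on PostgreSQL an equality
comparison between a smallint column and a boolean parameter has no operator,
which is what broke the GET request. A sketch of the corrected filter
construction (`build_user_filter` is a hypothetical name; Synapse builds this
dict inline in `get_users_paginate`):

    def build_user_filter(guests: bool, deactivated: bool) -> dict:
        """Build the attribute filter for the paginated user listing."""
        attr_filter = {}
        if not guests:
            # Integer literals, not Python booleans: the columns are
            # smallints, so the bound parameters must be integers too.
            attr_filter["is_guest"] = 0
        if not deactivated:
            attr_filter["deactivated"] = 0
        return attr_filter

    # Excluding both guests and deactivated users:
    assert build_user_filter(False, False) == {"is_guest": 0, "deactivated": 0}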


From 32ad2a3349e262a431aa9c57fef2d89f629aac31 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 8 Jan 2020 13:28:12 +0000
Subject: Changelog

---
 changelog.d/6665.doc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6665.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6665.doc b/changelog.d/6665.doc
new file mode 100644
index 0000000000..bc9a022db2
--- /dev/null
+++ b/changelog.d/6665.doc
@@ -0,0 +1 @@
+Add complete documentation of the message retention policies support.
-- 
cgit 1.4.1


From 4e2a072a05c1f894687772e6e55a943f3d941fbc Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 8 Jan 2020 13:28:19 +0000
Subject: Newsfile

---
 changelog.d/6664.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6664.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6664.bugfix b/changelog.d/6664.bugfix
new file mode 100644
index 0000000000..8c6a6fa1c8
--- /dev/null
+++ b/changelog.d/6664.bugfix
@@ -0,0 +1 @@
+Fix media repo admin APIs when using a media worker.
-- 
cgit 1.4.1


From 24b2c940fb657270bfbe3d7a18c5e9363c42663d Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 9 Jan 2020 11:39:29 +0000
Subject: 1.8.0

---
 CHANGES.md              | 10 ++++++++++
 changelog.d/6563.bugfix |  1 -
 changelog.d/6657.bugfix |  1 -
 debian/changelog        |  8 ++++++--
 synapse/__init__.py     |  2 +-
 5 files changed, 17 insertions(+), 5 deletions(-)
 delete mode 100644 changelog.d/6563.bugfix
 delete mode 100644 changelog.d/6657.bugfix

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index df94f742c0..e33e0d7f07 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,13 @@
+Synapse 1.8.0 (2020-01-09)
+==========================
+
+Bugfixes
+--------
+
+- Fix GET request on /_synapse/admin/v2/users endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#6563](https://github.com/matrix-org/synapse/issues/6563))
+- Fix incorrect signing of responses from the key server implementation. ([\#6657](https://github.com/matrix-org/synapse/issues/6657))
+
+
 Synapse 1.8.0rc1 (2020-01-07)
 =============================
 
diff --git a/changelog.d/6563.bugfix b/changelog.d/6563.bugfix
deleted file mode 100644
index 3325fb1dcf..0000000000
--- a/changelog.d/6563.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix GET request on /_synapse/admin/v2/users endpoint. Contributed by Awesome Technologies Innovationslabor GmbH.
\ No newline at end of file
diff --git a/changelog.d/6657.bugfix b/changelog.d/6657.bugfix
deleted file mode 100644
index 94e51a9896..0000000000
--- a/changelog.d/6657.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix incorrect signing of responses from the key server implementation.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index 75fe89fa97..7413c238e6 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,8 +1,12 @@
-matrix-synapse-py3 (1.7.3ubuntu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.8.0) stable; urgency=medium
 
+  [ Richard van der Hoff ]
   * Automate generation of the default log configuration file.
 
- -- Richard van der Hoff   Fri, 03 Jan 2020 13:55:38 +0000
+  [ Synapse Packaging team ]
+  * New synapse release 1.8.0.
+
+ -- Synapse Packaging team   Thu, 09 Jan 2020 11:39:27 +0000
 
 matrix-synapse-py3 (1.7.3) stable; urgency=medium
 
diff --git a/synapse/__init__.py b/synapse/__init__.py
index a3bd855045..0dd538d804 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.8.0rc1"
+__version__ = "1.8.0"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From d2906fe6667d3384f37ef03ca87172d643d49587 Mon Sep 17 00:00:00 2001
From: Manuel Stahl <37705355+awesome-manuel@users.noreply.github.com>
Date: Thu, 9 Jan 2020 14:31:00 +0100
Subject: Allow admin users to create or modify users without a shared secret
 (#6495)

Signed-off-by: Manuel Stahl 
---
 changelog.d/5742.feature                         |   1 +
 docs/admin_api/user_admin_api.rst                |  33 +-
 synapse/handlers/admin.py                        |   9 +
 synapse/rest/admin/__init__.py                   |   2 +
 synapse/rest/admin/users.py                      | 142 +++++++
 synapse/storage/data_stores/main/registration.py |   2 +
 tests/rest/admin/test_admin.py                   | 338 ----------------
 tests/rest/admin/test_user.py                    | 465 +++++++++++++++++++++++
 tests/storage/test_registration.py               |   2 +
 9 files changed, 655 insertions(+), 339 deletions(-)
 create mode 100644 changelog.d/5742.feature
 create mode 100644 tests/rest/admin/test_user.py

(limited to 'changelog.d')

diff --git a/changelog.d/5742.feature b/changelog.d/5742.feature
new file mode 100644
index 0000000000..de10302275
--- /dev/null
+++ b/changelog.d/5742.feature
@@ -0,0 +1 @@
+Allow admin to create or modify a user. Contributed by Awesome Technologies Innovationslabor GmbH.
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index b451dc5014..0b3d09d694 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -1,3 +1,33 @@
+Create or modify Account
+========================
+
+This API allows an administrator to create or modify a user account with a
+specific ``user_id``.
+
+This API is::
+
+    PUT /_synapse/admin/v2/users/<user_id>
+
+with a body of:
+
+.. code:: json
+
+    {
+        "password": "user_password",
+        "displayname": "User",
+        "avatar_url": "",
+        "admin": false,
+        "deactivated": false
+    }
+
+including an ``access_token`` of a server admin.
+
+The parameter ``displayname`` is optional and defaults to ``user_id``.
+The parameter ``avatar_url`` is optional.
+The parameter ``admin`` is optional and defaults to ``false``.
+The parameter ``deactivated`` is optional and defaults to ``false``.
+If the user already exists then optional parameters default to the current value.
+
 List Accounts
 =============
 
@@ -50,7 +80,8 @@ This API returns information about a specific user account.
 
 The api is::
 
-    GET /_synapse/admin/v1/whois/<user_id>
+    GET /_synapse/admin/v1/whois/<user_id> (deprecated)
+    GET /_synapse/admin/v2/users/<user_id>
 
 including an ``access_token`` of a server admin.
 
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 1a4ba12385..76d18a8ba8 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -51,6 +51,15 @@ class AdminHandler(BaseHandler):
 
         return ret
 
+    async def get_user(self, user):
+        """Function to get user details"""
+        ret = await self.store.get_user_by_id(user.to_string())
+        if ret:
+            profile = await self.store.get_profileinfo(user.localpart)
+            ret["displayname"] = profile.display_name
+            ret["avatar_url"] = profile.avatar_url
+        return ret
+
     async def get_users(self):
         """Function to retrieve a list of users in users table.
 
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index c122c449f4..a10b4a9b72 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -38,6 +38,7 @@ from synapse.rest.admin.users import (
     SearchUsersRestServlet,
     UserAdminServlet,
     UserRegisterServlet,
+    UserRestServletV2,
     UsersRestServlet,
     UsersRestServletV2,
     WhoisRestServlet,
@@ -191,6 +192,7 @@ def register_servlets(hs, http_server):
     SendServerNoticeServlet(hs).register(http_server)
     VersionServlet(hs).register(http_server)
     UserAdminServlet(hs).register(http_server)
+    UserRestServletV2(hs).register(http_server)
     UsersRestServletV2(hs).register(http_server)
 
 
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 1937879dbe..574cb90c74 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -102,6 +102,148 @@ class UsersRestServletV2(RestServlet):
         return 200, ret
 
 
+class UserRestServletV2(RestServlet):
+    PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P<user_id>@[^/]+)$"),)
+
+    """Get request to list user details.
+    This needs user to have administrator access in Synapse.
+
+    GET /_synapse/admin/v2/users/<user_id>
+
+    returns:
+        200 OK with the user's details on success, otherwise an error.
+
+    PUT request to allow an administrator to add or modify a user.
+    This needs the requester to have administrator access in Synapse.
+    We use PUT instead of POST since we already know the id of the user
+    object to create. POST could be used to create guests.
+
+    PUT /_synapse/admin/v2/users/<user_id>
+    {
+        "password": "secret",
+        "displayname": "User"
+    }
+
+    returns:
+        201 OK with the new user object if the user was created, or
+        200 OK with the modified user object if the user was modified;
+        otherwise an error.
+    """
+
+    def __init__(self, hs):
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.admin_handler = hs.get_handlers().admin_handler
+        self.profile_handler = hs.get_profile_handler()
+        self.set_password_handler = hs.get_set_password_handler()
+        self.deactivate_account_handler = hs.get_deactivate_account_handler()
+        self.registration_handler = hs.get_registration_handler()
+
+    async def on_GET(self, request, user_id):
+        await assert_requester_is_admin(self.auth, request)
+
+        target_user = UserID.from_string(user_id)
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "Can only lookup local users")
+
+        ret = await self.admin_handler.get_user(target_user)
+
+        return 200, ret
+
+    async def on_PUT(self, request, user_id):
+        await assert_requester_is_admin(self.auth, request)
+
+        target_user = UserID.from_string(user_id)
+        body = parse_json_object_from_request(request)
+
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "This endpoint can only be used with local users")
+
+        user = await self.admin_handler.get_user(target_user)
+
+        # Fetch the requester here so that it is available to both the
+        # modify and create branches below.
+        requester = await self.auth.get_user_by_req(request)
+
+        if user:  # modify user
+            if "displayname" in body:
+                await self.profile_handler.set_displayname(
+                    target_user, requester, body["displayname"], True
+                )
+
+            if "avatar_url" in body:
+                await self.profile_handler.set_avatar_url(
+                    target_user, requester, body["avatar_url"], True
+                )
+
+            if "admin" in body:
+                set_admin_to = bool(body["admin"])
+                if set_admin_to != user["admin"]:
+                    auth_user = requester.user
+                    if target_user == auth_user and not set_admin_to:
+                        raise SynapseError(400, "You may not demote yourself.")
+
+                    await self.admin_handler.set_user_server_admin(
+                        target_user, set_admin_to
+                    )
+
+            if "password" in body:
+                if (
+                    not isinstance(body["password"], text_type)
+                    or len(body["password"]) > 512
+                ):
+                    raise SynapseError(400, "Invalid password")
+                else:
+                    new_password = body["password"]
+                    await self.set_password_handler.set_password(
+                        target_user, new_password, requester
+                    )
+
+            if "deactivated" in body:
+                deactivate = bool(body["deactivated"])
+                if deactivate and not user["deactivated"]:
+                    result = await self.deactivate_account_handler.deactivate_account(
+                        target_user.to_string(), False
+                    )
+                    if not result:
+                        raise SynapseError(500, "Could not deactivate user")
+
+            user = await self.admin_handler.get_user(target_user)
+            return 200, user
+
+        else:  # create user
+            if "password" not in body:
+                raise SynapseError(
+                    400, "password must be specified", errcode=Codes.BAD_JSON
+                )
+            elif (
+                not isinstance(body["password"], text_type)
+                or len(body["password"]) > 512
+            ):
+                raise SynapseError(400, "Invalid password")
+
+            admin = body.get("admin", None)
+            user_type = body.get("user_type", None)
+            displayname = body.get("displayname", None)
+
+            if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
+                raise SynapseError(400, "Invalid user type")
+
+            user_id = await self.registration_handler.register_user(
+                localpart=target_user.localpart,
+                password=body["password"],
+                admin=bool(admin),
+                default_display_name=displayname,
+                user_type=user_type,
+            )
+            if "avatar_url" in body:
+                await self.profile_handler.set_avatar_url(
+                    user_id, requester, body["avatar_url"], True
+                )
+
+            ret = await self.admin_handler.get_user(target_user)
+
+            return 201, ret
+
+
 class UserRegisterServlet(RestServlet):
     """
     Attributes:
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index 5e8ecac0ea..cb4b2b39a0 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -52,11 +52,13 @@ class RegistrationWorkerStore(SQLBaseStore):
                 "name",
                 "password_hash",
                 "is_guest",
+                "admin",
                 "consent_version",
                 "consent_server_notice_sent",
                 "appservice_id",
                 "creation_ts",
                 "user_type",
+                "deactivated",
             ],
             allow_none=True,
             desc="get_user_by_id",
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 325bd6a608..6ceb483aa8 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -13,14 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import hashlib
-import hmac
 import json
 
 from mock import Mock
 
 import synapse.rest.admin
-from synapse.api.constants import UserTypes
 from synapse.http.server import JsonResource
 from synapse.rest.admin import VersionServlet
 from synapse.rest.client.v1 import events, login, room
@@ -47,341 +44,6 @@ class VersionTestCase(unittest.HomeserverTestCase):
         )
 
 
-class UserRegisterTestCase(unittest.HomeserverTestCase):
-
-    servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource]
-
-    def make_homeserver(self, reactor, clock):
-
-        self.url = "/_matrix/client/r0/admin/register"
-
-        self.registration_handler = Mock()
-        self.identity_handler = Mock()
-        self.login_handler = Mock()
-        self.device_handler = Mock()
-        self.device_handler.check_device_registered = Mock(return_value="FAKE")
-
-        self.datastore = Mock(return_value=Mock())
-        self.datastore.get_current_state_deltas = Mock(return_value=(0, []))
-
-        self.secrets = Mock()
-
-        self.hs = self.setup_test_homeserver()
-
-        self.hs.config.registration_shared_secret = "shared"
-
-        self.hs.get_media_repository = Mock()
-        self.hs.get_deactivate_account_handler = Mock()
-
-        return self.hs
-
-    def test_disabled(self):
-        """
-        If there is no shared secret, registration through this method will be
-        prevented.
-        """
-        self.hs.config.registration_shared_secret = None
-
-        request, channel = self.make_request("POST", self.url, b"{}")
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(
-            "Shared secret registration is not enabled", channel.json_body["error"]
-        )
-
-    def test_get_nonce(self):
-        """
-        Calling GET on the endpoint will return a randomised nonce, using the
-        homeserver's secrets provider.
-        """
-        secrets = Mock()
-        secrets.token_hex = Mock(return_value="abcd")
-
-        self.hs.get_secrets = Mock(return_value=secrets)
-
-        request, channel = self.make_request("GET", self.url)
-        self.render(request)
-
-        self.assertEqual(channel.json_body, {"nonce": "abcd"})
-
-    def test_expired_nonce(self):
-        """
-        Calling GET on the endpoint will return a randomised nonce, which will
-        only last for SALT_TIMEOUT (60s).
-        """
-        request, channel = self.make_request("GET", self.url)
-        self.render(request)
-        nonce = channel.json_body["nonce"]
-
-        # 59 seconds
-        self.reactor.advance(59)
-
-        body = json.dumps({"nonce": nonce})
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("username must be specified", channel.json_body["error"])
-
-        # 61 seconds
-        self.reactor.advance(2)
-
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("unrecognised nonce", channel.json_body["error"])
-
-    def test_register_incorrect_nonce(self):
-        """
-        Only the provided nonce can be used, as it's checked in the MAC.
-        """
-        request, channel = self.make_request("GET", self.url)
-        self.render(request)
-        nonce = channel.json_body["nonce"]
-
-        want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
-        want_mac.update(b"notthenonce\x00bob\x00abc123\x00admin")
-        want_mac = want_mac.hexdigest()
-
-        body = json.dumps(
-            {
-                "nonce": nonce,
-                "username": "bob",
-                "password": "abc123",
-                "admin": True,
-                "mac": want_mac,
-            }
-        )
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("HMAC incorrect", channel.json_body["error"])
-
-    def test_register_correct_nonce(self):
-        """
-        When the correct nonce is provided, and the right key is provided, the
-        user is registered.
-        """
-        request, channel = self.make_request("GET", self.url)
-        self.render(request)
-        nonce = channel.json_body["nonce"]
-
-        want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
-        want_mac.update(
-            nonce.encode("ascii") + b"\x00bob\x00abc123\x00admin\x00support"
-        )
-        want_mac = want_mac.hexdigest()
-
-        body = json.dumps(
-            {
-                "nonce": nonce,
-                "username": "bob",
-                "password": "abc123",
-                "admin": True,
-                "user_type": UserTypes.SUPPORT,
-                "mac": want_mac,
-            }
-        )
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("@bob:test", channel.json_body["user_id"])
-
-    def test_nonce_reuse(self):
-        """
-        A valid unrecognised nonce.
-        """
-        request, channel = self.make_request("GET", self.url)
-        self.render(request)
-        nonce = channel.json_body["nonce"]
-
-        want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
-        want_mac.update(nonce.encode("ascii") + b"\x00bob\x00abc123\x00admin")
-        want_mac = want_mac.hexdigest()
-
-        body = json.dumps(
-            {
-                "nonce": nonce,
-                "username": "bob",
-                "password": "abc123",
-                "admin": True,
-                "mac": want_mac,
-            }
-        )
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("@bob:test", channel.json_body["user_id"])
-
-        # Now, try and reuse it
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("unrecognised nonce", channel.json_body["error"])
-
-    def test_missing_parts(self):
-        """
-        Synapse will complain if you don't give nonce, username, password, and
-        mac.  Admin and user_types are optional.  Additional checks are done for length
-        and type.
-        """
-
-        def nonce():
-            request, channel = self.make_request("GET", self.url)
-            self.render(request)
-            return channel.json_body["nonce"]
-
-        #
-        # Nonce check
-        #
-
-        # Must be present
-        body = json.dumps({})
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("nonce must be specified", channel.json_body["error"])
-
-        #
-        # Username checks
-        #
-
-        # Must be present
-        body = json.dumps({"nonce": nonce()})
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("username must be specified", channel.json_body["error"])
-
-        # Must be a string
-        body = json.dumps({"nonce": nonce(), "username": 1234})
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("Invalid username", channel.json_body["error"])
-
-        # Must not have null bytes
-        body = json.dumps({"nonce": nonce(), "username": "abcd\u0000"})
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("Invalid username", channel.json_body["error"])
-
-        # Must not have null bytes
-        body = json.dumps({"nonce": nonce(), "username": "a" * 1000})
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("Invalid username", channel.json_body["error"])
-
-        #
-        # Password checks
-        #
-
-        # Must be present
-        body = json.dumps({"nonce": nonce(), "username": "a"})
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("password must be specified", channel.json_body["error"])
-
-        # Must be a string
-        body = json.dumps({"nonce": nonce(), "username": "a", "password": 1234})
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("Invalid password", channel.json_body["error"])
-
-        # Must not have null bytes
-        body = json.dumps({"nonce": nonce(), "username": "a", "password": "abcd\u0000"})
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("Invalid password", channel.json_body["error"])
-
-        # Super long
-        body = json.dumps({"nonce": nonce(), "username": "a", "password": "A" * 1000})
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("Invalid password", channel.json_body["error"])
-
-        #
-        # user_type check
-        #
-
-        # Invalid user_type
-        body = json.dumps(
-            {
-                "nonce": nonce(),
-                "username": "a",
-                "password": "1234",
-                "user_type": "invalid",
-            }
-        )
-        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("Invalid user type", channel.json_body["error"])
-
-
-class UsersListTestCase(unittest.HomeserverTestCase):
-
-    servlets = [
-        synapse.rest.admin.register_servlets,
-        login.register_servlets,
-    ]
-    url = "/_synapse/admin/v2/users"
-
-    def prepare(self, reactor, clock, hs):
-        self.admin_user = self.register_user("admin", "pass", admin=True)
-        self.admin_user_tok = self.login("admin", "pass")
-
-        self.register_user("user1", "pass1", admin=False)
-        self.register_user("user2", "pass2", admin=False)
-
-    def test_no_auth(self):
-        """
-        Try to list users without authentication.
-        """
-        request, channel = self.make_request("GET", self.url, b"{}")
-        self.render(request)
-
-        self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("M_MISSING_TOKEN", channel.json_body["errcode"])
-
-    def test_all_users(self):
-        """
-        List all users, including deactivated users.
-        """
-        request, channel = self.make_request(
-            "GET",
-            self.url + "?deactivated=true",
-            b"{}",
-            access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(3, len(channel.json_body["users"]))
-
-
 class ShutdownRoomTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
new file mode 100644
index 0000000000..7352d609e6
--- /dev/null
+++ b/tests/rest/admin/test_user.py
@@ -0,0 +1,465 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import hmac
+import json
+
+from mock import Mock
+
+import synapse.rest.admin
+from synapse.api.constants import UserTypes
+from synapse.rest.client.v1 import login
+
+from tests import unittest
+
+
+class UserRegisterTestCase(unittest.HomeserverTestCase):
+
+    servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource]
+
+    def make_homeserver(self, reactor, clock):
+
+        self.url = "/_matrix/client/r0/admin/register"
+
+        self.registration_handler = Mock()
+        self.identity_handler = Mock()
+        self.login_handler = Mock()
+        self.device_handler = Mock()
+        self.device_handler.check_device_registered = Mock(return_value="FAKE")
+
+        self.datastore = Mock(return_value=Mock())
+        self.datastore.get_current_state_deltas = Mock(return_value=(0, []))
+
+        self.secrets = Mock()
+
+        self.hs = self.setup_test_homeserver()
+
+        self.hs.config.registration_shared_secret = "shared"
+
+        self.hs.get_media_repository = Mock()
+        self.hs.get_deactivate_account_handler = Mock()
+
+        return self.hs
+
+    def test_disabled(self):
+        """
+        If there is no shared secret, registration through this method will be
+        prevented.
+        """
+        self.hs.config.registration_shared_secret = None
+
+        request, channel = self.make_request("POST", self.url, b"{}")
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(
+            "Shared secret registration is not enabled", channel.json_body["error"]
+        )
+
+    def test_get_nonce(self):
+        """
+        Calling GET on the endpoint will return a randomised nonce, using the
+        homeserver's secrets provider.
+        """
+        secrets = Mock()
+        secrets.token_hex = Mock(return_value="abcd")
+
+        self.hs.get_secrets = Mock(return_value=secrets)
+
+        request, channel = self.make_request("GET", self.url)
+        self.render(request)
+
+        self.assertEqual(channel.json_body, {"nonce": "abcd"})
+
+    def test_expired_nonce(self):
+        """
+        Calling GET on the endpoint will return a randomised nonce, which will
+        only last for SALT_TIMEOUT (60s).
+        """
+        request, channel = self.make_request("GET", self.url)
+        self.render(request)
+        nonce = channel.json_body["nonce"]
+
+        # 59 seconds
+        self.reactor.advance(59)
+
+        body = json.dumps({"nonce": nonce})
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("username must be specified", channel.json_body["error"])
+
+        # 61 seconds
+        self.reactor.advance(2)
+
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("unrecognised nonce", channel.json_body["error"])
+
+    def test_register_incorrect_nonce(self):
+        """
+        Only the provided nonce can be used, as it's checked in the MAC.
+        """
+        request, channel = self.make_request("GET", self.url)
+        self.render(request)
+        nonce = channel.json_body["nonce"]
+
+        want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
+        want_mac.update(b"notthenonce\x00bob\x00abc123\x00admin")
+        want_mac = want_mac.hexdigest()
+
+        body = json.dumps(
+            {
+                "nonce": nonce,
+                "username": "bob",
+                "password": "abc123",
+                "admin": True,
+                "mac": want_mac,
+            }
+        )
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("HMAC incorrect", channel.json_body["error"])
+
+    def test_register_correct_nonce(self):
+        """
+        When the correct nonce is provided, and the right key is provided, the
+        user is registered.
+        """
+        request, channel = self.make_request("GET", self.url)
+        self.render(request)
+        nonce = channel.json_body["nonce"]
+
+        want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
+        want_mac.update(
+            nonce.encode("ascii") + b"\x00bob\x00abc123\x00admin\x00support"
+        )
+        want_mac = want_mac.hexdigest()
+
+        body = json.dumps(
+            {
+                "nonce": nonce,
+                "username": "bob",
+                "password": "abc123",
+                "admin": True,
+                "user_type": UserTypes.SUPPORT,
+                "mac": want_mac,
+            }
+        )
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["user_id"])
+
+    def test_nonce_reuse(self):
+        """
+        A valid nonce can only be used once.
+        """
+        request, channel = self.make_request("GET", self.url)
+        self.render(request)
+        nonce = channel.json_body["nonce"]
+
+        want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
+        want_mac.update(nonce.encode("ascii") + b"\x00bob\x00abc123\x00admin")
+        want_mac = want_mac.hexdigest()
+
+        body = json.dumps(
+            {
+                "nonce": nonce,
+                "username": "bob",
+                "password": "abc123",
+                "admin": True,
+                "mac": want_mac,
+            }
+        )
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["user_id"])
+
+        # Now, try and reuse it
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("unrecognised nonce", channel.json_body["error"])
+
+    def test_missing_parts(self):
+        """
+        Synapse will complain if you don't give nonce, username, password, and
+        mac.  The admin and user_type fields are optional.  Additional checks are done for length
+        and type.
+        """
+
+        def nonce():
+            request, channel = self.make_request("GET", self.url)
+            self.render(request)
+            return channel.json_body["nonce"]
+
+        #
+        # Nonce check
+        #
+
+        # Must be present
+        body = json.dumps({})
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("nonce must be specified", channel.json_body["error"])
+
+        #
+        # Username checks
+        #
+
+        # Must be present
+        body = json.dumps({"nonce": nonce()})
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("username must be specified", channel.json_body["error"])
+
+        # Must be a string
+        body = json.dumps({"nonce": nonce(), "username": 1234})
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("Invalid username", channel.json_body["error"])
+
+        # Must not have null bytes
+        body = json.dumps({"nonce": nonce(), "username": "abcd\u0000"})
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("Invalid username", channel.json_body["error"])
+
+        # Must not be too long
+        body = json.dumps({"nonce": nonce(), "username": "a" * 1000})
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("Invalid username", channel.json_body["error"])
+
+        #
+        # Password checks
+        #
+
+        # Must be present
+        body = json.dumps({"nonce": nonce(), "username": "a"})
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("password must be specified", channel.json_body["error"])
+
+        # Must be a string
+        body = json.dumps({"nonce": nonce(), "username": "a", "password": 1234})
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("Invalid password", channel.json_body["error"])
+
+        # Must not have null bytes
+        body = json.dumps({"nonce": nonce(), "username": "a", "password": "abcd\u0000"})
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("Invalid password", channel.json_body["error"])
+
+        # Super long
+        body = json.dumps({"nonce": nonce(), "username": "a", "password": "A" * 1000})
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("Invalid password", channel.json_body["error"])
+
+        #
+        # user_type check
+        #
+
+        # Invalid user_type
+        body = json.dumps(
+            {
+                "nonce": nonce(),
+                "username": "a",
+                "password": "1234",
+                "user_type": "invalid",
+            }
+        )
+        request, channel = self.make_request("POST", self.url, body.encode("utf8"))
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("Invalid user type", channel.json_body["error"])
+
+
+class UsersListTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+    ]
+    url = "/_synapse/admin/v2/users"
+
+    def prepare(self, reactor, clock, hs):
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.register_user("user1", "pass1", admin=False)
+        self.register_user("user2", "pass2", admin=False)
+
+    def test_no_auth(self):
+        """
+        Try to list users without authentication.
+        """
+        request, channel = self.make_request("GET", self.url, b"{}")
+        self.render(request)
+
+        self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("M_MISSING_TOKEN", channel.json_body["errcode"])
+
+    def test_all_users(self):
+        """
+        List all users, including deactivated users.
+        """
+        request, channel = self.make_request(
+            "GET",
+            self.url + "?deactivated=true",
+            b"{}",
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(3, len(channel.json_body["users"]))
+
+
+class UserRestTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+
+        self.url = "/_synapse/admin/v2/users/@bob:test"
+
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.other_user = self.register_user("user", "pass")
+        self.other_user_token = self.login("user", "pass")
+
+    def test_requester_is_no_admin(self):
+        """
+        If the user is not a server admin, an error is returned.
+        """
+        self.hs.config.registration_shared_secret = None
+
+        request, channel = self.make_request(
+            "GET", self.url, access_token=self.other_user_token,
+        )
+        self.render(request)
+
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("You are not a server admin", channel.json_body["error"])
+
+        request, channel = self.make_request(
+            "PUT", self.url, access_token=self.other_user_token, content=b"{}",
+        )
+        self.render(request)
+
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("You are not a server admin", channel.json_body["error"])
+
+    def test_requester_is_admin(self):
+        """
+        If the user is a server admin, a new user is created.
+        """
+        self.hs.config.registration_shared_secret = None
+
+        body = json.dumps({"password": "abc123", "admin": True})
+
+        # Create user
+        request, channel = self.make_request(
+            "PUT",
+            self.url,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("bob", channel.json_body["displayname"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("bob", channel.json_body["displayname"])
+        self.assertEqual(1, channel.json_body["admin"])
+        self.assertEqual(0, channel.json_body["is_guest"])
+        self.assertEqual(0, channel.json_body["deactivated"])
+
+        # Modify user
+        body = json.dumps({"displayname": "foobar", "deactivated": True})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("foobar", channel.json_body["displayname"])
+        self.assertEqual(True, channel.json_body["deactivated"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("foobar", channel.json_body["displayname"])
+        self.assertEqual(1, channel.json_body["admin"])
+        self.assertEqual(0, channel.json_body["is_guest"])
+        self.assertEqual(1, channel.json_body["deactivated"])
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index ed5786865a..71a40a0a49 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -43,12 +43,14 @@ class RegistrationStoreTestCase(unittest.TestCase):
                 # TODO(paul): Surely this field should be 'user_id', not 'name'
                 "name": self.user_id,
                 "password_hash": self.pwhash,
+                "admin": 0,
                 "is_guest": 0,
                 "consent_version": None,
                 "consent_server_notice_sent": None,
                 "appservice_id": None,
                 "creation_ts": 1000,
                 "user_type": None,
+                "deactivated": 0,
             },
             (yield self.store.get_user_by_id(self.user_id)),
         )
-- 
cgit 1.4.1


From c3843fd075c5c20f800837afce534de352517db6 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Thu, 9 Jan 2020 17:52:12 +0000
Subject: changelog

---
 changelog.d/6675.removal | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6675.removal

(limited to 'changelog.d')

diff --git a/changelog.d/6675.removal b/changelog.d/6675.removal
new file mode 100644
index 0000000000..95df9a2d83
--- /dev/null
+++ b/changelog.d/6675.removal
@@ -0,0 +1 @@
+Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)).
-- 
cgit 1.4.1


From 96ed33739a000ea539a4e7840bc99ac8a972c500 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Sun, 12 Jan 2020 21:36:10 +0000
Subject: changelog

---
 changelog.d/6688.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6688.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6688.misc b/changelog.d/6688.misc
new file mode 100644
index 0000000000..2a9f28ce5c
--- /dev/null
+++ b/changelog.d/6688.misc
@@ -0,0 +1 @@
+Updates and extensions to the module API.
\ No newline at end of file
-- 
cgit 1.4.1


From d56e95ea8ba316f1fddfe33fa080d5fd0b19c008 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Sun, 12 Jan 2020 21:42:15 +0000
Subject: changelog

---
 changelog.d/6689.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6689.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6689.misc b/changelog.d/6689.misc
new file mode 100644
index 0000000000..17f15e73a8
--- /dev/null
+++ b/changelog.d/6689.misc
@@ -0,0 +1 @@
+Updates to the SAML mapping provider API.
-- 
cgit 1.4.1


From feee8199734c9d8a18fa0be12fc5ec09ae140a3a Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 13 Jan 2020 12:41:51 +0000
Subject: Fix exceptions on requests for non-ascii urls (#6682)

Fixes #6402
---
 changelog.d/6682.bugfix | 2 ++
 synapse/http/site.py    | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6682.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6682.bugfix b/changelog.d/6682.bugfix
new file mode 100644
index 0000000000..d48ea31477
--- /dev/null
+++ b/changelog.d/6682.bugfix
@@ -0,0 +1,2 @@
+Fix "CRITICAL" errors being logged when a request is received for a uri containing non-ascii characters.
+
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 9f2d035fa0..911251c0bc 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -88,7 +88,7 @@ class SynapseRequest(Request):
     def get_redacted_uri(self):
         uri = self.uri
         if isinstance(uri, bytes):
-            uri = self.uri.decode("ascii")
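+            # Replace non-ASCII bytes with U+FFFD instead of raising
+            # UnicodeDecodeError, which was being logged as a CRITICAL error.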
+            uri = self.uri.decode("ascii", errors="replace")
         return redact_uri(uri)
 
     def get_method(self):
-- 
cgit 1.4.1


From 8039685051c08354b64890abb2522f2535c784b8 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 13 Jan 2020 12:42:44 +0000
Subject: Allow additional_resources to implement Resource directly (#6686)

AdditionalResource really doesn't add any value, and it gets in the way for
resources which want to support child resources or the like. So, if the
resource object already implements the IResource interface, don't bother
wrapping it.
---
 changelog.d/6686.misc     |  1 +
 synapse/app/homeserver.py | 13 +++++++++++--
 2 files changed, 12 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6686.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6686.misc b/changelog.d/6686.misc
new file mode 100644
index 0000000000..4070f2e563
--- /dev/null
+++ b/changelog.d/6686.misc
@@ -0,0 +1 @@
+Allow additional_resources to implement IResource directly.
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index e5b44a5eed..c2a334a2b0 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -31,7 +31,7 @@ from prometheus_client import Gauge
 from twisted.application import service
 from twisted.internet import defer, reactor
 from twisted.python.failure import Failure
-from twisted.web.resource import EncodingResourceWrapper, NoResource
+from twisted.web.resource import EncodingResourceWrapper, IResource, NoResource
 from twisted.web.server import GzipEncoderFactory
 from twisted.web.static import File
 
@@ -109,7 +109,16 @@ class SynapseHomeServer(HomeServer):
         for path, resmodule in additional_resources.items():
             handler_cls, config = load_module(resmodule)
             handler = handler_cls(config, module_api)
-            resources[path] = AdditionalResource(self, handler.handle_request)
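+            # A handler that already provides twisted's IResource can be
+            # mounted directly; otherwise fall back to wrapping its
+            # handle_request method in an AdditionalResource.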
+            if IResource.providedBy(handler):
+                resource = handler
+            elif hasattr(handler, "handle_request"):
+                resource = AdditionalResource(self, handler.handle_request)
+            else:
+                raise ConfigError(
+                    "additional_resource %s does not implement a known interface"
+                    % (resmodule["module"],)
+                )
+            resources[path] = resource
 
         # try to find something useful to redirect '/' to
         if WEB_CLIENT_PREFIX in resources:
-- 
cgit 1.4.1


From 2d07c73777e837213f1c3c85b9cb446aac8b6170 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 13 Jan 2020 12:47:30 +0000
Subject: Don't assign numeric IDs for empty usernames (#6690)

Fix a bug where we would assign a numeric userid if somebody tried registering
with an empty username
---
 changelog.d/6690.bugfix      | 1 +
 synapse/handlers/register.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6690.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6690.bugfix b/changelog.d/6690.bugfix
new file mode 100644
index 0000000000..30ce1dc9f7
--- /dev/null
+++ b/changelog.d/6690.bugfix
@@ -0,0 +1 @@
+Fix a bug where we would assign a numeric userid if somebody tried registering with an empty username.
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 8a7d965feb..885da82985 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -174,7 +174,7 @@ class RegistrationHandler(BaseHandler):
         if password:
             password_hash = yield self._auth_handler.hash(password)
 
-        if localpart:
+        if localpart is not None:
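+            # Explicit None check: an empty-string localpart must still go
+            # through check_username (which rejects it) rather than silently
+            # being given a generated numeric user ID.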
             yield self.check_username(localpart, guest_access_token=guest_access_token)
 
             was_guest = guest_access_token is not None
-- 
cgit 1.4.1


From 326c893d24e0f03b62bd9d1136a335f329bb8528 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 13 Jan 2020 12:48:22 +0000
Subject: Kill off RegistrationError (#6691)

This is pretty pointless. Let's just use SynapseError.
---
 changelog.d/6691.misc           |  1 +
 synapse/api/errors.py           |  6 ------
 synapse/handlers/register.py    | 12 +++---------
 tests/handlers/test_register.py |  2 --
 4 files changed, 4 insertions(+), 17 deletions(-)
 create mode 100644 changelog.d/6691.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6691.misc b/changelog.d/6691.misc
new file mode 100644
index 0000000000..104e9ce648
--- /dev/null
+++ b/changelog.d/6691.misc
@@ -0,0 +1 @@
+Remove redundant RegistrationError class.
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 5853a54c95..9e9844b47c 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -158,12 +158,6 @@ class UserDeactivatedError(SynapseError):
         )
 
 
-class RegistrationError(SynapseError):
-    """An error raised when a registration event fails."""
-
-    pass
-
-
 class FederationDeniedError(SynapseError):
     """An error raised when the server tries to federate with a server which
     is not on its federation whitelist.
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 885da82985..7ffc194f0c 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -20,13 +20,7 @@ from twisted.internet import defer
 
 from synapse import types
 from synapse.api.constants import MAX_USERID_LENGTH, LoginType
-from synapse.api.errors import (
-    AuthError,
-    Codes,
-    ConsentNotGivenError,
-    RegistrationError,
-    SynapseError,
-)
+from synapse.api.errors import AuthError, Codes, ConsentNotGivenError, SynapseError
 from synapse.config.server import is_threepid_reserved
 from synapse.http.servlet import assert_params_in_dict
 from synapse.replication.http.login import RegisterDeviceReplicationServlet
@@ -165,7 +159,7 @@ class RegistrationHandler(BaseHandler):
         Returns:
             Deferred[str]: user_id
         Raises:
-            RegistrationError if there was a problem registering.
+            SynapseError if there was a problem registering.
         """
         yield self.check_registration_ratelimit(address)
 
@@ -182,7 +176,7 @@ class RegistrationHandler(BaseHandler):
             if not was_guest:
                 try:
                     int(localpart)
-                    raise RegistrationError(
+                    raise SynapseError(
                         400, "Numeric user IDs are reserved for guest users."
                     )
                 except ValueError:
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 1e9ba3a201..e2915eb7b1 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -269,8 +269,6 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
               one will be randomly generated.
         Returns:
             A tuple of (user_id, access_token).
-        Raises:
-            RegistrationError if there was a problem registering.
         """
         if localpart is None:
             raise SynapseError(400, "Request must include user id")
-- 
cgit 1.4.1


From 47f4f493f0886af5c9aad5c78885bb6869018dda Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 13 Jan 2020 15:32:02 +0000
Subject: Document more supported endpoints for workers (#6698)

---
 changelog.d/6698.doc | 1 +
 docs/workers.md      | 4 ++++
 2 files changed, 5 insertions(+)
 create mode 100644 changelog.d/6698.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6698.doc b/changelog.d/6698.doc
new file mode 100644
index 0000000000..5aba51252d
--- /dev/null
+++ b/changelog.d/6698.doc
@@ -0,0 +1 @@
+Add more endpoints to the documentation for Synapse workers.
diff --git a/docs/workers.md b/docs/workers.md
index 1b5d94f5eb..f4283aeb05 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -168,8 +168,11 @@ endpoints matching the following regular expressions:
     ^/_matrix/federation/v1/make_join/
     ^/_matrix/federation/v1/make_leave/
     ^/_matrix/federation/v1/send_join/
+    ^/_matrix/federation/v2/send_join/
     ^/_matrix/federation/v1/send_leave/
+    ^/_matrix/federation/v2/send_leave/
     ^/_matrix/federation/v1/invite/
+    ^/_matrix/federation/v2/invite/
     ^/_matrix/federation/v1/query_auth/
     ^/_matrix/federation/v1/event_auth/
     ^/_matrix/federation/v1/exchange_third_party_invite/
@@ -288,6 +291,7 @@ file. For example:
 Handles some event creation. It can handle REST endpoints matching:
 
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
     ^/_matrix/client/(api/v1|r0|unstable)/join/
     ^/_matrix/client/(api/v1|r0|unstable)/profile/
-- 
cgit 1.4.1


From 1177d3f3a33bd3ae1eef46fba360d319598359ad Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 13 Jan 2020 18:10:43 +0000
Subject: Quarantine media by ID or user ID (#6681)

---
 changelog.d/6681.feature                 |   1 +
 docs/admin_api/media_admin_api.md        |  76 ++++++-
 docs/workers.md                          |   4 +-
 synapse/rest/admin/media.py              |  68 +++++-
 synapse/storage/data_stores/main/room.py | 116 ++++++++++-
 tests/rest/admin/test_admin.py           | 341 +++++++++++++++++++++++++++++++
 tests/rest/client/v1/utils.py            |  37 ++++
 7 files changed, 632 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/6681.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6681.feature b/changelog.d/6681.feature
new file mode 100644
index 0000000000..5cf19a4e0e
--- /dev/null
+++ b/changelog.d/6681.feature
@@ -0,0 +1 @@
+Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media.
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index 8b3666d5f5..46ba7a1a71 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -22,19 +22,81 @@ It returns a JSON body like the following:
 }
 ```
 
-# Quarantine media in a room
+# Quarantine media
 
-This API 'quarantines' all the media in a room.
+Quarantining media means that it is marked as inaccessible to users. It applies
+to any local media, and any locally-cached copies of remote media.
 
-The API is:
+The media file itself (and any thumbnails) is not deleted from the server.
+
+## Quarantining media by ID
+
+This API quarantines a single piece of local or remote media.
+
+Request:
 
 ```
-POST /_synapse/admin/v1/quarantine_media/<room_id>
+POST /_synapse/admin/v1/media/quarantine/<server_name>/<media_id>
 
 {}
 ```
 
-Quarantining media means that it is marked as inaccessible by users. It applies
-to any local media, and any locally-cached copies of remote media.
+Where `server_name` is in the form of `example.org`, and `media_id` is in the
+form of `abcdefg12345...`.
+
+Response:
+
+```
+{}
+```
+
+## Quarantining media in a room
+
+This API quarantines all local and remote media in a room.
+
+Request:
+
+```
+POST /_synapse/admin/v1/room/<room_id>/media/quarantine
+
+{}
+```
+
+Where `room_id` is in the form of `!roomid12345:example.org`.
+
+Response:
+
+```
+{
+  "num_quarantined": 10  # The number of media items successfully quarantined
+}
+```
+
+Note that there is a legacy endpoint, `POST
+/_synapse/admin/v1/quarantine_media/<room_id>`, which behaves in the same way.
+However, it is deprecated and may be removed in a future release.
+
+## Quarantining all media of a user
+
+This API quarantines all *local* media that a *local* user has uploaded. That is to say, if
+you would like to quarantine media uploaded by a user on a remote homeserver, you should
+instead use one of the other APIs.
+
+Request:
+
+```
+POST /_synapse/admin/v1/user/<user_id>/media/quarantine
+
+{}
+```
+
+Where `user_id` is in the form of `@bob:example.org`.
+
+Response:
+
+```
+{
+  "num_quarantined": 10  # The number of media items successfully quarantined
+}
+```
 
-The media file itself (and any thumbnails) is not deleted from the server.
diff --git a/docs/workers.md b/docs/workers.md
index f4283aeb05..0ab269fd96 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -202,7 +202,9 @@ Handles the media repository. It can handle all endpoints starting with:
 ... and the following regular expressions matching media-specific administration APIs:
 
     ^/_synapse/admin/v1/purge_media_cache$
-    ^/_synapse/admin/v1/room/.*/media$
+    ^/_synapse/admin/v1/room/.*/media.*$
+    ^/_synapse/admin/v1/user/.*/media.*$
+    ^/_synapse/admin/v1/media/.*$
     ^/_synapse/admin/v1/quarantine_media/.*$
 
 You should also set `enable_media_repo: False` in the shared configuration
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index fa833e54cf..3a445d6eed 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -32,16 +32,24 @@ class QuarantineMediaInRoom(RestServlet):
     this server.
     """
 
-    PATTERNS = historical_admin_path_patterns("/quarantine_media/(?P<room_id>[^/]+)")
+    PATTERNS = (
+        historical_admin_path_patterns("/room/(?P<room_id>[^/]+)/media/quarantine")
+        +
+        # This path kept around for legacy reasons
+        historical_admin_path_patterns("/quarantine_media/(?P<room_id>![^/]+)")
+    )
 
     def __init__(self, hs):
         self.store = hs.get_datastore()
         self.auth = hs.get_auth()
 
-    async def on_POST(self, request, room_id):
+    async def on_POST(self, request, room_id: str):
         requester = await self.auth.get_user_by_req(request)
         await assert_user_is_admin(self.auth, requester.user)
 
+        logging.info("Quarantining room: %s", room_id)
+
+        # Quarantine all media in this room
         num_quarantined = await self.store.quarantine_media_ids_in_room(
             room_id, requester.user.to_string()
         )
@@ -49,6 +57,60 @@ class QuarantineMediaInRoom(RestServlet):
         return 200, {"num_quarantined": num_quarantined}
 
 
+class QuarantineMediaByUser(RestServlet):
+    """Quarantines all local media by a given user so that no one can download it via
+    this server.
+    """
+
+    PATTERNS = historical_admin_path_patterns(
+        "/user/(?P[^/]+)/media/quarantine"
+    )
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+
+    async def on_POST(self, request, user_id: str):
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        logging.info("Quarantining local media by user: %s", user_id)
+
+        # Quarantine all media this user has uploaded
+        num_quarantined = await self.store.quarantine_media_ids_by_user(
+            user_id, requester.user.to_string()
+        )
+
+        return 200, {"num_quarantined": num_quarantined}
+
+
+class QuarantineMediaByID(RestServlet):
+    """Quarantines local or remote media by a given ID so that no one can download
+    it via this server.
+    """
+
+    PATTERNS = historical_admin_path_patterns(
+        "/media/quarantine/(?P[^/]+)/(?P[^/]+)"
+    )
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+
+    async def on_POST(self, request, server_name: str, media_id: str):
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        logging.info("Quarantining local media by ID: %s/%s", server_name, media_id)
+
+        # Quarantine this media id
+        await self.store.quarantine_media_by_id(
+            server_name, media_id, requester.user.to_string()
+        )
+
+        return 200, {}
+
+
 class ListMediaInRoom(RestServlet):
     """Lists all of the media in a given room.
     """
@@ -94,4 +156,6 @@ def register_servlets_for_media_repo(hs, http_server):
     """
     PurgeMediaCacheRestServlet(hs).register(http_server)
     QuarantineMediaInRoom(hs).register(http_server)
+    QuarantineMediaByID(hs).register(http_server)
+    QuarantineMediaByUser(hs).register(http_server)
     ListMediaInRoom(hs).register(http_server)
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index 8636d75030..49bab62be3 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -18,7 +18,7 @@ import collections
 import logging
 import re
 from abc import abstractmethod
-from typing import Optional, Tuple
+from typing import List, Optional, Tuple
 
 from six import integer_types
 
@@ -399,6 +399,8 @@ class RoomWorkerStore(SQLBaseStore):
         the associated media
         """
 
+        logger.info("Quarantining media in room: %s", room_id)
+
         def _quarantine_media_in_room_txn(txn):
             local_mxcs, remote_mxcs = self._get_media_mxcs_in_room_txn(txn, room_id)
             total_media_quarantined = 0
@@ -494,6 +496,118 @@ class RoomWorkerStore(SQLBaseStore):
 
         return local_media_mxcs, remote_media_mxcs
 
+    def quarantine_media_by_id(
+        self, server_name: str, media_id: str, quarantined_by: str,
+    ):
+        """quarantines a single local or remote media id
+
+        Args:
+            server_name: The name of the server that holds this media
+            media_id: The ID of the media to be quarantined
+            quarantined_by: The user ID that initiated the quarantine request
+        """
+        logger.info("Quarantining media: %s/%s", server_name, media_id)
+        is_local = server_name == self.config.server_name
+
+        def _quarantine_media_by_id_txn(txn):
+            local_mxcs = [media_id] if is_local else []
+            remote_mxcs = [(server_name, media_id)] if not is_local else []
+
+            return self._quarantine_media_txn(
+                txn, local_mxcs, remote_mxcs, quarantined_by
+            )
+
+        return self.db.runInteraction(
+            "quarantine_media_by_user", _quarantine_media_by_id_txn
+        )
+
+    def quarantine_media_ids_by_user(self, user_id: str, quarantined_by: str):
+        """quarantines all local media associated with a single user
+
+        Args:
+            user_id: The ID of the user to quarantine media of
+            quarantined_by: The ID of the user who made the quarantine request
+        """
+
+        def _quarantine_media_by_user_txn(txn):
+            local_media_ids = self._get_media_ids_by_user_txn(txn, user_id)
+            return self._quarantine_media_txn(txn, local_media_ids, [], quarantined_by)
+
+        return self.db.runInteraction(
+            "quarantine_media_by_user", _quarantine_media_by_user_txn
+        )
+
+    def _get_media_ids_by_user_txn(self, txn, user_id: str, filter_quarantined=True):
+        """Retrieves local media IDs by a given user
+
+        Args:
+            txn (cursor)
+            user_id: The ID of the user to retrieve media IDs of
+
+        Returns:
+            The local and remote media as a lists of tuples where the key is
+            the hostname and the value is the media ID.
+        """
+        # Local media
+        sql = """
+            SELECT media_id
+            FROM local_media_repository
+            WHERE user_id = ?
+            """
+        if filter_quarantined:
+            sql += "AND quarantined_by IS NULL"
+        txn.execute(sql, (user_id,))
+
+        local_media_ids = [row[0] for row in txn]
+
+        # TODO: Figure out all remote media a user has referenced in a message
+
+        return local_media_ids
+
+    def _quarantine_media_txn(
+        self,
+        txn,
+        local_mxcs: List[str],
+        remote_mxcs: List[Tuple[str, str]],
+        quarantined_by: str,
+    ) -> int:
+        """Quarantine local and remote media items
+
+        Args:
+            txn (cursor)
+            local_mxcs: A list of local media IDs
+            remote_mxcs: A list of (remote server, media id) tuples representing
+                remote mxc URLs
+            quarantined_by: The ID of the user who initiated the quarantine request
+        Returns:
+            The total number of media items quarantined
+        """
+        total_media_quarantined = 0
+
+        # Update all the tables to set the quarantined_by flag
+        txn.executemany(
+            """
+            UPDATE local_media_repository
+            SET quarantined_by = ?
+            WHERE media_id = ?
+        """,
+            ((quarantined_by, media_id) for media_id in local_mxcs),
+        )
+
+        txn.executemany(
+            """
+                UPDATE remote_media_cache
+                SET quarantined_by = ?
+                WHERE media_origin = ? AND media_id = ?
+            """,
+            ((quarantined_by, origin, media_id) for origin, media_id in remote_mxcs),
+        )
+
+        total_media_quarantined += len(local_mxcs)
+        total_media_quarantined += len(remote_mxcs)
+
+        return total_media_quarantined
+
 
 class RoomBackgroundUpdateStore(SQLBaseStore):
     REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 6ceb483aa8..7a7e898843 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -14,11 +14,17 @@
 # limitations under the License.
 
 import json
+import os
+import urllib.parse
+from binascii import unhexlify
 
 from mock import Mock
 
+from twisted.internet.defer import Deferred
+
 import synapse.rest.admin
 from synapse.http.server import JsonResource
+from synapse.logging.context import make_deferred_yieldable
 from synapse.rest.admin import VersionServlet
 from synapse.rest.client.v1 import events, login, room
 from synapse.rest.client.v2_alpha import groups
@@ -346,3 +352,338 @@ class PurgeRoomTestCase(unittest.HomeserverTestCase):
             self.assertEqual(count, 0, msg="Rows not purged in {}".format(table))
 
     test_purge_room.skip = "Disabled because it's currently broken"
+
+
+class QuarantineMediaTestCase(unittest.HomeserverTestCase):
+    """Test /quarantine_media admin API.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        synapse.rest.admin.register_servlets_for_media_repo,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+        self.hs = hs
+
+        # Allow for uploading and downloading to/from the media repo
+        self.media_repo = hs.get_media_repository_resource()
+        self.download_resource = self.media_repo.children[b"download"]
+        self.upload_resource = self.media_repo.children[b"upload"]
+        self.image_data = unhexlify(
+            b"89504e470d0a1a0a0000000d4948445200000001000000010806"
+            b"0000001f15c4890000000a49444154789c63000100000500010d"
+            b"0a2db40000000049454e44ae426082"
+        )
+
+    def make_homeserver(self, reactor, clock):
+
+        self.fetches = []
+
+        def get_file(destination, path, output_stream, args=None, max_size=None):
+            """
+            Returns tuple[int,dict,str,int] of file length, response headers,
+            absolute URI, and response code.
+            """
+
+            def write_to(r):
+                data, response = r
+                output_stream.write(data)
+                return response
+
+            d = Deferred()
+            d.addCallback(write_to)
+            self.fetches.append((d, destination, path, args))
+            return make_deferred_yieldable(d)
+
+        client = Mock()
+        client.get_file = get_file
+
+        self.storage_path = self.mktemp()
+        self.media_store_path = self.mktemp()
+        os.mkdir(self.storage_path)
+        os.mkdir(self.media_store_path)
+
+        config = self.default_config()
+        config["media_store_path"] = self.media_store_path
+        config["thumbnail_requirements"] = {}
+        config["max_image_pixels"] = 2000000
+
+        provider_config = {
+            "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend",
+            "store_local": True,
+            "store_synchronous": False,
+            "store_remote": True,
+            "config": {"directory": self.storage_path},
+        }
+        config["media_storage_providers"] = [provider_config]
+
+        hs = self.setup_test_homeserver(config=config, http_client=client)
+
+        return hs
+
+    def test_quarantine_media_requires_admin(self):
+        self.register_user("nonadmin", "pass", admin=False)
+        non_admin_user_tok = self.login("nonadmin", "pass")
+
+        # Attempt quarantine media APIs as non-admin
+        url = "/_synapse/admin/v1/media/quarantine/example.org/abcde12345"
+        request, channel = self.make_request(
+            "POST", url.encode("ascii"), access_token=non_admin_user_tok,
+        )
+        self.render(request)
+
+        # Expect a forbidden error
+        self.assertEqual(
+            403,
+            int(channel.result["code"]),
+            msg="Expected forbidden on quarantining media as a non-admin",
+        )
+
+        # And the roomID/userID endpoint
+        url = "/_synapse/admin/v1/room/!room%3Aexample.com/media/quarantine"
+        request, channel = self.make_request(
+            "POST", url.encode("ascii"), access_token=non_admin_user_tok,
+        )
+        self.render(request)
+
+        # Expect a forbidden error
+        self.assertEqual(
+            403,
+            int(channel.result["code"]),
+            msg="Expected forbidden on quarantining media as a non-admin",
+        )
+
+    def test_quarantine_media_by_id(self):
+        self.register_user("id_admin", "pass", admin=True)
+        admin_user_tok = self.login("id_admin", "pass")
+
+        self.register_user("id_nonadmin", "pass", admin=False)
+        non_admin_user_tok = self.login("id_nonadmin", "pass")
+
+        # Upload some media into the room
+        response = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=admin_user_tok
+        )
+
+        # Extract media ID from the response
+        server_name_and_media_id = response["content_uri"][
+            6:
+        ]  # Cut off the 'mxc://' bit
+        server_name, media_id = server_name_and_media_id.split("/")
+
+        # Attempt to access the media
+        request, channel = self.make_request(
+            "GET",
+            server_name_and_media_id,
+            shorthand=False,
+            access_token=non_admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be successful
+        self.assertEqual(200, int(channel.code), msg=channel.result["body"])
+
+        # Quarantine the media
+        url = "/_synapse/admin/v1/media/quarantine/%s/%s" % (
+            urllib.parse.quote(server_name),
+            urllib.parse.quote(media_id),
+        )
+        request, channel = self.make_request("POST", url, access_token=admin_user_tok,)
+        self.render(request)
+        self.pump(1.0)
+        self.assertEqual(200, int(channel.code), msg=channel.result["body"])
+
+        # Attempt to access the media
+        request, channel = self.make_request(
+            "GET",
+            server_name_and_media_id,
+            shorthand=False,
+            access_token=admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be quarantined
+        self.assertEqual(
+            404,
+            int(channel.code),
+            msg=(
+                "Expected to receive a 404 on accessing quarantined media: %s"
+                % server_name_and_media_id
+            ),
+        )
+
+    def test_quarantine_all_media_in_room(self):
+        self.register_user("room_admin", "pass", admin=True)
+        admin_user_tok = self.login("room_admin", "pass")
+
+        non_admin_user = self.register_user("room_nonadmin", "pass", admin=False)
+        non_admin_user_tok = self.login("room_nonadmin", "pass")
+
+        room_id = self.helper.create_room_as(non_admin_user, tok=admin_user_tok)
+        self.helper.join(room_id, non_admin_user, tok=non_admin_user_tok)
+
+        # Upload some media
+        response_1 = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=non_admin_user_tok
+        )
+        response_2 = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=non_admin_user_tok
+        )
+
+        # Extract mxcs
+        mxc_1 = response_1["content_uri"]
+        mxc_2 = response_2["content_uri"]
+
+        # Send it into the room
+        self.helper.send_event(
+            room_id,
+            "m.room.message",
+            content={"body": "image-1", "msgtype": "m.image", "url": mxc_1},
+            txn_id="111",
+            tok=non_admin_user_tok,
+        )
+        self.helper.send_event(
+            room_id,
+            "m.room.message",
+            content={"body": "image-2", "msgtype": "m.image", "url": mxc_2},
+            txn_id="222",
+            tok=non_admin_user_tok,
+        )
+
+        # Quarantine all media in the room
+        url = "/_synapse/admin/v1/room/%s/media/quarantine" % urllib.parse.quote(
+            room_id
+        )
+        request, channel = self.make_request("POST", url, access_token=admin_user_tok,)
+        self.render(request)
+        self.pump(1.0)
+        self.assertEqual(200, int(channel.code), msg=channel.result["body"])
+        self.assertEqual(
+            json.loads(channel.result["body"].decode("utf-8")),
+            {"num_quarantined": 2},
+            "Expected 2 quarantined items",
+        )
+
+        # Convert mxc URLs to server/media_id strings
+        server_and_media_id_1 = mxc_1[6:]
+        server_and_media_id_2 = mxc_2[6:]
+
+        # Test that we cannot download any of the media anymore
+        request, channel = self.make_request(
+            "GET",
+            server_and_media_id_1,
+            shorthand=False,
+            access_token=non_admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be quarantined
+        self.assertEqual(
+            404,
+            int(channel.code),
+            msg=(
+                "Expected to receive a 404 on accessing quarantined media: %s"
+                % server_and_media_id_1
+            ),
+        )
+
+        request, channel = self.make_request(
+            "GET",
+            server_and_media_id_2,
+            shorthand=False,
+            access_token=non_admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be quarantined
+        self.assertEqual(
+            404,
+            int(channel.code),
+            msg=(
+                "Expected to receive a 404 on accessing quarantined media: %s"
+                % server_and_media_id_2
+            ),
+        )
+
+    def test_quarantine_all_media_by_user(self):
+        self.register_user("user_admin", "pass", admin=True)
+        admin_user_tok = self.login("user_admin", "pass")
+
+        non_admin_user = self.register_user("user_nonadmin", "pass", admin=False)
+        non_admin_user_tok = self.login("user_nonadmin", "pass")
+
+        # Upload some media
+        response_1 = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=non_admin_user_tok
+        )
+        response_2 = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=non_admin_user_tok
+        )
+
+        # Extract media IDs
+        server_and_media_id_1 = response_1["content_uri"][6:]
+        server_and_media_id_2 = response_2["content_uri"][6:]
+
+        # Quarantine all media by this user
+        url = "/_synapse/admin/v1/user/%s/media/quarantine" % urllib.parse.quote(
+            non_admin_user
+        )
+        request, channel = self.make_request(
+            "POST", url.encode("ascii"), access_token=admin_user_tok,
+        )
+        self.render(request)
+        self.pump(1.0)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(
+            json.loads(channel.result["body"].decode("utf-8")),
+            {"num_quarantined": 2},
+            "Expected 2 quarantined items",
+        )
+
+        # Attempt to access the first piece of media
+        request, channel = self.make_request(
+            "GET",
+            server_and_media_id_1,
+            shorthand=False,
+            access_token=non_admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be quarantined
+        self.assertEqual(
+            404,
+            int(channel.code),
+            msg=(
+                "Expected to receive a 404 on accessing quarantined media: %s"
+                % server_and_media_id_1
+            ),
+        )
+
+        # Attempt to access the second piece of media
+        request, channel = self.make_request(
+            "GET",
+            server_and_media_id_2,
+            shorthand=False,
+            access_token=non_admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be quarantined
+        self.assertEqual(
+            404,
+            int(channel.code),
+            msg=(
+                "Expected to receive a 404 on accessing quarantined media: %s"
+                % server_and_media_id_2
+            ),
+        )
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index e7417b3d14..873d5ef99c 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -21,6 +21,8 @@ import time
 
 import attr
 
+from twisted.web.resource import Resource
+
 from synapse.api.constants import Membership
 
 from tests.server import make_request, render
@@ -160,3 +162,38 @@ class RestHelper(object):
         )
 
         return channel.json_body
+
+    def upload_media(
+        self,
+        resource: Resource,
+        image_data: bytes,
+        tok: str,
+        filename: str = "test.png",
+        expect_code: int = 200,
+    ) -> dict:
+        """Upload a piece of test media to the media repo
+        Args:
+            resource: The resource that will handle the upload request
+            image_data: The image data to upload
+            tok: The user token to use during the upload
+            filename: The filename of the media to be uploaded
+            expect_code: The return code to expect from attempting to upload the media
+        """
+        image_length = len(image_data)
+        path = "/_matrix/media/r0/upload?filename=%s" % (filename,)
+        request, channel = make_request(
+            self.hs.get_reactor(), "POST", path, content=image_data, access_token=tok
+        )
+        request.requestHeaders.addRawHeader(
+            b"Content-Length", str(image_length).encode("UTF-8")
+        )
+        request.render(resource)
+        self.hs.get_reactor().pump([100])
+
+        assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % (
+            expect_code,
+            int(channel.result["code"]),
+            channel.result["body"],
+        )
+
+        return channel.json_body
-- 
cgit 1.4.1
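
[Annotation: as a usage sketch for the two endpoints added above, here is an
illustrative admin client using only the Python standard library. The
homeserver URL, admin token, and example IDs are placeholders, not part of the
patch; the endpoint paths and response bodies match the tests in the diff.]

    import json
    import urllib.parse
    import urllib.request

    HOMESERVER = "http://localhost:8008"  # placeholder homeserver base URL
    ADMIN_TOKEN = "syt_placeholder_token"  # placeholder admin access token

    def _post(path: str) -> dict:
        # The quarantine endpoints are bare POSTs authenticated with an
        # admin user's access token.
        req = urllib.request.Request(HOMESERVER + path, data=b"", method="POST")
        req.add_header("Authorization", "Bearer " + ADMIN_TOKEN)
        with urllib.request.urlopen(req) as resp:
            return json.load(resp)

    # Quarantine one media item by server name and media ID; returns {}.
    print(_post("/_synapse/admin/v1/media/quarantine/example.org/abcde12345"))

    # Quarantine everything a local user has uploaded; returns a count,
    # e.g. {"num_quarantined": 2}.
    user = urllib.parse.quote("@spammer:example.org")
    print(_post("/_synapse/admin/v1/user/%s/media/quarantine" % user))
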


From e8b68a4e4b439065536c281d8997af85880f6ee2 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 14 Jan 2020 14:08:06 +0000
Subject: Fixup synapse.replication to pass mypy checks (#6667)

---
 changelog.d/6667.misc                         |  1 +
 synapse/replication/http/_base.py             | 10 ++---
 synapse/replication/slave/storage/_base.py    |  7 ++--
 synapse/replication/slave/storage/presence.py |  2 +-
 synapse/replication/tcp/client.py             | 12 +++---
 synapse/replication/tcp/commands.py           | 42 ++++++++++----------
 synapse/replication/tcp/protocol.py           | 36 ++++++++++-------
 synapse/replication/tcp/resource.py           |  3 +-
 synapse/replication/tcp/streams/_base.py      | 57 ++++++++++++++-------------
 synapse/replication/tcp/streams/events.py     | 16 +++++---
 synapse/replication/tcp/streams/federation.py |  4 +-
 tox.ini                                       |  1 +
 12 files changed, 105 insertions(+), 86 deletions(-)
 create mode 100644 changelog.d/6667.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6667.misc b/changelog.d/6667.misc
new file mode 100644
index 0000000000..227f80a508
--- /dev/null
+++ b/changelog.d/6667.misc
@@ -0,0 +1 @@
+Fixup `synapse.replication` to pass mypy checks.
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index c8056b0c0c..444eb7b7f4 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -16,6 +16,7 @@
 import abc
 import logging
 import re
+from typing import Dict, List, Tuple
 
 from six import raise_from
 from six.moves import urllib
@@ -78,9 +79,8 @@ class ReplicationEndpoint(object):
 
     __metaclass__ = abc.ABCMeta
 
-    NAME = abc.abstractproperty()
-    PATH_ARGS = abc.abstractproperty()
-
+    NAME = abc.abstractproperty()  # type: str  # type: ignore
+    PATH_ARGS = abc.abstractproperty()  # type: Tuple[str, ...]  # type: ignore
     METHOD = "POST"
     CACHE = True
     RETRY_ON_TIMEOUT = True
@@ -171,7 +171,7 @@ class ReplicationEndpoint(object):
                 # have a good idea that the request has either succeeded or failed on
                 # the master, and so whether we should clean up or not.
                 while True:
-                    headers = {}
+                    headers = {}  # type: Dict[bytes, List[bytes]]
                     inject_active_span_byte_dict(headers, None, check_destination=False)
                     try:
                         result = yield request_func(uri, data, headers=headers)
@@ -207,7 +207,7 @@ class ReplicationEndpoint(object):
         method = self.METHOD
 
         if self.CACHE:
-            handler = self._cached_handler
+            handler = self._cached_handler  # type: ignore
             url_args.append("txn_id")
 
         args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args)
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index b91a528245..704282c800 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 import logging
-from typing import Dict
+from typing import Dict, Optional
 
 import six
 
@@ -41,7 +41,7 @@ class BaseSlavedStore(SQLBaseStore):
         if isinstance(self.database_engine, PostgresEngine):
             self._cache_id_gen = SlavedIdTracker(
                 db_conn, "cache_invalidation_stream", "stream_id"
-            )
+            )  # type: Optional[SlavedIdTracker]
         else:
             self._cache_id_gen = None
 
@@ -62,7 +62,8 @@ class BaseSlavedStore(SQLBaseStore):
 
     def process_replication_rows(self, stream_name, token, rows):
         if stream_name == "caches":
-            self._cache_id_gen.advance(token)
+            if self._cache_id_gen:
+                self._cache_id_gen.advance(token)
             for row in rows:
                 if row.cache_func == CURRENT_STATE_CACHE_NAME:
                     room_id = row.keys[0]
diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py
index f552e7c972..ad8f0c15a9 100644
--- a/synapse/replication/slave/storage/presence.py
+++ b/synapse/replication/slave/storage/presence.py
@@ -29,7 +29,7 @@ class SlavedPresenceStore(BaseSlavedStore):
 
         self._presence_on_startup = self._get_active_presence(db_conn)
 
-        self.presence_stream_cache = self.presence_stream_cache = StreamChangeCache(
+        self.presence_stream_cache = StreamChangeCache(
             "PresenceStreamChangeCache", self._presence_id_gen.get_current_token()
         )
 
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index bbcb84646c..aa7fd90e26 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -16,7 +16,7 @@
 """
 
 import logging
-from typing import Dict
+from typing import Dict, List, Optional
 
 from twisted.internet import defer
 from twisted.internet.protocol import ReconnectingClientFactory
@@ -28,6 +28,7 @@ from synapse.replication.tcp.protocol import (
 )
 
 from .commands import (
+    Command,
     FederationAckCommand,
     InvalidateCacheCommand,
     RemovePusherCommand,
@@ -89,15 +90,15 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
 
         # Any pending commands to be sent once a new connection has been
         # established
-        self.pending_commands = []
+        self.pending_commands = []  # type: List[Command]
 
         # Map from string -> deferred, to wake up when receiveing a SYNC with
         # the given string.
         # Used for tests.
-        self.awaiting_syncs = {}
+        self.awaiting_syncs = {}  # type: Dict[str, defer.Deferred]
 
         # The factory used to create connections.
-        self.factory = None
+        self.factory = None  # type: Optional[ReplicationClientFactory]
 
     def start_replication(self, hs):
         """Helper method to start a replication connection to the remote server
@@ -235,4 +236,5 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
         # We don't reset the delay any earlier as otherwise if there is a
         # problem during start up we'll end up tight looping connecting to the
         # server.
-        self.factory.resetDelay()
+        if self.factory:
+            self.factory.resetDelay()
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 0ff2a7199f..cbb36b9acf 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -20,15 +20,16 @@ allowed to be sent by which side.
 
 import logging
 import platform
+from typing import Tuple, Type
 
 if platform.python_implementation() == "PyPy":
     import json
 
     _json_encoder = json.JSONEncoder()
 else:
-    import simplejson as json
+    import simplejson as json  # type: ignore[no-redef]  # noqa: F821
 
-    _json_encoder = json.JSONEncoder(namedtuple_as_object=False)
+    _json_encoder = json.JSONEncoder(namedtuple_as_object=False)  # type: ignore[call-arg]  # noqa: F821
 
 logger = logging.getLogger(__name__)
 
@@ -44,7 +45,7 @@ class Command(object):
     The default implementation creates a command of form `<NAME> <data>`
     """
 
-    NAME = None
+    NAME = None  # type: str
 
     def __init__(self, data):
         self.data = data
@@ -386,25 +387,24 @@ class UserIpCommand(Command):
         )
 
 
+_COMMANDS = (
+    ServerCommand,
+    RdataCommand,
+    PositionCommand,
+    ErrorCommand,
+    PingCommand,
+    NameCommand,
+    ReplicateCommand,
+    UserSyncCommand,
+    FederationAckCommand,
+    SyncCommand,
+    RemovePusherCommand,
+    InvalidateCacheCommand,
+    UserIpCommand,
+)  # type: Tuple[Type[Command], ...]
+
 # Map of command name to command type.
-COMMAND_MAP = {
-    cmd.NAME: cmd
-    for cmd in (
-        ServerCommand,
-        RdataCommand,
-        PositionCommand,
-        ErrorCommand,
-        PingCommand,
-        NameCommand,
-        ReplicateCommand,
-        UserSyncCommand,
-        FederationAckCommand,
-        SyncCommand,
-        RemovePusherCommand,
-        InvalidateCacheCommand,
-        UserIpCommand,
-    )
-}
+COMMAND_MAP = {cmd.NAME: cmd for cmd in _COMMANDS}
 
 # The commands the server is allowed to send
 VALID_SERVER_COMMANDS = (
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index afaf002fe6..db0353c996 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -53,6 +53,7 @@ import fcntl
 import logging
 import struct
 from collections import defaultdict
+from typing import Any, DefaultDict, Dict, List, Set, Tuple
 
 from six import iteritems, iterkeys
 
@@ -65,13 +66,11 @@ from twisted.python.failure import Failure
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.util import Clock
-from synapse.util.stringutils import random_string
-
-from .commands import (
+from synapse.replication.tcp.commands import (
     COMMAND_MAP,
     VALID_CLIENT_COMMANDS,
     VALID_SERVER_COMMANDS,
+    Command,
     ErrorCommand,
     NameCommand,
     PingCommand,
@@ -82,6 +81,10 @@ from .commands import (
     SyncCommand,
     UserSyncCommand,
 )
+from synapse.types import Collection
+from synapse.util import Clock
+from synapse.util.stringutils import random_string
+
 from .streams import STREAMS_MAP
 
 connection_close_counter = Counter(
@@ -124,8 +127,11 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
 
     delimiter = b"\n"
 
-    VALID_INBOUND_COMMANDS = []  # Valid commands we expect to receive
-    VALID_OUTBOUND_COMMANDS = []  # Valid commans we can send
+    # Valid commands we expect to receive
+    VALID_INBOUND_COMMANDS = []  # type: Collection[str]
+
+    # Valid commands we can send
+    VALID_OUTBOUND_COMMANDS = []  # type: Collection[str]
 
     max_line_buffer = 10000
 
@@ -144,13 +150,13 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         self.conn_id = random_string(5)  # To dedupe in case of name clashes.
 
         # List of pending commands to send once we've established the connection
-        self.pending_commands = []
+        self.pending_commands = []  # type: List[Command]
 
         # The LoopingCall for sending pings.
         self._send_ping_loop = None
 
-        self.inbound_commands_counter = defaultdict(int)
-        self.outbound_commands_counter = defaultdict(int)
+        self.inbound_commands_counter = defaultdict(int)  # type: DefaultDict[str, int]
+        self.outbound_commands_counter = defaultdict(int)  # type: DefaultDict[str, int]
 
     def connectionMade(self):
         logger.info("[%s] Connection established", self.id())
@@ -409,14 +415,14 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
         self.streamer = streamer
 
         # The streams the client has subscribed to and is up to date with
-        self.replication_streams = set()
+        self.replication_streams = set()  # type: Set[str]
 
         # The streams the client is currently subscribing to.
-        self.connecting_streams = set()
+        self.connecting_streams = set()  # type: Set[str]
 
         # Map from stream name to list of updates to send once we've finished
         # subscribing the client to the stream.
-        self.pending_rdata = {}
+        self.pending_rdata = {}  # type: Dict[str, List[Tuple[int, Any]]]
 
     def connectionMade(self):
         self.send_command(ServerCommand(self.server_name))
@@ -642,11 +648,11 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
         # Set of stream names that have been subscribe to, but haven't yet
         # caught up with. This is used to track when the client has been fully
         # connected to the remote.
-        self.streams_connecting = set()
+        self.streams_connecting = set()  # type: Set[str]
 
         # Map of stream to batched updates. See RdataCommand for info on how
         # batching works.
-        self.pending_batches = {}
+        self.pending_batches = {}  # type: Dict[str, Any]
 
     def connectionMade(self):
         self.send_command(NameCommand(self.client_name))
@@ -766,7 +772,7 @@ def transport_kernel_read_buffer_size(protocol, read=True):
             op = SIOCINQ
         else:
             op = SIOCOUTQ
-        size = struct.unpack("I", fcntl.ioctl(fileno, op, "\0\0\0\0"))[0]
+        size = struct.unpack("I", fcntl.ioctl(fileno, op, b"\0\0\0\0"))[0]
         return size
     return 0
 
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index d1e98428bc..cbfdaf5773 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -17,6 +17,7 @@
 
 import logging
 import random
+from typing import List
 
 from six import itervalues
 
@@ -79,7 +80,7 @@ class ReplicationStreamer(object):
         self._replication_torture_level = hs.config.replication_torture_level
 
         # Current connections.
-        self.connections = []
+        self.connections = []  # type: List[ServerReplicationStreamProtocol]
 
         LaterGauge(
             "synapse_replication_tcp_resource_total_connections",
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 8512923eae..4ab0334fc1 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -14,10 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import itertools
 import logging
 from collections import namedtuple
+from typing import Any
 
 from twisted.internet import defer
 
@@ -104,8 +104,9 @@ class Stream(object):
     time it was called up until the point `advance_current_token` was called.
     """
 
-    NAME = None  # The name of the stream
-    ROW_TYPE = None  # The type of the row. Used by the default impl of parse_row.
+    NAME = None  # type: str  # The name of the stream
+    # The type of the row. Used by the default impl of parse_row.
+    ROW_TYPE = None  # type: Any
     _LIMITED = True  # Whether the update function takes a limit
 
     @classmethod
@@ -231,8 +232,8 @@ class BackfillStream(Stream):
 
     def __init__(self, hs):
         store = hs.get_datastore()
-        self.current_token = store.get_current_backfill_token
-        self.update_function = store.get_all_new_backfill_event_rows
+        self.current_token = store.get_current_backfill_token  # type: ignore
+        self.update_function = store.get_all_new_backfill_event_rows  # type: ignore
 
         super(BackfillStream, self).__init__(hs)
 
@@ -246,8 +247,8 @@ class PresenceStream(Stream):
         store = hs.get_datastore()
         presence_handler = hs.get_presence_handler()
 
-        self.current_token = store.get_current_presence_token
-        self.update_function = presence_handler.get_all_presence_updates
+        self.current_token = store.get_current_presence_token  # type: ignore
+        self.update_function = presence_handler.get_all_presence_updates  # type: ignore
 
         super(PresenceStream, self).__init__(hs)
 
@@ -260,8 +261,8 @@ class TypingStream(Stream):
     def __init__(self, hs):
         typing_handler = hs.get_typing_handler()
 
-        self.current_token = typing_handler.get_current_token
-        self.update_function = typing_handler.get_all_typing_updates
+        self.current_token = typing_handler.get_current_token  # type: ignore
+        self.update_function = typing_handler.get_all_typing_updates  # type: ignore
 
         super(TypingStream, self).__init__(hs)
 
@@ -273,8 +274,8 @@ class ReceiptsStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_max_receipt_stream_id
-        self.update_function = store.get_all_updated_receipts
+        self.current_token = store.get_max_receipt_stream_id  # type: ignore
+        self.update_function = store.get_all_updated_receipts  # type: ignore
 
         super(ReceiptsStream, self).__init__(hs)
 
@@ -310,8 +311,8 @@ class PushersStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_pushers_stream_token
-        self.update_function = store.get_all_updated_pushers_rows
+        self.current_token = store.get_pushers_stream_token  # type: ignore
+        self.update_function = store.get_all_updated_pushers_rows  # type: ignore
 
         super(PushersStream, self).__init__(hs)
 
@@ -327,8 +328,8 @@ class CachesStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_cache_stream_token
-        self.update_function = store.get_all_updated_caches
+        self.current_token = store.get_cache_stream_token  # type: ignore
+        self.update_function = store.get_all_updated_caches  # type: ignore
 
         super(CachesStream, self).__init__(hs)
 
@@ -343,8 +344,8 @@ class PublicRoomsStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_current_public_room_stream_id
-        self.update_function = store.get_all_new_public_rooms
+        self.current_token = store.get_current_public_room_stream_id  # type: ignore
+        self.update_function = store.get_all_new_public_rooms  # type: ignore
 
         super(PublicRoomsStream, self).__init__(hs)
 
@@ -360,8 +361,8 @@ class DeviceListsStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_device_stream_token
-        self.update_function = store.get_all_device_list_changes_for_remotes
+        self.current_token = store.get_device_stream_token  # type: ignore
+        self.update_function = store.get_all_device_list_changes_for_remotes  # type: ignore
 
         super(DeviceListsStream, self).__init__(hs)
 
@@ -376,8 +377,8 @@ class ToDeviceStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_to_device_stream_token
-        self.update_function = store.get_all_new_device_messages
+        self.current_token = store.get_to_device_stream_token  # type: ignore
+        self.update_function = store.get_all_new_device_messages  # type: ignore
 
         super(ToDeviceStream, self).__init__(hs)
 
@@ -392,8 +393,8 @@ class TagAccountDataStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_max_account_data_stream_id
-        self.update_function = store.get_all_updated_tags
+        self.current_token = store.get_max_account_data_stream_id  # type: ignore
+        self.update_function = store.get_all_updated_tags  # type: ignore
 
         super(TagAccountDataStream, self).__init__(hs)
 
@@ -408,7 +409,7 @@ class AccountDataStream(Stream):
     def __init__(self, hs):
         self.store = hs.get_datastore()
 
-        self.current_token = self.store.get_max_account_data_stream_id
+        self.current_token = self.store.get_max_account_data_stream_id  # type: ignore
 
         super(AccountDataStream, self).__init__(hs)
 
@@ -434,8 +435,8 @@ class GroupServerStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_group_stream_token
-        self.update_function = store.get_all_groups_changes
+        self.current_token = store.get_group_stream_token  # type: ignore
+        self.update_function = store.get_all_groups_changes  # type: ignore
 
         super(GroupServerStream, self).__init__(hs)
 
@@ -451,7 +452,7 @@ class UserSignatureStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_device_stream_token
-        self.update_function = store.get_all_user_signature_changes_for_remotes
+        self.current_token = store.get_device_stream_token  # type: ignore
+        self.update_function = store.get_all_user_signature_changes_for_remotes  # type: ignore
 
         super(UserSignatureStream, self).__init__(hs)
diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py
index d97669c886..0843e5aa90 100644
--- a/synapse/replication/tcp/streams/events.py
+++ b/synapse/replication/tcp/streams/events.py
@@ -13,7 +13,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import heapq
+from typing import Tuple, Type
 
 import attr
 
@@ -63,7 +65,8 @@ class BaseEventsStreamRow(object):
     Specifies how to identify, serialize and deserialize the different types.
     """
 
-    TypeId = None  # Unique string that ids the type. Must be overriden in sub classes.
+    # Unique string that ids the type. Must be overridden in subclasses.
+    TypeId = None  # type: str
 
     @classmethod
     def from_data(cls, data):
@@ -99,9 +102,12 @@ class EventsStreamCurrentStateRow(BaseEventsStreamRow):
     event_id = attr.ib()  # str, optional
 
 
-TypeToRow = {
-    Row.TypeId: Row for Row in (EventsStreamEventRow, EventsStreamCurrentStateRow)
-}
+_EventRows = (
+    EventsStreamEventRow,
+    EventsStreamCurrentStateRow,
+)  # type: Tuple[Type[BaseEventsStreamRow], ...]
+
+TypeToRow = {Row.TypeId: Row for Row in _EventRows}
 
 
 class EventsStream(Stream):
@@ -112,7 +118,7 @@ class EventsStream(Stream):
 
     def __init__(self, hs):
         self._store = hs.get_datastore()
-        self.current_token = self._store.get_current_events_token
+        self.current_token = self._store.get_current_events_token  # type: ignore
 
         super(EventsStream, self).__init__(hs)
 
diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py
index dc2484109d..615f3dc9ac 100644
--- a/synapse/replication/tcp/streams/federation.py
+++ b/synapse/replication/tcp/streams/federation.py
@@ -37,7 +37,7 @@ class FederationStream(Stream):
     def __init__(self, hs):
         federation_sender = hs.get_federation_sender()
 
-        self.current_token = federation_sender.get_current_token
-        self.update_function = federation_sender.get_replication_rows
+        self.current_token = federation_sender.get_current_token  # type: ignore
+        self.update_function = federation_sender.get_replication_rows  # type: ignore
 
         super(FederationStream, self).__init__(hs)
diff --git a/tox.ini b/tox.ini
index 0ab6d5666b..b73a993053 100644
--- a/tox.ini
+++ b/tox.ini
@@ -181,6 +181,7 @@ commands = mypy \
             synapse/handlers/ui_auth \
             synapse/logging/ \
             synapse/module_api \
+            synapse/replication \
             synapse/rest/consent \
             synapse/rest/saml2 \
             synapse/spam_checker_api \
-- 
cgit 1.4.1
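
[Annotation: the patch above relies throughout on PEP 484 comment-style
annotations (`# type: ...`) rather than inline variable annotations,
presumably because the codebase still targets Python versions without
PEP 526 syntax. A minimal, self-contained illustration of the idiom:]

    from typing import Dict, List

    # mypy cannot infer an element type for a container that starts out
    # empty; a type comment pins it down so later use is checked.
    pending_commands = []  # type: List[str]
    headers = {}  # type: Dict[bytes, List[bytes]]

    pending_commands.append("PING")  # ok
    headers[b"User-Agent"] = [b"Synapse"]  # ok
    # pending_commands.append(42)  # would be flagged: incompatible type "int"
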


From b5ce7f5874b1d6983a4bb992cb3a8093df6b1802 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 14 Jan 2020 14:08:35 +0000
Subject: Process EDUs in parallel with PDUs. (#6697)

This means that things like to-device messages don't get blocked behind
processing PDUs, which can potentially take *ages*.
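
[Annotation: condensed, the fan-out pattern this change introduces looks like
the sketch below, mirroring the federation_server.py hunk in the diff; it is
not a drop-in replacement, and `server` stands in for the FederationServer
instance.]

    from twisted.internet import defer

    from synapse.logging.context import make_deferred_yieldable, run_in_background
    from synapse.util import unwrapFirstError

    async def process_transaction(server, origin, transaction, request_time):
        # Kick off PDU and EDU handling concurrently. consumeErrors=True keeps
        # a failure in one branch from being reported as unhandled in the
        # other; unwrapFirstError then re-raises the underlying exception.
        pdu_results, _ = await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        server._handle_pdus_in_txn, origin, transaction, request_time
                    ),
                    run_in_background(server._handle_edus_in_txn, origin, transaction),
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )
        return {"pdus": pdu_results}
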
---
 changelog.d/6697.misc                   |  1 +
 synapse/federation/federation_server.py | 70 +++++++++++++++++++++++++++------
 2 files changed, 59 insertions(+), 12 deletions(-)
 create mode 100644 changelog.d/6697.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6697.misc b/changelog.d/6697.misc
new file mode 100644
index 0000000000..5650387804
--- /dev/null
+++ b/changelog.d/6697.misc
@@ -0,0 +1 @@
+Don't block processing of incoming EDUs behind processing PDUs in the same transaction.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index d7ce333822..8eddb3bf2c 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import Dict
 
 import six
 from six import iteritems
@@ -22,6 +23,7 @@ from six import iteritems
 from canonicaljson import json
 from prometheus_client import Counter
 
+from twisted.internet import defer
 from twisted.internet.abstract import isIPAddress
 from twisted.python import failure
 
@@ -41,7 +43,11 @@ from synapse.federation.federation_base import FederationBase, event_from_pdu_js
 from synapse.federation.persistence import TransactionActions
 from synapse.federation.units import Edu, Transaction
 from synapse.http.endpoint import parse_server_name
-from synapse.logging.context import nested_logging_context
+from synapse.logging.context import (
+    make_deferred_yieldable,
+    nested_logging_context,
+    run_in_background,
+)
 from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
 from synapse.logging.utils import log_function
 from synapse.replication.http.federation import (
@@ -49,7 +55,7 @@ from synapse.replication.http.federation import (
     ReplicationGetQueryRestServlet,
 )
 from synapse.types import get_domain_from_id
-from synapse.util import glob_to_regex
+from synapse.util import glob_to_regex, unwrapFirstError
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.caches.response_cache import ResponseCache
 
@@ -160,6 +166,43 @@ class FederationServer(FederationBase):
             )
             return 400, response
 
+        # We process PDUs and EDUs in parallel. This is important as we don't
+        # want to block things like to-device messages from reaching clients
+        # behind the potentially expensive handling of PDUs.
+        pdu_results, _ = await make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(
+                        self._handle_pdus_in_txn, origin, transaction, request_time
+                    ),
+                    run_in_background(self._handle_edus_in_txn, origin, transaction),
+                ],
+                consumeErrors=True,
+            ).addErrback(unwrapFirstError)
+        )
+
+        response = {"pdus": pdu_results}
+
+        logger.debug("Returning: %s", str(response))
+
+        await self.transaction_actions.set_response(origin, transaction, 200, response)
+        return 200, response
+
+    async def _handle_pdus_in_txn(
+        self, origin: str, transaction: Transaction, request_time: int
+    ) -> Dict[str, dict]:
+        """Process the PDUs in a received transaction.
+
+        Args:
+            origin: the server making the request
+            transaction: incoming transaction
+            request_time: timestamp that the HTTP request arrived at
+
+        Returns:
+            A map from event ID of a processed PDU to any errors we should
+            report back to the sending server.
+        """
+
         received_pdus_counter.inc(len(transaction.pdus))
 
         origin_host, _ = parse_server_name(origin)
@@ -250,20 +293,23 @@ class FederationServer(FederationBase):
             process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
         )
 
-        if hasattr(transaction, "edus"):
-            for edu in (Edu(**x) for x in transaction.edus):
-                await self.received_edu(origin, edu.edu_type, edu.content)
+        return pdu_results
 
-        response = {"pdus": pdu_results}
+    async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
+        """Process the EDUs in a received transaction.
+        """
 
-        logger.debug("Returning: %s", str(response))
+        async def _process_edu(edu_dict):
+            received_edus_counter.inc()
 
-        await self.transaction_actions.set_response(origin, transaction, 200, response)
-        return 200, response
+            edu = Edu(**edu_dict)
+            await self.registry.on_edu(edu.edu_type, origin, edu.content)
 
-    async def received_edu(self, origin, edu_type, content):
-        received_edus_counter.inc()
-        await self.registry.on_edu(edu_type, origin, content)
+        await concurrently_execute(
+            _process_edu,
+            getattr(transaction, "edus", []),
+            TRANSACTION_CONCURRENCY_LIMIT,
+        )
 
     async def on_context_state_request(self, origin, room_id, event_id):
         origin_host, _ = parse_server_name(origin)
-- 
cgit 1.4.1


From 28c98e51ffa166bd717646b0b34228e59f253485 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 15 Jan 2020 14:59:33 +0000
Subject: Add `local_current_membership` table (#6655)

Currently we rely on `current_state_events` to figure out what rooms a
user was in and their last membership event in there. However, if the
server leaves the room then the table may be cleaned up and that
information is lost. So let's add a table that separately holds that
information.
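
[Annotation: the schema delta itself is not reproduced in this excerpt, but
from the INSERT in the events-store hunk below, the new table plausibly looks
like the following sketch. The column types, the unique index, and the
`run_create` hook are assumptions about the shipped
schema/delta/57/local_current_membership.py, not a copy of it; the real delta
also needs to backfill the table from existing state.]

    def run_create(cur, database_engine, *args, **kwargs):
        # Hypothetical sketch of the table created by the delta: one row per
        # (local user, room) holding the user's latest membership event.
        cur.execute(
            """
            CREATE TABLE local_current_membership (
                room_id TEXT NOT NULL,
                user_id TEXT NOT NULL,
                event_id TEXT NOT NULL,
                membership TEXT NOT NULL
            )
            """
        )
        # Assumed unique index so upserts per user/room are well-defined.
        cur.execute(
            "CREATE UNIQUE INDEX local_current_membership_idx"
            " ON local_current_membership(user_id, room_id)"
        )
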
---
 changelog.d/6655.misc                              |   1 +
 scripts/synapse_port_db                            |   2 +-
 synapse/handlers/admin.py                          |   2 +-
 synapse/handlers/deactivate_account.py             |   2 +-
 synapse/handlers/initial_sync.py                   |   2 +-
 synapse/handlers/room_member.py                    |   2 +-
 synapse/handlers/search.py                         |   2 +-
 synapse/handlers/sync.py                           |   2 +-
 synapse/push/push_tools.py                         |   2 +-
 synapse/replication/slave/storage/events.py        |   2 +-
 synapse/server_notices/server_notices_manager.py   |   2 +-
 synapse/storage/data_stores/main/events.py         |  30 ++++
 synapse/storage/data_stores/main/roommember.py     | 189 ++++++++++++---------
 .../schema/delta/57/local_current_membership.py    |  97 +++++++++++
 synapse/storage/prepare_database.py                |   2 +-
 tests/handlers/test_sync.py                        |   4 +-
 tests/replication/slave/storage/test_events.py     |   4 +-
 tests/rest/client/v2_alpha/test_account.py         |  12 +-
 tests/rest/client/v2_alpha/test_sync.py            |   9 -
 tests/storage/test_roommember.py                   |   2 +-
 20 files changed, 263 insertions(+), 107 deletions(-)
 create mode 100644 changelog.d/6655.misc
 create mode 100644 synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py

(limited to 'changelog.d')

diff --git a/changelog.d/6655.misc b/changelog.d/6655.misc
new file mode 100644
index 0000000000..01e78bc84e
--- /dev/null
+++ b/changelog.d/6655.misc
@@ -0,0 +1 @@
+Add `local_current_membership` table for tracking local user membership state in rooms.
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index f135c8bc54..5e69104b97 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -470,7 +470,7 @@ class Porter(object):
             engine.check_database(
                 db_conn, allow_outdated_version=allow_outdated_version
             )
-            prepare_database(db_conn, engine, config=None)
+            prepare_database(db_conn, engine, config=self.hs_config)
             store = Store(Database(hs, db_config, engine), db_conn, hs)
             db_conn.commit()
 
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 76d18a8ba8..a9407553b4 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -134,7 +134,7 @@ class AdminHandler(BaseHandler):
             The returned value is that returned by `writer.finished()`.
         """
         # Get all rooms the user is in or has been in
-        rooms = await self.store.get_rooms_for_user_where_membership_is(
+        rooms = await self.store.get_rooms_for_local_user_where_membership_is(
             user_id,
             membership_list=(
                 Membership.JOIN,
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 4426967f88..2afb390a92 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -140,7 +140,7 @@ class DeactivateAccountHandler(BaseHandler):
             user_id (str): The user ID to reject pending invites for.
         """
         user = UserID.from_string(user_id)
-        pending_invites = await self.store.get_invited_rooms_for_user(user_id)
+        pending_invites = await self.store.get_invited_rooms_for_local_user(user_id)
 
         for room in pending_invites:
             try:
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 44ec3e66ae..2e6755f19c 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -101,7 +101,7 @@ class InitialSyncHandler(BaseHandler):
         if include_archived:
             memberships.append(Membership.LEAVE)
 
-        room_list = await self.store.get_rooms_for_user_where_membership_is(
+        room_list = await self.store.get_rooms_for_local_user_where_membership_is(
             user_id=user_id, membership_list=memberships
         )
 
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 03bb52ccfb..15e8aa5249 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -690,7 +690,7 @@ class RoomMemberHandler(object):
 
     @defer.inlineCallbacks
     def _get_inviter(self, user_id, room_id):
-        invite = yield self.store.get_invite_for_user_in_room(
+        invite = yield self.store.get_invite_for_local_user_in_room(
             user_id=user_id, room_id=room_id
         )
         if invite:
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index ef750d1497..110097eab9 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -179,7 +179,7 @@ class SearchHandler(BaseHandler):
         search_filter = Filter(filter_dict)
 
         # TODO: Search through left rooms too
-        rooms = yield self.store.get_rooms_for_user_where_membership_is(
+        rooms = yield self.store.get_rooms_for_local_user_where_membership_is(
             user.to_string(),
             membership_list=[Membership.JOIN],
             # membership_list=[Membership.JOIN, Membership.LEAVE, Membership.Ban],
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 2d3b8ba73c..cd95f85e3f 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1662,7 +1662,7 @@ class SyncHandler(object):
             Membership.BAN,
         )
 
-        room_list = await self.store.get_rooms_for_user_where_membership_is(
+        room_list = await self.store.get_rooms_for_local_user_where_membership_is(
             user_id=user_id, membership_list=membership_list
         )
 
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index de5c101a58..5dae4648c0 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -21,7 +21,7 @@ from synapse.storage import Storage
 
 @defer.inlineCallbacks
 def get_badge_count(store, user_id):
-    invites = yield store.get_invited_rooms_for_user(user_id)
+    invites = yield store.get_invited_rooms_for_local_user(user_id)
     joins = yield store.get_rooms_for_user(user_id)
 
     my_receipts_by_room = yield store.get_receipts_for_user(user_id, "m.read")
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index 29f35b9915..3aa6cb8b96 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -152,7 +152,7 @@ class SlavedEventStore(
 
         if etype == EventTypes.Member:
             self._membership_stream_cache.entity_has_changed(state_key, stream_ordering)
-            self.get_invited_rooms_for_user.invalidate((state_key,))
+            self.get_invited_rooms_for_local_user.invalidate((state_key,))
 
         if relates_to:
             self.get_relations_for_event.invalidate_many((relates_to,))
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index 2dac90578c..f7432c8d2f 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -105,7 +105,7 @@ class ServerNoticesManager(object):
 
         assert self._is_mine_id(user_id), "Cannot send server notices to remote users"
 
-        rooms = yield self._store.get_rooms_for_user_where_membership_is(
+        rooms = yield self._store.get_rooms_for_local_user_where_membership_is(
             user_id, [Membership.INVITE, Membership.JOIN]
         )
         system_mxid = self._config.server_notices_mxid
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 58f35d7f56..e9fe63037b 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -128,6 +128,7 @@ class EventsStore(
             hs.get_clock().looping_call(_censor_redactions, 5 * 60 * 1000)
 
         self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
+        self.is_mine_id = hs.is_mine_id
 
     @defer.inlineCallbacks
     def _read_forward_extremities(self):
@@ -547,6 +548,34 @@ class EventsStore(
                 ],
             )
 
+            # Note: Do we really want to delete rows here (that we do not
+            # subsequently reinsert below)? While technically correct it means
+            # we have no record of the fact the user *was* a member of the
+            # room but got, say, state reset out of it.
+            if to_delete or to_insert:
+                txn.executemany(
+                    "DELETE FROM local_current_membership"
+                    " WHERE room_id = ? AND user_id = ?",
+                    (
+                        (room_id, state_key)
+                        for etype, state_key in itertools.chain(to_delete, to_insert)
+                        if etype == EventTypes.Member and self.is_mine_id(state_key)
+                    ),
+                )
+
+            if to_insert:
+                txn.executemany(
+                    """INSERT INTO local_current_membership
+                        (room_id, user_id, event_id, membership)
+                    VALUES (?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
+                    """,
+                    [
+                        (room_id, key[1], ev_id, ev_id)
+                        for key, ev_id in to_insert.items()
+                        if key[0] == EventTypes.Member and self.is_mine_id(key[1])
+                    ],
+                )
+
             txn.call_after(
                 self._curr_state_delta_stream_cache.entity_has_changed,
                 room_id,
@@ -1724,6 +1753,7 @@ class EventsStore(
             "local_invites",
             "room_account_data",
             "room_tags",
+            "local_current_membership",
         ):
             logger.info("[purge] removing %s from %s", room_id, table)
             txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,))
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index 70ff5751b6..9acef7c950 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -297,19 +297,22 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         return {row[0]: row[1] for row in txn}
 
     @cached()
-    def get_invited_rooms_for_user(self, user_id):
-        """ Get all the rooms the user is invited to
+    def get_invited_rooms_for_local_user(self, user_id):
+        """ Get all the rooms the *local* user is invited to
+
         Args:
             user_id (str): The user ID.
         Returns:
             A deferred list of RoomsForUser.
         """
 
-        return self.get_rooms_for_user_where_membership_is(user_id, [Membership.INVITE])
+        return self.get_rooms_for_local_user_where_membership_is(
+            user_id, [Membership.INVITE]
+        )
 
     @defer.inlineCallbacks
-    def get_invite_for_user_in_room(self, user_id, room_id):
-        """Gets the invite for the given user and room
+    def get_invite_for_local_user_in_room(self, user_id, room_id):
+        """Gets the invite for the given *local* user and room
 
         Args:
             user_id (str)
@@ -319,15 +322,15 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             Deferred: Resolves to either a RoomsForUser or None if no invite was
                 found.
         """
-        invites = yield self.get_invited_rooms_for_user(user_id)
+        invites = yield self.get_invited_rooms_for_local_user(user_id)
         for invite in invites:
             if invite.room_id == room_id:
                 return invite
         return None
 
     @defer.inlineCallbacks
-    def get_rooms_for_user_where_membership_is(self, user_id, membership_list):
-        """ Get all the rooms for this user where the membership for this user
+    def get_rooms_for_local_user_where_membership_is(self, user_id, membership_list):
+        """ Get all the rooms for this *local* user where the membership for this user
         matches one in the membership list.
 
         Filters out forgotten rooms.
@@ -344,8 +347,8 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             return defer.succeed(None)
 
         rooms = yield self.db.runInteraction(
-            "get_rooms_for_user_where_membership_is",
-            self._get_rooms_for_user_where_membership_is_txn,
+            "get_rooms_for_local_user_where_membership_is",
+            self._get_rooms_for_local_user_where_membership_is_txn,
             user_id,
             membership_list,
         )
@@ -354,76 +357,42 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         forgotten_rooms = yield self.get_forgotten_rooms_for_user(user_id)
         return [room for room in rooms if room.room_id not in forgotten_rooms]
 
-    def _get_rooms_for_user_where_membership_is_txn(
+    def _get_rooms_for_local_user_where_membership_is_txn(
         self, txn, user_id, membership_list
     ):
+        # Paranoia check.
+        if not self.hs.is_mine_id(user_id):
+            raise Exception(
+                "Cannot call 'get_rooms_for_local_user_where_membership_is' on non-local user %r"
+                % (user_id,),
+            )
 
-        do_invite = Membership.INVITE in membership_list
-        membership_list = [m for m in membership_list if m != Membership.INVITE]
-
-        results = []
-        if membership_list:
-            if self._current_state_events_membership_up_to_date:
-                clause, args = make_in_list_sql_clause(
-                    self.database_engine, "c.membership", membership_list
-                )
-                sql = """
-                    SELECT room_id, e.sender, c.membership, event_id, e.stream_ordering
-                    FROM current_state_events AS c
-                    INNER JOIN events AS e USING (room_id, event_id)
-                    WHERE
-                        c.type = 'm.room.member'
-                        AND state_key = ?
-                        AND %s
-                """ % (
-                    clause,
-                )
-            else:
-                clause, args = make_in_list_sql_clause(
-                    self.database_engine, "m.membership", membership_list
-                )
-                sql = """
-                    SELECT room_id, e.sender, m.membership, event_id, e.stream_ordering
-                    FROM current_state_events AS c
-                    INNER JOIN room_memberships AS m USING (room_id, event_id)
-                    INNER JOIN events AS e USING (room_id, event_id)
-                    WHERE
-                        c.type = 'm.room.member'
-                        AND state_key = ?
-                        AND %s
-                """ % (
-                    clause,
-                )
-
-            txn.execute(sql, (user_id, *args))
-            results = [RoomsForUser(**r) for r in self.db.cursor_to_dict(txn)]
+        clause, args = make_in_list_sql_clause(
+            self.database_engine, "c.membership", membership_list
+        )
 
-        if do_invite:
-            sql = (
-                "SELECT i.room_id, inviter, i.event_id, e.stream_ordering"
-                " FROM local_invites as i"
-                " INNER JOIN events as e USING (event_id)"
-                " WHERE invitee = ? AND locally_rejected is NULL"
-                " AND replaced_by is NULL"
-            )
+        sql = """
+            SELECT room_id, e.sender, c.membership, event_id, e.stream_ordering
+            FROM local_current_membership AS c
+            INNER JOIN events AS e USING (room_id, event_id)
+            WHERE
+                user_id = ?
+                AND %s
+        """ % (
+            clause,
+        )
 
-            txn.execute(sql, (user_id,))
-            results.extend(
-                RoomsForUser(
-                    room_id=r["room_id"],
-                    sender=r["inviter"],
-                    event_id=r["event_id"],
-                    stream_ordering=r["stream_ordering"],
-                    membership=Membership.INVITE,
-                )
-                for r in self.db.cursor_to_dict(txn)
-            )
+        txn.execute(sql, (user_id, *args))
+        results = [RoomsForUser(**r) for r in self.db.cursor_to_dict(txn)]
 
         return results
 
-    @cachedInlineCallbacks(max_entries=500000, iterable=True)
+    @cached(max_entries=500000, iterable=True)
     def get_rooms_for_user_with_stream_ordering(self, user_id):
-        """Returns a set of room_ids the user is currently joined to
+        """Returns a set of room_ids the user is currently joined to.
+
+        For a remote user this will only return rooms the server is
+        currently participating in.
 
         Args:
             user_id (str)
@@ -433,17 +402,49 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             the rooms the user is in currently, along with the stream ordering
             of the most recent join for that user and room.
         """
-        rooms = yield self.get_rooms_for_user_where_membership_is(
-            user_id, membership_list=[Membership.JOIN]
-        )
-        return frozenset(
-            GetRoomsForUserWithStreamOrdering(r.room_id, r.stream_ordering)
-            for r in rooms
+        return self.db.runInteraction(
+            "get_rooms_for_user_with_stream_ordering",
+            self._get_rooms_for_user_with_stream_ordering_txn,
+            user_id,
         )
 
+    def _get_rooms_for_user_with_stream_ordering_txn(self, txn, user_id):
+        # We use `current_state_events` here and not `local_current_membership`
+        # as a) this gets called with remote users and b) this only gets called
+        # for rooms the server is participating in.
+        if self._current_state_events_membership_up_to_date:
+            sql = """
+                SELECT room_id, e.stream_ordering
+                FROM current_state_events AS c
+                INNER JOIN events AS e USING (room_id, event_id)
+                WHERE
+                    c.type = 'm.room.member'
+                    AND state_key = ?
+                    AND c.membership = ?
+            """
+        else:
+            sql = """
+                SELECT room_id, e.stream_ordering
+                FROM current_state_events AS c
+                INNER JOIN room_memberships AS m USING (room_id, event_id)
+                INNER JOIN events AS e USING (room_id, event_id)
+                WHERE
+                    c.type = 'm.room.member'
+                    AND state_key = ?
+                    AND m.membership = ?
+            """
+
+        txn.execute(sql, (user_id, Membership.JOIN))
+        results = frozenset(GetRoomsForUserWithStreamOrdering(*row) for row in txn)
+
+        return results
+
     @defer.inlineCallbacks
     def get_rooms_for_user(self, user_id, on_invalidate=None):
-        """Returns a set of room_ids the user is currently joined to
+        """Returns a set of room_ids the user is currently joined to.
+
+        For a remote user this will only return rooms the server is
+        currently participating in.
         """
         rooms = yield self.get_rooms_for_user_with_stream_ordering(
             user_id, on_invalidate=on_invalidate
@@ -1022,7 +1023,7 @@ class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore):
                 event.internal_metadata.stream_ordering,
             )
             txn.call_after(
-                self.get_invited_rooms_for_user.invalidate, (event.state_key,)
+                self.get_invited_rooms_for_local_user.invalidate, (event.state_key,)
             )
 
             # We update the local_invites table only if the event is "current",
@@ -1064,6 +1065,27 @@ class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore):
                         ),
                     )
 
+                # We also update the `local_current_membership` table with
+                # the latest invite info. This will usually get updated by the
+                # `current_state_events` handling, unless it's an outlier.
+                if event.internal_metadata.is_outlier():
+                    # This should only happen for out of band memberships, so
+                    # we add a paranoia check.
+                    assert event.internal_metadata.is_out_of_band_membership()
+
+                    self.db.simple_upsert_txn(
+                        txn,
+                        table="local_current_membership",
+                        keyvalues={
+                            "room_id": event.room_id,
+                            "user_id": event.state_key,
+                        },
+                        values={
+                            "event_id": event.event_id,
+                            "membership": event.membership,
+                        },
+                    )
+
     @defer.inlineCallbacks
     def locally_reject_invite(self, user_id, room_id):
         sql = (
@@ -1075,6 +1097,15 @@ class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore):
         def f(txn, stream_ordering):
             txn.execute(sql, (stream_ordering, True, room_id, user_id))
 
+            # We also clear this entry from `local_current_membership`.
+            # Ideally we'd point to a leave event, but we don't have one, so
+            # never mind.
+            self.db.simple_delete_txn(
+                txn,
+                table="local_current_membership",
+                keyvalues={"room_id": room_id, "user_id": user_id},
+            )
+
         with self._stream_id_gen.get_next() as stream_ordering:
             yield self.db.runInteraction("locally_reject_invite", f, stream_ordering)
 
diff --git a/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py b/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py
new file mode 100644
index 0000000000..601c236c4a
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# We create a new table called `local_current_membership` that stores the latest
+# membership state of local users in rooms, which helps track leaves/bans/etc
+# even if the server has left the room (and so has deleted the room from
+# `current_state_events`). This will also include outstanding invites for local
+# users for rooms the server isn't in.
+#
+# If the server isn't and hasn't been in the room then it will only include
+# outstanding invites, and not e.g. pre-emptive bans of local users.
+#
+# If the server later rejoins a room `local_current_membership` can simply be
+# replaced with the new current state of the room (which results in the
+# equivalent behaviour as if the server had remained in the room).
+
+
+def run_upgrade(cur, database_engine, config, *args, **kwargs):
+    # We need to do the insert in the `run_upgrade` section as we don't have access
+    # to `config` in `run_create`.
+
+    # This upgrade may take a bit of time for large servers (e.g. one minute for
+    # matrix.org) but means we avoid a lot of bookkeeping required to do it as
+    # a background update.
+
+    # We check if the `current_state_events.membership` is up to date by
+    # checking if the relevant background update has finished. If it has
+    # finished we can avoid doing a join against `room_memberships`, which
+    # speeds things up.
+    cur.execute(
+        """SELECT 1 FROM background_updates
+            WHERE update_name = 'current_state_events_membership'
+        """
+    )
+    current_state_membership_up_to_date = not bool(cur.fetchone())
+
+    # Cheekily drop and recreate indices, as that is faster.
+    cur.execute("DROP INDEX local_current_membership_idx")
+    cur.execute("DROP INDEX local_current_membership_room_idx")
+
+    if current_state_membership_up_to_date:
+        sql = """
+            INSERT INTO local_current_membership (room_id, user_id, event_id, membership)
+                SELECT c.room_id, state_key AS user_id, event_id, c.membership
+                FROM current_state_events AS c
+                WHERE type = 'm.room.member' AND c.membership IS NOT NULL AND state_key like '%' || ?
+        """
+    else:
+        # We can't rely on the membership column, so we need to join against
+        # `room_memberships`.
+        sql = """
+            INSERT INTO local_current_membership (room_id, user_id, event_id, membership)
+                SELECT c.room_id, state_key AS user_id, event_id, r.membership
+                FROM current_state_events AS c
+                INNER JOIN room_memberships AS r USING (event_id)
+                WHERE type = 'm.room.member' and state_key like '%' || ?
+        """
+    cur.execute(sql, (config.server_name,))
+
+    cur.execute(
+        "CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership(user_id, room_id)"
+    )
+    cur.execute(
+        "CREATE INDEX local_current_membership_room_idx ON local_current_membership(room_id)"
+    )
+
+
+def run_create(cur, database_engine, *args, **kwargs):
+    cur.execute(
+        """
+        CREATE TABLE local_current_membership (
+            room_id TEXT NOT NULL,
+            user_id TEXT NOT NULL,
+            event_id TEXT NOT NULL,
+            membership TEXT NOT NULL
+        )"""
+    )
+
+    cur.execute(
+        "CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership(user_id, room_id)"
+    )
+    cur.execute(
+        "CREATE INDEX local_current_membership_room_idx ON local_current_membership(room_id)"
+    )
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index e70026b80a..e86984cd50 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -29,7 +29,7 @@ logger = logging.getLogger(__name__)
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 56
+SCHEMA_VERSION = 57
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index 758ee071a5..4cbe9784ed 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -32,8 +32,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
 
     def test_wait_for_sync_for_user_auth_blocking(self):
 
-        user_id1 = "@user1:server"
-        user_id2 = "@user2:server"
+        user_id1 = "@user1:test"
+        user_id2 = "@user2:test"
         sync_config = self._generate_sync_config(user_id1)
 
         self.reactor.advance(100)  # So we get a non-zero time
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index b68e9fe082..b1b037006d 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -115,13 +115,13 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
 
     def test_invites(self):
         self.persist(type="m.room.create", key="", creator=USER_ID)
-        self.check("get_invited_rooms_for_user", [USER_ID_2], [])
+        self.check("get_invited_rooms_for_local_user", [USER_ID_2], [])
         event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite")
 
         self.replicate()
 
         self.check(
-            "get_invited_rooms_for_user",
+            "get_invited_rooms_for_local_user",
             [USER_ID_2],
             [
                 RoomsForUser(
diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py
index 0f51895b81..c3facc00eb 100644
--- a/tests/rest/client/v2_alpha/test_account.py
+++ b/tests/rest/client/v2_alpha/test_account.py
@@ -285,7 +285,9 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
         )
 
         # Make sure the invite is here.
-        pending_invites = self.get_success(store.get_invited_rooms_for_user(invitee_id))
+        pending_invites = self.get_success(
+            store.get_invited_rooms_for_local_user(invitee_id)
+        )
         self.assertEqual(len(pending_invites), 1, pending_invites)
         self.assertEqual(pending_invites[0].room_id, room_id, pending_invites)
 
@@ -293,12 +295,16 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
         self.deactivate(invitee_id, invitee_tok)
 
         # Check that the invite isn't there anymore.
-        pending_invites = self.get_success(store.get_invited_rooms_for_user(invitee_id))
+        pending_invites = self.get_success(
+            store.get_invited_rooms_for_local_user(invitee_id)
+        )
         self.assertEqual(len(pending_invites), 0, pending_invites)
 
         # Check that the membership of @invitee:test in the room is now "leave".
         memberships = self.get_success(
-            store.get_rooms_for_user_where_membership_is(invitee_id, [Membership.LEAVE])
+            store.get_rooms_for_local_user_where_membership_is(
+                invitee_id, [Membership.LEAVE]
+            )
         )
         self.assertEqual(len(memberships), 1, memberships)
         self.assertEqual(memberships[0].room_id, room_id, memberships)
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
index 661c1f88b9..9c13a13786 100644
--- a/tests/rest/client/v2_alpha/test_sync.py
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -15,8 +15,6 @@
 # limitations under the License.
 import json
 
-from mock import Mock
-
 import synapse.rest.admin
 from synapse.api.constants import EventContentFields, EventTypes
 from synapse.rest.client.v1 import login, room
@@ -36,13 +34,6 @@ class FilterTestCase(unittest.HomeserverTestCase):
         sync.register_servlets,
     ]
 
-    def make_homeserver(self, reactor, clock):
-
-        hs = self.setup_test_homeserver(
-            "red", http_client=None, federation_client=Mock()
-        )
-        return hs
-
     def test_sync_argless(self):
         request, channel = self.make_request("GET", "/sync")
         self.render(request)
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 7840f63fe3..00df0ea68e 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -57,7 +57,7 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
         self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
 
         rooms_for_user = self.get_success(
-            self.store.get_rooms_for_user_where_membership_is(
+            self.store.get_rooms_for_local_user_where_membership_is(
                 self.u_alice, [Membership.JOIN]
             )
         )
-- 
cgit 1.4.1
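
The net effect of the refactoring above is that membership lookups for local
users collapse into a single query against `local_current_membership`,
replacing the old two-step union of `current_state_events` joins and the
`local_invites` table. A minimal, self-contained sketch of that query shape,
using plain sqlite3 rather than Synapse's database layer (only the table
definition is taken from the migration; the helper and data are illustrative):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        """CREATE TABLE local_current_membership (
            room_id TEXT NOT NULL,
            user_id TEXT NOT NULL,
            event_id TEXT NOT NULL,
            membership TEXT NOT NULL
        )"""
    )
    conn.execute(
        "INSERT INTO local_current_membership VALUES (?, ?, ?, ?)",
        ("!room:test", "@alice:test", "$event1", "invite"),
    )

    def rooms_where_membership_is(user_id, membership_list):
        # One IN-list filter now covers invites, joins, leaves and bans
        # alike, instead of special-casing Membership.INVITE via the old
        # local_invites table.
        placeholders = ",".join("?" * len(membership_list))
        sql = (
            "SELECT room_id, event_id, membership"
            " FROM local_current_membership"
            " WHERE user_id = ? AND membership IN (%s)" % placeholders
        )
        return conn.execute(sql, (user_id, *membership_list)).fetchall()

    print(rooms_where_membership_is("@alice:test", ["invite", "join"]))
    # [('!room:test', '$event1', 'invite')]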


From 8f5d7302acb7f6d15ba7051df7fd7fda7375a29e Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 15 Jan 2020 15:58:55 +0000
Subject: Implement RedirectException (#6687)

Allow REST endpoint implementations to raise a RedirectException, which will
redirect the user's browser to a given location.
---
 changelog.d/6687.misc  |  1 +
 synapse/api/errors.py  | 27 ++++++++++++++++-
 synapse/http/server.py | 13 ++++++---
 tests/test_server.py   | 79 ++++++++++++++++++++++++++++++++++++++++++++++++--
 4 files changed, 113 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6687.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6687.misc b/changelog.d/6687.misc
new file mode 100644
index 0000000000..deb0454602
--- /dev/null
+++ b/changelog.d/6687.misc
@@ -0,0 +1 @@
+Allow REST endpoint implementations to raise a RedirectException, which will redirect the user's browser to a given location.
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 9e9844b47c..1c9456e583 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -17,13 +17,15 @@
 """Contains exceptions and error codes."""
 
 import logging
-from typing import Dict
+from typing import Dict, List
 
 from six import iteritems
 from six.moves import http_client
 
 from canonicaljson import json
 
+from twisted.web import http
+
 logger = logging.getLogger(__name__)
 
 
@@ -80,6 +82,29 @@ class CodeMessageException(RuntimeError):
         self.msg = msg
 
 
+class RedirectException(CodeMessageException):
+    """A pseudo-error indicating that we want to redirect the client to a different
+    location
+
+    Attributes:
+        cookies: a list of Set-Cookie values to add to the response. For example:
+           b"sessionId=a3fWa; Expires=Wed, 21 Oct 2015 07:28:00 GMT"
+    """
+
+    def __init__(self, location: bytes, http_code: int = http.FOUND):
+        """
+
+        Args:
+            location: the URI to redirect to
+            http_code: the HTTP response code
+        """
+        msg = "Redirect to %s" % (location.decode("utf-8"),)
+        super().__init__(code=http_code, msg=msg)
+        self.location = location
+
+        self.cookies = []  # type: List[bytes]
+
+
 class SynapseError(CodeMessageException):
     """A base exception type for matrix errors which have an errcode and error
     message (as well as an HTTP status code).
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 943d12c907..04bc2385a2 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -14,8 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import cgi
 import collections
+import html
 import http.client
 import logging
 import types
@@ -36,6 +36,7 @@ import synapse.metrics
 from synapse.api.errors import (
     CodeMessageException,
     Codes,
+    RedirectException,
     SynapseError,
     UnrecognizedRequestError,
 )
@@ -153,14 +154,18 @@ def _return_html_error(f, request):
 
     Args:
         f (twisted.python.failure.Failure):
-        request (twisted.web.iweb.IRequest):
+        request (twisted.web.server.Request):
     """
     if f.check(CodeMessageException):
         cme = f.value
         code = cme.code
         msg = cme.msg
 
-        if isinstance(cme, SynapseError):
+        if isinstance(cme, RedirectException):
+            logger.info("%s redirect to %s", request, cme.location)
+            request.setHeader(b"location", cme.location)
+            request.cookies.extend(cme.cookies)
+        elif isinstance(cme, SynapseError):
             logger.info("%s SynapseError: %s - %s", request, code, msg)
         else:
             logger.error(
@@ -178,7 +183,7 @@ def _return_html_error(f, request):
             exc_info=(f.type, f.value, f.getTracebackObject()),
         )
 
-    body = HTML_ERROR_TEMPLATE.format(code=code, msg=cgi.escape(msg)).encode("utf-8")
+    body = HTML_ERROR_TEMPLATE.format(code=code, msg=html.escape(msg)).encode("utf-8")
     request.setResponseCode(code)
     request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
     request.setHeader(b"Content-Length", b"%i" % (len(body),))
diff --git a/tests/test_server.py b/tests/test_server.py
index 98fef21d55..0d57eed268 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -23,8 +23,12 @@ from twisted.test.proto_helpers import AccumulatingProtocol
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
-from synapse.api.errors import Codes, SynapseError
-from synapse.http.server import JsonResource
+from synapse.api.errors import Codes, RedirectException, SynapseError
+from synapse.http.server import (
+    DirectServeResource,
+    JsonResource,
+    wrap_html_request_handler,
+)
 from synapse.http.site import SynapseSite, logger
 from synapse.logging.context import make_deferred_yieldable
 from synapse.util import Clock
@@ -164,6 +168,77 @@ class JsonResourceTests(unittest.TestCase):
         self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
 
 
+class WrapHtmlRequestHandlerTests(unittest.TestCase):
+    class TestResource(DirectServeResource):
+        callback = None
+
+        @wrap_html_request_handler
+        async def _async_render_GET(self, request):
+            return await self.callback(request)
+
+    def setUp(self):
+        self.reactor = ThreadedMemoryReactorClock()
+
+    def test_good_response(self):
+        def callback(request):
+            request.write(b"response")
+            request.finish()
+
+        res = WrapHtmlRequestHandlerTests.TestResource()
+        res.callback = callback
+
+        request, channel = make_request(self.reactor, b"GET", b"/path")
+        render(request, res, self.reactor)
+
+        self.assertEqual(channel.result["code"], b"200")
+        body = channel.result["body"]
+        self.assertEqual(body, b"response")
+
+    def test_redirect_exception(self):
+        """
+        If the callback raises a RedirectException, it is turned into a 30x
+        with the right location.
+        """
+
+        def callback(request, **kwargs):
+            raise RedirectException(b"/look/an/eagle", 301)
+
+        res = WrapHtmlRequestHandlerTests.TestResource()
+        res.callback = callback
+
+        request, channel = make_request(self.reactor, b"GET", b"/path")
+        render(request, res, self.reactor)
+
+        self.assertEqual(channel.result["code"], b"301")
+        headers = channel.result["headers"]
+        location_headers = [v for k, v in headers if k == b"Location"]
+        self.assertEqual(location_headers, [b"/look/an/eagle"])
+
+    def test_redirect_exception_with_cookie(self):
+        """
+        If the callback raises a RedirectException which sets a cookie, that is
+        returned too
+        """
+
+        def callback(request, **kwargs):
+            e = RedirectException(b"/no/over/there", 304)
+            e.cookies.append(b"session=yespls")
+            raise e
+
+        res = WrapHtmlRequestHandlerTests.TestResource()
+        res.callback = callback
+
+        request, channel = make_request(self.reactor, b"GET", b"/path")
+        render(request, res, self.reactor)
+
+        self.assertEqual(channel.result["code"], b"304")
+        headers = channel.result["headers"]
+        location_headers = [v for k, v in headers if k == b"Location"]
+        self.assertEqual(location_headers, [b"/no/over/there"])
+        cookies_headers = [v for k, v in headers if k == b"Set-Cookie"]
+        self.assertEqual(cookies_headers, [b"session=yespls"])
+
+
 class SiteTestCase(unittest.HomeserverTestCase):
     def test_lose_connection(self):
         """
-- 
cgit 1.4.1
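
A hedged sketch of how an endpoint might use the new exception. The resource
plumbing (DirectServeResource, wrap_html_request_handler) is the same as in
the tests above; the login-redirect scenario itself is invented for
illustration:

    from synapse.api.errors import RedirectException
    from synapse.http.server import DirectServeResource, wrap_html_request_handler

    class LoginRedirectResource(DirectServeResource):
        @wrap_html_request_handler
        async def _async_render_GET(self, request):
            # Rather than rendering an HTML body, bounce the browser elsewhere.
            e = RedirectException(b"/idp/login", http_code=302)
            # Optionally attach a Set-Cookie value to the redirect response.
            e.cookies.append(b"session=abc123; HttpOnly")
            raise e

Because _return_html_error handles any CodeMessageException, raising the
exception anywhere inside the handler yields a 302 response with the Location
and Set-Cookie headers set, as exercised by the tests above.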


From edc244eec429d587eee28e336e0baae9f9de0e0a Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 15 Jan 2020 18:05:18 +0000
Subject: Remove duplicate session check in web fallback servlet (#6702)

---
 changelog.d/6702.misc                | 1 +
 synapse/rest/client/v2_alpha/auth.py | 4 ----
 2 files changed, 1 insertion(+), 4 deletions(-)
 create mode 100644 changelog.d/6702.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6702.misc b/changelog.d/6702.misc
new file mode 100644
index 0000000000..f7bc98409c
--- /dev/null
+++ b/changelog.d/6702.misc
@@ -0,0 +1 @@
+Remove duplicate check for the `session` query parameter on the `/auth/xxx/fallback/web` Client-Server endpoint.
\ No newline at end of file
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index 7a256b6ecb..50e080673b 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -206,10 +206,6 @@ class AuthRestServlet(RestServlet):
 
             return None
         elif stagetype == LoginType.TERMS:
-            if ("session" not in request.args or len(request.args["session"])) == 0:
-                raise SynapseError(400, "No session supplied")
-
-            session = request.args["session"][0]
             authdict = {"session": session}
 
             success = await self.auth_handler.add_oob_auth(
-- 
cgit 1.4.1
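
Worth noting: the deleted guard was not only redundant but mis-parenthesised.
The `== 0` applied to the whole `or` expression rather than to `len(...)`, so
it compared a boolean to zero. For reference, the intended form of such a
check would be:

    if "session" not in request.args or len(request.args["session"]) == 0:
        raise SynapseError(400, "No session supplied")
    session = request.args["session"][0]

The endpoint now relies on the equivalent check performed once, earlier in
the handler, hence "duplicate" in the subject line.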


From 19a1aac48cc83fe41287a97bb0a96280a0e8c565 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 15 Jan 2020 18:13:47 +0000
Subject: Fix purge_room admin API (#6711)

---
 changelog.d/6711.bugfix         | 1 +
 synapse/storage/purge_events.py | 2 +-
 tests/rest/admin/test_admin.py  | 4 +---
 3 files changed, 3 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6711.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6711.bugfix b/changelog.d/6711.bugfix
new file mode 100644
index 0000000000..c70506bd88
--- /dev/null
+++ b/changelog.d/6711.bugfix
@@ -0,0 +1 @@
+Fix `purge_room` admin API.
diff --git a/synapse/storage/purge_events.py b/synapse/storage/purge_events.py
index d6a7bd7834..fdc0abf5cf 100644
--- a/synapse/storage/purge_events.py
+++ b/synapse/storage/purge_events.py
@@ -34,7 +34,7 @@ class PurgeEventsStorage(object):
         """
 
         state_groups_to_delete = yield self.stores.main.purge_room(room_id)
-        yield self.stores.main.purge_room_state(room_id, state_groups_to_delete)
+        yield self.stores.state.purge_room_state(room_id, state_groups_to_delete)
 
     @defer.inlineCallbacks
     def purge_history(self, room_id, token, delete_local_events):
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 7a7e898843..f3b4a31e21 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -337,7 +337,7 @@ class PurgeRoomTestCase(unittest.HomeserverTestCase):
             "local_invites",
             "room_account_data",
             "room_tags",
-            "state_groups",
+            # "state_groups",  # Current impl leaves orphaned state groups around.
             "state_groups_state",
         ):
             count = self.get_success(
@@ -351,8 +351,6 @@ class PurgeRoomTestCase(unittest.HomeserverTestCase):
 
             self.assertEqual(count, 0, msg="Rows not purged in {}".format(table))
 
-    test_purge_room.skip = "Disabled because it's currently broken"
-
 
 class QuarantineMediaTestCase(unittest.HomeserverTestCase):
     """Test /quarantine_media admin API.
-- 
cgit 1.4.1
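
The one-line fix above is a consequence of Synapse's split into multiple data
stores: `purge_room_state` lives on the state store, so invoking it via
`self.stores.main` could never succeed (presumably failing with an
AttributeError). The corrected flow, with commentary added here for
illustration:

    @defer.inlineCallbacks
    def purge_room(self, room_id):
        # The main store deletes the room's events and returns the IDs of
        # the state groups that are now orphaned...
        state_groups_to_delete = yield self.stores.main.purge_room(room_id)
        # ...and the *state* store, which owns the state group tables, is
        # the one that knows how to purge them.
        yield self.stores.state.purge_room_state(room_id, state_groups_to_delete)

As the re-enabled test notes, the current implementation still leaves
orphaned rows in `state_groups` itself.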


From 914e73cdd9053d6fd050e5ad04910db74a7b5cd9 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 15 Jan 2020 19:36:19 +0000
Subject: Changelog

---
 changelog.d/6713.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6713.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6713.bugfix b/changelog.d/6713.bugfix
new file mode 100644
index 0000000000..3924f1ad79
--- /dev/null
+++ b/changelog.d/6713.bugfix
@@ -0,0 +1 @@
+Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies.
-- 
cgit 1.4.1


From 48e57a6452be3fef4372832f9e8f8f630325a648 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 15 Jan 2020 19:40:46 +0000
Subject: Rename changelog

---
 changelog.d/6713.bugfix | 1 -
 changelog.d/6714.bugfix | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)
 delete mode 100644 changelog.d/6713.bugfix
 create mode 100644 changelog.d/6714.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6713.bugfix b/changelog.d/6713.bugfix
deleted file mode 100644
index 3924f1ad79..0000000000
--- a/changelog.d/6713.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies.
diff --git a/changelog.d/6714.bugfix b/changelog.d/6714.bugfix
new file mode 100644
index 0000000000..3924f1ad79
--- /dev/null
+++ b/changelog.d/6714.bugfix
@@ -0,0 +1 @@
+Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies.
-- 
cgit 1.4.1


From 48c3a96886de64f3141ad68b8163cd2fc0c197ff Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 16 Jan 2020 09:16:12 +0000
Subject: Port synapse.replication.tcp to async/await (#6666)

* Port synapse.replication.tcp to async/await

* Newsfile

* Correctly document type of on_ functions as async

* Don't be overenthusiastic with the asyncing....
---
 changelog.d/6666.misc                     |  1 +
 synapse/app/admin_cmd.py                  |  3 +-
 synapse/app/appservice.py                 |  5 +--
 synapse/app/federation_sender.py          |  5 +--
 synapse/app/pusher.py                     |  5 +--
 synapse/app/synchrotron.py                |  5 +--
 synapse/app/user_dir.py                   |  5 +--
 synapse/federation/send_queue.py          |  4 +-
 synapse/handlers/typing.py                |  2 +-
 synapse/replication/tcp/client.py         | 11 ++---
 synapse/replication/tcp/protocol.py       | 72 ++++++++++++++-----------------
 synapse/replication/tcp/resource.py       | 31 ++++++-------
 synapse/replication/tcp/streams/_base.py  | 25 +++++------
 synapse/replication/tcp/streams/events.py |  9 ++--
 tests/replication/tcp/streams/_base.py    |  2 +-
 15 files changed, 80 insertions(+), 105 deletions(-)
 create mode 100644 changelog.d/6666.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6666.misc b/changelog.d/6666.misc
new file mode 100644
index 0000000000..e79c23d2d2
--- /dev/null
+++ b/changelog.d/6666.misc
@@ -0,0 +1 @@
+Port `synapse.replication.tcp` to async/await.
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 8e36bc57d3..1c7c6ec0c8 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -84,8 +84,7 @@ class AdminCmdServer(HomeServer):
 
 
 class AdminCmdReplicationHandler(ReplicationClientHandler):
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
+    async def on_rdata(self, stream_name, token, rows):
         pass
 
     def get_streams_to_replicate(self):
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index e82e0f11e3..2217d4a4fb 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -115,9 +115,8 @@ class ASReplicationHandler(ReplicationClientHandler):
         super(ASReplicationHandler, self).__init__(hs.get_datastore())
         self.appservice_handler = hs.get_application_service_handler()
 
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
-        yield super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
+    async def on_rdata(self, stream_name, token, rows):
+        await super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
 
         if stream_name == "events":
             max_stream_id = self.store.get_room_max_stream_ordering()
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index 83c436229c..a57cf991ac 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -145,9 +145,8 @@ class FederationSenderReplicationHandler(ReplicationClientHandler):
         super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
         self.send_handler = FederationSenderHandler(hs, self)
 
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
-        yield super(FederationSenderReplicationHandler, self).on_rdata(
+    async def on_rdata(self, stream_name, token, rows):
+        await super(FederationSenderReplicationHandler, self).on_rdata(
             stream_name, token, rows
         )
         self.send_handler.process_replication_rows(stream_name, token, rows)
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 09e639040a..e46b6ac598 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -141,9 +141,8 @@ class PusherReplicationHandler(ReplicationClientHandler):
 
         self.pusher_pool = hs.get_pusherpool()
 
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
-        yield super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
+    async def on_rdata(self, stream_name, token, rows):
+        await super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
         run_in_background(self.poke_pushers, stream_name, token, rows)
 
     @defer.inlineCallbacks
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 03031ee34d..3218da07bd 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -358,9 +358,8 @@ class SyncReplicationHandler(ReplicationClientHandler):
         self.presence_handler = hs.get_presence_handler()
         self.notifier = hs.get_notifier()
 
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
-        yield super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
+    async def on_rdata(self, stream_name, token, rows):
+        await super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
         run_in_background(self.process_and_notify, stream_name, token, rows)
 
     def get_streams_to_replicate(self):
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index 1257098f92..ba536d6f04 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -172,9 +172,8 @@ class UserDirectoryReplicationHandler(ReplicationClientHandler):
         super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
         self.user_directory = hs.get_user_directory_handler()
 
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
-        yield super(UserDirectoryReplicationHandler, self).on_rdata(
+    async def on_rdata(self, stream_name, token, rows):
+        await super(UserDirectoryReplicationHandler, self).on_rdata(
             stream_name, token, rows
         )
         if stream_name == EventsStream.NAME:
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index ced4925a98..174f6e42be 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -259,7 +259,9 @@ class FederationRemoteSendQueue(object):
     def federation_ack(self, token):
         self._clear_queue_before_pos(token)
 
-    def get_replication_rows(self, from_token, to_token, limit, federation_ack=None):
+    async def get_replication_rows(
+        self, from_token, to_token, limit, federation_ack=None
+    ):
         """Get rows to be sent over federation between the two tokens
 
         Args:
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index b635c339ed..d5ca9cb07b 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -257,7 +257,7 @@ class TypingHandler(object):
             "typing_key", self._latest_room_serial, rooms=[member.room_id]
         )
 
-    def get_all_typing_updates(self, last_id, current_id):
+    async def get_all_typing_updates(self, last_id, current_id):
         if last_id == current_id:
             return []
 
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index aa7fd90e26..52a0aefe68 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -110,7 +110,7 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
         port = hs.config.worker_replication_port
         hs.get_reactor().connectTCP(host, port, self.factory)
 
-    def on_rdata(self, stream_name, token, rows):
+    async def on_rdata(self, stream_name, token, rows):
         """Called to handle a batch of replication data with a given stream token.
 
         By default this just pokes the slave store. Can be overridden in subclasses to
@@ -121,20 +121,17 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
             token (int): stream token for this batch of rows
             rows (list): a list of Stream.ROW_TYPE objects as returned by
                 Stream.parse_row.
-
-        Returns:
-            Deferred|None
         """
         logger.debug("Received rdata %s -> %s", stream_name, token)
-        return self.store.process_replication_rows(stream_name, token, rows)
+        self.store.process_replication_rows(stream_name, token, rows)
 
-    def on_position(self, stream_name, token):
+    async def on_position(self, stream_name, token):
         """Called when we get new position data. By default this just pokes
         the slave store.
 
         Can be overridden in subclasses to handle more.
         """
-        return self.store.process_replication_rows(stream_name, token, [])
+        self.store.process_replication_rows(stream_name, token, [])
 
     def on_sync(self, data):
         """When we received a SYNC we wake up any deferreds that were waiting
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index db0353c996..5f4bdf84d2 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -81,12 +81,11 @@ from synapse.replication.tcp.commands import (
     SyncCommand,
     UserSyncCommand,
 )
+from synapse.replication.tcp.streams import STREAMS_MAP
 from synapse.types import Collection
 from synapse.util import Clock
 from synapse.util.stringutils import random_string
 
-from .streams import STREAMS_MAP
-
 connection_close_counter = Counter(
     "synapse_replication_tcp_protocol_close_reason", "", ["reason_type"]
 )
@@ -241,19 +240,16 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
             "replication-" + cmd.get_logcontext_id(), self.handle_command, cmd
         )
 
-    def handle_command(self, cmd):
+    async def handle_command(self, cmd: Command):
         """Handle a command we have received over the replication stream.
 
-        By default delegates to on_<COMMAND>
+        By default delegates to on_<COMMAND>, which should return an awaitable.
 
         Args:
-            cmd (synapse.replication.tcp.commands.Command): received command
-
-        Returns:
-            Deferred
+            cmd: received command
         """
         handler = getattr(self, "on_%s" % (cmd.NAME,))
-        return handler(cmd)
+        await handler(cmd)
 
     def close(self):
         logger.warning("[%s] Closing connection", self.id())
@@ -326,10 +322,10 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         for cmd in pending:
             self.send_command(cmd)
 
-    def on_PING(self, line):
+    async def on_PING(self, line):
         self.received_ping = True
 
-    def on_ERROR(self, cmd):
+    async def on_ERROR(self, cmd):
         logger.error("[%s] Remote reported error: %r", self.id(), cmd.data)
 
     def pauseProducing(self):
@@ -429,16 +425,16 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
         BaseReplicationStreamProtocol.connectionMade(self)
         self.streamer.new_connection(self)
 
-    def on_NAME(self, cmd):
+    async def on_NAME(self, cmd):
         logger.info("[%s] Renamed to %r", self.id(), cmd.data)
         self.name = cmd.data
 
-    def on_USER_SYNC(self, cmd):
-        return self.streamer.on_user_sync(
+    async def on_USER_SYNC(self, cmd):
+        await self.streamer.on_user_sync(
             self.conn_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms
         )
 
-    def on_REPLICATE(self, cmd):
+    async def on_REPLICATE(self, cmd):
         stream_name = cmd.stream_name
         token = cmd.token
 
@@ -449,23 +445,23 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
                 for stream in iterkeys(self.streamer.streams_by_name)
             ]
 
-            return make_deferred_yieldable(
+            await make_deferred_yieldable(
                 defer.gatherResults(deferreds, consumeErrors=True)
             )
         else:
-            return self.subscribe_to_stream(stream_name, token)
+            await self.subscribe_to_stream(stream_name, token)
 
-    def on_FEDERATION_ACK(self, cmd):
-        return self.streamer.federation_ack(cmd.token)
+    async def on_FEDERATION_ACK(self, cmd):
+        self.streamer.federation_ack(cmd.token)
 
-    def on_REMOVE_PUSHER(self, cmd):
-        return self.streamer.on_remove_pusher(cmd.app_id, cmd.push_key, cmd.user_id)
+    async def on_REMOVE_PUSHER(self, cmd):
+        await self.streamer.on_remove_pusher(cmd.app_id, cmd.push_key, cmd.user_id)
 
-    def on_INVALIDATE_CACHE(self, cmd):
-        return self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys)
+    async def on_INVALIDATE_CACHE(self, cmd):
+        self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys)
 
-    def on_USER_IP(self, cmd):
-        return self.streamer.on_user_ip(
+    async def on_USER_IP(self, cmd):
+        self.streamer.on_user_ip(
             cmd.user_id,
             cmd.access_token,
             cmd.ip,
@@ -474,8 +470,7 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
             cmd.last_seen,
         )
 
-    @defer.inlineCallbacks
-    def subscribe_to_stream(self, stream_name, token):
+    async def subscribe_to_stream(self, stream_name, token):
         """Subscribe the remote to a stream.
 
         This involves checking if they've missed anything and sending those
@@ -487,7 +482,7 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
 
         try:
             # Get missing updates
-            updates, current_token = yield self.streamer.get_stream_updates(
+            updates, current_token = await self.streamer.get_stream_updates(
                 stream_name, token
             )
 
@@ -572,7 +567,7 @@ class AbstractReplicationClientHandler(metaclass=abc.ABCMeta):
     """
 
     @abc.abstractmethod
-    def on_rdata(self, stream_name, token, rows):
+    async def on_rdata(self, stream_name, token, rows):
         """Called to handle a batch of replication data with a given stream token.
 
         Args:
@@ -580,14 +575,11 @@ class AbstractReplicationClientHandler(metaclass=abc.ABCMeta):
             token (int): stream token for this batch of rows
             rows (list): a list of Stream.ROW_TYPE objects as returned by
                 Stream.parse_row.
-
-        Returns:
-            Deferred|None
         """
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def on_position(self, stream_name, token):
+    async def on_position(self, stream_name, token):
         """Called when we get new position data."""
         raise NotImplementedError()
 
@@ -676,12 +668,12 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
         if not self.streams_connecting:
             self.handler.finished_connecting()
 
-    def on_SERVER(self, cmd):
+    async def on_SERVER(self, cmd):
         if cmd.data != self.server_name:
             logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data)
             self.send_error("Wrong remote")
 
-    def on_RDATA(self, cmd):
+    async def on_RDATA(self, cmd):
         stream_name = cmd.stream_name
         inbound_rdata_count.labels(stream_name).inc()
 
@@ -701,19 +693,19 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
             # Check if this is the last of a batch of updates
             rows = self.pending_batches.pop(stream_name, [])
             rows.append(row)
-            return self.handler.on_rdata(stream_name, cmd.token, rows)
+            await self.handler.on_rdata(stream_name, cmd.token, rows)
 
-    def on_POSITION(self, cmd):
+    async def on_POSITION(self, cmd):
         # When we get a `POSITION` command it means we've finished getting
         # missing updates for the given stream, and are now up to date.
         self.streams_connecting.discard(cmd.stream_name)
         if not self.streams_connecting:
             self.handler.finished_connecting()
 
-        return self.handler.on_position(cmd.stream_name, cmd.token)
+        await self.handler.on_position(cmd.stream_name, cmd.token)
 
-    def on_SYNC(self, cmd):
-        return self.handler.on_sync(cmd.data)
+    async def on_SYNC(self, cmd):
+        self.handler.on_sync(cmd.data)
 
     def replicate(self, stream_name, token):
         """Send the subscription request to the server
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index cbfdaf5773..b1752e88cd 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -23,7 +23,6 @@ from six import itervalues
 
 from prometheus_client import Counter
 
-from twisted.internet import defer
 from twisted.internet.protocol import Factory
 
 from synapse.metrics import LaterGauge
@@ -155,8 +154,7 @@ class ReplicationStreamer(object):
 
         run_as_background_process("replication_notifier", self._run_notifier_loop)
 
-    @defer.inlineCallbacks
-    def _run_notifier_loop(self):
+    async def _run_notifier_loop(self):
         self.is_looping = True
 
         try:
@@ -185,7 +183,7 @@ class ReplicationStreamer(object):
                             continue
 
                         if self._replication_torture_level:
-                            yield self.clock.sleep(
+                            await self.clock.sleep(
                                 self._replication_torture_level / 1000.0
                             )
 
@@ -196,7 +194,7 @@ class ReplicationStreamer(object):
                             stream.upto_token,
                         )
                         try:
-                            updates, current_token = yield stream.get_updates()
+                            updates, current_token = await stream.get_updates()
                         except Exception:
                             logger.info("Failed to handle stream %s", stream.NAME)
                             raise
@@ -233,7 +231,7 @@ class ReplicationStreamer(object):
             self.is_looping = False
 
     @measure_func("repl.get_stream_updates")
-    def get_stream_updates(self, stream_name, token):
+    async def get_stream_updates(self, stream_name, token):
         """For a given stream get all updates since token. This is called when
         a client first subscribes to a stream.
         """
@@ -241,7 +239,7 @@ class ReplicationStreamer(object):
         if not stream:
             raise Exception("unknown stream %s", stream_name)
 
-        return stream.get_updates_since(token)
+        return await stream.get_updates_since(token)
 
     @measure_func("repl.federation_ack")
     def federation_ack(self, token):
@@ -252,22 +250,20 @@ class ReplicationStreamer(object):
             self.federation_sender.federation_ack(token)
 
     @measure_func("repl.on_user_sync")
-    @defer.inlineCallbacks
-    def on_user_sync(self, conn_id, user_id, is_syncing, last_sync_ms):
+    async def on_user_sync(self, conn_id, user_id, is_syncing, last_sync_ms):
         """A client has started/stopped syncing on a worker.
         """
         user_sync_counter.inc()
-        yield self.presence_handler.update_external_syncs_row(
+        await self.presence_handler.update_external_syncs_row(
             conn_id, user_id, is_syncing, last_sync_ms
         )
 
     @measure_func("repl.on_remove_pusher")
-    @defer.inlineCallbacks
-    def on_remove_pusher(self, app_id, push_key, user_id):
+    async def on_remove_pusher(self, app_id, push_key, user_id):
         """A client has asked us to remove a pusher
         """
         remove_pusher_counter.inc()
-        yield self.store.delete_pusher_by_app_id_pushkey_user_id(
+        await self.store.delete_pusher_by_app_id_pushkey_user_id(
             app_id=app_id, pushkey=push_key, user_id=user_id
         )
 
@@ -281,15 +277,16 @@ class ReplicationStreamer(object):
         getattr(self.store, cache_func).invalidate(tuple(keys))
 
     @measure_func("repl.on_user_ip")
-    @defer.inlineCallbacks
-    def on_user_ip(self, user_id, access_token, ip, user_agent, device_id, last_seen):
+    async def on_user_ip(
+        self, user_id, access_token, ip, user_agent, device_id, last_seen
+    ):
         """The client saw a user request
         """
         user_ip_cache_counter.inc()
-        yield self.store.insert_client_ip(
+        await self.store.insert_client_ip(
             user_id, access_token, ip, user_agent, device_id, last_seen
         )
-        yield self._server_notices_sender.on_user_ip(user_id)
+        await self._server_notices_sender.on_user_ip(user_id)
 
     def send_sync_to_all_connections(self, data):
         """Sends a SYNC command to all clients.
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 4ab0334fc1..e03e77199b 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -19,8 +19,6 @@ import logging
 from collections import namedtuple
 from typing import Any
 
-from twisted.internet import defer
-
 logger = logging.getLogger(__name__)
 
 
@@ -144,8 +142,7 @@ class Stream(object):
         self.upto_token = self.current_token()
         self.last_token = self.upto_token
 
-    @defer.inlineCallbacks
-    def get_updates(self):
+    async def get_updates(self):
         """Gets all updates since the last time this function was called (or
         since the stream was constructed if it hadn't been called before),
         until the `upto_token`
@@ -156,13 +153,12 @@ class Stream(object):
                 list of ``(token, row)`` entries. ``row`` will be json-serialised and
                 sent over the replication steam.
         """
-        updates, current_token = yield self.get_updates_since(self.last_token)
+        updates, current_token = await self.get_updates_since(self.last_token)
         self.last_token = current_token
 
         return updates, current_token
 
-    @defer.inlineCallbacks
-    def get_updates_since(self, from_token):
+    async def get_updates_since(self, from_token):
         """Like get_updates except allows specifying from when we should
         stream updates
 
@@ -182,15 +178,16 @@ class Stream(object):
         if from_token == current_token:
             return [], current_token
 
+        logger.info("get_updates_since: %s", self.__class__)
         if self._LIMITED:
-            rows = yield self.update_function(
+            rows = await self.update_function(
                 from_token, current_token, limit=MAX_EVENTS_BEHIND + 1
             )
 
             # never turn more than MAX_EVENTS_BEHIND + 1 into updates.
             rows = itertools.islice(rows, MAX_EVENTS_BEHIND + 1)
         else:
-            rows = yield self.update_function(from_token, current_token)
+            rows = await self.update_function(from_token, current_token)
 
         updates = [(row[0], row[1:]) for row in rows]
 
@@ -295,9 +292,8 @@ class PushRulesStream(Stream):
         push_rules_token, _ = self.store.get_push_rules_stream_token()
         return push_rules_token
 
-    @defer.inlineCallbacks
-    def update_function(self, from_token, to_token, limit):
-        rows = yield self.store.get_all_push_rule_updates(from_token, to_token, limit)
+    async def update_function(self, from_token, to_token, limit):
+        rows = await self.store.get_all_push_rule_updates(from_token, to_token, limit)
         return [(row[0], row[2]) for row in rows]
 
 
@@ -413,9 +409,8 @@ class AccountDataStream(Stream):
 
         super(AccountDataStream, self).__init__(hs)
 
-    @defer.inlineCallbacks
-    def update_function(self, from_token, to_token, limit):
-        global_results, room_results = yield self.store.get_all_updated_account_data(
+    async def update_function(self, from_token, to_token, limit):
+        global_results, room_results = await self.store.get_all_updated_account_data(
             from_token, from_token, to_token, limit
         )
 
diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py
index 0843e5aa90..b3afabb8cd 100644
--- a/synapse/replication/tcp/streams/events.py
+++ b/synapse/replication/tcp/streams/events.py
@@ -19,8 +19,6 @@ from typing import Tuple, Type
 
 import attr
 
-from twisted.internet import defer
-
 from ._base import Stream
 
 
@@ -122,16 +120,15 @@ class EventsStream(Stream):
 
         super(EventsStream, self).__init__(hs)
 
-    @defer.inlineCallbacks
-    def update_function(self, from_token, current_token, limit=None):
-        event_rows = yield self._store.get_all_new_forward_event_rows(
+    async def update_function(self, from_token, current_token, limit=None):
+        event_rows = await self._store.get_all_new_forward_event_rows(
             from_token, current_token, limit
         )
         event_updates = (
             (row[0], EventsStreamEventRow.TypeId, row[1:]) for row in event_rows
         )
 
-        state_rows = yield self._store.get_all_updated_current_state_deltas(
+        state_rows = await self._store.get_all_updated_current_state_deltas(
             from_token, current_token, limit
         )
         state_updates = (
diff --git a/tests/replication/tcp/streams/_base.py b/tests/replication/tcp/streams/_base.py
index 1d14e77255..e96ad4ca4e 100644
--- a/tests/replication/tcp/streams/_base.py
+++ b/tests/replication/tcp/streams/_base.py
@@ -73,6 +73,6 @@ class TestReplicationClientHandler(object):
     def finished_connecting(self):
         pass
 
-    def on_rdata(self, stream_name, token, rows):
+    async def on_rdata(self, stream_name, token, rows):
         for r in rows:
             self.received_rdata_rows.append((stream_name, token, r))
-- 
cgit 1.4.1
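
The conversion above is mechanical: each @defer.inlineCallbacks generator
becomes a native coroutine, replacing "yield" with "await" (Twisted
Deferreds can be awaited directly from a coroutine). A minimal sketch of
the before and after shapes; the store class and its method are
illustrative stand-ins, not Synapse APIs:

    from twisted.internet import defer

    class FakeStore:
        """Illustrative stand-in for a Synapse data store."""

        def count_users(self):
            # Returns an already-fired Deferred, as a real store method
            # would return a Deferred from a database call.
            return defer.succeed(42)

    # Old style: an inlineCallbacks generator; 'yield' unwraps each Deferred.
    @defer.inlineCallbacks
    def get_user_count_old(store):
        count = yield store.count_users()
        return count

    # New style: a native coroutine; the Deferred is awaited directly.
    async def get_user_count_new(store):
        count = await store.count_users()
        return count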


From 38e0e59f42de03b25ce84a95a578a8cdbe75ceb4 Mon Sep 17 00:00:00 2001
From: Neil Johnson 
Date: Thu, 16 Jan 2020 09:46:14 +0000
Subject: Add org.matrix.e2e_cross_signing to unstable_features in /versions as
 per MSC1756 (#6712)

---
 changelog.d/6712.feature        | 1 +
 synapse/rest/client/versions.py | 2 ++
 2 files changed, 3 insertions(+)
 create mode 100644 changelog.d/6712.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6712.feature b/changelog.d/6712.feature
new file mode 100644
index 0000000000..2cce0ecf88
--- /dev/null
+++ b/changelog.d/6712.feature
@@ -0,0 +1 @@
+Add org.matrix.e2e_cross_signing to unstable_features in /versions as per [MSC1756](https://github.com/matrix-org/matrix-doc/pull/1756).
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 2a477ad22e..3d0fefb4df 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -71,6 +71,8 @@ class VersionsRestServlet(RestServlet):
                     # Implements support for label-based filtering as described in
                     # MSC2326.
                     "org.matrix.label_based_filtering": True,
+                    # Implements support for cross signing as described in MSC1756
+                    "org.matrix.e2e_cross_signing": True,
                 },
             },
         )
-- 
cgit 1.4.1
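
Clients can detect this capability by inspecting the unstable_features map
returned by the unauthenticated /_matrix/client/versions endpoint. A short
sketch using the requests library; the homeserver URL is a placeholder:

    import requests

    # Fetch the homeserver's /versions response and check whether it
    # advertises unstable cross-signing support (MSC1756).
    resp = requests.get("https://matrix.example.com/_matrix/client/versions")
    resp.raise_for_status()
    features = resp.json().get("unstable_features", {})

    if features.get("org.matrix.e2e_cross_signing"):
        print("homeserver advertises cross-signing (unstable)")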


From 7b14c4a0189dde1a6e7e077e2206c61bfa4b8b01 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 16 Jan 2020 09:46:36 +0000
Subject: Add tips for the changelog to the pull request template (#6663)

---
 .github/PULL_REQUEST_TEMPLATE.md | 6 +++++-
 CONTRIBUTING.md                  | 4 ++--
 changelog.d/6663.doc             | 1 +
 3 files changed, 8 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6663.doc

(limited to 'changelog.d')

diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 11fb05ca96..fc22d89426 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -3,6 +3,10 @@
 
 
 * [ ] Pull request is based on the develop branch
-* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog)
+* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog). The entry should:
+  - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
+  - Use markdown where necessary, mostly for `code blocks`.
+  - End with either a period (.) or an exclamation mark (!).
+  - Start with a capital letter.
 * [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#sign-off)
 * [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#code-style))
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c0091346f3..5736ede6c4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -101,8 +101,8 @@ in the format of `PRnumber.type`. The type can be one of the following:
 The content of the file is your changelog entry, which should be a short
 description of your change in the same style as the rest of our [changelog](
 https://github.com/matrix-org/synapse/blob/master/CHANGES.md). The file can
-contain Markdown formatting, and should end with a full stop ('.') for
-consistency.
+contain Markdown formatting, and should end with a full stop (.) or an
+exclamation mark (!) for consistency.
 
 Adding credits to the changelog is encouraged, we value your
 contributions and would like to have you shouted out in the release notes!
diff --git a/changelog.d/6663.doc b/changelog.d/6663.doc
new file mode 100644
index 0000000000..83b9c1626a
--- /dev/null
+++ b/changelog.d/6663.doc
@@ -0,0 +1 @@
+Add some helpful tips about changelog entries to the GitHub pull request template.
\ No newline at end of file
-- 
cgit 1.4.1
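
As the template and CONTRIBUTING.md describe, a changelog entry is a
one-line file named changelog.d/<PR number>.<type>. A sketch of creating
one; the PR number and entry text are invented for illustration:

    from pathlib import Path

    # Create a changelog entry for a hypothetical PR #1234 of type
    # "feature". The entry should be user-facing, start with a capital
    # letter, and end with "." or "!".
    Path("changelog.d").mkdir(exist_ok=True)
    Path("changelog.d/1234.feature").write_text(
        "Add support for frobnicating widgets.\n"
    )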


From d386f2f339c839ff6ec8d656492dd635dc26f811 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 16 Jan 2020 13:31:22 +0000
Subject: Add StateMap type alias (#6715)

---
 changelog.d/6715.misc                              |  1 +
 synapse/api/auth.py                                |  8 +---
 synapse/events/snapshot.py                         | 11 +++--
 synapse/federation/sender/per_destination_queue.py |  3 +-
 synapse/handlers/admin.py                          | 25 ++++-------
 synapse/handlers/federation.py                     | 10 ++---
 synapse/handlers/room.py                           | 24 +++++++---
 synapse/state/__init__.py                          |  5 ++-
 synapse/state/v1.py                                |  5 ++-
 synapse/state/v2.py                                |  9 ++--
 synapse/storage/data_stores/main/state.py          | 11 ++---
 synapse/storage/data_stores/state/store.py         | 52 ++++++++++++----------
 synapse/storage/state.py                           | 35 +++++++++------
 synapse/types.py                                   |  9 +++-
 14 files changed, 115 insertions(+), 93 deletions(-)
 create mode 100644 changelog.d/6715.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6715.misc b/changelog.d/6715.misc
new file mode 100644
index 0000000000..8876b0446d
--- /dev/null
+++ b/changelog.d/6715.misc
@@ -0,0 +1 @@
+Add StateMap type alias to simplify types.
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index abbc7079a3..2cbfab2569 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -14,7 +14,6 @@
 # limitations under the License.
 
 import logging
-from typing import Dict, Tuple
 
 from six import itervalues
 
@@ -35,7 +34,7 @@ from synapse.api.errors import (
     ResourceLimitError,
 )
 from synapse.config.server import is_threepid_reserved
-from synapse.types import UserID
+from synapse.types import StateMap, UserID
 from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.metrics import Measure
@@ -509,10 +508,7 @@ class Auth(object):
         return self.store.is_server_admin(user)
 
     def compute_auth_events(
-        self,
-        event,
-        current_state_ids: Dict[Tuple[str, str], str],
-        for_verification: bool = False,
+        self, event, current_state_ids: StateMap[str], for_verification: bool = False,
     ):
         """Given an event and current state return the list of event IDs used
         to auth an event.
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index a44baea365..9ea85e93e6 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Dict, Optional, Tuple, Union
+from typing import Optional, Union
 
 from six import iteritems
 
@@ -23,6 +23,7 @@ from twisted.internet import defer
 
 from synapse.appservice import ApplicationService
 from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.types import StateMap
 
 
 @attr.s(slots=True)
@@ -106,13 +107,11 @@ class EventContext:
     _state_group = attr.ib(default=None, type=Optional[int])
     state_group_before_event = attr.ib(default=None, type=Optional[int])
     prev_group = attr.ib(default=None, type=Optional[int])
-    delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
+    delta_ids = attr.ib(default=None, type=Optional[StateMap[str]])
     app_service = attr.ib(default=None, type=Optional[ApplicationService])
 
-    _current_state_ids = attr.ib(
-        default=None, type=Optional[Dict[Tuple[str, str], str]]
-    )
-    _prev_state_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
+    _current_state_ids = attr.ib(default=None, type=Optional[StateMap[str]])
+    _prev_state_ids = attr.ib(default=None, type=Optional[StateMap[str]])
 
     @staticmethod
     def with_state(
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index a5b36b1827..5012aaea35 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -31,6 +31,7 @@ from synapse.handlers.presence import format_user_presence_state
 from synapse.metrics import sent_transactions_counter
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.presence import UserPresenceState
+from synapse.types import StateMap
 from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
 
 # This is defined in the Matrix spec and enforced by the receiver.
@@ -77,7 +78,7 @@ class PerDestinationQueue(object):
         # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
         # based on their key (e.g. typing events by room_id)
         # Map of (edu_type, key) -> Edu
-        self._pending_edus_keyed = {}  # type: dict[tuple[str, str], Edu]
+        self._pending_edus_keyed = {}  # type: StateMap[Edu]
 
         # Map of user_id -> UserPresenceState of pending presence to be sent to this
         # destination
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index a9407553b4..60a7c938bc 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -14,9 +14,11 @@
 # limitations under the License.
 
 import logging
+from typing import List
 
 from synapse.api.constants import Membership
-from synapse.types import RoomStreamToken
+from synapse.events import FrozenEvent
+from synapse.types import RoomStreamToken, StateMap
 from synapse.visibility import filter_events_for_client
 
 from ._base import BaseHandler
@@ -259,35 +261,26 @@ class ExfiltrationWriter(object):
     """Interface used to specify how to write exported data.
     """
 
-    def write_events(self, room_id, events):
+    def write_events(self, room_id: str, events: List[FrozenEvent]):
         """Write a batch of events for a room.
-
-        Args:
-            room_id (str)
-            events (list[FrozenEvent])
         """
         pass
 
-    def write_state(self, room_id, event_id, state):
+    def write_state(self, room_id: str, event_id: str, state: StateMap[FrozenEvent]):
         """Write the state at the given event in the room.
 
         This only gets called for backward extremities rather than for each
         event.
-
-        Args:
-            room_id (str)
-            event_id (str)
-            state (dict[tuple[str, str], FrozenEvent])
         """
         pass
 
-    def write_invite(self, room_id, event, state):
+    def write_invite(self, room_id: str, event: FrozenEvent, state: StateMap[dict]):
         """Write an invite for the room, with associated invite state.
 
         Args:
-            room_id (str)
-            event (FrozenEvent)
-            state (dict[tuple[str, str], dict]): A subset of the state at the
+            room_id
+            event
+            state: A subset of the state at the
                invite, with a subset of the event keys (type, state_key,
                 content and sender)
         """
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 61b6713c88..d4f9a792fc 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -64,7 +64,7 @@ from synapse.replication.http.federation import (
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
 from synapse.state import StateResolutionStore, resolve_events_with_store
 from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
-from synapse.types import UserID, get_domain_from_id
+from synapse.types import StateMap, UserID, get_domain_from_id
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.distributor import user_joined_room
 from synapse.util.retryutils import NotRetryingDestination
@@ -89,7 +89,7 @@ class _NewEventInfo:
 
     event = attr.ib(type=EventBase)
     state = attr.ib(type=Optional[Sequence[EventBase]], default=None)
-    auth_events = attr.ib(type=Optional[Dict[Tuple[str, str], EventBase]], default=None)
+    auth_events = attr.ib(type=Optional[StateMap[EventBase]], default=None)
 
 
 def shortstr(iterable, maxitems=5):
@@ -352,9 +352,7 @@ class FederationHandler(BaseHandler):
                     ours = await self.state_store.get_state_groups_ids(room_id, seen)
 
                     # state_maps is a list of mappings from (type, state_key) to event_id
-                    state_maps = list(
-                        ours.values()
-                    )  # type: list[dict[tuple[str, str], str]]
+                    state_maps = list(ours.values())  # type: list[StateMap[str]]
 
                     # we don't need this any more, let's delete it.
                     del ours
@@ -1912,7 +1910,7 @@ class FederationHandler(BaseHandler):
         origin: str,
         event: EventBase,
         state: Optional[Iterable[EventBase]],
-        auth_events: Optional[Dict[Tuple[str, str], EventBase]],
+        auth_events: Optional[StateMap[EventBase]],
         backfilled: bool,
     ):
         """
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 9cab2adbfb..9f50196ea7 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -32,7 +32,15 @@ from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, Syna
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.http.endpoint import parse_and_validate_server_name
 from synapse.storage.state import StateFilter
-from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
+from synapse.types import (
+    Requester,
+    RoomAlias,
+    RoomID,
+    RoomStreamToken,
+    StateMap,
+    StreamToken,
+    UserID,
+)
 from synapse.util import stringutils
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches.response_cache import ResponseCache
@@ -207,15 +215,19 @@ class RoomCreationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def _update_upgraded_room_pls(
-        self, requester, old_room_id, new_room_id, old_room_state,
+        self,
+        requester: Requester,
+        old_room_id: str,
+        new_room_id: str,
+        old_room_state: StateMap[str],
     ):
         """Send updated power levels in both rooms after an upgrade
 
         Args:
-            requester (synapse.types.Requester): the user requesting the upgrade
-            old_room_id (str): the id of the room to be replaced
-            new_room_id (str): the id of the replacement room
-            old_room_state (dict[tuple[str, str], str]): the state map for the old room
+            requester: the user requesting the upgrade
+            old_room_id: the id of the room to be replaced
+            new_room_id: the id of the replacement room
+            old_room_state: the state map for the old room
 
         Returns:
             Deferred
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 5accc071ab..cacd0c0c2b 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -16,7 +16,7 @@
 
 import logging
 from collections import namedtuple
-from typing import Dict, Iterable, List, Optional, Tuple
+from typing import Dict, Iterable, List, Optional
 
 from six import iteritems, itervalues
 
@@ -33,6 +33,7 @@ from synapse.events.snapshot import EventContext
 from synapse.logging.utils import log_function
 from synapse.state import v1, v2
 from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
+from synapse.types import StateMap
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches import get_cache_factor_for
 from synapse.util.caches.expiringcache import ExpiringCache
@@ -594,7 +595,7 @@ def _make_state_cache_entry(new_state, state_groups_ids):
 def resolve_events_with_store(
     room_id: str,
     room_version: str,
-    state_sets: List[Dict[Tuple[str, str], str]],
+    state_sets: List[StateMap[str]],
     event_map: Optional[Dict[str, EventBase]],
     state_res_store: "StateResolutionStore",
 ):
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index b2f9865f39..d6c34ce3b7 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -15,7 +15,7 @@
 
 import hashlib
 import logging
-from typing import Callable, Dict, List, Optional, Tuple
+from typing import Callable, Dict, List, Optional
 
 from six import iteritems, iterkeys, itervalues
 
@@ -26,6 +26,7 @@ from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
 from synapse.api.room_versions import RoomVersions
 from synapse.events import EventBase
+from synapse.types import StateMap
 
 logger = logging.getLogger(__name__)
 
@@ -36,7 +37,7 @@ POWER_KEY = (EventTypes.PowerLevels, "")
 @defer.inlineCallbacks
 def resolve_events_with_store(
     room_id: str,
-    state_sets: List[Dict[Tuple[str, str], str]],
+    state_sets: List[StateMap[str]],
     event_map: Optional[Dict[str, EventBase]],
     state_map_factory: Callable,
 ):
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 72fb8a6317..6216fdd204 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -16,7 +16,7 @@
 import heapq
 import itertools
 import logging
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, List, Optional
 
 from six import iteritems, itervalues
 
@@ -27,6 +27,7 @@ from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
 from synapse.events import EventBase
+from synapse.types import StateMap
 
 logger = logging.getLogger(__name__)
 
@@ -35,7 +36,7 @@ logger = logging.getLogger(__name__)
 def resolve_events_with_store(
     room_id: str,
     room_version: str,
-    state_sets: List[Dict[Tuple[str, str], str]],
+    state_sets: List[StateMap[str]],
     event_map: Optional[Dict[str, EventBase]],
     state_res_store: "synapse.state.StateResolutionStore",
 ):
@@ -393,12 +394,12 @@ def _iterative_auth_checks(
         room_id (str)
         room_version (str)
         event_ids (list[str]): Ordered list of events to apply auth checks to
-        base_state (dict[tuple[str, str], str]): The set of state to start with
+        base_state (StateMap[str]): The set of state to start with
         event_map (dict[str,FrozenEvent])
         state_res_store (StateResolutionStore)
 
     Returns:
-        Deferred[dict[tuple[str, str], str]]: Returns the final updated state
+        Deferred[StateMap[str]]: Returns the final updated state
     """
     resolved_state = base_state.copy()
 
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index d07440e3ed..33bebd1c48 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -165,19 +165,20 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         )
 
     # FIXME: how should this be cached?
-    def get_filtered_current_state_ids(self, room_id, state_filter=StateFilter.all()):
+    def get_filtered_current_state_ids(
+        self, room_id: str, state_filter: StateFilter = StateFilter.all()
+    ):
         """Get the current state event of a given type for a room based on the
         current_state_events table.  This may not be as up-to-date as the result
         of doing a fresh state resolution as per state_handler.get_current_state
 
         Args:
-            room_id (str)
-            state_filter (StateFilter): The state filter used to fetch state
+            room_id
+            state_filter: The state filter used to fetch state
                 from the database.
 
         Returns:
-            Deferred[dict[tuple[str, str], str]]: Map from type/state_key to
-            event ID.
+            defer.Deferred[StateMap[str]]: Map from type/state_key to event ID.
         """
 
         where_clause, where_args = state_filter.make_sql_filter_clause()
diff --git a/synapse/storage/data_stores/state/store.py b/synapse/storage/data_stores/state/store.py
index d53695f238..c4ee9b7ccb 100644
--- a/synapse/storage/data_stores/state/store.py
+++ b/synapse/storage/data_stores/state/store.py
@@ -15,6 +15,7 @@
 
 import logging
 from collections import namedtuple
+from typing import Dict, Iterable, List, Set, Tuple
 
 from six import iteritems
 from six.moves import range
@@ -26,6 +27,7 @@ from synapse.storage._base import SQLBaseStore
 from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
 from synapse.storage.database import Database
 from synapse.storage.state import StateFilter
+from synapse.types import StateMap
 from synapse.util.caches import get_cache_factor_for
 from synapse.util.caches.descriptors import cached
 from synapse.util.caches.dictionary_cache import DictionaryCache
@@ -133,17 +135,18 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         )
 
     @defer.inlineCallbacks
-    def _get_state_groups_from_groups(self, groups, state_filter):
-        """Returns the state groups for a given set of groups, filtering on
-        types of state events.
+    def _get_state_groups_from_groups(
+        self, groups: List[int], state_filter: StateFilter
+    ):
+        """Returns the state groups for a given set of groups from the
+        database, filtering on types of state events.
 
         Args:
-            groups(list[int]): list of state group IDs to query
-            state_filter (StateFilter): The state filter used to fetch state
+            groups: list of state group IDs to query
+            state_filter: The state filter used to fetch state
                 from the database.
         Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
+            Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
         """
         results = {}
 
@@ -199,18 +202,19 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         return state_filter.filter_state(state_dict_ids), not missing_types
 
     @defer.inlineCallbacks
-    def _get_state_for_groups(self, groups, state_filter=StateFilter.all()):
+    def _get_state_for_groups(
+        self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all()
+    ):
         """Gets the state at each of a list of state groups, optionally
         filtering by type/state_key
 
         Args:
-            groups (iterable[int]): list of state groups for which we want
+            groups: list of state groups for which we want
                 to get the state.
-            state_filter (StateFilter): The state filter used to fetch state
+            state_filter: The state filter used to fetch state
                 from the database.
         Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
+            Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
         """
 
         member_filter, non_member_filter = state_filter.get_member_split()
@@ -268,24 +272,24 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
 
         return state
 
-    def _get_state_for_groups_using_cache(self, groups, cache, state_filter):
+    def _get_state_for_groups_using_cache(
+        self, groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter
+    ) -> Tuple[Dict[int, StateMap[str]], Set[int]]:
         """Gets the state at each of a list of state groups, optionally
         filtering by type/state_key, querying from a specific cache.
 
         Args:
-            groups (iterable[int]): list of state groups for which we want
-                to get the state.
-            cache (DictionaryCache): the cache of group ids to state dicts which
-                we will pass through - either the normal state cache or the specific
-                members state cache.
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
+            groups: list of state groups for which we want to get the state.
+            cache: the cache of group ids to state dicts which
+                we will pass through - either the normal state cache or the
+                specific members state cache.
+            state_filter: The state filter used to fetch state from the
+                database.
 
         Returns:
-            tuple[dict[int, dict[tuple[str, str], str]], set[int]]: Tuple of
-            dict of state_group_id -> (dict of (type, state_key) -> event id)
-            of entries in the cache, and the state group ids either missing
-            from the cache or incomplete.
+            Tuple of dict of state_group_id to state map of entries in the
+            cache, and the state group ids either missing from the cache or
+            incomplete.
         """
         results = {}
         incomplete_groups = set()
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index cbeb586014..c522c80922 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Iterable, List, TypeVar
 
 from six import iteritems, itervalues
 
@@ -22,9 +23,13 @@ import attr
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
+from synapse.types import StateMap
 
 logger = logging.getLogger(__name__)
 
+# Used for generic functions below
+T = TypeVar("T")
+
 
 @attr.s(slots=True)
 class StateFilter(object):
@@ -233,14 +238,14 @@ class StateFilter(object):
 
         return len(self.concrete_types())
 
-    def filter_state(self, state_dict):
+    def filter_state(self, state_dict: StateMap[T]) -> StateMap[T]:
         """Returns the state filtered with by this StateFilter
 
         Args:
-            state (dict[tuple[str, str], Any]): The state map to filter
+            state_dict: The state map to filter
 
         Returns:
-            dict[tuple[str, str], Any]: The filtered state map
+            The filtered state map
         """
         if self.is_full():
             return dict(state_dict)
@@ -333,12 +338,12 @@ class StateGroupStorage(object):
     def __init__(self, hs, stores):
         self.stores = stores
 
-    def get_state_group_delta(self, state_group):
+    def get_state_group_delta(self, state_group: int):
         """Given a state group try to return a previous group and a delta between
         the old and the new.
 
         Returns:
-            Deferred[Tuple[Optional[int], Optional[list[dict[tuple[str, str], str]]]]]):
+            Deferred[Tuple[Optional[int], Optional[StateMap[str]]]]:
                 (prev_group, delta_ids)
         """
 
@@ -353,7 +358,7 @@ class StateGroupStorage(object):
             event_ids (iterable[str]): ids of the events
 
         Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
+            Deferred[dict[int, StateMap[str]]]:
                 dict of state_group_id -> (dict of (type, state_key) -> event id)
         """
         if not event_ids:
@@ -410,17 +415,18 @@ class StateGroupStorage(object):
             for group, event_id_map in iteritems(group_to_ids)
         }
 
-    def _get_state_groups_from_groups(self, groups, state_filter):
+    def _get_state_groups_from_groups(
+        self, groups: List[int], state_filter: StateFilter
+    ):
         """Returns the state groups for a given set of groups, filtering on
         types of state events.
 
         Args:
-            groups(list[int]): list of state group IDs to query
-            state_filter (StateFilter): The state filter used to fetch state
+            groups: list of state group IDs to query
+            state_filter: The state filter used to fetch state
                 from the database.
         Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
+            Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
         """
 
         return self.stores.state._get_state_groups_from_groups(groups, state_filter)
@@ -519,7 +525,9 @@ class StateGroupStorage(object):
         state_map = yield self.get_state_ids_for_events([event_id], state_filter)
         return state_map[event_id]
 
-    def _get_state_for_groups(self, groups, state_filter=StateFilter.all()):
+    def _get_state_for_groups(
+        self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all()
+    ):
         """Gets the state at each of a list of state groups, optionally
         filtering by type/state_key
 
@@ -529,8 +537,7 @@ class StateGroupStorage(object):
             state_filter (StateFilter): The state filter used to fetch state
                 from the database.
         Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
+            Deferred[dict[int, StateMap[str]]]: Dict of state group to state map.
         """
         return self.stores.state._get_state_for_groups(groups, state_filter)
 
diff --git a/synapse/types.py b/synapse/types.py
index cd996c0b5a..65e4d8c181 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -17,6 +17,7 @@ import re
 import string
 import sys
 from collections import namedtuple
+from typing import Dict, Tuple, TypeVar
 
 import attr
 from signedjson.key import decode_verify_key_bytes
@@ -28,7 +29,7 @@ from synapse.api.errors import SynapseError
 if sys.version_info[:3] >= (3, 6, 0):
     from typing import Collection
 else:
-    from typing import Sized, Iterable, Container, TypeVar
+    from typing import Sized, Iterable, Container
 
     T_co = TypeVar("T_co", covariant=True)
 
@@ -36,6 +37,12 @@ else:
         __slots__ = ()
 
 
+# Define a state map type from type/state_key to T (usually an event ID or
+# event)
+T = TypeVar("T")
+StateMap = Dict[Tuple[str, str], T]
+
+
 class Requester(
     namedtuple(
         "Requester", ["user", "access_token_id", "is_guest", "device_id", "app_service"]
-- 
cgit 1.4.1
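
StateMap[T] is a generic alias for a dict keyed by the
(event type, state_key) pair, so call sites can parameterise it with
whatever the values are in context (event IDs as str, or event objects).
A small self-contained sketch; the helper function and event IDs are
invented for illustration:

    from typing import Dict, Tuple, TypeVar

    T = TypeVar("T")
    # The alias introduced above: (event type, state_key) -> T.
    StateMap = Dict[Tuple[str, str], T]

    def get_room_name_event_id(state_ids: StateMap[str]) -> str:
        """Look up the event ID of the m.room.name state event, if any."""
        return state_ids.get(("m.room.name", ""), "<unset>")

    state: StateMap[str] = {
        ("m.room.create", ""): "$create_event_id",
        ("m.room.name", ""): "$name_event_id",
    }
    print(get_room_name_event_id(state))  # prints $name_event_id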


From 4fb3cb208a17ba36a5da050b19e3997cf4808f9a Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Thu, 16 Jan 2020 20:27:07 +0000
Subject: Make the changelog entry more precise

---
 changelog.d/6714.bugfix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6714.bugfix b/changelog.d/6714.bugfix
index 3924f1ad79..410516694f 100644
--- a/changelog.d/6714.bugfix
+++ b/changelog.d/6714.bugfix
@@ -1 +1 @@
-Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies.
+Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies when running the automated purge jobs.
-- 
cgit 1.4.1


From 95c5b9bfb3506d06e6b0a7d42adfb1f76f2cb7ca Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Thu, 16 Jan 2020 22:29:06 +0000
Subject: changelog

---
 changelog.d/6724.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6724.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6724.misc b/changelog.d/6724.misc
new file mode 100644
index 0000000000..5256be75fa
--- /dev/null
+++ b/changelog.d/6724.misc
@@ -0,0 +1 @@
+When processing a SAML response, log the assertions for easier configuration.
-- 
cgit 1.4.1


From 5ce0b17e38404fceb8867fdb3b4b59c00db6b1e6 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 17 Jan 2020 10:04:15 +0000
Subject: Clarify the `account_validity` and `email` sections of the sample
 configuration. (#6685)

Generally try to make this more comprehensible, and make it match the
conventions.

I've removed the documentation for all the settings which allow you to change
the names of the template files, because I can't really see why they are
useful.
---
 changelog.d/6685.doc           |   1 +
 docs/sample_config.yaml        | 284 ++++++++++++++++++++++-------------------
 synapse/config/emailconfig.py  | 222 ++++++++++++++++----------------
 synapse/config/push.py         |   2 +-
 synapse/config/registration.py |  83 +++++++-----
 5 files changed, 320 insertions(+), 272 deletions(-)
 create mode 100644 changelog.d/6685.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6685.doc b/changelog.d/6685.doc
new file mode 100644
index 0000000000..7cf750fe3f
--- /dev/null
+++ b/changelog.d/6685.doc
@@ -0,0 +1 @@
+Clarify the `account_validity` and `email` sections of the sample configuration.
\ No newline at end of file
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 0a2505e7bb..8e8cf513b0 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -874,23 +874,6 @@ media_store_path: "DATADIR/media_store"
 # Optional account validity configuration. This allows for accounts to be denied
 # any request after a given period.
 #
-# ``enabled`` defines whether the account validity feature is enabled. Defaults
-# to False.
-#
-# ``period`` allows setting the period after which an account is valid
-# after its registration. When renewing the account, its validity period
-# will be extended by this amount of time. This parameter is required when using
-# the account validity feature.
-#
-# ``renew_at`` is the amount of time before an account's expiry date at which
-# Synapse will send an email to the account's email address with a renewal link.
-# This needs the ``email`` and ``public_baseurl`` configuration sections to be
-# filled.
-#
-# ``renew_email_subject`` is the subject of the email sent out with the renewal
-# link. ``%(app)s`` can be used as a placeholder for the ``app_name`` parameter
-# from the ``email`` section.
-#
 # Once this feature is enabled, Synapse will look for registered users without an
 # expiration date at startup and will add one to every account it found using the
 # current settings at that time.
@@ -901,21 +884,55 @@ media_store_path: "DATADIR/media_store"
 # date will be randomly selected within a range [now + period - d ; now + period],
 # where d is equal to 10% of the validity period.
 #
-#account_validity:
-#  enabled: true
-#  period: 6w
-#  renew_at: 1w
-#  renew_email_subject: "Renew your %(app)s account"
-#  # Directory in which Synapse will try to find the HTML files to serve to the
-#  # user when trying to renew an account. Optional, defaults to
-#  # synapse/res/templates.
-#  template_dir: "res/templates"
-#  # HTML to be displayed to the user after they successfully renewed their
-#  # account. Optional.
-#  account_renewed_html_path: "account_renewed.html"
-#  # HTML to be displayed when the user tries to renew an account with an invalid
-#  # renewal token. Optional.
-#  invalid_token_html_path: "invalid_token.html"
+account_validity:
+  # The account validity feature is disabled by default. Uncomment the
+  # following line to enable it.
+  #
+  #enabled: true
+
+  # The period after which an account is valid after its registration. When
+  # renewing the account, its validity period will be extended by this amount
+  # of time. This parameter is required when using the account validity
+  # feature.
+  #
+  #period: 6w
+
+  # The amount of time before an account's expiry date at which Synapse will
+  # send an email to the account's email address with a renewal link. By
+  # default, no such emails are sent.
+  #
+  # If you enable this setting, you will also need to fill out the 'email' and
+  # 'public_baseurl' configuration sections.
+  #
+  #renew_at: 1w
+
+  # The subject of the email sent out with the renewal link. '%(app)s' can be
+  # used as a placeholder for the 'app_name' parameter from the 'email'
+  # section.
+  #
+  # Note that the placeholder must be written '%(app)s', including the
+  # trailing 's'.
+  #
+  # If this is not set, a default value is used.
+  #
+  #renew_email_subject: "Renew your %(app)s account"
+
+  # Directory in which Synapse will try to find templates for the HTML files to
+  # serve to the user when trying to renew an account. If not set, default
+  # templates from within the Synapse package will be used.
+  #
+  #template_dir: "res/templates"
+
+  # File within 'template_dir' giving the HTML to be displayed to the user after
+  # they successfully renewed their account. If not set, default text is used.
+  #
+  #account_renewed_html_path: "account_renewed.html"
+
+  # File within 'template_dir' giving the HTML to be displayed when the user
+  # tries to renew an account with an invalid renewal token. If not set,
+  # default text is used.
+  #
+  #invalid_token_html_path: "invalid_token.html"
 
 # Time that a user's session remains valid for, after they log in.
 #
@@ -1353,107 +1370,110 @@ password_config:
    #pepper: "EVEN_MORE_SECRET"
 
 
+# Configuration for sending emails from Synapse.
+#
+email:
+  # The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
+  #
+  #smtp_host: mail.server
+
+  # The port on the mail server for outgoing SMTP. Defaults to 25.
+  #
+  #smtp_port: 587
+
+  # Username/password for authentication to the SMTP server. By default, no
+  # authentication is attempted.
+  #
+  # smtp_user: "exampleusername"
+  # smtp_pass: "examplepassword"
+
+  # Uncomment the following to require TLS transport security for SMTP.
+  # By default, Synapse will connect over plain text, and will then switch to
+  # TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
+  # Synapse will refuse to connect unless the server supports STARTTLS.
+  #
+  #require_transport_security: true
+
+  # Enable sending emails for messages that the user has missed
+  #
+  #enable_notifs: false
+
+  # notif_from defines the "From" address to use when sending emails.
+  # It must be set if email sending is enabled.
+  #
+  # The placeholder '%(app)s' will be replaced by the application name,
+  # which is normally 'app_name' (below), but may be overridden by the
+  # Matrix client application.
+  #
+  # Note that the placeholder must be written '%(app)s', including the
+  # trailing 's'.
+  #
+  #notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
+
+  # app_name defines the default value for '%(app)s' in notif_from. It
+  # defaults to 'Matrix'.
+  #
+  #app_name: my_branded_matrix_server
+
+  # Uncomment the following to disable automatic subscription to email
+  # notifications for new users. Enabled by default.
+  #
+  #notif_for_new_users: false
+
+  # Custom URL for client links within the email notifications. By default
+  # links will be based on "https://matrix.to".
+  #
+  # (This setting used to be called riot_base_url; the old name is still
+  # supported for backwards-compatibility but is now deprecated.)
+  #
+  #client_base_url: "http://localhost/riot"
 
-# Enable sending emails for password resets, notification events or
-# account expiry notices
-#
-# If your SMTP server requires authentication, the optional smtp_user &
-# smtp_pass variables should be used
-#
-#email:
-#   enable_notifs: false
-#   smtp_host: "localhost"
-#   smtp_port: 25 # SSL: 465, STARTTLS: 587
-#   smtp_user: "exampleusername"
-#   smtp_pass: "examplepassword"
-#   require_transport_security: false
-#
-#   # notif_from defines the "From" address to use when sending emails.
-#   # It must be set if email sending is enabled.
-#   #
-#   # The placeholder '%(app)s' will be replaced by the application name,
-#   # which is normally 'app_name' (below), but may be overridden by the
-#   # Matrix client application.
-#   #
-#   # Note that the placeholder must be written '%(app)s', including the
-#   # trailing 's'.
-#   #
-#   notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
-#
-#   # app_name defines the default value for '%(app)s' in notif_from. It
-#   # defaults to 'Matrix'.
-#   #
-#   #app_name: my_branded_matrix_server
-#
-#   # Enable email notifications by default
-#   #
-#   notif_for_new_users: true
-#
-#   # Defining a custom URL for Riot is only needed if email notifications
-#   # should contain links to a self-hosted installation of Riot; when set
-#   # the "app_name" setting is ignored
-#   #
-#   riot_base_url: "http://localhost/riot"
-#
-#   # Configure the time that a validation email or text message code
-#   # will expire after sending
-#   #
-#   # This is currently used for password resets
-#   #
-#   #validation_token_lifetime: 1h
-#
-#   # Template directory. All template files should be stored within this
-#   # directory. If not set, default templates from within the Synapse
-#   # package will be used
-#   #
-#   # For the list of default templates, please see
-#   # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
-#   #
-#   #template_dir: res/templates
-#
-#   # Templates for email notifications
-#   #
-#   notif_template_html: notif_mail.html
-#   notif_template_text: notif_mail.txt
-#
-#   # Templates for account expiry notices
-#   #
-#   expiry_template_html: notice_expiry.html
-#   expiry_template_text: notice_expiry.txt
-#
-#   # Templates for password reset emails sent by the homeserver
-#   #
-#   #password_reset_template_html: password_reset.html
-#   #password_reset_template_text: password_reset.txt
-#
-#   # Templates for registration emails sent by the homeserver
-#   #
-#   #registration_template_html: registration.html
-#   #registration_template_text: registration.txt
-#
-#   # Templates for validation emails sent by the homeserver when adding an email to
-#   # your user account
-#   #
-#   #add_threepid_template_html: add_threepid.html
-#   #add_threepid_template_text: add_threepid.txt
-#
-#   # Templates for password reset success and failure pages that a user
-#   # will see after attempting to reset their password
-#   #
-#   #password_reset_template_success_html: password_reset_success.html
-#   #password_reset_template_failure_html: password_reset_failure.html
-#
-#   # Templates for registration success and failure pages that a user
-#   # will see after attempting to register using an email or phone
-#   #
-#   #registration_template_success_html: registration_success.html
-#   #registration_template_failure_html: registration_failure.html
-#
-#   # Templates for success and failure pages that a user will see after attempting
-#   # to add an email or phone to their account
-#   #
-#   #add_threepid_success_html: add_threepid_success.html
-#   #add_threepid_failure_html: add_threepid_failure.html
+  # Configure how long a validation email remains valid after sending.
+  # Defaults to 1h.
+  #
+  #validation_token_lifetime: 15m
+
+  # Directory in which Synapse will try to find the template files below.
+  # If not set, default templates from within the Synapse package will be used.
+  #
+  # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
+  # If you *do* uncomment it, you will need to make sure that all the templates
+  # below are in the directory.
+  #
+  # Synapse will look for the following templates in this directory:
+  #
+  # * The contents of email notifications of missed events: 'notif_mail.html' and
+  #   'notif_mail.txt'.
+  #
+  # * The contents of account expiry notice emails: 'notice_expiry.html' and
+  #   'notice_expiry.txt'.
+  #
+  # * The contents of password reset emails sent by the homeserver:
+  #   'password_reset.html' and 'password_reset.txt'
+  #
+  # * HTML pages for success and failure that a user will see when they follow
+  #   the link in the password reset email: 'password_reset_success.html' and
+  #   'password_reset_failure.html'
+  #
+  # * The contents of address verification emails sent during registration:
+  #   'registration.html' and 'registration.txt'
+  #
+  # * HTML pages for success and failure that a user will see when they follow
+  #   the link in an address verification email sent during registration:
+  #   'registration_success.html' and 'registration_failure.html'
+  #
+  # * The contents of address verification emails sent when an address is added
+  #   to a Matrix account: 'add_threepid.html' and 'add_threepid.txt'
+  #
+  # * HTML pages for success and failure that a user will see when they follow
+  #   the link in an address verification email sent when an address is added
+  #   to a Matrix account: 'add_threepid_success.html' and
+  #   'add_threepid_failure.html'
+  #
+  # You can see the default templates at:
+  # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
+  #
+  #template_dir: "res/templates"
 
 
 #password_providers:
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 35756bed87..74853f9faa 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -37,10 +37,12 @@ class EmailConfig(Config):
 
         self.email_enable_notifs = False
 
-        email_config = config.get("email", {})
+        email_config = config.get("email")
+        if email_config is None:
+            email_config = {}
 
-        self.email_smtp_host = email_config.get("smtp_host", None)
-        self.email_smtp_port = email_config.get("smtp_port", None)
+        self.email_smtp_host = email_config.get("smtp_host", "localhost")
+        self.email_smtp_port = email_config.get("smtp_port", 25)
         self.email_smtp_user = email_config.get("smtp_user", None)
         self.email_smtp_pass = email_config.get("smtp_pass", None)
         self.require_transport_security = email_config.get(
@@ -74,9 +76,9 @@ class EmailConfig(Config):
         self.email_template_dir = os.path.abspath(template_dir)
 
         self.email_enable_notifs = email_config.get("enable_notifs", False)
-        account_validity_renewal_enabled = config.get("account_validity", {}).get(
-            "renew_at"
-        )
+
+        account_validity_config = config.get("account_validity") or {}
+        account_validity_renewal_enabled = account_validity_config.get("renew_at")
 
         self.threepid_behaviour_email = (
             # Have Synapse handle the email sending if account_threepid_delegates.email
@@ -278,7 +280,9 @@ class EmailConfig(Config):
             self.email_notif_for_new_users = email_config.get(
                 "notif_for_new_users", True
             )
-            self.email_riot_base_url = email_config.get("riot_base_url", None)
+            self.email_riot_base_url = email_config.get(
+                "client_base_url", email_config.get("riot_base_url", None)
+            )
 
         if account_validity_renewal_enabled:
             self.email_expiry_template_html = email_config.get(
@@ -294,107 +298,111 @@ class EmailConfig(Config):
                     raise ConfigError("Unable to find email template file %s" % (p,))
 
     def generate_config_section(self, config_dir_path, server_name, **kwargs):
-        return """
-        # Enable sending emails for password resets, notification events or
-        # account expiry notices
-        #
-        # If your SMTP server requires authentication, the optional smtp_user &
-        # smtp_pass variables should be used
-        #
-        #email:
-        #   enable_notifs: false
-        #   smtp_host: "localhost"
-        #   smtp_port: 25 # SSL: 465, STARTTLS: 587
-        #   smtp_user: "exampleusername"
-        #   smtp_pass: "examplepassword"
-        #   require_transport_security: false
-        #
-        #   # notif_from defines the "From" address to use when sending emails.
-        #   # It must be set if email sending is enabled.
-        #   #
-        #   # The placeholder '%(app)s' will be replaced by the application name,
-        #   # which is normally 'app_name' (below), but may be overridden by the
-        #   # Matrix client application.
-        #   #
-        #   # Note that the placeholder must be written '%(app)s', including the
-        #   # trailing 's'.
-        #   #
-        #   notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
-        #
-        #   # app_name defines the default value for '%(app)s' in notif_from. It
-        #   # defaults to 'Matrix'.
-        #   #
-        #   #app_name: my_branded_matrix_server
-        #
-        #   # Enable email notifications by default
-        #   #
-        #   notif_for_new_users: true
-        #
-        #   # Defining a custom URL for Riot is only needed if email notifications
-        #   # should contain links to a self-hosted installation of Riot; when set
-        #   # the "app_name" setting is ignored
-        #   #
-        #   riot_base_url: "http://localhost/riot"
-        #
-        #   # Configure the time that a validation email or text message code
-        #   # will expire after sending
-        #   #
-        #   # This is currently used for password resets
-        #   #
-        #   #validation_token_lifetime: 1h
-        #
-        #   # Template directory. All template files should be stored within this
-        #   # directory. If not set, default templates from within the Synapse
-        #   # package will be used
-        #   #
-        #   # For the list of default templates, please see
-        #   # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
-        #   #
-        #   #template_dir: res/templates
-        #
-        #   # Templates for email notifications
-        #   #
-        #   notif_template_html: notif_mail.html
-        #   notif_template_text: notif_mail.txt
-        #
-        #   # Templates for account expiry notices
-        #   #
-        #   expiry_template_html: notice_expiry.html
-        #   expiry_template_text: notice_expiry.txt
-        #
-        #   # Templates for password reset emails sent by the homeserver
-        #   #
-        #   #password_reset_template_html: password_reset.html
-        #   #password_reset_template_text: password_reset.txt
-        #
-        #   # Templates for registration emails sent by the homeserver
-        #   #
-        #   #registration_template_html: registration.html
-        #   #registration_template_text: registration.txt
-        #
-        #   # Templates for validation emails sent by the homeserver when adding an email to
-        #   # your user account
-        #   #
-        #   #add_threepid_template_html: add_threepid.html
-        #   #add_threepid_template_text: add_threepid.txt
-        #
-        #   # Templates for password reset success and failure pages that a user
-        #   # will see after attempting to reset their password
-        #   #
-        #   #password_reset_template_success_html: password_reset_success.html
-        #   #password_reset_template_failure_html: password_reset_failure.html
-        #
-        #   # Templates for registration success and failure pages that a user
-        #   # will see after attempting to register using an email or phone
-        #   #
-        #   #registration_template_success_html: registration_success.html
-        #   #registration_template_failure_html: registration_failure.html
+        return """\
+        # Configuration for sending emails from Synapse.
         #
-        #   # Templates for success and failure pages that a user will see after attempting
-        #   # to add an email or phone to their account
-        #   #
-        #   #add_threepid_success_html: add_threepid_success.html
-        #   #add_threepid_failure_html: add_threepid_failure.html
+        email:
+          # The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
+          #
+          #smtp_host: mail.server
+
+          # The port on the mail server for outgoing SMTP. Defaults to 25.
+          #
+          #smtp_port: 587
+
+          # Username/password for authentication to the SMTP server. By default, no
+          # authentication is attempted.
+          #
+          # smtp_user: "exampleusername"
+          # smtp_pass: "examplepassword"
+
+          # Uncomment the following to require TLS transport security for SMTP.
+          # By default, Synapse will connect over plain text, and will then switch to
+          # TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
+          # Synapse will refuse to connect unless the server supports STARTTLS.
+          #
+          #require_transport_security: true
+
+          # Enable sending emails for messages that the user has missed
+          #
+          #enable_notifs: false
+
+          # notif_from defines the "From" address to use when sending emails.
+          # It must be set if email sending is enabled.
+          #
+          # The placeholder '%(app)s' will be replaced by the application name,
+          # which is normally 'app_name' (below), but may be overridden by the
+          # Matrix client application.
+          #
+          # Note that the placeholder must be written '%(app)s', including the
+          # trailing 's'.
+          #
+          #notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
+
+          # app_name defines the default value for '%(app)s' in notif_from. It
+          # defaults to 'Matrix'.
+          #
+          #app_name: my_branded_matrix_server
+
+          # Uncomment the following to disable automatic subscription to email
+          # notifications for new users. Enabled by default.
+          #
+          #notif_for_new_users: false
+
+          # Custom URL for client links within the email notifications. By default
+          # links will be based on "https://matrix.to".
+          #
+          # (This setting used to be called riot_base_url; the old name is still
+          # supported for backwards-compatibility but is now deprecated.)
+          #
+          #client_base_url: "http://localhost/riot"
+
+          # Configure the time that a validation email will expire after sending.
+          # Defaults to 1h.
+          #
+          #validation_token_lifetime: 15m
+
+          # Directory in which Synapse will try to find the template files below.
+          # If not set, default templates from within the Synapse package will be used.
+          #
+          # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
+          # If you *do* uncomment it, you will need to make sure that all the templates
+          # below are in the directory.
+          #
+          # Synapse will look for the following templates in this directory:
+          #
+          # * The contents of email notifications of missed events: 'notif_mail.html' and
+          #   'notif_mail.txt'.
+          #
+          # * The contents of account expiry notice emails: 'notice_expiry.html' and
+          #   'notice_expiry.txt'.
+          #
+          # * The contents of password reset emails sent by the homeserver:
+          #   'password_reset.html' and 'password_reset.txt'
+          #
+          # * HTML pages for success and failure that a user will see when they follow
+          #   the link in the password reset email: 'password_reset_success.html' and
+          #   'password_reset_failure.html'
+          #
+          # * The contents of address verification emails sent during registration:
+          #   'registration.html' and 'registration.txt'
+          #
+          # * HTML pages for success and failure that a user will see when they follow
+          #   the link in an address verification email sent during registration:
+          #   'registration_success.html' and 'registration_failure.html'
+          #
+          # * The contents of address verification emails sent when an address is added
+          #   to a Matrix account: 'add_threepid.html' and 'add_threepid.txt'
+          #
+          # * HTML pages for success and failure that a user will see when they follow
+          #   the link in an address verification email sent when an address is added
+          #   to a Matrix account: 'add_threepid_success.html' and
+          #   'add_threepid_failure.html'
+          #
+          # You can see the default templates at:
+          # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
+          #
+          #template_dir: "res/templates"
         """
 
 
diff --git a/synapse/config/push.py b/synapse/config/push.py
index 0910958649..6f2b3a7faa 100644
--- a/synapse/config/push.py
+++ b/synapse/config/push.py
@@ -35,7 +35,7 @@ class PushConfig(Config):
 
         # Now check for the one in the 'email' section and honour it,
         # with a warning.
-        push_config = config.get("email", {})
+        push_config = config.get("email") or {}
         redact_content = push_config.get("redact_content")
         if redact_content is not None:
             print(
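
The switch to `config.get("email") or {}` above matters because YAML parses a present-but-empty `email:` block to None, so a `.get()` default is never applied. A minimal sketch (PyYAML assumed):

    import yaml

    config = yaml.safe_load("email:\n")       # an empty block parses to None
    assert config == {"email": None}
    assert config.get("email", {}) is None    # default skipped: the key exists
    assert (config.get("email") or {}) == {}  # `or {}` normalises None away
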
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index ee9614c5f7..b873995a49 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -27,6 +27,8 @@ class AccountValidityConfig(Config):
     section = "accountvalidity"
 
     def __init__(self, config, synapse_config):
+        if config is None:
+            return
         self.enabled = config.get("enabled", False)
         self.renew_by_email_enabled = "renew_at" in config
 
@@ -159,23 +161,6 @@ class RegistrationConfig(Config):
         # Optional account validity configuration. This allows for accounts to be denied
         # any request after a given period.
         #
-        # ``enabled`` defines whether the account validity feature is enabled. Defaults
-        # to False.
-        #
-        # ``period`` allows setting the period after which an account is valid
-        # after its registration. When renewing the account, its validity period
-        # will be extended by this amount of time. This parameter is required when using
-        # the account validity feature.
-        #
-        # ``renew_at`` is the amount of time before an account's expiry date at which
-        # Synapse will send an email to the account's email address with a renewal link.
-        # This needs the ``email`` and ``public_baseurl`` configuration sections to be
-        # filled.
-        #
-        # ``renew_email_subject`` is the subject of the email sent out with the renewal
-        # link. ``%%(app)s`` can be used as a placeholder for the ``app_name`` parameter
-        # from the ``email`` section.
-        #
         # Once this feature is enabled, Synapse will look for registered users without an
         # expiration date at startup and will add one to every account it finds using the
         # current settings at that time.
@@ -186,21 +171,55 @@ class RegistrationConfig(Config):
         # date will be randomly selected within a range [now + period - d ; now + period],
         # where d is equal to 10%% of the validity period.
         #
-        #account_validity:
-        #  enabled: true
-        #  period: 6w
-        #  renew_at: 1w
-        #  renew_email_subject: "Renew your %%(app)s account"
-        #  # Directory in which Synapse will try to find the HTML files to serve to the
-        #  # user when trying to renew an account. Optional, defaults to
-        #  # synapse/res/templates.
-        #  template_dir: "res/templates"
-        #  # HTML to be displayed to the user after they successfully renewed their
-        #  # account. Optional.
-        #  account_renewed_html_path: "account_renewed.html"
-        #  # HTML to be displayed when the user tries to renew an account with an invalid
-        #  # renewal token. Optional.
-        #  invalid_token_html_path: "invalid_token.html"
+        account_validity:
+          # The account validity feature is disabled by default. Uncomment the
+          # following line to enable it.
+          #
+          #enabled: true
+
+          # The period for which an account remains valid after its registration. When
+          # renewing the account, its validity period will be extended by this amount
+          # of time. This parameter is required when using the account validity
+          # feature.
+          #
+          #period: 6w
+
+          # The amount of time before an account's expiry date at which Synapse will
+          # send an email to the account's email address with a renewal link. By
+          # default, no such emails are sent.
+          #
+          # If you enable this setting, you will also need to fill out the 'email' and
+          # 'public_baseurl' configuration sections.
+          #
+          #renew_at: 1w
+
+          # The subject of the email sent out with the renewal link. '%%(app)s' can be
+          # used as a placeholder for the 'app_name' parameter from the 'email'
+          # section.
+          #
+          # Note that the placeholder must be written '%%(app)s', including the
+          # trailing 's'.
+          #
+          # If this is not set, a default value is used.
+          #
+          #renew_email_subject: "Renew your %%(app)s account"
+
+          # Directory in which Synapse will try to find templates for the HTML files to
+          # serve to the user when trying to renew an account. If not set, default
+          # templates from within the Synapse package will be used.
+          #
+          #template_dir: "res/templates"
+
+          # File within 'template_dir' giving the HTML to be displayed to the user after
+          # they successfully renewed their account. If not set, default text is used.
+          #
+          #account_renewed_html_path: "account_renewed.html"
+
+          # File within 'template_dir' giving the HTML to be displayed when the user
+          # tries to renew an account with an invalid renewal token. If not set,
+          # default text is used.
+          #
+          #invalid_token_html_path: "invalid_token.html"
 
         # Time that a user's session remains valid for, after they log in.
         #
-- 
cgit 1.4.1
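
A note on the randomised expiration dates described in the sample config above (a date "randomly selected within a range [now + period - d ; now + period], where d is equal to 10% of the validity period"). An illustrative helper, not Synapse's actual implementation:

    import random

    def initial_expiration_ts(now_ms: int, period_ms: int) -> int:
        # Pick a point in [now + period - d, now + period], where d is 10%
        # of the validity period, so expiries spread out rather than all
        # landing at the same instant.
        d = period_ms // 10
        return now_ms + period_ms - random.randrange(d + 1)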


From a8a50f5b5746279379b4511c8ecb2a40b143fe32 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 17 Jan 2020 10:27:19 +0000
Subject: Wake up transaction queue when remote server comes back online
 (#6706)

This will be used to retry outbound transactions to a remote server if
we think it might have come back up.
---
 changelog.d/6706.misc                  |  1 +
 docs/tcp_replication.md                |  6 +++++-
 synapse/app/federation_sender.py       | 12 +++++++++++-
 synapse/federation/sender/__init__.py  | 18 ++++++++++++++++--
 synapse/federation/transport/server.py | 19 ++++++++++++++++++-
 synapse/notifier.py                    | 31 ++++++++++++++++++++++++++++---
 synapse/replication/tcp/client.py      |  3 +++
 synapse/replication/tcp/commands.py    | 17 +++++++++++++++++
 synapse/replication/tcp/protocol.py    | 15 +++++++++++++++
 synapse/replication/tcp/resource.py    |  9 +++++++++
 synapse/server.pyi                     | 12 ++++++++++++
 11 files changed, 135 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/6706.misc

diff --git a/changelog.d/6706.misc b/changelog.d/6706.misc
new file mode 100644
index 0000000000..1ac11cc04b
--- /dev/null
+++ b/changelog.d/6706.misc
@@ -0,0 +1 @@
+Attempt to retry sending a transaction when we detect a remote server has come back online, rather than waiting for a transaction to be triggered by new data.
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
index ba9e874d07..a0b1d563ff 100644
--- a/docs/tcp_replication.md
+++ b/docs/tcp_replication.md
@@ -209,7 +209,7 @@ Where `<token>` may be either:
  * a numeric stream_id to stream updates since (exclusive)
  * `NOW` to stream all subsequent updates.
 
-The `<stream_name>` is the name of a replication stream to subscribe 
+The `<stream_name>` is the name of a replication stream to subscribe
 to (see [here](../synapse/replication/tcp/streams/_base.py) for a list
 of streams). It can also be `ALL` to subscribe to all known streams,
 in which case the `<token>` must be set to `NOW`.
@@ -234,6 +234,10 @@ in which case the `` must be set to `NOW`.
 
    Used exclusively in tests
 
+### REMOTE_SERVER_UP (S, C)
+
+   Inform other processes that a remote server may have come back online.
+
 See `synapse/replication/tcp/commands.py` for a detailed description and
 the format of each command.
 
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index a57cf991ac..38d11fdd0f 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -158,6 +158,13 @@ class FederationSenderReplicationHandler(ReplicationClientHandler):
         args.update(self.send_handler.stream_positions())
         return args
 
+    def on_remote_server_up(self, server: str):
+        """Called when get a new REMOTE_SERVER_UP command."""
+
+        # Let's wake up the transaction queue for the server in case we have
+        # pending stuff to send to it.
+        self.send_handler.wake_destination(server)
+
 
 def start(config_options):
     try:
@@ -205,7 +212,7 @@ class FederationSenderHandler(object):
     to the federation sender.
     """
 
-    def __init__(self, hs, replication_client):
+    def __init__(self, hs: FederationSenderServer, replication_client):
         self.store = hs.get_datastore()
         self._is_mine_id = hs.is_mine_id
         self.federation_sender = hs.get_federation_sender()
@@ -226,6 +233,9 @@ class FederationSenderHandler(object):
             self.store.get_room_max_stream_ordering()
         )
 
+    def wake_destination(self, server: str):
+        self.federation_sender.wake_destination(server)
+
     def stream_positions(self):
         return {"federation": self.federation_position}
 
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 4ebb0e8bc0..36c83c3027 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -21,6 +21,7 @@ from prometheus_client import Counter
 
 from twisted.internet import defer
 
+import synapse
 import synapse.metrics
 from synapse.federation.sender.per_destination_queue import PerDestinationQueue
 from synapse.federation.sender.transaction_manager import TransactionManager
@@ -54,7 +55,7 @@ sent_pdus_destination_dist_total = Counter(
 
 
 class FederationSender(object):
-    def __init__(self, hs):
+    def __init__(self, hs: "synapse.server.HomeServer"):
         self.hs = hs
         self.server_name = hs.hostname
 
@@ -482,7 +483,20 @@ class FederationSender(object):
 
     def send_device_messages(self, destination):
         if destination == self.server_name:
-            logger.info("Not sending device update to ourselves")
+            logger.warning("Not sending device update to ourselves")
+            return
+
+        self._get_per_destination_queue(destination).attempt_new_transaction()
+
+    def wake_destination(self, destination: str):
+        """Called when we want to retry sending transactions to a remote.
+
+        This is mainly useful if the remote server has been down and we think it
+        might have come back.
+        """
+
+        if destination == self.server_name:
+            logger.warning("Not waking up ourselves")
             return
 
         self._get_per_destination_queue(destination).attempt_new_transaction()
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index b4cbf23394..d8cf9ed299 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -44,6 +44,7 @@ from synapse.logging.opentracing import (
     tags,
     whitelisted_homeserver,
 )
+from synapse.server import HomeServer
 from synapse.types import ThirdPartyInstanceID, get_domain_from_id
 from synapse.util.ratelimitutils import FederationRateLimiter
 from synapse.util.versionstring import get_version_string
@@ -101,12 +102,17 @@ class NoAuthenticationError(AuthenticationError):
 
 
 class Authenticator(object):
-    def __init__(self, hs):
+    def __init__(self, hs: HomeServer):
         self._clock = hs.get_clock()
         self.keyring = hs.get_keyring()
         self.server_name = hs.hostname
         self.store = hs.get_datastore()
         self.federation_domain_whitelist = hs.config.federation_domain_whitelist
+        self.notifier = hs.get_notifier()
+
+        self.replication_client = None
+        if hs.config.worker.worker_app:
+            self.replication_client = hs.get_tcp_replication()
 
     # A method just so we can pass 'self' as the authenticator to the Servlets
     async def authenticate_request(self, request, content):
@@ -166,6 +172,17 @@ class Authenticator(object):
         try:
             logger.info("Marking origin %r as up", origin)
             await self.store.set_destination_retry_timings(origin, None, 0, 0)
+
+            # Inform the relevant places that the remote server is back up.
+            self.notifier.notify_remote_server_up(origin)
+            if self.replication_client:
+                # If we're on a worker we try and inform master about this. The
+                # replication client doesn't hook into the notifier to avoid
+                # infinite loops where we send a `REMOTE_SERVER_UP` command to
+                # master, which then echoes it back to us which in turn pokes
+                # the notifier.
+                self.replication_client.send_remote_server_up(origin)
+
         except Exception:
             logger.exception("Error resetting retry timings on %s", origin)
 
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 5f5f765bea..6132727cbd 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -15,11 +15,13 @@
 
 import logging
 from collections import namedtuple
+from typing import Callable, List
 
 from prometheus_client import Counter
 
 from twisted.internet import defer
 
+import synapse.server
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError
 from synapse.handlers.presence import format_user_presence_state
@@ -154,7 +156,7 @@ class Notifier(object):
 
     UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000
 
-    def __init__(self, hs):
+    def __init__(self, hs: "synapse.server.HomeServer"):
         self.user_to_user_stream = {}
         self.room_to_user_streams = {}
 
@@ -164,7 +166,12 @@ class Notifier(object):
         self.store = hs.get_datastore()
         self.pending_new_room_events = []
 
-        self.replication_callbacks = []
+        # Called when there are new things to stream over replication
+        self.replication_callbacks = []  # type: List[Callable[[], None]]
+
+        # Called when remote servers have come back online after having been
+        # down.
+        self.remote_server_up_callbacks = []  # type: List[Callable[[str], None]]
 
         self.clock = hs.get_clock()
         self.appservice_handler = hs.get_application_service_handler()
@@ -205,7 +212,7 @@ class Notifier(object):
             "synapse_notifier_users", "", [], lambda: len(self.user_to_user_stream)
         )
 
-    def add_replication_callback(self, cb):
+    def add_replication_callback(self, cb: Callable[[], None]):
         """Add a callback that will be called when some new data is available.
         Callback is not given any arguments. It should *not* return a Deferred - if
         it needs to do any asynchronous work, a background thread should be started and
@@ -213,6 +220,12 @@ class Notifier(object):
         """
         self.replication_callbacks.append(cb)
 
+    def add_remote_server_up_callback(self, cb: Callable[[str], None]):
+        """Add a callback that will be called when synapse detects a server
+        has been
+        """
+        self.remote_server_up_callbacks.append(cb)
+
     def on_new_room_event(
         self, event, room_stream_id, max_room_stream_id, extra_users=[]
     ):
@@ -522,3 +535,15 @@ class Notifier(object):
         """Notify the any replication listeners that there's a new event"""
         for cb in self.replication_callbacks:
             cb()
+
+    def notify_remote_server_up(self, server: str):
+        """Notify any replication that a remote server has come back up
+        """
+        # We call federation_sender directly rather than registering as a
+        # callback, as a) we already have a reference to it and b) registering
+        # it as a callback would introduce a circular dependency.
+        if self.federation_sender:
+            self.federation_sender.wake_destination(server)
+
+        for cb in self.remote_server_up_callbacks:
+            cb(server)
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 52a0aefe68..fc06a7b053 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -143,6 +143,9 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
         if d:
             d.callback(data)
 
+    def on_remote_server_up(self, server: str):
+        """Called when get a new REMOTE_SERVER_UP command."""
+
     def get_streams_to_replicate(self) -> Dict[str, int]:
         """Called when a new connection has been established and we need to
         subscribe to streams.
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index cbb36b9acf..451671412d 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -387,6 +387,20 @@ class UserIpCommand(Command):
         )
 
 
+class RemoteServerUpCommand(Command):
+    """Sent when a worker has detected that a remote server is no longer
+    "down" and retry timings should be reset.
+
+    If sent from a client the server will relay to all other workers.
+
+    Format::
+
+        REMOTE_SERVER_UP <server>
+    """
+
+    NAME = "REMOTE_SERVER_UP"
+
+
 _COMMANDS = (
     ServerCommand,
     RdataCommand,
@@ -401,6 +415,7 @@ _COMMANDS = (
     RemovePusherCommand,
     InvalidateCacheCommand,
     UserIpCommand,
+    RemoteServerUpCommand,
 )  # type: Tuple[Type[Command], ...]
 
 # Map of command name to command type.
@@ -414,6 +429,7 @@ VALID_SERVER_COMMANDS = (
     ErrorCommand.NAME,
     PingCommand.NAME,
     SyncCommand.NAME,
+    RemoteServerUpCommand.NAME,
 )
 
 # The commands the client is allowed to send
@@ -427,4 +443,5 @@ VALID_CLIENT_COMMANDS = (
     InvalidateCacheCommand.NAME,
     UserIpCommand.NAME,
     ErrorCommand.NAME,
+    RemoteServerUpCommand.NAME,
 )
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 5f4bdf84d2..131e5acb09 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -76,6 +76,7 @@ from synapse.replication.tcp.commands import (
     PingCommand,
     PositionCommand,
     RdataCommand,
+    RemoteServerUpCommand,
     ReplicateCommand,
     ServerCommand,
     SyncCommand,
@@ -460,6 +461,9 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
     async def on_INVALIDATE_CACHE(self, cmd):
         self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys)
 
+    async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand):
+        self.streamer.on_remote_server_up(cmd.data)
+
     async def on_USER_IP(self, cmd):
         self.streamer.on_user_ip(
             cmd.user_id,
@@ -555,6 +559,9 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
     def send_sync(self, data):
         self.send_command(SyncCommand(data))
 
+    def send_remote_server_up(self, server: str):
+        self.send_command(RemoteServerUpCommand(server))
+
     def on_connection_closed(self):
         BaseReplicationStreamProtocol.on_connection_closed(self)
         self.streamer.lost_connection(self)
@@ -588,6 +595,11 @@ class AbstractReplicationClientHandler(metaclass=abc.ABCMeta):
         """Called when get a new SYNC command."""
         raise NotImplementedError()
 
+    @abc.abstractmethod
+    async def on_remote_server_up(self, server: str):
+        """Called when get a new REMOTE_SERVER_UP command."""
+        raise NotImplementedError()
+
     @abc.abstractmethod
     def get_streams_to_replicate(self):
         """Called when a new connection has been established and we need to
@@ -707,6 +719,9 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
     async def on_SYNC(self, cmd):
         self.handler.on_sync(cmd.data)
 
+    async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand):
+        self.handler.on_remote_server_up(cmd.data)
+
     def replicate(self, stream_name, token):
         """Send the subscription request to the server
         """
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index b1752e88cd..6ebf944f66 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -120,6 +120,7 @@ class ReplicationStreamer(object):
             self.federation_sender = hs.get_federation_sender()
 
         self.notifier.add_replication_callback(self.on_notifier_poke)
+        self.notifier.add_remote_server_up_callback(self.send_remote_server_up)
 
         # Keeps track of whether we are currently checking for updates
         self.is_looping = False
@@ -288,6 +289,14 @@ class ReplicationStreamer(object):
         )
         await self._server_notices_sender.on_user_ip(user_id)
 
+    @measure_func("repl.on_remote_server_up")
+    def on_remote_server_up(self, server: str):
+        self.notifier.notify_remote_server_up(server)
+
+    def send_remote_server_up(self, server: str):
+        for conn in self.connections:
+            conn.send_remote_server_up(server)
+
     def send_sync_to_all_connections(self, data):
         """Sends a SYNC command to all clients.
 
diff --git a/synapse/server.pyi b/synapse/server.pyi
index b5e0b57095..0731403047 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -1,3 +1,5 @@
+import twisted.internet
+
 import synapse.api.auth
 import synapse.config.homeserver
 import synapse.federation.sender
@@ -9,10 +11,12 @@ import synapse.handlers.deactivate_account
 import synapse.handlers.device
 import synapse.handlers.e2e_keys
 import synapse.handlers.message
+import synapse.handlers.presence
 import synapse.handlers.room
 import synapse.handlers.room_member
 import synapse.handlers.set_password
 import synapse.http.client
+import synapse.notifier
 import synapse.rest.media.v1.media_repository
 import synapse.server_notices.server_notices_manager
 import synapse.server_notices.server_notices_sender
@@ -85,3 +89,11 @@ class HomeServer(object):
         self,
     ) -> synapse.server_notices.server_notices_sender.ServerNoticesSender:
         pass
+    def get_notifier(self) -> synapse.notifier.Notifier:
+        pass
+    def get_presence_handler(self) -> synapse.handlers.presence.PresenceHandler:
+        pass
+    def get_clock(self) -> synapse.util.Clock:
+        pass
+    def get_reactor(self) -> twisted.internet.base.ReactorBase:
+        pass
-- 
cgit 1.4.1
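
For reference, the new command travels over the replication connection as a single `REMOTE_SERVER_UP <server>` line, per the format documented in the patch. A standalone sketch of that wire format (in Synapse itself the NAME prefix is added by the protocol layer when a command is written out, not by the command class):

    def to_wire_line(name: str, data: str) -> str:
        # Replication commands are newline-delimited "NAME <data>" lines.
        return "%s %s" % (name, data)

    assert to_wire_line("REMOTE_SERVER_UP", "remote.example.com") == (
        "REMOTE_SERVER_UP remote.example.com"
    )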


From 2b6a77fcde8396331a790a5ddeaa744093a8c728 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 17 Jan 2020 10:32:47 +0000
Subject: Delegate remote_user_id mapping to the saml mapping provider (#6723)

Turns out that figuring out a remote user id for the SAML user isn't quite as obvious as it seems. Factor it out to the SamlMappingProvider so that it's easy to control.
---
 changelog.d/6723.misc            |  1 +
 synapse/config/saml2_config.py   |  1 +
 synapse/handlers/saml_handler.py | 27 +++++++++++++++++++++------
 3 files changed, 23 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/6723.misc

diff --git a/changelog.d/6723.misc b/changelog.d/6723.misc
new file mode 100644
index 0000000000..17f15e73a8
--- /dev/null
+++ b/changelog.d/6723.misc
@@ -0,0 +1 @@
+Updates to the SAML mapping provider API.
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
index b91414aa35..423c158b11 100644
--- a/synapse/config/saml2_config.py
+++ b/synapse/config/saml2_config.py
@@ -121,6 +121,7 @@ class SAML2Config(Config):
         required_methods = [
             "get_saml_attributes",
             "saml_response_to_user_attributes",
+            "get_remote_user_id",
         ]
         missing_methods = [
             method
diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py
index 107f97032b..90e69b49ee 100644
--- a/synapse/handlers/saml_handler.py
+++ b/synapse/handlers/saml_handler.py
@@ -135,14 +135,15 @@ class SamlHandler:
         logger.info("SAML2 response: %s", saml2_auth.origxml)
         logger.info("SAML2 mapped attributes: %s", saml2_auth.ava)
 
-        try:
-            remote_user_id = saml2_auth.ava["uid"][0]
-        except KeyError:
-            logger.warning("SAML2 response lacks a 'uid' attestation")
-            raise SynapseError(400, "'uid' not in SAML2 response")
-
         self._outstanding_requests_dict.pop(saml2_auth.in_response_to, None)
 
+        remote_user_id = self._user_mapping_provider.get_remote_user_id(
+            saml2_auth, client_redirect_url
+        )
+
+        if not remote_user_id:
+            raise Exception("Failed to extract remote user id from SAML response")
+
         with (await self._mapping_lock.queue(self._auth_provider_id)):
             # first of all, check if we already have a mapping for this user
             logger.info(
@@ -279,6 +280,20 @@ class DefaultSamlMappingProvider(object):
         self._mxid_source_attribute = parsed_config.mxid_source_attribute
         self._mxid_mapper = parsed_config.mxid_mapper
 
+        self._grandfathered_mxid_source_attribute = (
+            module_api._hs.config.saml2_grandfathered_mxid_source_attribute
+        )
+
+    def get_remote_user_id(
+        self, saml_response: saml2.response.AuthnResponse, client_redirect_url: str
+    ):
+        """Extracts the remote user id from the SAML response"""
+        try:
+            return saml_response.ava["uid"][0]
+        except KeyError:
+            logger.warning("SAML2 response lacks a 'uid' attestation")
+            raise SynapseError(400, "'uid' not in SAML2 response")
+
     def saml_response_to_user_attributes(
         self,
         saml_response: saml2.response.AuthnResponse,
-- 
cgit 1.4.1
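
With this change, a custom mapping provider can decide where the remote user id comes from by implementing `get_remote_user_id` (alongside the other methods in `required_methods`). A hypothetical provider keyed off an `employeeNumber` attribute, mirroring the default implementation shown above:

    import logging

    import saml2.response

    from synapse.api.errors import SynapseError

    logger = logging.getLogger(__name__)

    class EmployeeNumberMappingProvider:
        # Hypothetical; a real provider must also implement
        # get_saml_attributes and saml_response_to_user_attributes.
        def get_remote_user_id(
            self, saml_response: saml2.response.AuthnResponse, client_redirect_url: str
        ):
            try:
                return saml_response.ava["employeeNumber"][0]
            except KeyError:
                logger.warning("SAML2 response lacks an 'employeeNumber' attestation")
                raise SynapseError(400, "'employeeNumber' not in SAML2 response")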


From 722b4f302d705f497355f206ecb160de1bef2074 Mon Sep 17 00:00:00 2001
From: Satsuki Yanagi <17376330+u1-liquid@users.noreply.github.com>
Date: Fri, 17 Jan 2020 23:30:35 +0900
Subject: Fix syntax error in run_upgrade for schema 57 (#6728)

Fix #6727
Related #6655

Co-authored-by: Erik Johnston 
---
 changelog.d/6728.bugfix                                            | 1 +
 .../data_stores/main/schema/delta/57/local_current_membership.py   | 7 ++++---
 2 files changed, 5 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6728.bugfix

diff --git a/changelog.d/6728.bugfix b/changelog.d/6728.bugfix
new file mode 100644
index 0000000000..5a136e17be
--- /dev/null
+++ b/changelog.d/6728.bugfix
@@ -0,0 +1 @@
+Fix a bug causing `ValueError: unsupported format character ''' (0x27) at index 312` error when running the schema 57 upgrade script.
diff --git a/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py b/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py
index 601c236c4a..63b5acdcf7 100644
--- a/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py
+++ b/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py
@@ -56,7 +56,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs):
             INSERT INTO local_current_membership (room_id, user_id, event_id, membership)
                 SELECT c.room_id, state_key AS user_id, event_id, c.membership
                 FROM current_state_events AS c
-                WHERE type = 'm.room.member' AND c.membership IS NOT NULL AND state_key like '%' || ?
+                WHERE type = 'm.room.member' AND c.membership IS NOT NULL AND state_key LIKE ?
         """
     else:
         # We can't rely on the membership column, so we need to join against
@@ -66,9 +66,10 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs):
                 SELECT c.room_id, state_key AS user_id, event_id, r.membership
                 FROM current_state_events AS c
                 INNER JOIN room_memberships AS r USING (event_id)
-                WHERE type = 'm.room.member' and state_key like '%' || ?
+                WHERE type = 'm.room.member' AND state_key LIKE ?
         """
-    cur.execute(sql, (config.server_name,))
+    sql = database_engine.convert_param_style(sql)
+    cur.execute(sql, ("%:" + config.server_name,))
 
     cur.execute(
         "CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership(user_id, room_id)"
-- 
cgit 1.4.1
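
Background on the error being fixed: with psycopg2's pyformat parameter style, a literal `%` in the SQL is read as the start of a placeholder, so once `?` has been rewritten to `%s` the old `... LIKE '%' || ?` trips over the quote that follows the `%`, which is exactly the `ValueError: unsupported format character` from the changelog. The fix keeps the SQL free of literal wildcards. A rough sketch (the `replace` stands in for what `convert_param_style` does on postgres):

    # Broken: literal wildcard in the SQL.
    broken = "... WHERE state_key LIKE '%' || ?"

    # Fixed: plain placeholder in the SQL, wildcard in the bound parameter.
    sql = "... WHERE state_key LIKE ?"
    sql = sql.replace("?", "%s")    # roughly what convert_param_style does
    args = ("%:" + "example.com",)  # matches any "@localpart:example.com" key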


From 5909751936cca2e394cb30fb5da9520db76ee73a Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 17 Jan 2020 15:13:27 +0000
Subject: Fix up changelog

---
 changelog.d/6728.bugfix | 1 -
 changelog.d/6728.misc   | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)
 delete mode 100644 changelog.d/6728.bugfix
 create mode 100644 changelog.d/6728.misc

diff --git a/changelog.d/6728.bugfix b/changelog.d/6728.bugfix
deleted file mode 100644
index 5a136e17be..0000000000
--- a/changelog.d/6728.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug causing `ValueError: unsupported format character ''' (0x27) at index 312` error when running the schema 57 upgrade script.
diff --git a/changelog.d/6728.misc b/changelog.d/6728.misc
new file mode 100644
index 0000000000..01e78bc84e
--- /dev/null
+++ b/changelog.d/6728.misc
@@ -0,0 +1 @@
+Add `local_current_membership` table for tracking local user membership state in rooms.
-- 
cgit 1.4.1


From a17f64361c87f06c67fd7bb5a98b54dc5a2bb4fb Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Fri, 17 Jan 2020 20:51:44 +0000
Subject: Add more logging around message retention policies support (#6717)

So we can debug issues like #6683 more easily
---
 changelog.d/6717.misc          |  1 +
 synapse/config/server.py       |  8 ++++++++
 synapse/handlers/pagination.py | 13 +++++++++++++
 3 files changed, 22 insertions(+)
 create mode 100644 changelog.d/6717.misc

diff --git a/changelog.d/6717.misc b/changelog.d/6717.misc
new file mode 100644
index 0000000000..a2a7776126
--- /dev/null
+++ b/changelog.d/6717.misc
@@ -0,0 +1 @@
+Add more logging around message retention policies support.
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 9ac112233b..0ec1b0fadd 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -294,6 +294,14 @@ class ServerConfig(Config):
             self.retention_default_min_lifetime = None
             self.retention_default_max_lifetime = None
 
+        if self.retention_enabled:
+            logger.info(
+                "Message retention policies support enabled with the following default"
+                " policy: min_lifetime = %s ; max_lifetime = %s",
+                self.retention_default_min_lifetime,
+                self.retention_default_max_lifetime,
+            )
+
         self.retention_allowed_lifetime_min = retention_config.get(
             "allowed_lifetime_min"
         )
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 3ee6a091c5..71d76202c9 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -88,6 +88,8 @@ class PaginationHandler(object):
         if hs.config.retention_enabled:
             # Run the purge jobs described in the configuration file.
             for job in hs.config.retention_purge_jobs:
+                logger.info("Setting up purge job with config: %s", job)
+
                 self.clock.looping_call(
                     run_as_background_process,
                     job["interval"],
@@ -130,11 +132,22 @@ class PaginationHandler(object):
         else:
             include_null = False
 
+        logger.info(
+            "[purge] Running purge job for %d < max_lifetime <= %d (include NULLs = %s)",
+            min_ms,
+            max_ms,
+            include_null,
+        )
+
         rooms = yield self.store.get_rooms_for_retention_period_in_range(
             min_ms, max_ms, include_null
         )
 
+        logger.debug("[purge] Rooms to purge: %s", rooms)
+
         for room_id, retention_policy in iteritems(rooms):
+            logger.info("[purge] Attempting to purge messages in room %s", room_id)
+
             if room_id in self._purges_in_progress_by_room:
                 logger.warning(
                     "[purge] not purging room %s as there's an ongoing purge running"
-- 
cgit 1.4.1
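
The new `[purge]` log line also pins down the window each purge job covers: rooms whose retention policy satisfies `min_ms < max_lifetime <= max_ms`. An illustrative restatement (not Synapse code):

    def room_in_purge_window(max_lifetime: int, min_ms: int, max_ms: int) -> bool:
        # Mirrors "[purge] Running purge job for %d < max_lifetime <= %d":
        # the job handles rooms whose policy falls in the half-open window
        # (min_ms, max_ms].
        return min_ms < max_lifetime <= max_ms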


From 026f4bdf3c97513b6b48e1f3857198cdb22a3334 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Mon, 20 Jan 2020 14:11:42 +0000
Subject: Add changelog

---
 changelog.d/6747.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6747.bugfix

diff --git a/changelog.d/6747.bugfix b/changelog.d/6747.bugfix
new file mode 100644
index 0000000000..cb088873e5
--- /dev/null
+++ b/changelog.d/6747.bugfix
@@ -0,0 +1 @@
+Fix infinite recursion and dictionary access bug when setting `account_validity` to an empty block in the homeserver config. Thanks to @Sorunome for reporting.
\ No newline at end of file
-- 
cgit 1.4.1


From 351fdfede6e9582f7c365d41c684b9d60b6c98c2 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 20 Jan 2020 15:58:44 +0000
Subject: Update changelog.d/6747.bugfix

Co-Authored-By: Erik Johnston 
---
 changelog.d/6747.bugfix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/changelog.d/6747.bugfix b/changelog.d/6747.bugfix
index cb088873e5..c98107e741 100644
--- a/changelog.d/6747.bugfix
+++ b/changelog.d/6747.bugfix
@@ -1 +1 @@
-Fix infinite recursion and dictionary access bug when setting `account_validity` to an empty block in the homeserver config. Thanks to @Sorunome for reporting.
\ No newline at end of file
+Fix bug when setting `account_validity` to an empty block in the config. Thanks to @Sorunome for reporting.
-- 
cgit 1.4.1


From ceecedc68ba1af25b0ee60c5cf927fd1fd245b9f Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 20 Jan 2020 17:23:59 +0000
Subject: Fix changing password via user admin API. (#6730)

---
 changelog.d/6730.bugfix       |  1 +
 synapse/rest/admin/users.py   |  4 ++--
 tests/rest/admin/test_user.py | 13 +++++++++++++
 3 files changed, 16 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6730.bugfix

diff --git a/changelog.d/6730.bugfix b/changelog.d/6730.bugfix
new file mode 100644
index 0000000000..beb444ca66
--- /dev/null
+++ b/changelog.d/6730.bugfix
@@ -0,0 +1 @@
+Fix changing password via user admin API.
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 574cb90c74..c178c960c5 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -193,8 +193,8 @@ class UserRestServletV2(RestServlet):
                     raise SynapseError(400, "Invalid password")
                 else:
                     new_password = body["password"]
-                    await self._set_password_handler.set_password(
-                        target_user, new_password, requester
+                    await self.set_password_handler.set_password(
+                        target_user.to_string(), new_password, requester
                     )
 
             if "deactivated" in body:
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 7352d609e6..8f09f51c61 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -435,6 +435,19 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(0, channel.json_body["is_guest"])
         self.assertEqual(0, channel.json_body["deactivated"])
 
+        # Change password
+        body = json.dumps({"password": "hahaha"})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
         # Modify user
         body = json.dumps({"displayname": "foobar", "deactivated": True})
 
-- 
cgit 1.4.1
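
Illustrative usage of the fixed endpoint, assuming the v2 user admin API lives at `/_synapse/admin/v2/users/<user_id>` and that `requests` is available; the homeserver URL, user ID and token are placeholders:

    import json

    import requests

    resp = requests.put(
        "https://homeserver.example.com/_synapse/admin/v2/users/@alice:example.com",
        headers={"Authorization": "Bearer ADMIN_ACCESS_TOKEN"},
        data=json.dumps({"password": "new-password"}),
    )
    assert resp.status_code == 200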


From 0f6e525be309b65e07066c071b2f55ebbaac6862 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 20 Jan 2020 17:34:13 +0000
Subject: Fixup synapse.api to pass mypy (#6733)

---
 changelog.d/6733.misc       | 1 +
 mypy.ini                    | 3 +++
 synapse/api/filtering.py    | 4 +++-
 synapse/api/ratelimiting.py | 7 +++++--
 synapse/event_auth.py       | 2 +-
 tox.ini                     | 1 +
 6 files changed, 14 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6733.misc

diff --git a/changelog.d/6733.misc b/changelog.d/6733.misc
new file mode 100644
index 0000000000..bf048c0be2
--- /dev/null
+++ b/changelog.d/6733.misc
@@ -0,0 +1 @@
+Fixup synapse.api to pass mypy.
diff --git a/mypy.ini b/mypy.ini
index a66434b76b..e3c515e2c4 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -7,6 +7,9 @@ show_error_codes = True
 show_traceback = True
 mypy_path = stubs
 
+[mypy-pymacaroons.*]
+ignore_missing_imports = True
+
 [mypy-zope]
 ignore_missing_imports = True
 
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 6eab1f13f0..8b64d0a285 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -15,6 +15,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import List
+
 from six import text_type
 
 import jsonschema
@@ -293,7 +295,7 @@ class Filter(object):
             room_id = None
             ev_type = "m.presence"
             contains_url = False
-            labels = []
+            labels = []  # type: List[str]
         else:
             sender = event.get("sender", None)
             if not sender:
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index 172841f595..7a049b3af7 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import collections
+from collections import OrderedDict
+from typing import Any, Optional, Tuple
 
 from synapse.api.errors import LimitExceededError
 
@@ -23,7 +24,9 @@ class Ratelimiter(object):
     """
 
     def __init__(self):
-        self.message_counts = collections.OrderedDict()
+        self.message_counts = (
+            OrderedDict()
+        )  # type: OrderedDict[Any, Tuple[float, int, Optional[float]]]
 
     def can_do_action(self, key, time_now_s, rate_hz, burst_count, update=True):
         """Can the entity (e.g. user or IP address) perform the action?
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 1033e5e121..e3a1ba47a0 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -634,7 +634,7 @@ def get_public_keys(invite_event):
     return public_keys
 
 
-def auth_types_for_event(event) -> Set[Tuple[str]]:
+def auth_types_for_event(event) -> Set[Tuple[str, str]]:
     """Given an event, return a list of (EventType, StateKey) that may be
     needed to auth the event. The returned list may be a superset of what
     would actually be required depending on the full state of the room.
diff --git a/tox.ini b/tox.ini
index b73a993053..edf4654177 100644
--- a/tox.ini
+++ b/tox.ini
@@ -177,6 +177,7 @@ env =
     MYPYPATH = stubs/
 extras = all
 commands = mypy \
+            synapse/api \
             synapse/config/ \
             synapse/handlers/ui_auth \
             synapse/logging/ \
-- 
cgit 1.4.1
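
The annotations added above use PEP 484 `# type:` comments rather than inline annotations, presumably to stay compatible with older Python versions; mypy treats both forms identically:

    from typing import List

    # Comment-style annotation, as used throughout the diff above:
    labels = []  # type: List[str]

    # Equivalent PEP 526 inline syntax, available from Python 3.6:
    new_labels: List[str] = []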


From 74b74462f1c8b2db9b0995cbf64d879cbfce0dc4 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 20 Jan 2020 17:38:09 +0000
Subject: Fix `/events/:event_id` deprecated API. (#6731)

---
 changelog.d/6731.bugfix             |  1 +
 synapse/rest/client/v1/events.py    |  2 +-
 tests/rest/client/v1/test_events.py | 27 +++++++++++++++++++++++++++
 tests/unittest.py                   |  2 +-
 4 files changed, 30 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6731.bugfix

diff --git a/changelog.d/6731.bugfix b/changelog.d/6731.bugfix
new file mode 100644
index 0000000000..21f6e15cbd
--- /dev/null
+++ b/changelog.d/6731.bugfix
@@ -0,0 +1 @@
+Fix `/events/:event_id` deprecated API.
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index 4beb617733..25effd0261 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -70,7 +70,6 @@ class EventStreamRestServlet(RestServlet):
         return 200, {}
 
 
-# TODO: Unit test gets, with and without auth, with different kinds of events.
 class EventRestServlet(RestServlet):
     PATTERNS = client_patterns("/events/(?P<event_id>[^/]*)$", v1=True)
 
@@ -78,6 +77,7 @@ class EventRestServlet(RestServlet):
         super(EventRestServlet, self).__init__()
         self.clock = hs.get_clock()
         self.event_handler = hs.get_event_handler()
+        self.auth = hs.get_auth()
         self._event_serializer = hs.get_event_client_serializer()
 
     async def on_GET(self, request, event_id):
diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py
index f340b7e851..ffb2de1505 100644
--- a/tests/rest/client/v1/test_events.py
+++ b/tests/rest/client/v1/test_events.py
@@ -134,3 +134,30 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase):
 
         # someone else set topic, expect 6 (join,send,topic,join,send,topic)
         pass
+
+
+class GetEventsTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        events.register_servlets,
+        room.register_servlets,
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+    ]
+
+    def prepare(self, hs, reactor, clock):
+
+        # register an account
+        self.user_id = self.register_user("sid1", "pass")
+        self.token = self.login(self.user_id, "pass")
+
+        self.room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+    def test_get_event_via_events(self):
+        resp = self.helper.send(self.room_id, tok=self.token)
+        event_id = resp["event_id"]
+
+        request, channel = self.make_request(
+            "GET", "/events/" + event_id, access_token=self.token,
+        )
+        self.render(request)
+        self.assertEquals(channel.code, 200, msg=channel.result)
diff --git a/tests/unittest.py b/tests/unittest.py
index ddcd4becfe..b56e249386 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -463,7 +463,7 @@ class HomeserverTestCase(TestCase):
         # Create the user
         request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register")
         self.render(request)
-        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.code, 200, msg=channel.result)
         nonce = channel.json_body["nonce"]
 
         want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
-- 
cgit 1.4.1
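
The substantive fix above is the added `self.auth = hs.get_auth()`. The `on_GET` body falls outside the hunk, but a schematic reconstruction (not the actual servlet code) shows why the attribute matters:

    class EventRestServlet:
        def __init__(self, hs):
            # Before the fix, this assignment was missing...
            self.auth = hs.get_auth()

        async def on_GET(self, request, event_id):
            # ...so the usual first step of an authenticated servlet raised
            # AttributeError: no attribute 'auth'.
            requester = await self.auth.get_user_by_req(request)
            return 200, {"requester": str(requester)}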


From b0a66ab83ce4d67e145a1129b1ebd8fc53c24408 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 20 Jan 2020 17:38:21 +0000
Subject: Fixup synapse.rest to pass mypy (#6732)

---
 changelog.d/6732.misc                         |  1 +
 mypy.ini                                      |  9 +++++++++
 synapse/rest/admin/users.py                   | 23 ++++++++++++-----------
 synapse/rest/client/v1/login.py               |  2 +-
 synapse/rest/client/v1/room.py                | 18 ++++++++++++------
 synapse/rest/client/v2_alpha/register.py      |  3 ++-
 synapse/rest/client/v2_alpha/sendtodevice.py  |  3 ++-
 synapse/rest/key/v2/remote_key_resource.py    |  5 +++--
 synapse/rest/media/v1/media_repository.py     |  3 ++-
 synapse/rest/media/v1/preview_url_resource.py |  7 ++++---
 synapse/rest/media/v1/thumbnail_resource.py   | 14 +++++++-------
 tox.ini                                       |  3 +--
 12 files changed, 56 insertions(+), 35 deletions(-)
 create mode 100644 changelog.d/6732.misc

diff --git a/changelog.d/6732.misc b/changelog.d/6732.misc
new file mode 100644
index 0000000000..8edd767405
--- /dev/null
+++ b/changelog.d/6732.misc
@@ -0,0 +1 @@
+Fixup `synapse.rest` to pass mypy.
diff --git a/mypy.ini b/mypy.ini
index e3c515e2c4..69be2f67ad 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -66,3 +66,12 @@ ignore_missing_imports = True
 
 [mypy-sentry_sdk]
 ignore_missing_imports = True
+
+[mypy-PIL.*]
+ignore_missing_imports = True
+
+[mypy-lxml]
+ignore_missing_imports = True
+
+[mypy-jwt.*]
+ignore_missing_imports = True
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index c178c960c5..52d27fa3e3 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -338,21 +338,22 @@ class UserRegisterServlet(RestServlet):
 
         got_mac = body["mac"]
 
-        want_mac = hmac.new(
+        want_mac_builder = hmac.new(
             key=self.hs.config.registration_shared_secret.encode(),
             digestmod=hashlib.sha1,
         )
-        want_mac.update(nonce.encode("utf8"))
-        want_mac.update(b"\x00")
-        want_mac.update(username)
-        want_mac.update(b"\x00")
-        want_mac.update(password)
-        want_mac.update(b"\x00")
-        want_mac.update(b"admin" if admin else b"notadmin")
+        want_mac_builder.update(nonce.encode("utf8"))
+        want_mac_builder.update(b"\x00")
+        want_mac_builder.update(username)
+        want_mac_builder.update(b"\x00")
+        want_mac_builder.update(password)
+        want_mac_builder.update(b"\x00")
+        want_mac_builder.update(b"admin" if admin else b"notadmin")
         if user_type:
-            want_mac.update(b"\x00")
-            want_mac.update(user_type.encode("utf8"))
-        want_mac = want_mac.hexdigest()
+            want_mac_builder.update(b"\x00")
+            want_mac_builder.update(user_type.encode("utf8"))
+
+        want_mac = want_mac_builder.hexdigest()
 
         if not hmac.compare_digest(want_mac.encode("ascii"), got_mac.encode("ascii")):
             raise SynapseError(403, "HMAC incorrect")
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index ff9c978fe7..1294e080dc 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -514,7 +514,7 @@ class CasTicketServlet(RestServlet):
             if user is None:
                 raise Exception("CAS response does not contain user")
         except Exception:
-            logger.error("Error parsing CAS response", exc_info=1)
+            logger.exception("Error parsing CAS response")
             raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
         if not success:
             raise LoginError(
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 711d4ad304..5aef8238b8 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -16,6 +16,7 @@
 
 """ This module contains REST servlets to do with rooms: /rooms/ """
 import logging
+from typing import List, Optional
 
 from six.moves.urllib import parse as urlparse
 
@@ -207,7 +208,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
                 requester, event_dict, txn_id=txn_id
             )
 
-        ret = {}
+        ret = {}  # type: dict
         if event:
             set_tag("event_id", event.event_id)
             ret = {"event_id": event.event_id}
@@ -285,7 +286,7 @@ class JoinRoomAliasServlet(TransactionRestServlet):
             try:
                 remote_room_hosts = [
                     x.decode("ascii") for x in request.args[b"server_name"]
-                ]
+                ]  # type: Optional[List[str]]
             except Exception:
                 remote_room_hosts = None
         elif RoomAlias.is_valid(room_identifier):
@@ -375,7 +376,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
         server = parse_string(request, "server", default=None)
         content = parse_json_object_from_request(request)
 
-        limit = int(content.get("limit", 100))
+        limit = int(content.get("limit", 100))  # type: Optional[int]
         since_token = content.get("since", None)
         search_filter = content.get("filter", None)
 
@@ -504,11 +505,16 @@ class RoomMessageListRestServlet(RestServlet):
         filter_bytes = parse_string(request, b"filter", encoding=None)
         if filter_bytes:
             filter_json = urlparse.unquote(filter_bytes.decode("UTF-8"))
-            event_filter = Filter(json.loads(filter_json))
-            if event_filter.filter_json.get("event_format", "client") == "federation":
+            event_filter = Filter(json.loads(filter_json))  # type: Optional[Filter]
+            if (
+                event_filter
+                and event_filter.filter_json.get("event_format", "client")
+                == "federation"
+            ):
                 as_client_event = False
         else:
             event_filter = None
+
         msgs = await self.pagination_handler.get_messages(
             room_id=room_id,
             requester=requester,
@@ -611,7 +617,7 @@ class RoomEventContextServlet(RestServlet):
         filter_bytes = parse_string(request, "filter")
         if filter_bytes:
             filter_json = urlparse.unquote(filter_bytes)
-            event_filter = Filter(json.loads(filter_json))
+            event_filter = Filter(json.loads(filter_json))  # type: Optional[Filter]
         else:
             event_filter = None
 
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 66de16a1fa..1bda9aec7e 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -21,6 +21,7 @@ from typing import List, Union
 from six import string_types
 
 import synapse
+import synapse.api.auth
 import synapse.types
 from synapse.api.constants import LoginType
 from synapse.api.errors import (
@@ -405,7 +406,7 @@ class RegisterRestServlet(RestServlet):
             return ret
         elif kind != b"user":
             raise UnrecognizedRequestError(
-                "Do not understand membership kind: %s" % (kind,)
+                "Do not understand membership kind: %s" % (kind.decode("utf8"),)
             )
 
         # we do basic sanity checks here because the auth layer will store these
diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py
index 501b52fb6c..db829f3098 100644
--- a/synapse/rest/client/v2_alpha/sendtodevice.py
+++ b/synapse/rest/client/v2_alpha/sendtodevice.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Tuple
 
 from synapse.http import servlet
 from synapse.http.servlet import parse_json_object_from_request
@@ -60,7 +61,7 @@ class SendToDeviceRestServlet(servlet.RestServlet):
             sender_user_id, message_type, content["messages"]
         )
 
-        response = (200, {})
+        response = (200, {})  # type: Tuple[int, dict]
         return response
 
 
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index e7fc3f0431..9d6813a047 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import logging
+from typing import Dict, Set
 
 from canonicaljson import encode_canonical_json, json
 from signedjson.sign import sign_json
@@ -103,7 +104,7 @@ class RemoteKey(DirectServeResource):
     async def _async_render_GET(self, request):
         if len(request.postpath) == 1:
             (server,) = request.postpath
-            query = {server.decode("ascii"): {}}
+            query = {server.decode("ascii"): {}}  # type: dict
         elif len(request.postpath) == 2:
             server, key_id = request.postpath
             minimum_valid_until_ts = parse_integer(request, "minimum_valid_until_ts")
@@ -148,7 +149,7 @@ class RemoteKey(DirectServeResource):
 
         time_now_ms = self.clock.time_msec()
 
-        cache_misses = dict()
+        cache_misses = dict()  # type: Dict[str, Set[str]]
         for (server_name, key_id, from_server), results in cached.items():
             results = [(result["ts_added_ms"], result) for result in results]
 
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index bd9186fe50..490b1b45a8 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -18,6 +18,7 @@ import errno
 import logging
 import os
 import shutil
+from typing import Dict, Tuple
 
 from six import iteritems
 
@@ -605,7 +606,7 @@ class MediaRepository(object):
 
         # We deduplicate the thumbnail sizes by ignoring the cropped versions if
         # they have the same dimensions of a scaled one.
-        thumbnails = {}
+        thumbnails = {}  # type: Dict[Tuple[int, int, str], str]
         for r_width, r_height, r_method, r_type in requirements:
             if r_method == "crop":
                 thumbnails.setdefault((r_width, r_height, r_type), r_method)
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 6b978be876..07e395cfd1 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -23,6 +23,7 @@ import re
 import shutil
 import sys
 import traceback
+from typing import Dict, Optional
 
 import six
 from six import string_types
@@ -237,8 +238,8 @@ class PreviewUrlResource(DirectServeResource):
             # If we don't find a match, we'll look at the HTTP Content-Type, and
             # if that doesn't exist, we'll fall back to UTF-8.
             if not encoding:
-                match = _content_type_match.match(media_info["media_type"])
-                encoding = match.group(1) if match else "utf-8"
+                content_match = _content_type_match.match(media_info["media_type"])
+                encoding = content_match.group(1) if content_match else "utf-8"
 
             og = decode_and_calc_og(body, media_info["uri"], encoding)
 
@@ -518,7 +519,7 @@ def _calc_og(tree, media_uri):
     # "og:video:height" : "720",
     # "og:video:secure_url": "https://www.youtube.com/v/LXDBoHyjmtw?version=3",
 
-    og = {}
+    og = {}  # type: Dict[str, Optional[str]]
     for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
         if "content" in tag.attrib:
             # if we've got more than 50 tags, someone is taking the piss
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index 931ce79be8..eee93b4313 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -296,8 +296,8 @@ class ThumbnailResource(DirectServeResource):
         d_h = desired_height
 
         if desired_method.lower() == "crop":
-            info_list = []
-            info_list2 = []
+            crop_info_list = []
+            crop_info_list2 = []
             for info in thumbnail_infos:
                 t_w = info["thumbnail_width"]
                 t_h = info["thumbnail_height"]
@@ -309,7 +309,7 @@ class ThumbnailResource(DirectServeResource):
                     type_quality = desired_type != info["thumbnail_type"]
                     length_quality = info["thumbnail_length"]
                     if t_w >= d_w or t_h >= d_h:
-                        info_list.append(
+                        crop_info_list.append(
                             (
                                 aspect_quality,
                                 min_quality,
@@ -320,7 +320,7 @@ class ThumbnailResource(DirectServeResource):
                             )
                         )
                     else:
-                        info_list2.append(
+                        crop_info_list2.append(
                             (
                                 aspect_quality,
                                 min_quality,
@@ -330,10 +330,10 @@ class ThumbnailResource(DirectServeResource):
                                 info,
                             )
                         )
-            if info_list:
-                return min(info_list)[-1]
+            if crop_info_list:
+                return min(crop_info_list)[-1]
             else:
-                return min(info_list2)[-1]
+                return min(crop_info_list2)[-1]
         else:
             info_list = []
             info_list2 = []
diff --git a/tox.ini b/tox.ini
index edf4654177..1d946a02ba 100644
--- a/tox.ini
+++ b/tox.ini
@@ -183,8 +183,7 @@ commands = mypy \
             synapse/logging/ \
             synapse/module_api \
             synapse/replication \
-            synapse/rest/consent \
-            synapse/rest/saml2 \
+            synapse/rest \
             synapse/spam_checker_api \
             synapse/storage/engines \
             synapse/streams
-- 
cgit 1.4.1


From 0e68760078c0aac57bfaeb681d534231e191315a Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 20 Jan 2020 18:07:20 +0000
Subject: Add a DeltaState to track changes to be made to current state (#6716)

---
 changelog.d/6716.misc                      |   1 +
 synapse/storage/data_stores/main/events.py |  87 ++++++++++----------
 synapse/storage/persist_events.py          | 123 ++++++++++++++++-------------
 3 files changed, 112 insertions(+), 99 deletions(-)
 create mode 100644 changelog.d/6716.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6716.misc b/changelog.d/6716.misc
new file mode 100644
index 0000000000..319aaa4acb
--- /dev/null
+++ b/changelog.d/6716.misc
@@ -0,0 +1 @@
+Add a `DeltaState` to track changes to be made to current state during event persistence.
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index bb69c20448..596daf8909 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -19,6 +19,7 @@ import itertools
 import logging
 from collections import Counter as c_counter, OrderedDict, namedtuple
 from functools import wraps
+from typing import Dict, List, Tuple
 
 from six import iteritems, text_type
 from six.moves import range
@@ -41,8 +42,9 @@ from synapse.storage._base import make_in_list_sql_clause
 from synapse.storage.data_stores.main.event_federation import EventFederationStore
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.data_stores.main.state import StateGroupWorkerStore
-from synapse.storage.database import Database
-from synapse.types import RoomStreamToken, get_domain_from_id
+from synapse.storage.database import Database, LoggingTransaction
+from synapse.storage.persist_events import DeltaState
+from synapse.types import RoomStreamToken, StateMap, get_domain_from_id
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 from synapse.util.frozenutils import frozendict_json_encoder
 from synapse.util.iterutils import batch_iter
@@ -148,30 +150,26 @@ class EventsStore(
     @defer.inlineCallbacks
     def _persist_events_and_state_updates(
         self,
-        events_and_contexts,
-        current_state_for_room,
-        state_delta_for_room,
-        new_forward_extremeties,
-        backfilled=False,
-        delete_existing=False,
+        events_and_contexts: List[Tuple[EventBase, EventContext]],
+        current_state_for_room: Dict[str, StateMap[str]],
+        state_delta_for_room: Dict[str, DeltaState],
+        new_forward_extremeties: Dict[str, List[str]],
+        backfilled: bool = False,
+        delete_existing: bool = False,
     ):
         """Persist a set of events alongside updates to the current state and
         forward extremities tables.
 
         Args:
-            events_and_contexts (list[(EventBase, EventContext)]):
-            current_state_for_room (dict[str, dict]): Map from room_id to the
-                current state of the room based on forward extremities
-            state_delta_for_room (dict[str, tuple]): Map from room_id to tuple
-                of `(to_delete, to_insert)` where to_delete is a list
-                of type/state keys to remove from current state, and to_insert
-                is a map (type,key)->event_id giving the state delta in each
-                room.
-            new_forward_extremities (dict[str, list[str]]): Map from room_id
-                to list of event IDs that are the new forward extremities of
-                the room.
-            backfilled (bool)
-            delete_existing (bool):
+            events_and_contexts:
+            current_state_for_room: Map from room_id to the current state of
+                the room based on forward extremities
+            state_delta_for_room: Map from room_id to the delta to apply to
+                room state
+            new_forward_extremities: Map from room_id to list of event IDs
+                that are the new forward extremities of the room.
+            backfilled
+            delete_existing
 
         Returns:
             Deferred: resolves when the events have been persisted
@@ -352,12 +350,12 @@ class EventsStore(
     @log_function
     def _persist_events_txn(
         self,
-        txn,
-        events_and_contexts,
-        backfilled,
-        delete_existing=False,
-        state_delta_for_room={},
-        new_forward_extremeties={},
+        txn: LoggingTransaction,
+        events_and_contexts: List[Tuple[EventBase, EventContext]],
+        backfilled: bool,
+        delete_existing: bool = False,
+        state_delta_for_room: Dict[str, DeltaState] = {},
+        new_forward_extremeties: Dict[str, List[str]] = {},
     ):
         """Insert some number of room events into the necessary database tables.
 
@@ -366,21 +364,16 @@ class EventsStore(
         whether the event was rejected.
 
         Args:
-            txn (twisted.enterprise.adbapi.Connection): db connection
-            events_and_contexts (list[(EventBase, EventContext)]):
-                events to persist
-            backfilled (bool): True if the events were backfilled
-            delete_existing (bool): True to purge existing table rows for the
-                events from the database. This is useful when retrying due to
+            txn
+            events_and_contexts: events to persist
+            backfilled: True if the events were backfilled
+            delete_existing: True to purge existing table rows for the events
+                from the database. This is useful when retrying due to
                 IntegrityError.
-            state_delta_for_room (dict[str, (list, dict)]):
-                The current-state delta for each room. For each room, a tuple
-                (to_delete, to_insert), being a list of type/state keys to be
-                removed from the current state, and a state set to be added to
-                the current state.
-            new_forward_extremeties (dict[str, list[str]]):
-                The new forward extremities for each room. For each room, a
-                list of the event ids which are the forward extremities.
+            state_delta_for_room: The current-state delta for each room.
+            new_forward_extremeties: The new forward extremities for each room.
+                For each room, a list of the event ids which are the forward
+                extremities.
 
         """
         all_events_and_contexts = events_and_contexts
@@ -465,9 +458,15 @@ class EventsStore(
         # room_memberships, where applicable.
         self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)
 
-    def _update_current_state_txn(self, txn, state_delta_by_room, stream_id):
-        for room_id, current_state_tuple in iteritems(state_delta_by_room):
-            to_delete, to_insert = current_state_tuple
+    def _update_current_state_txn(
+        self,
+        txn: LoggingTransaction,
+        state_delta_by_room: Dict[str, DeltaState],
+        stream_id: int,
+    ):
+        for room_id, delta_state in iteritems(state_delta_by_room):
+            to_delete = delta_state.to_delete
+            to_insert = delta_state.to_insert
 
             # First we add entries to the current_state_delta_stream. We
             # do this before updating the current_state_events table so
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index 1ed44925fc..368c457321 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -17,19 +17,24 @@
 
 import logging
 from collections import deque, namedtuple
+from typing import Iterable, List, Optional, Tuple
 
 from six import iteritems
 from six.moves import range
 
+import attr
 from prometheus_client import Counter, Histogram
 
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
+from synapse.events import FrozenEvent
+from synapse.events.snapshot import EventContext
 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.state import StateResolutionStore
 from synapse.storage.data_stores import DataStores
+from synapse.types import StateMap
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.metrics import Measure
 
@@ -67,6 +72,19 @@ stale_forward_extremities_counter = Histogram(
 )
 
 
+@attr.s(slots=True, frozen=True)
+class DeltaState:
+    """Deltas to use to update the `current_state_events` table.
+
+    Attributes:
+        to_delete: List of type/state_keys to delete from current state
+        to_insert: Map of state to upsert into current state
+    """
+
+    to_delete = attr.ib(type=List[Tuple[str, str]])
+    to_insert = attr.ib(type=StateMap[str])
+
+
 class _EventPeristenceQueue(object):
     """Queues up events so that they can be persisted in bulk with only one
     concurrent transaction per room.
@@ -138,13 +156,12 @@ class _EventPeristenceQueue(object):
 
         self._currently_persisting_rooms.add(room_id)
 
-        @defer.inlineCallbacks
-        def handle_queue_loop():
+        async def handle_queue_loop():
             try:
                 queue = self._get_drainining_queue(room_id)
                 for item in queue:
                     try:
-                        ret = yield per_item_callback(item)
+                        ret = await per_item_callback(item)
                     except Exception:
                         with PreserveLoggingContext():
                             item.deferred.errback()
@@ -191,12 +208,16 @@ class EventsPersistenceStorage(object):
         self._state_resolution_handler = hs.get_state_resolution_handler()
 
     @defer.inlineCallbacks
-    def persist_events(self, events_and_contexts, backfilled=False):
+    def persist_events(
+        self,
+        events_and_contexts: List[Tuple[FrozenEvent, EventContext]],
+        backfilled: bool = False,
+    ):
         """
         Write events to the database
         Args:
             events_and_contexts: list of tuples of (event, context)
-            backfilled (bool): Whether the results are retrieved from federation
+            backfilled: Whether the results are retrieved from federation
                 via backfill or not. Used to determine if they're "new" events
                 which might update the current state etc.
 
@@ -226,16 +247,12 @@ class EventsPersistenceStorage(object):
         return max_persisted_id
 
     @defer.inlineCallbacks
-    def persist_event(self, event, context, backfilled=False):
+    def persist_event(
+        self, event: FrozenEvent, context: EventContext, backfilled: bool = False
+    ):
         """
-
-        Args:
-            event (EventBase):
-            context (EventContext):
-            backfilled (bool):
-
         Returns:
-            Deferred: resolves to (int, int): the stream ordering of ``event``,
+            Deferred[Tuple[int, int]]: the stream ordering of ``event``,
             and the stream ordering of the latest persisted event
         """
         deferred = self._event_persist_queue.add_to_queue(
@@ -249,28 +266,22 @@ class EventsPersistenceStorage(object):
         max_persisted_id = yield self.main_store.get_current_events_token()
         return (event.internal_metadata.stream_ordering, max_persisted_id)
 
-    def _maybe_start_persisting(self, room_id):
-        @defer.inlineCallbacks
-        def persisting_queue(item):
+    def _maybe_start_persisting(self, room_id: str):
+        async def persisting_queue(item):
             with Measure(self._clock, "persist_events"):
-                yield self._persist_events(
+                await self._persist_events(
                     item.events_and_contexts, backfilled=item.backfilled
                 )
 
         self._event_persist_queue.handle_queue(room_id, persisting_queue)
 
-    @defer.inlineCallbacks
-    def _persist_events(self, events_and_contexts, backfilled=False):
+    async def _persist_events(
+        self,
+        events_and_contexts: List[Tuple[FrozenEvent, EventContext]],
+        backfilled: bool = False,
+    ):
         """Calculates the change to current state and forward extremities, and
         persists the given events and with those updates.
-
-        Args:
-            events_and_contexts (list[(EventBase, EventContext)]):
-            backfilled (bool):
-            delete_existing (bool):
-
-        Returns:
-            Deferred: resolves when the events have been persisted
         """
         if not events_and_contexts:
             return
@@ -315,10 +326,10 @@ class EventsPersistenceStorage(object):
                         )
 
                     for room_id, ev_ctx_rm in iteritems(events_by_room):
-                        latest_event_ids = yield self.main_store.get_latest_event_ids_in_room(
+                        latest_event_ids = await self.main_store.get_latest_event_ids_in_room(
                             room_id
                         )
-                        new_latest_event_ids = yield self._calculate_new_extremities(
+                        new_latest_event_ids = await self._calculate_new_extremities(
                             room_id, ev_ctx_rm, latest_event_ids
                         )
 
@@ -374,7 +385,7 @@ class EventsPersistenceStorage(object):
                         with Measure(
                             self._clock, "persist_events.get_new_state_after_events"
                         ):
-                            res = yield self._get_new_state_after_events(
+                            res = await self._get_new_state_after_events(
                                 room_id,
                                 ev_ctx_rm,
                                 latest_event_ids,
@@ -389,12 +400,12 @@ class EventsPersistenceStorage(object):
                             # If there is a delta we know that we've
                             # only added or replaced state, never
                             # removed keys entirely.
-                            state_delta_for_room[room_id] = ([], delta_ids)
+                            state_delta_for_room[room_id] = DeltaState([], delta_ids)
                         elif current_state is not None:
                             with Measure(
                                 self._clock, "persist_events.calculate_state_delta"
                             ):
-                                delta = yield self._calculate_state_delta(
+                                delta = await self._calculate_state_delta(
                                     room_id, current_state
                                 )
                             state_delta_for_room[room_id] = delta
@@ -404,7 +415,7 @@ class EventsPersistenceStorage(object):
                         if current_state is not None:
                             current_state_for_room[room_id] = current_state
 
-            yield self.main_store._persist_events_and_state_updates(
+            await self.main_store._persist_events_and_state_updates(
                 chunk,
                 current_state_for_room=current_state_for_room,
                 state_delta_for_room=state_delta_for_room,
@@ -412,8 +423,12 @@ class EventsPersistenceStorage(object):
                 backfilled=backfilled,
             )
 
-    @defer.inlineCallbacks
-    def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids):
+    async def _calculate_new_extremities(
+        self,
+        room_id: str,
+        event_contexts: List[Tuple[FrozenEvent, EventContext]],
+        latest_event_ids: List[str],
+    ):
         """Calculates the new forward extremities for a room given events to
         persist.
 
@@ -444,13 +459,13 @@ class EventsPersistenceStorage(object):
         )
 
         # Remove any events which are prev_events of any existing events.
-        existing_prevs = yield self.main_store._get_events_which_are_prevs(result)
+        existing_prevs = await self.main_store._get_events_which_are_prevs(result)
         result.difference_update(existing_prevs)
 
         # Finally handle the case where the new events have soft-failed prev
         # events. If they do we need to remove them and their prev events,
         # otherwise we end up with dangling extremities.
-        existing_prevs = yield self.main_store._get_prevs_before_rejected(
+        existing_prevs = await self.main_store._get_prevs_before_rejected(
             e_id for event in new_events for e_id in event.prev_event_ids()
         )
         result.difference_update(existing_prevs)
@@ -464,10 +479,13 @@ class EventsPersistenceStorage(object):
 
         return result
 
-    @defer.inlineCallbacks
-    def _get_new_state_after_events(
-        self, room_id, events_context, old_latest_event_ids, new_latest_event_ids
-    ):
+    async def _get_new_state_after_events(
+        self,
+        room_id: str,
+        events_context: List[Tuple[FrozenEvent, EventContext]],
+        old_latest_event_ids: Iterable[str],
+        new_latest_event_ids: Iterable[str],
+    ) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]]]:
         """Calculate the current state dict after adding some new events to
         a room
 
@@ -485,7 +503,6 @@ class EventsPersistenceStorage(object):
                 the new forward extremities for the room.
 
         Returns:
-            Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]:
             Returns a tuple of two state maps, the first being the full new current
             state and the second being the delta to the existing current state.
             If both are None then there has been no change.
@@ -547,7 +564,7 @@ class EventsPersistenceStorage(object):
 
         if missing_event_ids:
             # Now pull out the state groups for any missing events from DB
-            event_to_groups = yield self.main_store._get_state_group_for_events(
+            event_to_groups = await self.main_store._get_state_group_for_events(
                 missing_event_ids
             )
             event_id_to_state_group.update(event_to_groups)
@@ -588,7 +605,7 @@ class EventsPersistenceStorage(object):
         # their state IDs so we can resolve to a single state set.
         missing_state = new_state_groups - set(state_groups_map)
         if missing_state:
-            group_to_state = yield self.state_store._get_state_for_groups(missing_state)
+            group_to_state = await self.state_store._get_state_for_groups(missing_state)
             state_groups_map.update(group_to_state)
 
         if len(new_state_groups) == 1:
@@ -612,10 +629,10 @@ class EventsPersistenceStorage(object):
                 break
 
         if not room_version:
-            room_version = yield self.main_store.get_room_version(room_id)
+            room_version = await self.main_store.get_room_version(room_id)
 
         logger.debug("calling resolve_state_groups from preserve_events")
-        res = yield self._state_resolution_handler.resolve_state_groups(
+        res = await self._state_resolution_handler.resolve_state_groups(
             room_id,
             room_version,
             state_groups,
@@ -625,18 +642,14 @@ class EventsPersistenceStorage(object):
 
         return res.state, None
 
-    @defer.inlineCallbacks
-    def _calculate_state_delta(self, room_id, current_state):
+    async def _calculate_state_delta(
+        self, room_id: str, current_state: StateMap[str]
+    ) -> DeltaState:
         """Calculate the new state deltas for a room.
 
         Assumes that we are only persisting events for one room at a time.
-
-        Returns:
-            tuple[list, dict] (to_delete, to_insert): where to_delete are the
-            type/state_keys to remove from current_state_events and `to_insert`
-            are the updates to current_state_events.
         """
-        existing_state = yield self.main_store.get_current_state_ids(room_id)
+        existing_state = await self.main_store.get_current_state_ids(room_id)
 
         to_delete = [key for key in existing_state if key not in current_state]
 
@@ -646,4 +659,4 @@ class EventsPersistenceStorage(object):
             if ev_id != existing_state.get(key)
         }
 
-        return to_delete, to_insert
+        return DeltaState(to_delete=to_delete, to_insert=to_insert)
-- 
cgit 1.4.1


From 07124d028df6b33336dcc2ef807fd7866f42902a Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 21 Jan 2020 19:04:58 +0000
Subject: Port synapse_port_db to async/await (#6718)

* Raise an exception if there are pending background updates

So we return with a non-0 code

* Changelog

* Port synapse_port_db to async/await

* Port update_database to async/await

* Add version string to mocked homeservers

* Remove unused imports

* Convert overseen bits to async/await

* Fixup logging contexts

* Fix imports

* Add a way to print an error without raising an exception

* Incorporate review
---
 changelog.d/6718.bugfix     |   1 +
 scripts-dev/update_database |  20 +++--
 scripts/synapse_port_db     | 194 +++++++++++++++++++++++++-------------------
 3 files changed, 126 insertions(+), 89 deletions(-)
 create mode 100644 changelog.d/6718.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6718.bugfix b/changelog.d/6718.bugfix
new file mode 100644
index 0000000000..23b23e3ed8
--- /dev/null
+++ b/changelog.d/6718.bugfix
@@ -0,0 +1 @@
+Fix a bug causing the `synapse_port_db` script to return 0 in a specific error case.
diff --git a/scripts-dev/update_database b/scripts-dev/update_database
index 1d62f0403a..94aa8758b4 100755
--- a/scripts-dev/update_database
+++ b/scripts-dev/update_database
@@ -22,10 +22,12 @@ import yaml
 
 from twisted.internet import defer, reactor
 
+import synapse
 from synapse.config.homeserver import HomeServerConfig
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.server import HomeServer
 from synapse.storage import DataStore
+from synapse.util.versionstring import get_version_string
 
 logger = logging.getLogger("update_database")
 
@@ -38,6 +40,8 @@ class MockHomeserver(HomeServer):
             config.server_name, reactor=reactor, config=config, **kwargs
         )
 
+        self.version_string = "Synapse/" + get_version_string(synapse)
+
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
@@ -81,15 +85,17 @@ if __name__ == "__main__":
     hs.setup()
     store = hs.get_datastore()
 
-    @defer.inlineCallbacks
-    def run_background_updates():
-        yield store.db.updates.run_background_updates(sleep=False)
+    async def run_background_updates():
+        await store.db.updates.run_background_updates(sleep=False)
         # Stop the reactor to exit the script once every background update is run.
         reactor.stop()
 
-    # Apply all background updates on the database.
-    reactor.callWhenRunning(
-        lambda: run_as_background_process("background_updates", run_background_updates)
-    )
+    def run():
+        # Apply all background updates on the database.
+        defer.ensureDeferred(
+            run_as_background_process("background_updates", run_background_updates)
+        )
+
+    reactor.callWhenRunning(run)
 
     reactor.run()
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 5e69104b97..e8b698f3ff 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -27,13 +27,16 @@ from six import string_types
 
 import yaml
 
-from twisted.enterprise import adbapi
 from twisted.internet import defer, reactor
 
+import synapse
 from synapse.config.database import DatabaseConnectionConfig
 from synapse.config.homeserver import HomeServerConfig
-from synapse.logging.context import PreserveLoggingContext
-from synapse.storage._base import LoggingTransaction
+from synapse.logging.context import (
+    LoggingContext,
+    make_deferred_yieldable,
+    run_in_background,
+)
 from synapse.storage.data_stores.main.client_ips import ClientIpBackgroundUpdateStore
 from synapse.storage.data_stores.main.deviceinbox import (
     DeviceInboxBackgroundUpdateStore,
@@ -61,6 +64,7 @@ from synapse.storage.database import Database, make_conn
 from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
 from synapse.util import Clock
+from synapse.util.versionstring import get_version_string
 
 logger = logging.getLogger("synapse_port_db")
 
@@ -125,6 +129,13 @@ APPEND_ONLY_TABLES = [
 ]
 
 
+# Error returned by the run function. Used at the top-level part of the script to
+# handle errors and return codes.
+end_error = None
+# The exec_info for the error, if any. If the error is defined but not the exec_info,
+# the script will show only the error message without the stacktrace. If the exec_info
+# is defined but not the error, the script will show nothing outside of what's printed
+# in the run function. If both are defined, the script will print both the error and
+# the stacktrace.
 end_error_exec_info = None
 
 
@@ -177,6 +188,7 @@ class MockHomeserver:
         self.clock = Clock(reactor)
         self.config = config
         self.hostname = config.server_name
+        self.version_string = "Synapse/" + get_version_string(synapse)
 
     def get_clock(self):
         return self.clock
@@ -189,11 +201,10 @@ class Porter(object):
     def __init__(self, **kwargs):
         self.__dict__.update(kwargs)
 
-    @defer.inlineCallbacks
-    def setup_table(self, table):
+    async def setup_table(self, table):
         if table in APPEND_ONLY_TABLES:
             # It's safe to just carry on inserting.
-            row = yield self.postgres_store.db.simple_select_one(
+            row = await self.postgres_store.db.simple_select_one(
                 table="port_from_sqlite3",
                 keyvalues={"table_name": table},
                 retcols=("forward_rowid", "backward_rowid"),
@@ -207,10 +218,10 @@ class Porter(object):
                         forward_chunk,
                         already_ported,
                         total_to_port,
-                    ) = yield self._setup_sent_transactions()
+                    ) = await self._setup_sent_transactions()
                     backward_chunk = 0
                 else:
-                    yield self.postgres_store.db.simple_insert(
+                    await self.postgres_store.db.simple_insert(
                         table="port_from_sqlite3",
                         values={
                             "table_name": table,
@@ -227,7 +238,7 @@ class Porter(object):
                 backward_chunk = row["backward_rowid"]
 
             if total_to_port is None:
-                already_ported, total_to_port = yield self._get_total_count_to_port(
+                already_ported, total_to_port = await self._get_total_count_to_port(
                     table, forward_chunk, backward_chunk
                 )
         else:
@@ -238,9 +249,9 @@ class Porter(object):
                 )
                 txn.execute("TRUNCATE %s CASCADE" % (table,))
 
-            yield self.postgres_store.execute(delete_all)
+            await self.postgres_store.execute(delete_all)
 
-            yield self.postgres_store.db.simple_insert(
+            await self.postgres_store.db.simple_insert(
                 table="port_from_sqlite3",
                 values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0},
             )
@@ -248,16 +259,13 @@ class Porter(object):
             forward_chunk = 1
             backward_chunk = 0
 
-            already_ported, total_to_port = yield self._get_total_count_to_port(
+            already_ported, total_to_port = await self._get_total_count_to_port(
                 table, forward_chunk, backward_chunk
             )
 
-        defer.returnValue(
-            (table, already_ported, total_to_port, forward_chunk, backward_chunk)
-        )
+        return table, already_ported, total_to_port, forward_chunk, backward_chunk
 
-    @defer.inlineCallbacks
-    def handle_table(
+    async def handle_table(
         self, table, postgres_size, table_size, forward_chunk, backward_chunk
     ):
         logger.info(
@@ -275,7 +283,7 @@ class Porter(object):
         self.progress.add_table(table, postgres_size, table_size)
 
         if table == "event_search":
-            yield self.handle_search_table(
+            await self.handle_search_table(
                 postgres_size, table_size, forward_chunk, backward_chunk
             )
             return
@@ -294,7 +302,7 @@ class Porter(object):
         if table == "user_directory_stream_pos":
            # We need to make sure there is a single row, `(X, null)`, as that is
             # what synapse expects to be there.
-            yield self.postgres_store.db.simple_insert(
+            await self.postgres_store.db.simple_insert(
                 table=table, values={"stream_id": None}
             )
             self.progress.update(table, table_size)  # Mark table as done
@@ -335,7 +343,7 @@ class Porter(object):
 
                 return headers, forward_rows, backward_rows
 
-            headers, frows, brows = yield self.sqlite_store.db.runInteraction(
+            headers, frows, brows = await self.sqlite_store.db.runInteraction(
                 "select", r
             )
 
@@ -361,7 +369,7 @@ class Porter(object):
                         },
                     )
 
-                yield self.postgres_store.execute(insert)
+                await self.postgres_store.execute(insert)
 
                 postgres_size += len(rows)
 
@@ -369,8 +377,7 @@ class Porter(object):
             else:
                 return
 
-    @defer.inlineCallbacks
-    def handle_search_table(
+    async def handle_search_table(
         self, postgres_size, table_size, forward_chunk, backward_chunk
     ):
         select = (
@@ -390,7 +397,7 @@ class Porter(object):
 
                 return headers, rows
 
-            headers, rows = yield self.sqlite_store.db.runInteraction("select", r)
+            headers, rows = await self.sqlite_store.db.runInteraction("select", r)
 
             if rows:
                 forward_chunk = rows[-1][0] + 1
@@ -438,7 +445,7 @@ class Porter(object):
                         },
                     )
 
-                yield self.postgres_store.execute(insert)
+                await self.postgres_store.execute(insert)
 
                 postgres_size += len(rows)
 
@@ -476,11 +483,10 @@ class Porter(object):
 
         return store
 
-    @defer.inlineCallbacks
-    def run_background_updates_on_postgres(self):
+    async def run_background_updates_on_postgres(self):
         # Manually apply all background updates on the PostgreSQL database.
         postgres_ready = (
-            yield self.postgres_store.db.updates.has_completed_background_updates()
+            await self.postgres_store.db.updates.has_completed_background_updates()
         )
 
         if not postgres_ready:
@@ -489,13 +495,20 @@ class Porter(object):
             self.progress.set_state("Running background updates on PostgreSQL")
 
         while not postgres_ready:
-            yield self.postgres_store.db.updates.do_next_background_update(100)
-            postgres_ready = yield (
+            await self.postgres_store.db.updates.do_next_background_update(100)
+            postgres_ready = await (
                 self.postgres_store.db.updates.has_completed_background_updates()
             )
 
-    @defer.inlineCallbacks
-    def run(self):
+    async def run(self):
+        """Ports the SQLite database to a PostgreSQL database.
+
+        When a fatal error is encountered, its message is assigned to the global
+        variable. When this error comes with a stacktrace, its exec_info is assigned to
+        the global "end_error_exec_info" variable.
+        """
+        global end_error
+
         try:
             # we allow people to port away from outdated versions of sqlite.
             self.sqlite_store = self.build_db_store(
@@ -505,21 +518,21 @@ class Porter(object):
 
             # Check if all background updates are done, abort if not.
             updates_complete = (
-                yield self.sqlite_store.db.updates.has_completed_background_updates()
+                await self.sqlite_store.db.updates.has_completed_background_updates()
             )
             if not updates_complete:
-                sys.stderr.write(
+                end_error = (
                     "Pending background updates exist in the SQLite3 database."
                     " Please start Synapse again and wait until every update has finished"
                     " before running this script.\n"
                 )
-                defer.returnValue(None)
+                return
 
             self.postgres_store = self.build_db_store(
                 self.hs_config.get_single_database()
             )
 
-            yield self.run_background_updates_on_postgres()
+            await self.run_background_updates_on_postgres()
 
             self.progress.set_state("Creating port tables")
 
@@ -547,22 +560,22 @@ class Porter(object):
                 )
 
             try:
-                yield self.postgres_store.db.runInteraction("alter_table", alter_table)
+                await self.postgres_store.db.runInteraction("alter_table", alter_table)
             except Exception:
                 # On Error Resume Next
                 pass
 
-            yield self.postgres_store.db.runInteraction(
+            await self.postgres_store.db.runInteraction(
                 "create_port_table", create_port_table
             )
 
             # Step 2. Get tables.
             self.progress.set_state("Fetching tables")
-            sqlite_tables = yield self.sqlite_store.db.simple_select_onecol(
+            sqlite_tables = await self.sqlite_store.db.simple_select_onecol(
                 table="sqlite_master", keyvalues={"type": "table"}, retcol="name"
             )
 
-            postgres_tables = yield self.postgres_store.db.simple_select_onecol(
+            postgres_tables = await self.postgres_store.db.simple_select_onecol(
                 table="information_schema.tables",
                 keyvalues={},
                 retcol="distinct table_name",
@@ -573,28 +586,34 @@ class Porter(object):
 
             # Step 3. Figure out what still needs copying
             self.progress.set_state("Checking on port progress")
-            setup_res = yield defer.gatherResults(
-                [
-                    self.setup_table(table)
-                    for table in tables
-                    if table not in ["schema_version", "applied_schema_deltas"]
-                    and not table.startswith("sqlite_")
-                ],
-                consumeErrors=True,
+            setup_res = await make_deferred_yieldable(
+                defer.gatherResults(
+                    [
+                        run_in_background(self.setup_table, table)
+                        for table in tables
+                        if table not in ["schema_version", "applied_schema_deltas"]
+                        and not table.startswith("sqlite_")
+                    ],
+                    consumeErrors=True,
+                )
             )
 
             # Step 4. Do the copying.
             self.progress.set_state("Copying to postgres")
-            yield defer.gatherResults(
-                [self.handle_table(*res) for res in setup_res], consumeErrors=True
+            await make_deferred_yieldable(
+                defer.gatherResults(
+                    [run_in_background(self.handle_table, *res) for res in setup_res],
+                    consumeErrors=True,
+                )
             )
 
             # Step 5. Do final post-processing
-            yield self._setup_state_group_id_seq()
+            await self._setup_state_group_id_seq()
 
             self.progress.done()
-        except Exception:
+        except Exception as e:
             global end_error_exec_info
+            end_error = e
             end_error_exec_info = sys.exc_info()
             logger.exception("")
         finally:
@@ -634,8 +653,7 @@ class Porter(object):
 
         return outrows
 
-    @defer.inlineCallbacks
-    def _setup_sent_transactions(self):
+    async def _setup_sent_transactions(self):
         # Only save things from the last day
         yesterday = int(time.time() * 1000) - 86400000
 
@@ -656,7 +674,7 @@ class Porter(object):
 
             return headers, [r for r in rows if r[ts_ind] < yesterday]
 
-        headers, rows = yield self.sqlite_store.db.runInteraction("select", r)
+        headers, rows = await self.sqlite_store.db.runInteraction("select", r)
 
         rows = self._convert_rows("sent_transactions", headers, rows)
 
@@ -669,7 +687,7 @@ class Porter(object):
                     txn, "sent_transactions", headers[1:], rows
                 )
 
-            yield self.postgres_store.execute(insert)
+            await self.postgres_store.execute(insert)
         else:
             max_inserted_rowid = 0
 
@@ -686,10 +704,10 @@ class Porter(object):
             else:
                 return 1
 
-        next_chunk = yield self.sqlite_store.execute(get_start_id)
+        next_chunk = await self.sqlite_store.execute(get_start_id)
         next_chunk = max(max_inserted_rowid + 1, next_chunk)
 
-        yield self.postgres_store.db.simple_insert(
+        await self.postgres_store.db.simple_insert(
             table="port_from_sqlite3",
             values={
                 "table_name": "sent_transactions",
@@ -705,46 +723,49 @@ class Porter(object):
             (size,) = txn.fetchone()
             return int(size)
 
-        remaining_count = yield self.sqlite_store.execute(get_sent_table_size)
+        remaining_count = await self.sqlite_store.execute(get_sent_table_size)
 
         total_count = remaining_count + inserted_rows
 
-        defer.returnValue((next_chunk, inserted_rows, total_count))
+        return next_chunk, inserted_rows, total_count
 
-    @defer.inlineCallbacks
-    def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
-        frows = yield self.sqlite_store.execute_sql(
+    async def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
+        frows = await self.sqlite_store.execute_sql(
             "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk
         )
 
-        brows = yield self.sqlite_store.execute_sql(
+        brows = await self.sqlite_store.execute_sql(
             "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk
         )
 
-        defer.returnValue(frows[0][0] + brows[0][0])
+        return frows[0][0] + brows[0][0]
 
-    @defer.inlineCallbacks
-    def _get_already_ported_count(self, table):
-        rows = yield self.postgres_store.execute_sql(
+    async def _get_already_ported_count(self, table):
+        rows = await self.postgres_store.execute_sql(
             "SELECT count(*) FROM %s" % (table,)
         )
 
-        defer.returnValue(rows[0][0])
+        return rows[0][0]
 
-    @defer.inlineCallbacks
-    def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
-        remaining, done = yield defer.gatherResults(
-            [
-                self._get_remaining_count_to_port(table, forward_chunk, backward_chunk),
-                self._get_already_ported_count(table),
-            ],
-            consumeErrors=True,
+    async def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
+        remaining, done = await make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(
+                        self._get_remaining_count_to_port,
+                        table,
+                        forward_chunk,
+                        backward_chunk,
+                    ),
+                    run_in_background(self._get_already_ported_count, table),
+                ],
+            )
         )
 
         remaining = int(remaining) if remaining else 0
         done = int(done) if done else 0
 
-        defer.returnValue((done, remaining + done))
+        return done, remaining + done
 
     def _setup_state_group_id_seq(self):
         def r(txn):
@@ -1010,7 +1031,12 @@ if __name__ == "__main__":
             hs_config=config,
         )
 
-        reactor.callWhenRunning(porter.run)
+        @defer.inlineCallbacks
+        def run():
+            with LoggingContext("synapse_port_db_run"):
+                yield defer.ensureDeferred(porter.run())
+
+        reactor.callWhenRunning(run)
 
         reactor.run()
 
@@ -1019,7 +1045,11 @@ if __name__ == "__main__":
     else:
         start()
 
-    if end_error_exec_info:
-        exc_type, exc_value, exc_traceback = end_error_exec_info
-        traceback.print_exception(exc_type, exc_value, exc_traceback)
+    if end_error:
+        if end_error_exec_info:
+            exc_type, exc_value, exc_traceback = end_error_exec_info
+            traceback.print_exception(exc_type, exc_value, exc_traceback)
+
+        sys.stderr.write(str(end_error))
+
         sys.exit(5)
-- 
cgit 1.4.1


From 837f62266b845cce9797fbe989a7816d4f1fadff Mon Sep 17 00:00:00 2001
From: Ivan Vilata-i-Balaguer 
Date: Wed, 22 Jan 2020 02:32:52 -0500
Subject: Avoid attribute error when `password_config` present but empty
 (#6753)

The old statement returned `None` for such a `password_config` (like the one
created on first run), thus retrieval of the `pepper` key failed with
`AttributeError`.

Fixes #5315

Signed-off-by: Ivan Vilata i Balaguer 
---
 changelog.d/6753.bugfix | 1 +
 scripts/hash_password   | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6753.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6753.bugfix b/changelog.d/6753.bugfix
new file mode 100644
index 0000000000..5dfde793e1
--- /dev/null
+++ b/changelog.d/6753.bugfix
@@ -0,0 +1 @@
+Fix `AttributeError: 'NoneType' object has no attribute 'get'` in `hash_password` when configuration has an empty `password_config`. Contributed by @ivilata.
diff --git a/scripts/hash_password b/scripts/hash_password
index a1eb0769da..a30767f758 100755
--- a/scripts/hash_password
+++ b/scripts/hash_password
@@ -52,7 +52,7 @@ if __name__ == "__main__":
     if "config" in args and args.config:
         config = yaml.safe_load(args.config)
         bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
-        password_config = config.get("password_config", {})
+        password_config = config.get("password_config", None) or {}
         password_pepper = password_config.get("pepper", password_pepper)
     password = args.password
 
-- 
cgit 1.4.1


From 2093f83ea045d8a3fc6daa0c793da9b17237dc1f Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 22 Jan 2020 10:36:48 +0000
Subject: Remove unused CI docker compose files (#6754)

These now exist in the pipelines repo.
---
 .buildkite/docker-compose.py35.pg95.yaml | 22 ----------------------
 .buildkite/docker-compose.py37.pg11.yaml | 22 ----------------------
 .buildkite/docker-compose.py37.pg95.yaml | 22 ----------------------
 changelog.d/6754.misc                    |  1 +
 4 files changed, 1 insertion(+), 66 deletions(-)
 delete mode 100644 .buildkite/docker-compose.py35.pg95.yaml
 delete mode 100644 .buildkite/docker-compose.py37.pg11.yaml
 delete mode 100644 .buildkite/docker-compose.py37.pg95.yaml
 create mode 100644 changelog.d/6754.misc

(limited to 'changelog.d')

diff --git a/.buildkite/docker-compose.py35.pg95.yaml b/.buildkite/docker-compose.py35.pg95.yaml
deleted file mode 100644
index 43237b7775..0000000000
--- a/.buildkite/docker-compose.py35.pg95.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-version: '3.1'
-
-services:
-
-  postgres:
-    image: postgres:9.5
-    environment:
-      POSTGRES_PASSWORD: postgres
-    command: -c fsync=off
-
-  testenv:
-    image: python:3.5
-    depends_on:
-      - postgres
-    env_file: .env
-    environment:
-      SYNAPSE_POSTGRES_HOST: postgres
-      SYNAPSE_POSTGRES_USER: postgres
-      SYNAPSE_POSTGRES_PASSWORD: postgres
-    working_dir: /src
-    volumes:
-      - ..:/src
diff --git a/.buildkite/docker-compose.py37.pg11.yaml b/.buildkite/docker-compose.py37.pg11.yaml
deleted file mode 100644
index b767228147..0000000000
--- a/.buildkite/docker-compose.py37.pg11.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-version: '3.1'
-
-services:
-
-  postgres:
-    image: postgres:11
-    environment:
-      POSTGRES_PASSWORD: postgres
-    command: -c fsync=off
-
-  testenv:
-    image: python:3.7
-    depends_on:
-      - postgres
-    env_file: .env
-    environment:
-      SYNAPSE_POSTGRES_HOST: postgres
-      SYNAPSE_POSTGRES_USER: postgres
-      SYNAPSE_POSTGRES_PASSWORD: postgres
-    working_dir: /src
-    volumes:
-      - ..:/src
diff --git a/.buildkite/docker-compose.py37.pg95.yaml b/.buildkite/docker-compose.py37.pg95.yaml
deleted file mode 100644
index 02fcd28304..0000000000
--- a/.buildkite/docker-compose.py37.pg95.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-version: '3.1'
-
-services:
-
-  postgres:
-    image: postgres:9.5
-    environment:
-      POSTGRES_PASSWORD: postgres
-    command: -c fsync=off
-
-  testenv:
-    image: python:3.7
-    depends_on:
-      - postgres
-    env_file: .env
-    environment:
-      SYNAPSE_POSTGRES_HOST: postgres
-      SYNAPSE_POSTGRES_USER: postgres
-      SYNAPSE_POSTGRES_PASSWORD: postgres
-    working_dir: /src
-    volumes:
-      - ..:/src
diff --git a/changelog.d/6754.misc b/changelog.d/6754.misc
new file mode 100644
index 0000000000..0a955e47e6
--- /dev/null
+++ b/changelog.d/6754.misc
@@ -0,0 +1 @@
+Remove unused CI docker compose files.
-- 
cgit 1.4.1


From 5d7a6ad2238981646b2ae7b4071d8715281d181a Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 22 Jan 2020 10:37:00 +0000
Subject: Allow streaming cache invalidate all to workers. (#6749)

---
 changelog.d/6749.misc                      |  1 +
 docs/tcp_replication.md                    |  5 +++++
 synapse/replication/slave/storage/_base.py |  7 ++++++-
 synapse/replication/tcp/streams/_base.py   | 26 +++++++++++++++++++++-----
 synapse/storage/_base.py                   | 18 +++++++++++++-----
 synapse/storage/data_stores/main/cache.py  | 27 +++++++++++++++++++++++----
 6 files changed, 69 insertions(+), 15 deletions(-)
 create mode 100644 changelog.d/6749.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6749.misc b/changelog.d/6749.misc
new file mode 100644
index 0000000000..9fa13cb1d4
--- /dev/null
+++ b/changelog.d/6749.misc
@@ -0,0 +1 @@
+Allow streaming cache 'invalidate all' to workers.
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
index a0b1d563ff..e3a4634b14 100644
--- a/docs/tcp_replication.md
+++ b/docs/tcp_replication.md
@@ -254,6 +254,11 @@ and the key to invalidate. For example:
 
     > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]
 
+Alternatively, an entire cache can be invalidated by sending down a `null`
+instead of the key. For example:
+
+    > RDATA caches 550953772 ["get_user_by_id", null, 1550574873252]
+
 However, there are times when a number of caches need to be invalidated
 at the same time with the same key. To reduce traffic we batch those
 invalidations into a single poke by defining a special cache name that
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index 704282c800..f45cbd37a0 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -66,11 +66,16 @@ class BaseSlavedStore(SQLBaseStore):
                 self._cache_id_gen.advance(token)
             for row in rows:
                 if row.cache_func == CURRENT_STATE_CACHE_NAME:
+                    if row.keys is None:
+                        raise Exception(
+                            "Can't send an 'invalidate all' for current state cache"
+                        )
+
                     room_id = row.keys[0]
                     members_changed = set(row.keys[1:])
                     self._invalidate_state_caches(room_id, members_changed)
                 else:
-                    self._attempt_to_invalidate_cache(row.cache_func, tuple(row.keys))
+                    self._attempt_to_invalidate_cache(row.cache_func, row.keys)
 
     def _invalidate_cache_and_stream(self, txn, cache_func, keys):
         txn.call_after(cache_func.invalidate, keys)
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index e03e77199b..a8d568b14a 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -17,7 +17,9 @@
 import itertools
 import logging
 from collections import namedtuple
-from typing import Any
+from typing import Any, List, Optional
+
+import attr
 
 logger = logging.getLogger(__name__)
 
@@ -65,10 +67,24 @@ PushersStreamRow = namedtuple(
     "PushersStreamRow",
     ("user_id", "app_id", "pushkey", "deleted"),  # str  # str  # str  # bool
 )
-CachesStreamRow = namedtuple(
-    "CachesStreamRow",
-    ("cache_func", "keys", "invalidation_ts"),  # str  # list(str)  # int
-)
+
+
+@attr.s
+class CachesStreamRow:
+    """Stream to inform workers they should invalidate their cache.
+
+    Attributes:
+        cache_func: Name of the cached function.
+        keys: The entry in the cache to invalidate. If None then will
+            invalidate all.
+        invalidation_ts: Timestamp of when the invalidation took place.
+    """
+
+    cache_func = attr.ib(type=str)
+    keys = attr.ib(type=Optional[List[Any]])
+    invalidation_ts = attr.ib(type=int)
+
+
 PublicRoomsStreamRow = namedtuple(
     "PublicRoomsStreamRow",
     (
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 3bb9381663..da3b99f93d 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -17,6 +17,7 @@
 import logging
 import random
 from abc import ABCMeta
+from typing import Any, Optional
 
 from six import PY2
 from six.moves import builtins
@@ -26,7 +27,7 @@ from canonicaljson import json
 from synapse.storage.database import LoggingTransaction  # noqa: F401
 from synapse.storage.database import make_in_list_sql_clause  # noqa: F401
 from synapse.storage.database import Database
-from synapse.types import get_domain_from_id
+from synapse.types import Collection, get_domain_from_id
 
 logger = logging.getLogger(__name__)
 
@@ -63,17 +64,24 @@ class SQLBaseStore(metaclass=ABCMeta):
         self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
         self._attempt_to_invalidate_cache("get_current_state_ids", (room_id,))
 
-    def _attempt_to_invalidate_cache(self, cache_name, key):
+    def _attempt_to_invalidate_cache(
+        self, cache_name: str, key: Optional[Collection[Any]]
+    ):
         """Attempts to invalidate the cache of the given name, ignoring if the
         cache doesn't exist. Mainly used for invalidating caches on workers,
         where they may not have the cache.
 
         Args:
-            cache_name (str)
-            key (tuple)
+            cache_name
+            key: Entry to invalidate. If None then invalidates the entire
+                cache.
         """
+
         try:
-            getattr(self, cache_name).invalidate(key)
+            if key is None:
+                getattr(self, cache_name).invalidate_all()
+            else:
+                getattr(self, cache_name).invalidate(tuple(key))
         except AttributeError:
             # We probably haven't pulled in the cache in this worker,
             # which is fine.
diff --git a/synapse/storage/data_stores/main/cache.py b/synapse/storage/data_stores/main/cache.py
index bf91512daf..afa2b41c98 100644
--- a/synapse/storage/data_stores/main/cache.py
+++ b/synapse/storage/data_stores/main/cache.py
@@ -16,6 +16,7 @@
 
 import itertools
 import logging
+from typing import Any, Iterable, Optional
 
 from twisted.internet import defer
 
@@ -43,6 +44,14 @@ class CacheInvalidationStore(SQLBaseStore):
         txn.call_after(cache_func.invalidate, keys)
         self._send_invalidation_to_replication(txn, cache_func.__name__, keys)
 
+    def _invalidate_all_cache_and_stream(self, txn, cache_func):
+        """Invalidates the entire cache and adds it to the cache stream so slaves
+        will know to invalidate their caches.
+        """
+
+        txn.call_after(cache_func.invalidate_all)
+        self._send_invalidation_to_replication(txn, cache_func.__name__, None)
+
     def _invalidate_state_caches_and_stream(self, txn, room_id, members_changed):
         """Special case invalidation of caches based on current state.
 
@@ -73,17 +82,24 @@ class CacheInvalidationStore(SQLBaseStore):
                 txn, CURRENT_STATE_CACHE_NAME, [room_id]
             )
 
-    def _send_invalidation_to_replication(self, txn, cache_name, keys):
+    def _send_invalidation_to_replication(
+        self, txn, cache_name: str, keys: Optional[Iterable[Any]]
+    ):
         """Notifies replication that given cache has been invalidated.
 
         Note that this does *not* invalidate the cache locally.
 
         Args:
             txn
-            cache_name (str)
-            keys (iterable[str])
+            cache_name
+            keys: Entry to invalidate. If None then invalidates the entire cache.
         """
 
+        if cache_name == CURRENT_STATE_CACHE_NAME and keys is None:
+            raise Exception(
+                "Can't stream invalidate all with magic current state cache"
+            )
+
         if isinstance(self.database_engine, PostgresEngine):
             # get_next() returns a context manager which is designed to wrap
             # the transaction. However, we want to only get an ID when we want
@@ -95,13 +111,16 @@ class CacheInvalidationStore(SQLBaseStore):
             txn.call_after(ctx.__exit__, None, None, None)
             txn.call_after(self.hs.get_notifier().on_new_replication_data)
 
+            if keys is not None:
+                keys = list(keys)
+
             self.db.simple_insert_txn(
                 txn,
                 table="cache_invalidation_stream",
                 values={
                     "stream_id": stream_id,
                     "cache_func": cache_name,
-                    "keys": list(keys),
+                    "keys": keys,
                     "invalidation_ts": self.clock.time_msec(),
                 },
             )
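
On the consuming side (not part of this patch), a worker applying a
`cache_invalidation_stream` row would mirror the same convention. A hedged sketch,
assuming the row supplies the cache name and an optional key list:

```
# Sketch of the consumer side: keys is None (NULL in the table) when the
# whole cache should be dropped. Not Synapse code.
from typing import Any, List, Optional


def apply_invalidation_row(store, cache_func: str, keys: Optional[List[Any]]):
    cache = getattr(store, cache_func, None)
    if cache is None:
        return  # this worker never pulled in that cache
    if keys is None:
        cache.invalidate_all()  # NULL keys: invalidate everything
    else:
        cache.invalidate(tuple(keys))  # targeted invalidation of one entry
```
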
-- 
cgit 1.4.1


From 5e52d8563bdc0ab6667f0ec2571f35791720a40a Mon Sep 17 00:00:00 2001
From: Neil Johnson 
Date: Wed, 22 Jan 2020 11:05:14 +0000
Subject: Allow monthly active user limiting support for worker mode, fixes
 #4639. (#6742)

---
 changelog.d/6742.bugfix                            |   1 +
 synapse/app/client_reader.py                       |   4 +
 synapse/app/event_creator.py                       |   4 +
 synapse/app/federation_reader.py                   |   4 +
 synapse/app/synchrotron.py                         |   4 +
 .../data_stores/main/monthly_active_users.py       | 165 +++++++++++----------
 6 files changed, 100 insertions(+), 82 deletions(-)
 create mode 100644 changelog.d/6742.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6742.bugfix b/changelog.d/6742.bugfix
new file mode 100644
index 0000000000..ca2687c8bb
--- /dev/null
+++ b/changelog.d/6742.bugfix
@@ -0,0 +1 @@
+Fix monthly active user limiting support for worker mode, fixes [#4639](https://github.com/matrix-org/synapse/issues/4639).
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 3edfe19567..ca96da6a4a 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -62,6 +62,9 @@ from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
 from synapse.rest.client.versions import VersionsRestServlet
 from synapse.server import HomeServer
+from synapse.storage.data_stores.main.monthly_active_users import (
+    MonthlyActiveUsersWorkerStore,
+)
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
@@ -85,6 +88,7 @@ class ClientReaderSlavedStore(
     SlavedTransactionStore,
     SlavedProfileStore,
     SlavedClientIpStore,
+    MonthlyActiveUsersWorkerStore,
     BaseSlavedStore,
 ):
     pass
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index d0ddbe38fc..58e5b354f6 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -56,6 +56,9 @@ from synapse.rest.client.v1.room import (
     RoomStateEventRestServlet,
 )
 from synapse.server import HomeServer
+from synapse.storage.data_stores.main.monthly_active_users import (
+    MonthlyActiveUsersWorkerStore,
+)
 from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
@@ -81,6 +84,7 @@ class EventCreatorSlavedStore(
     SlavedEventStore,
     SlavedRegistrationStore,
     RoomStore,
+    MonthlyActiveUsersWorkerStore,
     BaseSlavedStore,
 ):
     pass
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 311523e0ed..1f1cea1416 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -46,6 +46,9 @@ from synapse.replication.slave.storage.transactions import SlavedTransactionStor
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.server import HomeServer
+from synapse.storage.data_stores.main.monthly_active_users import (
+    MonthlyActiveUsersWorkerStore,
+)
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
@@ -66,6 +69,7 @@ class FederationReaderSlavedStore(
     RoomStore,
     DirectoryStore,
     SlavedTransactionStore,
+    MonthlyActiveUsersWorkerStore,
     BaseSlavedStore,
 ):
     pass
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 3218da07bd..8982c0676e 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -54,6 +54,9 @@ from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
 from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
 from synapse.rest.client.v2_alpha import sync
 from synapse.server import HomeServer
+from synapse.storage.data_stores.main.monthly_active_users import (
+    MonthlyActiveUsersWorkerStore,
+)
 from synapse.storage.data_stores.main.presence import UserPresenceState
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
@@ -77,6 +80,7 @@ class SynchrotronSlavedStore(
     SlavedEventStore,
     SlavedClientIpStore,
     RoomStore,
+    MonthlyActiveUsersWorkerStore,
     BaseSlavedStore,
 ):
     pass
diff --git a/synapse/storage/data_stores/main/monthly_active_users.py b/synapse/storage/data_stores/main/monthly_active_users.py
index 27158534cb..89a41542a3 100644
--- a/synapse/storage/data_stores/main/monthly_active_users.py
+++ b/synapse/storage/data_stores/main/monthly_active_users.py
@@ -27,12 +27,76 @@ logger = logging.getLogger(__name__)
 LAST_SEEN_GRANULARITY = 60 * 60 * 1000
 
 
-class MonthlyActiveUsersStore(SQLBaseStore):
+class MonthlyActiveUsersWorkerStore(SQLBaseStore):
     def __init__(self, database: Database, db_conn, hs):
-        super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs)
+        super(MonthlyActiveUsersWorkerStore, self).__init__(database, db_conn, hs)
         self._clock = hs.get_clock()
         self.hs = hs
+
+    @cached(num_args=0)
+    def get_monthly_active_count(self):
+        """Generates current count of monthly active users
+
+        Returns:
+            Deferred[int]: Number of current monthly active users
+        """
+
+        def _count_users(txn):
+            sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users"
+
+            txn.execute(sql)
+            (count,) = txn.fetchone()
+            return count
+
+        return self.db.runInteraction("count_users", _count_users)
+
+    @defer.inlineCallbacks
+    def get_registered_reserved_users(self):
+        """Of the reserved threepids defined in config, which are associated
+        with registered users?
+
+        Returns:
+            Deferred[list]: Real reserved users
+        """
+        users = []
+
+        for tp in self.hs.config.mau_limits_reserved_threepids[
+            : self.hs.config.max_mau_value
+        ]:
+            user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
+                tp["medium"], tp["address"]
+            )
+            if user_id:
+                users.append(user_id)
+
+        return users
+
+    @cached(num_args=1)
+    def user_last_seen_monthly_active(self, user_id):
+        """
+            Checks if a given user is part of the monthly active user group
+            Arguments:
+                user_id (str): user to add/update
+            Return:
+                Deferred[int] : timestamp since last seen, None if never seen
+
+        """
+
+        return self.db.simple_select_one_onecol(
+            table="monthly_active_users",
+            keyvalues={"user_id": user_id},
+            retcol="timestamp",
+            allow_none=True,
+            desc="user_last_seen_monthly_active",
+        )
+
+
+class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
+    def __init__(self, database: Database, db_conn, hs):
+        super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs)
+
         # Do not add more reserved users than the total allowable number
         self.db.new_transaction(
             db_conn,
             "initialise_mau_threepids",
@@ -146,57 +210,22 @@ class MonthlyActiveUsersStore(SQLBaseStore):
 
                     txn.execute(sql, query_args)
 
+            # It seems poor to invalidate the whole cache. Postgres supports
+            # RETURNING, which would let us invalidate only the specific
+            # users, but sqlite has no way to do this; instead we would need
+            # to SELECT and then DELETE, which without locking is racy.
+            # We have resolved to invalidate the whole cache for now, and to
+            # revisit this if and when the performance cost becomes significant.
+            self._invalidate_all_cache_and_stream(
+                txn, self.user_last_seen_monthly_active
+            )
+            self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ())
+
         reserved_users = yield self.get_registered_reserved_users()
         yield self.db.runInteraction(
             "reap_monthly_active_users", _reap_users, reserved_users
         )
-        # It seems poor to invalidate the whole cache, Postgres supports
-        # 'Returning' which would allow me to invalidate only the
-        # specific users, but sqlite has no way to do this and instead
-        # I would need to SELECT and the DELETE which without locking
-        # is racy.
-        # Have resolved to invalidate the whole cache for now and do
-        # something about it if and when the perf becomes significant
-        self.user_last_seen_monthly_active.invalidate_all()
-        self.get_monthly_active_count.invalidate_all()
-
-    @cached(num_args=0)
-    def get_monthly_active_count(self):
-        """Generates current count of monthly active users
-
-        Returns:
-            Defered[int]: Number of current monthly active users
-        """
-
-        def _count_users(txn):
-            sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users"
-
-            txn.execute(sql)
-            (count,) = txn.fetchone()
-            return count
-
-        return self.db.runInteraction("count_users", _count_users)
-
-    @defer.inlineCallbacks
-    def get_registered_reserved_users(self):
-        """Of the reserved threepids defined in config, which are associated
-        with registered users?
-
-        Returns:
-            Defered[list]: Real reserved users
-        """
-        users = []
-
-        for tp in self.hs.config.mau_limits_reserved_threepids[
-            : self.hs.config.max_mau_value
-        ]:
-            user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
-                tp["medium"], tp["address"]
-            )
-            if user_id:
-                users.append(user_id)
-
-        return users
 
     @defer.inlineCallbacks
     def upsert_monthly_active_user(self, user_id):
@@ -222,23 +251,9 @@ class MonthlyActiveUsersStore(SQLBaseStore):
             "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id
         )
 
-        user_in_mau = self.user_last_seen_monthly_active.cache.get(
-            (user_id,), None, update_metrics=False
-        )
-        if user_in_mau is None:
-            self.get_monthly_active_count.invalidate(())
-
-        self.user_last_seen_monthly_active.invalidate((user_id,))
-
     def upsert_monthly_active_user_txn(self, txn, user_id):
         """Updates or inserts monthly active user member
 
-        Note that, after calling this method, it will generally be necessary
-        to invalidate the caches on user_last_seen_monthly_active and
-        get_monthly_active_count. We can't do that here, because we are running
-        in a database thread rather than the main thread, and we can't call
-        txn.call_after because txn may not be a LoggingTransaction.
-
         We consciously do not call is_support_txn from this method because it
         is not possible to cache the response. is_support_txn will be false in
         almost all cases, so it seems reasonable to call it only for
@@ -269,27 +284,13 @@ class MonthlyActiveUsersStore(SQLBaseStore):
             values={"timestamp": int(self._clock.time_msec())},
         )
 
-        return is_insert
-
-    @cached(num_args=1)
-    def user_last_seen_monthly_active(self, user_id):
-        """
-            Checks if a given user is part of the monthly active user group
-            Arguments:
-                user_id (str): user to add/update
-            Return:
-                Deferred[int] : timestamp since last seen, None if never seen
-
-        """
-
-        return self.db.simple_select_one_onecol(
-            table="monthly_active_users",
-            keyvalues={"user_id": user_id},
-            retcol="timestamp",
-            allow_none=True,
-            desc="user_last_seen_monthly_active",
+        self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ())
+        self._invalidate_cache_and_stream(
+            txn, self.user_last_seen_monthly_active, (user_id,)
         )
 
+        return is_insert
+
     @defer.inlineCallbacks
     def populate_monthly_active_users(self, user_id):
         """Checks on the state of monthly active user limits and optionally
-- 
cgit 1.4.1
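
The shape of this change is a recurring Synapse pattern: read-only `@cached`
accessors move into a `*WorkerStore` that worker apps mix into their slaved stores,
while writes stay on the master-only subclass and invalidate caches via the
replication stream. A minimal sketch of the split, with hypothetical names and a
plain dict standing in for the `@cached` machinery:

```
# Sketch of the worker/master store split. FooWorkerStore/FooStore are
# hypothetical; a dict stands in for the @cached machinery.
class FooWorkerStore:
    """Read side: safe to mix into any worker process's store."""

    def __init__(self):
        self._cache = {}

    def get_foo_count(self):
        if () not in self._cache:
            self._cache[()] = 0  # stands in for running the DB query
        return self._cache[()]


class FooStore(FooWorkerStore):
    """Write side: master only. Writes must invalidate the cache and, in
    real Synapse, stream that invalidation to workers."""

    def bump_foo(self):
        # ... write to the table here ...
        self._cache.pop((), None)  # stands in for _invalidate_cache_and_stream
```

The bug being fixed was, in essence, this split being absent: the cached getters
lived on a store the worker apps never loaded, so those processes could not answer
MAU queries.
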


From aa9b00fb2f9a7718d67fb11621a83035492ed9fb Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 22 Jan 2020 11:05:50 +0000
Subject: Fix and add test to deprecated quarantine media admin api (#6756)

---
 changelog.d/6756.feature       |  1 +
 synapse/rest/admin/media.py    |  2 +-
 tests/rest/admin/test_admin.py | 15 +++++++++++----
 3 files changed, 13 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/6756.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6756.feature b/changelog.d/6756.feature
new file mode 100644
index 0000000000..6328c868f2
--- /dev/null
+++ b/changelog.d/6756.feature
@@ -0,0 +1 @@
+Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media.
\ No newline at end of file
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index 3a445d6eed..ee75095c0e 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -36,7 +36,7 @@ class QuarantineMediaInRoom(RestServlet):
         historical_admin_path_patterns("/room/(?P[^/]+)/media/quarantine")
         +
         # This path kept around for legacy reasons
-        historical_admin_path_patterns("/quarantine_media/(?P<room_id>![^/]+)")
+        historical_admin_path_patterns("/quarantine_media/(?P<room_id>[^/]+)")
     )
 
     def __init__(self, hs):
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index f3b4a31e21..af4d604e50 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -516,7 +516,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
             ),
         )
 
-    def test_quarantine_all_media_in_room(self):
+    def test_quarantine_all_media_in_room(self, override_url_template=None):
         self.register_user("room_admin", "pass", admin=True)
         admin_user_tok = self.login("room_admin", "pass")
 
@@ -555,9 +555,12 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
         )
 
         # Quarantine all media in the room
-        url = "/_synapse/admin/v1/room/%s/media/quarantine" % urllib.parse.quote(
-            room_id
-        )
+        if override_url_template:
+            url = override_url_template % urllib.parse.quote(room_id)
+        else:
+            url = "/_synapse/admin/v1/room/%s/media/quarantine" % urllib.parse.quote(
+                room_id
+            )
         request, channel = self.make_request("POST", url, access_token=admin_user_tok,)
         self.render(request)
         self.pump(1.0)
@@ -611,6 +614,10 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
             ),
         )
 
+    def test_quarantine_all_media_in_room_deprecated_api_path(self):
+        # Perform the above test with the deprecated API path
+        self.test_quarantine_all_media_in_room("/_synapse/admin/v1/quarantine_media/%s")
+
     def test_quarantine_all_media_by_user(self):
         self.register_user("user_admin", "pass", admin=True)
         admin_user_tok = self.login("user_admin", "pass")
-- 
cgit 1.4.1
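
For reference, both route shapes exercised by the new test accept the same POST. A
hedged usage sketch with the `requests` library; the base URL and token are
placeholders, not values from this patch:

```
# Sketch: quarantine all media in a room via either path. BASE and TOKEN
# are assumptions; both routes resolve to QuarantineMediaInRoom.
import urllib.parse

import requests

BASE = "https://synapse.example.com"  # assumption
TOKEN = "ADMIN_ACCESS_TOKEN"  # assumption
room_id = urllib.parse.quote("!room:example.com")

for path in (
    "/_synapse/admin/v1/room/%s/media/quarantine" % room_id,  # current path
    "/_synapse/admin/v1/quarantine_media/%s" % room_id,  # legacy path
):
    resp = requests.post(BASE + path, headers={"Authorization": "Bearer " + TOKEN})
    print(path, resp.status_code)
```
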


From 6ae0c8db3335faa9b5f0e4407f7a4a3713c84062 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 22 Jan 2020 12:38:18 +0000
Subject: Lint + changelog

---
 changelog.d/6764.misc                     | 1 +
 tests/rest/media/v1/test_media_storage.py | 4 +---
 2 files changed, 2 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6764.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6764.misc b/changelog.d/6764.misc
new file mode 100644
index 0000000000..8edd767405
--- /dev/null
+++ b/changelog.d/6764.misc
@@ -0,0 +1 @@
+Fixup `synapse.rest` to pass mypy.
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index 6345cc7637..1809ceb839 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/rest/media/v1/test_media_storage.py
@@ -162,9 +162,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
 
     def _req(self, content_disposition):
 
-        request, channel = self.make_request(
-            "GET", self.media_id, shorthand=False
-        )
+        request, channel = self.make_request("GET", self.media_id, shorthand=False)
         request.render(self.download_resource)
         self.pump()
 
-- 
cgit 1.4.1


From 90a28fb475a29daa9e7a9ee7204f6f76cc8af441 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 22 Jan 2020 13:36:43 +0000
Subject: Admin API to list, filter and sort rooms (#6720)

---
 changelog.d/6720.feature                 |   1 +
 docs/admin_api/rooms.md                  | 173 ++++++++++++++
 synapse/rest/admin/__init__.py           |   3 +-
 synapse/rest/admin/_base.py              |  15 ++
 synapse/rest/admin/rooms.py              |  82 +++++++
 synapse/rest/client/v2_alpha/_base.py    |   2 +-
 synapse/storage/data_stores/main/room.py | 125 +++++++++-
 tests/rest/admin/test_admin.py           | 393 ++++++++++++++++++++++++++++++-
 8 files changed, 787 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6720.feature
 create mode 100644 docs/admin_api/rooms.md

(limited to 'changelog.d')

diff --git a/changelog.d/6720.feature b/changelog.d/6720.feature
new file mode 100644
index 0000000000..dfc1b74d62
--- /dev/null
+++ b/changelog.d/6720.feature
@@ -0,0 +1 @@
+Add a new admin API to list and filter rooms on the server.
\ No newline at end of file
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
new file mode 100644
index 0000000000..082721ea95
--- /dev/null
+++ b/docs/admin_api/rooms.md
@@ -0,0 +1,173 @@
+# List Room API
+
+The List Room admin API allows server admins to get a list of rooms on their
+server. There are various parameters available that allow for filtering and
+sorting the returned list. This API supports pagination.
+
+## Parameters
+
+The following query parameters are available:
+
+* `from` - Offset in the returned list. Defaults to `0`.
+* `limit` - Maximum number of rooms to return. Defaults to `100`.
+* `order_by` - The method in which to sort the returned list of rooms. Valid values are:
+  - `alphabetical` - Rooms are ordered alphabetically by room name. This is the default.
+  - `size` - Rooms are ordered by the number of members. Largest to smallest.
+* `dir` - Direction of room order. Either `f` for forwards or `b` for backwards. Setting
+          this value to `b` will reverse the above sort order. Defaults to `f`.
+* `search_term` - Filter rooms by their room name. Search term can be contained in any
+                  part of the room name. Defaults to no filtering.
+
+The following fields are possible in the JSON response body:
+
+* `rooms` - An array of objects, each containing information about a room.
+  - Room objects contain the following fields:
+    - `room_id` - The ID of the room.
+    - `name` - The name of the room.
+    - `canonical_alias` - The canonical (main) alias address of the room.
+    - `joined_members` - How many users are currently in the room.
+* `offset` - The current pagination offset in rooms. This parameter should be
+             used instead of `next_batch` for room offset as `next_batch` is
+             not intended to be parsed.
+* `total_rooms` - The total number of rooms this query can return. Using this
+                  and `offset`, you have enough information to know the current
+                  progression through the list.
+* `next_batch` - If this field is present, we know that there are potentially
+                 more rooms on the server that did not all fit into this response.
+                 We can use `next_batch` to get the "next page" of results. To do
+                 so, simply repeat your request, setting the `from` parameter to
+                 the value of `next_batch`.
+* `prev_batch` - If this field is present, it is possible to paginate backwards.
+                 Use `prev_batch` for the `from` value in the next request to
+                 get the "previous page" of results.
+
+## Usage
+
+A standard request with no filtering:
+
+```
+GET /_synapse/admin/v1/rooms
+
+{}
+```
+
+Response:
+
+```
+{
+  "rooms": [
+    {
+      "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
+      "name": "Matrix HQ",
+      "canonical_alias": "#matrix:matrix.org",
+      "joined_members": 8326
+    },
+    ... (8 hidden items) ...
+    {
+      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
+      "name": "This Week In Matrix (TWIM)",
+      "canonical_alias": "#twim:matrix.org",
+      "joined_members": 314
+    }
+  ],
+  "offset": 0,
+  "total_rooms": 10
+}
+```
+
+Filtering by room name:
+
+```
+GET /_synapse/admin/v1/rooms?search_term=TWIM
+
+{}
+```
+
+Response:
+
+```
+{
+  "rooms": [
+    {
+      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
+      "name": "This Week In Matrix (TWIM)",
+      "canonical_alias": "#twim:matrix.org",
+      "joined_members": 314
+    }
+  ],
+  "offset": 0,
+  "total_rooms": 1
+}
+```
+
+Paginating through a list of rooms:
+
+```
+GET /_synapse/admin/v1/rooms?order_by=size
+
+{}
+```
+
+Response:
+
+```
+{
+  "rooms": [
+    {
+      "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
+      "name": "Matrix HQ",
+      "canonical_alias": "#matrix:matrix.org",
+      "joined_members": 8326
+    },
+    ... (98 hidden items) ...
+    {
+      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
+      "name": "This Week In Matrix (TWIM)",
+      "canonical_alias": "#twim:matrix.org",
+      "joined_members": 314
+    }
+  ],
+  "offset": 0,
+  "total_rooms": 150
+  "next_token": 100
+}
+```
+
+The presence of the `next_batch` parameter tells us that there are more rooms
+than returned in this request, and we need to make another request to get them.
+To get the next batch of room results, we repeat our request, setting the `from`
+parameter to the value of `next_batch`.
+
+```
+GET /_synapse/admin/v1/rooms?order_by=size&from=100
+
+{}
+```
+
+Response:
+
+```
+{
+  "rooms": [
+    {
+      "room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
+      "name": "Music Theory",
+      "canonical_alias": "#musictheory:matrix.org",
+      "joined_members": 127
+    },
+    ... (48 hidden items) ...
+    {
+      "room_id": "!twcBhHVdZlQWuuxBhN:termina.org.uk",
+      "name": "weechat-matrix",
+      "canonical_alias": "#weechat-matrix:termina.org.uk",
+      "joined_members": 137
+    }
+  ],
+  "offset": 100,
+  "prev_batch": 0,
+  "total_rooms": 150
+}
+```
+
+Once the `next_batch` parameter is no longer present, we know we've reached the
+end of the list.
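
Putting the pagination contract together, a hedged client-side sketch that walks
every page (Python `requests`; the base URL and token are placeholders):

```
# Sketch: list every room, largest first, following next_batch until the
# end of the list. BASE and TOKEN are assumptions.
import requests

BASE = "https://synapse.example.com"  # assumption
TOKEN = "ADMIN_ACCESS_TOKEN"  # assumption

params = {"order_by": "size", "limit": 100, "from": 0}
while True:
    body = requests.get(
        BASE + "/_synapse/admin/v1/rooms",
        headers={"Authorization": "Bearer " + TOKEN},
        params=params,
    ).json()
    for room in body["rooms"]:
        print(room["room_id"], room["joined_members"])
    if "next_batch" not in body:
        break  # reached the end of the list
    params["from"] = body["next_batch"]
```
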
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 2932fe2123..42cc2b062a 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -29,7 +29,7 @@ from synapse.rest.admin._base import (
 from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
 from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
 from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet
-from synapse.rest.admin.rooms import ShutdownRoomRestServlet
+from synapse.rest.admin.rooms import ListRoomRestServlet, ShutdownRoomRestServlet
 from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
 from synapse.rest.admin.users import (
     AccountValidityRenewServlet,
@@ -188,6 +188,7 @@ def register_servlets(hs, http_server):
     Register all the admin servlets.
     """
     register_servlets_for_client_rest_resource(hs, http_server)
+    ListRoomRestServlet(hs).register(http_server)
     PurgeRoomServlet(hs).register(http_server)
     SendServerNoticeServlet(hs).register(http_server)
     VersionServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py
index afd0647205..459482eb6d 100644
--- a/synapse/rest/admin/_base.py
+++ b/synapse/rest/admin/_base.py
@@ -40,6 +40,21 @@ def historical_admin_path_patterns(path_regex):
     )
 
 
+def admin_patterns(path_regex: str):
+    """Returns the list of patterns for an admin endpoint
+
+    Args:
+        path_regex: The regex string to match. This should NOT have a ^
+            as this will be prefixed.
+
+    Returns:
+        A list of regex patterns.
+    """
+    admin_prefix = "^/_synapse/admin/v1"
+    patterns = [re.compile(admin_prefix + path_regex)]
+    return patterns
+
+
 async def assert_requester_is_admin(auth, request):
     """Verify that the requester is an admin user
 
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index f7cc5e9be9..f9b8c0a4f0 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -15,15 +15,20 @@
 import logging
 
 from synapse.api.constants import Membership
+from synapse.api.errors import Codes, SynapseError
 from synapse.http.servlet import (
     RestServlet,
     assert_params_in_dict,
+    parse_integer,
     parse_json_object_from_request,
+    parse_string,
 )
 from synapse.rest.admin._base import (
+    admin_patterns,
     assert_user_is_admin,
     historical_admin_path_patterns,
 )
+from synapse.storage.data_stores.main.room import RoomSortOrder
 from synapse.types import create_requester
 from synapse.util.async_helpers import maybe_awaitable
 
@@ -155,3 +160,80 @@ class ShutdownRoomRestServlet(RestServlet):
                 "new_room_id": new_room_id,
             },
         )
+
+
+class ListRoomRestServlet(RestServlet):
+    """
+    List all rooms that are known to the homeserver. Results are returned
+    in a dictionary containing room information. Supports pagination.
+    """
+
+    PATTERNS = admin_patterns("/rooms")
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+        self.admin_handler = hs.get_handlers().admin_handler
+
+    async def on_GET(self, request):
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        # Extract query parameters
+        start = parse_integer(request, "from", default=0)
+        limit = parse_integer(request, "limit", default=100)
+        order_by = parse_string(request, "order_by", default="alphabetical")
+        if order_by not in (
+            RoomSortOrder.ALPHABETICAL.value,
+            RoomSortOrder.SIZE.value,
+        ):
+            raise SynapseError(
+                400,
+                "Unknown value for order_by: %s" % (order_by,),
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        search_term = parse_string(request, "search_term")
+        if search_term == "":
+            raise SynapseError(
+                400,
+                "search_term cannot be an empty string",
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        direction = parse_string(request, "dir", default="f")
+        if direction not in ("f", "b"):
+            raise SynapseError(
+                400, "Unknown direction: %s" % (direction,), errcode=Codes.INVALID_PARAM
+            )
+
+        reverse_order = direction == "b"
+
+        # Return list of rooms according to parameters
+        rooms, total_rooms = await self.store.get_rooms_paginate(
+            start, limit, order_by, reverse_order, search_term
+        )
+        response = {
+            # next_batch should be opaque, so also return "offset", a value clients can parse
+            "offset": start,
+            "rooms": rooms,
+            "total_rooms": total_rooms,
+        }
+
+        # Are there more rooms to paginate through after this?
+        if (start + limit) < total_rooms:
+            # There are. Calculate where the query should start from next time
+            # to get the next part of the list
+            response["next_batch"] = start + limit
+
+        # Is it possible to paginate backwards? Check if we currently have an
+        # offset
+        if start > 0:
+            if start > limit:
+                # Going back one iteration won't take us to the start.
+                # Calculate new offset
+                response["prev_batch"] = start - limit
+            else:
+                response["prev_batch"] = 0
+
+        return 200, response
diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py
index 2a3f4dd58f..bc11b4dda4 100644
--- a/synapse/rest/client/v2_alpha/_base.py
+++ b/synapse/rest/client/v2_alpha/_base.py
@@ -32,7 +32,7 @@ def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
 
     Args:
         path_regex (str): The regex string to match. This should NOT have a ^
-        as this will be prefixed.
+            as this will be prefixed.
     Returns:
         SRE_Pattern
     """
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index 49bab62be3..d968803ad2 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -18,7 +18,8 @@ import collections
 import logging
 import re
 from abc import abstractmethod
-from typing import List, Optional, Tuple
+from enum import Enum
+from typing import Any, Dict, List, Optional, Tuple
 
 from six import integer_types
 
@@ -46,6 +47,18 @@ RatelimitOverride = collections.namedtuple(
 )
 
 
+class RoomSortOrder(Enum):
+    """
+    Enum to define the sorting method used when returning rooms with get_rooms_paginate
+
+    ALPHABETICAL = sort rooms alphabetically by name
+    SIZE = sort rooms by membership size, highest to lowest
+    """
+
+    ALPHABETICAL = "alphabetical"
+    SIZE = "size"
+
+
 class RoomWorkerStore(SQLBaseStore):
     def __init__(self, database: Database, db_conn, hs):
         super(RoomWorkerStore, self).__init__(database, db_conn, hs)
@@ -281,6 +294,116 @@ class RoomWorkerStore(SQLBaseStore):
             desc="is_room_blocked",
         )
 
+    async def get_rooms_paginate(
+        self,
+        start: int,
+        limit: int,
+        order_by: RoomSortOrder,
+        reverse_order: bool,
+        search_term: Optional[str],
+    ) -> Tuple[List[Dict[str, Any]], int]:
+        """Function to retrieve a paginated list of rooms as json.
+
+        Args:
+            start: offset in the list
+            limit: maximum number of rooms to retrieve
+            order_by: the sort order of the returned list
+            reverse_order: whether to reverse the room list
+            search_term: a string to filter room names by
+        Returns:
+            A list of room dicts and an integer representing the total number of
+            rooms that exist given this query
+        """
+        # Filter room names by a string
+        where_statement = ""
+        if search_term:
+            where_statement = "WHERE state.name LIKE ?"
+
+            # Our postgres db driver converts ? -> %s in SQL strings as that's the
+            # placeholder for postgres.
+            # HOWEVER, if you put a % into your SQL then everything goes wibbly.
+            # To get around this, we're going to surround search_term with %'s
+            # before giving it to the database in python instead
+            search_term = "%" + search_term + "%"
+
+        # Set ordering
+        if RoomSortOrder(order_by) == RoomSortOrder.SIZE:
+            order_by_column = "curr.joined_members"
+            order_by_asc = False
+        elif RoomSortOrder(order_by) == RoomSortOrder.ALPHABETICAL:
+            # Sort alphabetically
+            order_by_column = "state.name"
+            order_by_asc = True
+        else:
+            raise StoreError(
+                500, "Incorrect value for order_by provided: %s" % order_by
+            )
+
+        # Whether to return the list in reverse order
+        if reverse_order:
+            # Flip the boolean
+            order_by_asc = not order_by_asc
+
+        # Create one query for getting the limited number of rooms that the user
+        # asked for, and another query for getting the total number of rooms that
+        # could be returned, so we can tell whether there are more rooms to
+        # paginate through
+        info_sql = """
+            SELECT state.room_id, state.name, state.canonical_alias, curr.joined_members
+            FROM room_stats_state state
+            INNER JOIN room_stats_current curr USING (room_id)
+            %s
+            ORDER BY %s %s
+            LIMIT ?
+            OFFSET ?
+        """ % (
+            where_statement,
+            order_by_column,
+            "ASC" if order_by_asc else "DESC",
+        )
+
+        # Use a nested SELECT statement as SQL can't count(*) with an OFFSET
+        count_sql = """
+            SELECT count(*) FROM (
+              SELECT room_id FROM room_stats_state state
+              %s
+            ) AS get_room_ids
+        """ % (
+            where_statement,
+        )
+
+        def _get_rooms_paginate_txn(txn):
+            # Execute the data query
+            sql_values = (limit, start)
+            if search_term:
+                # Add the search term into the WHERE clause
+                sql_values = (search_term,) + sql_values
+            txn.execute(info_sql, sql_values)
+
+            # Refactor room query data into a structured dictionary
+            rooms = []
+            for room in txn:
+                rooms.append(
+                    {
+                        "room_id": room[0],
+                        "name": room[1],
+                        "canonical_alias": room[2],
+                        "joined_members": room[3],
+                    }
+                )
+
+            # Execute the count query
+
+            # Add the search term into the WHERE clause if present
+            sql_values = (search_term,) if search_term else ()
+            txn.execute(count_sql, sql_values)
+
+            room_count = txn.fetchone()
+            return rooms, room_count[0]
+
+        return await self.db.runInteraction(
+            "get_rooms_paginate", _get_rooms_paginate_txn,
+        )
+
     @cachedInlineCallbacks(max_entries=10000)
     def get_ratelimit_for_user(self, user_id):
         """Check if there are any overrides for ratelimiting for the given
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index af4d604e50..0342aed416 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -17,6 +17,7 @@ import json
 import os
 import urllib.parse
 from binascii import unhexlify
+from typing import List, Optional
 
 from mock import Mock
 
@@ -26,7 +27,7 @@ import synapse.rest.admin
 from synapse.http.server import JsonResource
 from synapse.logging.context import make_deferred_yieldable
 from synapse.rest.admin import VersionServlet
-from synapse.rest.client.v1 import events, login, room
+from synapse.rest.client.v1 import directory, events, login, room
 from synapse.rest.client.v2_alpha import groups
 
 from tests import unittest
@@ -468,9 +469,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
         )
 
         # Extract media ID from the response
-        server_name_and_media_id = response["content_uri"][
-            6:
-        ]  # Cut off the 'mxc://' bit
+        server_name_and_media_id = response["content_uri"][6:]  # Cut off 'mxc://'
         server_name, media_id = server_name_and_media_id.split("/")
 
         # Attempt to access the media
@@ -692,3 +691,389 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
                 % server_and_media_id_2
             ),
         )
+
+
+class RoomTestCase(unittest.HomeserverTestCase):
+    """Test /room admin API.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        directory.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+
+        # Create user
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+    def test_list_rooms(self):
+        """Test that we can list rooms"""
+        # Create 3 test rooms
+        total_rooms = 3
+        room_ids = []
+        for x in range(total_rooms):
+            room_id = self.helper.create_room_as(
+                self.admin_user, tok=self.admin_user_tok
+            )
+            room_ids.append(room_id)
+
+        # Request the list of rooms
+        url = "/_synapse/admin/v1/rooms"
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        # Check request completed successfully
+        self.assertEqual(200, int(channel.code), msg=channel.json_body)
+
+        # Check that response json body contains a "rooms" key
+        self.assertTrue(
+            "rooms" in channel.json_body,
+            msg="Response body does not " "contain a 'rooms' key",
+        )
+
+        # Check that 3 rooms were returned
+        self.assertEqual(3, len(channel.json_body["rooms"]), msg=channel.json_body)
+
+        # Check their room_ids match
+        returned_room_ids = [room["room_id"] for room in channel.json_body["rooms"]]
+        self.assertEqual(room_ids, returned_room_ids)
+
+        # Check that all fields are available
+        for r in channel.json_body["rooms"]:
+            self.assertIn("name", r)
+            self.assertIn("canonical_alias", r)
+            self.assertIn("joined_members", r)
+
+        # Check that the correct number of total rooms was returned
+        self.assertEqual(channel.json_body["total_rooms"], total_rooms)
+
+        # Check that the offset is correct
+        # Should be 0 as we aren't paginating
+        self.assertEqual(channel.json_body["offset"], 0)
+
+        # Check that the prev_batch parameter is not present
+        self.assertNotIn("prev_batch", channel.json_body)
+
+        # We shouldn't receive a next token here as there's no further rooms to show
+        self.assertNotIn("next_batch", channel.json_body)
+
+    def test_list_rooms_pagination(self):
+        """Test that we can get a full list of rooms through pagination"""
+        # Create 5 test rooms
+        total_rooms = 5
+        room_ids = []
+        for x in range(total_rooms):
+            room_id = self.helper.create_room_as(
+                self.admin_user, tok=self.admin_user_tok
+            )
+            room_ids.append(room_id)
+
+        # Set the name of the rooms so we get a consistent returned ordering
+        for idx, room_id in enumerate(room_ids):
+            self.helper.send_state(
+                room_id, "m.room.name", {"name": str(idx)}, tok=self.admin_user_tok,
+            )
+
+        # Request the list of rooms
+        returned_room_ids = []
+        start = 0
+        limit = 2
+
+        run_count = 0
+        should_repeat = True
+        while should_repeat:
+            run_count += 1
+
+            url = "/_synapse/admin/v1/rooms?from=%d&limit=%d&order_by=%s" % (
+                start,
+                limit,
+                "alphabetical",
+            )
+            request, channel = self.make_request(
+                "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+            )
+            self.render(request)
+            self.assertEqual(
+                200, int(channel.result["code"]), msg=channel.result["body"]
+            )
+
+            self.assertTrue("rooms" in channel.json_body)
+            for r in channel.json_body["rooms"]:
+                returned_room_ids.append(r["room_id"])
+
+            # Check that the correct number of total rooms was returned
+            self.assertEqual(channel.json_body["total_rooms"], total_rooms)
+
+            # Check that the offset is correct
+            # We're only getting 2 rooms each page, so should be 2 * last run_count
+            self.assertEqual(channel.json_body["offset"], 2 * (run_count - 1))
+
+            if run_count > 1:
+                # Check the value of prev_batch is correct
+                self.assertEqual(channel.json_body["prev_batch"], 2 * (run_count - 2))
+
+            if "next_batch" not in channel.json_body:
+                # We have reached the end of the list
+                should_repeat = False
+            else:
+                # Make another query with an updated start value
+                start = channel.json_body["next_batch"]
+
+        # We should've queried the endpoint 3 times
+        self.assertEqual(
+            run_count,
+            3,
+            msg="Should've queried 3 times for 5 rooms with limit 2 per query",
+        )
+
+        # Check that we received all of the room ids
+        self.assertEqual(room_ids, returned_room_ids)
+
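+        # Make one final request from the last offset; it should still succeed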
+        url = "/_synapse/admin/v1/rooms?from=%d&limit=%d" % (start, limit)
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+    def test_correct_room_attributes(self):
+        """Test the correct attributes for a room are returned"""
+        # Create a test room
+        room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        test_alias = "#test:test"
+        test_room_name = "something"
+
+        # Have another user join the room
+        user_2 = self.register_user("user4", "pass")
+        user_tok_2 = self.login("user4", "pass")
+        self.helper.join(room_id, user_2, tok=user_tok_2)
+
+        # Create a new alias to this room
+        url = "/_matrix/client/r0/directory/room/%s" % (urllib.parse.quote(test_alias),)
+        request, channel = self.make_request(
+            "PUT",
+            url.encode("ascii"),
+            {"room_id": room_id},
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Set this new alias as the canonical alias for this room
+        self.helper.send_state(
+            room_id,
+            "m.room.aliases",
+            {"aliases": [test_alias]},
+            tok=self.admin_user_tok,
+            state_key="test",
+        )
+        self.helper.send_state(
+            room_id,
+            "m.room.canonical_alias",
+            {"alias": test_alias},
+            tok=self.admin_user_tok,
+        )
+
+        # Set a name for the room
+        self.helper.send_state(
+            room_id, "m.room.name", {"name": test_room_name}, tok=self.admin_user_tok,
+        )
+
+        # Request the list of rooms
+        url = "/_synapse/admin/v1/rooms"
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Check that rooms were returned
+        self.assertTrue("rooms" in channel.json_body)
+        rooms = channel.json_body["rooms"]
+
+        # Check that only one room was returned
+        self.assertEqual(len(rooms), 1)
+
+        # And that the value of the total_rooms key was correct
+        self.assertEqual(channel.json_body["total_rooms"], 1)
+
+        # Check that the offset is correct
+        # We're not paginating, so should be 0
+        self.assertEqual(channel.json_body["offset"], 0)
+
+        # Check that there is no `prev_batch`
+        self.assertNotIn("prev_batch", channel.json_body)
+
+        # Check that there is no `next_batch`
+        self.assertNotIn("next_batch", channel.json_body)
+
+        # Check that all provided attributes are set
+        r = rooms[0]
+        self.assertEqual(room_id, r["room_id"])
+        self.assertEqual(test_room_name, r["name"])
+        self.assertEqual(test_alias, r["canonical_alias"])
+
+    def test_room_list_sort_order(self):
+        """Test room list sort ordering. alphabetical versus number of members,
+        reversing the order, etc.
+        """
+        # Create 3 test rooms
+        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+        room_id_3 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        # Set room names in alphabetical order. room 1 -> A, 2 -> B, 3 -> C
+        self.helper.send_state(
+            room_id_1, "m.room.name", {"name": "A"}, tok=self.admin_user_tok,
+        )
+        self.helper.send_state(
+            room_id_2, "m.room.name", {"name": "B"}, tok=self.admin_user_tok,
+        )
+        self.helper.send_state(
+            room_id_3, "m.room.name", {"name": "C"}, tok=self.admin_user_tok,
+        )
+
+        # Set room member size in the reverse order. room 1 -> 1 member, 2 -> 2, 3 -> 3
+        user_1 = self.register_user("bob1", "pass")
+        user_1_tok = self.login("bob1", "pass")
+        self.helper.join(room_id_2, user_1, tok=user_1_tok)
+
+        user_2 = self.register_user("bob2", "pass")
+        user_2_tok = self.login("bob2", "pass")
+        self.helper.join(room_id_3, user_2, tok=user_2_tok)
+
+        user_3 = self.register_user("bob3", "pass")
+        user_3_tok = self.login("bob3", "pass")
+        self.helper.join(room_id_3, user_3, tok=user_3_tok)
+
+        def _order_test(
+            order_type: str, expected_room_list: List[str], reverse: bool = False,
+        ):
+            """Request the list of rooms in a certain order. Assert that order is what
+            we expect
+
+            Args:
+                order_type: The type of ordering to give the server
+                expected_room_list: The list of room_ids in the order we expect to get
+                    back from the server
+                reverse: Whether to request the list in reverse order
+            """
+            # Request the list of rooms in the given order
+            url = "/_synapse/admin/v1/rooms?order_by=%s" % (order_type,)
+            if reverse:
+                url += "&dir=b"
+            request, channel = self.make_request(
+                "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+            )
+            self.render(request)
+            self.assertEqual(200, channel.code, msg=channel.json_body)
+
+            # Check that rooms were returned
+            self.assertTrue("rooms" in channel.json_body)
+            rooms = channel.json_body["rooms"]
+
+            # Check for the correct total_rooms value
+            self.assertEqual(channel.json_body["total_rooms"], 3)
+
+            # Check that the offset is correct
+            # We're not paginating, so should be 0
+            self.assertEqual(channel.json_body["offset"], 0)
+
+            # Check that there is no `prev_batch`
+            self.assertNotIn("prev_batch", channel.json_body)
+
+            # Check that there is no `next_batch`
+            self.assertNotIn("next_batch", channel.json_body)
+
+            # Check that rooms were returned in alphabetical order
+            returned_order = [r["room_id"] for r in rooms]
+            self.assertListEqual(expected_room_list, returned_order)  # order is checked
+
+        # Test different sort orders, with forward and reverse directions
+        _order_test("alphabetical", [room_id_1, room_id_2, room_id_3])
+        _order_test("alphabetical", [room_id_3, room_id_2, room_id_1], reverse=True)
+
+        _order_test("size", [room_id_3, room_id_2, room_id_1])
+        _order_test("size", [room_id_1, room_id_2, room_id_3], reverse=True)
+
+    def test_search_term(self):
+        """Test that searching for a room works correctly"""
+        # Create two test rooms
+        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        room_name_1 = "something"
+        room_name_2 = "else"
+
+        # Set the name for each room
+        self.helper.send_state(
+            room_id_1, "m.room.name", {"name": room_name_1}, tok=self.admin_user_tok,
+        )
+        self.helper.send_state(
+            room_id_2, "m.room.name", {"name": room_name_2}, tok=self.admin_user_tok,
+        )
+
+        def _search_test(
+            expected_room_id: Optional[str],
+            search_term: str,
+            expected_http_code: int = 200,
+        ):
+            """Search for a room and check that the returned room's id is a match
+
+            Args:
+                expected_room_id: The room_id expected to be returned by the API. Set
+                    to None to expect zero results for the search
+                search_term: The term to search for room names with
+                expected_http_code: The expected http code for the request
+            """
+            url = "/_synapse/admin/v1/rooms?search_term=%s" % (search_term,)
+            request, channel = self.make_request(
+                "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+            )
+            self.render(request)
+            self.assertEqual(expected_http_code, channel.code, msg=channel.json_body)
+
+            if expected_http_code != 200:
+                return
+
+            # Check that rooms were returned
+            self.assertTrue("rooms" in channel.json_body)
+            rooms = channel.json_body["rooms"]
+
+            # Check that the expected number of rooms were returned
+            expected_room_count = 1 if expected_room_id else 0
+            self.assertEqual(len(rooms), expected_room_count)
+            self.assertEqual(channel.json_body["total_rooms"], expected_room_count)
+
+            # Check that the offset is correct
+            # We're not paginating, so should be 0
+            self.assertEqual(channel.json_body["offset"], 0)
+
+            # Check that there is no `prev_batch`
+            self.assertNotIn("prev_batch", channel.json_body)
+
+            # Check that there is no `next_batch`
+            self.assertNotIn("next_batch", channel.json_body)
+
+            if expected_room_id:
+                # Check that the first returned room id is correct
+                r = rooms[0]
+                self.assertEqual(expected_room_id, r["room_id"])
+
+        # Perform search tests
+        _search_test(room_id_1, "something")
+        _search_test(room_id_1, "thing")
+
+        _search_test(room_id_2, "else")
+        _search_test(room_id_2, "se")
+
+        _search_test(None, "foo")
+        _search_test(None, "bar")
+        _search_test(None, "", expected_http_code=400)
-- 
cgit 1.4.1


From 0d0f32bc53fefb5eb444940998b97594da894967 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 22 Jan 2020 14:03:46 +0000
Subject: 1.9.0rc1

---
 CHANGES.md               | 70 ++++++++++++++++++++++++++++++++++++++++++++++++
 changelog.d/5742.feature |  1 -
 changelog.d/6621.doc     |  1 -
 changelog.d/6624.doc     |  1 -
 changelog.d/6654.bugfix  |  1 -
 changelog.d/6655.misc    |  1 -
 changelog.d/6656.doc     |  1 -
 changelog.d/6663.doc     |  1 -
 changelog.d/6664.bugfix  |  1 -
 changelog.d/6665.doc     |  1 -
 changelog.d/6666.misc    |  1 -
 changelog.d/6667.misc    |  1 -
 changelog.d/6675.removal |  1 -
 changelog.d/6681.feature |  1 -
 changelog.d/6682.bugfix  |  2 --
 changelog.d/6685.doc     |  1 -
 changelog.d/6686.misc    |  1 -
 changelog.d/6687.misc    |  1 -
 changelog.d/6688.misc    |  1 -
 changelog.d/6689.misc    |  1 -
 changelog.d/6690.bugfix  |  1 -
 changelog.d/6691.misc    |  1 -
 changelog.d/6697.misc    |  1 -
 changelog.d/6698.doc     |  1 -
 changelog.d/6702.misc    |  1 -
 changelog.d/6706.misc    |  1 -
 changelog.d/6711.bugfix  |  1 -
 changelog.d/6712.feature |  1 -
 changelog.d/6714.bugfix  |  1 -
 changelog.d/6715.misc    |  1 -
 changelog.d/6716.misc    |  1 -
 changelog.d/6717.misc    |  1 -
 changelog.d/6718.bugfix  |  1 -
 changelog.d/6720.feature |  1 -
 changelog.d/6723.misc    |  1 -
 changelog.d/6724.misc    |  1 -
 changelog.d/6728.misc    |  1 -
 changelog.d/6730.bugfix  |  1 -
 changelog.d/6731.bugfix  |  1 -
 changelog.d/6732.misc    |  1 -
 changelog.d/6733.misc    |  1 -
 changelog.d/6742.bugfix  |  1 -
 changelog.d/6747.bugfix  |  1 -
 changelog.d/6749.misc    |  1 -
 changelog.d/6753.bugfix  |  1 -
 changelog.d/6754.misc    |  1 -
 changelog.d/6756.feature |  1 -
 changelog.d/6764.misc    |  1 -
 synapse/__init__.py      |  2 +-
 49 files changed, 71 insertions(+), 49 deletions(-)
 delete mode 100644 changelog.d/5742.feature
 delete mode 100644 changelog.d/6621.doc
 delete mode 100644 changelog.d/6624.doc
 delete mode 100644 changelog.d/6654.bugfix
 delete mode 100644 changelog.d/6655.misc
 delete mode 100644 changelog.d/6656.doc
 delete mode 100644 changelog.d/6663.doc
 delete mode 100644 changelog.d/6664.bugfix
 delete mode 100644 changelog.d/6665.doc
 delete mode 100644 changelog.d/6666.misc
 delete mode 100644 changelog.d/6667.misc
 delete mode 100644 changelog.d/6675.removal
 delete mode 100644 changelog.d/6681.feature
 delete mode 100644 changelog.d/6682.bugfix
 delete mode 100644 changelog.d/6685.doc
 delete mode 100644 changelog.d/6686.misc
 delete mode 100644 changelog.d/6687.misc
 delete mode 100644 changelog.d/6688.misc
 delete mode 100644 changelog.d/6689.misc
 delete mode 100644 changelog.d/6690.bugfix
 delete mode 100644 changelog.d/6691.misc
 delete mode 100644 changelog.d/6697.misc
 delete mode 100644 changelog.d/6698.doc
 delete mode 100644 changelog.d/6702.misc
 delete mode 100644 changelog.d/6706.misc
 delete mode 100644 changelog.d/6711.bugfix
 delete mode 100644 changelog.d/6712.feature
 delete mode 100644 changelog.d/6714.bugfix
 delete mode 100644 changelog.d/6715.misc
 delete mode 100644 changelog.d/6716.misc
 delete mode 100644 changelog.d/6717.misc
 delete mode 100644 changelog.d/6718.bugfix
 delete mode 100644 changelog.d/6720.feature
 delete mode 100644 changelog.d/6723.misc
 delete mode 100644 changelog.d/6724.misc
 delete mode 100644 changelog.d/6728.misc
 delete mode 100644 changelog.d/6730.bugfix
 delete mode 100644 changelog.d/6731.bugfix
 delete mode 100644 changelog.d/6732.misc
 delete mode 100644 changelog.d/6733.misc
 delete mode 100644 changelog.d/6742.bugfix
 delete mode 100644 changelog.d/6747.bugfix
 delete mode 100644 changelog.d/6749.misc
 delete mode 100644 changelog.d/6753.bugfix
 delete mode 100644 changelog.d/6754.misc
 delete mode 100644 changelog.d/6756.feature
 delete mode 100644 changelog.d/6764.misc

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index c8840e9c74..7629a7e8ce 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,73 @@
+Synapse 1.9.0rc1 (2020-01-22)
+=============================
+
+Features
+--------
+
+- Allow admin to create or modify a user. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5742](https://github.com/matrix-org/synapse/issues/5742))
+- Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media. ([\#6681](https://github.com/matrix-org/synapse/issues/6681), [\#6756](https://github.com/matrix-org/synapse/issues/6756))
+- Add org.matrix.e2e_cross_signing to unstable_features in /versions as per [MSC1756](https://github.com/matrix-org/matrix-doc/pull/1756). ([\#6712](https://github.com/matrix-org/synapse/issues/6712))
+- Add a new admin API to list and filter rooms on the server. ([\#6720](https://github.com/matrix-org/synapse/issues/6720))
+
+
+Bugfixes
+--------
+
+- Correctly proxy HTTP errors due to API calls to remote group servers. ([\#6654](https://github.com/matrix-org/synapse/issues/6654))
+- Fix media repo admin APIs when using a media worker. ([\#6664](https://github.com/matrix-org/synapse/issues/6664))
+- Fix "CRITICAL" errors being logged when a request is received for a uri containing non-ascii characters. ([\#6682](https://github.com/matrix-org/synapse/issues/6682))
+- Fix a bug where we would assign a numeric userid if somebody tried registering with an empty username. ([\#6690](https://github.com/matrix-org/synapse/issues/6690))
+- Fix `purge_room` admin API. ([\#6711](https://github.com/matrix-org/synapse/issues/6711))
+- Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies when running the automated purge jobs. ([\#6714](https://github.com/matrix-org/synapse/issues/6714))
+- Fix a bug causing the `synapse_port_db` script to return 0 in a specific error case. ([\#6718](https://github.com/matrix-org/synapse/issues/6718))
+- Fix changing password via user admin API. ([\#6730](https://github.com/matrix-org/synapse/issues/6730))
+- Fix `/events/:event_id` deprecated API. ([\#6731](https://github.com/matrix-org/synapse/issues/6731))
+- Fix monthly active user limiting support for worker mode, fixes [#4639](https://github.com/matrix-org/synapse/issues/4639). ([\#6742](https://github.com/matrix-org/synapse/issues/6742))
+- Fix bug when setting `account_validity` to an empty block in the config. Thanks to @Sorunome for reporting. ([\#6747](https://github.com/matrix-org/synapse/issues/6747))
+- Fix `AttributeError: 'NoneType' object has no attribute 'get'` in `hash_password` when configuration has an empty `password_config`. Contributed by @ivilata. ([\#6753](https://github.com/matrix-org/synapse/issues/6753))
+
+
+Improved Documentation
+----------------------
+
+- Fix a typo in the configuration example for purge jobs in the sample configuration file. ([\#6621](https://github.com/matrix-org/synapse/issues/6621))
+- Add complete documentation of the message retention policies support. ([\#6624](https://github.com/matrix-org/synapse/issues/6624), [\#6665](https://github.com/matrix-org/synapse/issues/6665))
+- No more overriding the entire /etc folder of the container in docker-compose.yaml. Contributed by Fabian Meyer. ([\#6656](https://github.com/matrix-org/synapse/issues/6656))
+- Add some helpful tips about changelog entries to the github pull request template. ([\#6663](https://github.com/matrix-org/synapse/issues/6663))
+- Clarify the `account_validity` and `email` sections of the sample configuration. ([\#6685](https://github.com/matrix-org/synapse/issues/6685))
+- Add more endpoints to the documentation for Synapse workers. ([\#6698](https://github.com/matrix-org/synapse/issues/6698))
+
+
+Deprecations and Removals
+-------------------------
+
+- Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)). ([\#6675](https://github.com/matrix-org/synapse/issues/6675))
+
+
+Internal Changes
+----------------
+
+- Add `local_current_membership` table for tracking local user membership state in rooms. ([\#6655](https://github.com/matrix-org/synapse/issues/6655), [\#6728](https://github.com/matrix-org/synapse/issues/6728))
+- Port `synapse.replication.tcp` to async/await. ([\#6666](https://github.com/matrix-org/synapse/issues/6666))
+- Fixup `synapse.replication` to pass mypy checks. ([\#6667](https://github.com/matrix-org/synapse/issues/6667))
+- Allow additional_resources to implement IResource directly. ([\#6686](https://github.com/matrix-org/synapse/issues/6686))
+- Allow REST endpoint implementations to raise a RedirectException, which will redirect the user's browser to a given location. ([\#6687](https://github.com/matrix-org/synapse/issues/6687))
+- Updates and extensions to the module API. ([\#6688](https://github.com/matrix-org/synapse/issues/6688))
+- Updates to the SAML mapping provider API. ([\#6689](https://github.com/matrix-org/synapse/issues/6689), [\#6723](https://github.com/matrix-org/synapse/issues/6723))
+- Remove redundant RegistrationError class. ([\#6691](https://github.com/matrix-org/synapse/issues/6691))
+- Don't block processing of incoming EDUs behind processing PDUs in the same transaction. ([\#6697](https://github.com/matrix-org/synapse/issues/6697))
+- Remove duplicate check for the `session` query parameter on the `/auth/xxx/fallback/web` Client-Server endpoint. ([\#6702](https://github.com/matrix-org/synapse/issues/6702))
+- Attempt to retry sending a transaction when we detect a remote server has come back online, rather than waiting for a transaction to be triggered by new data. ([\#6706](https://github.com/matrix-org/synapse/issues/6706))
+- Add StateMap type alias to simplify types. ([\#6715](https://github.com/matrix-org/synapse/issues/6715))
+- Add a `DeltaState` to track changes to be made to current state during event persistence. ([\#6716](https://github.com/matrix-org/synapse/issues/6716))
+- Add more logging around message retention policies support. ([\#6717](https://github.com/matrix-org/synapse/issues/6717))
+- When processing a SAML response, log the assertions for easier configuration. ([\#6724](https://github.com/matrix-org/synapse/issues/6724))
+- Fixup `synapse.rest` to pass mypy. ([\#6732](https://github.com/matrix-org/synapse/issues/6732), [\#6764](https://github.com/matrix-org/synapse/issues/6764))
+- Fixup synapse.api to pass mypy. ([\#6733](https://github.com/matrix-org/synapse/issues/6733))
+- Allow streaming cache 'invalidate all' to workers. ([\#6749](https://github.com/matrix-org/synapse/issues/6749))
+- Remove unused CI docker compose files. ([\#6754](https://github.com/matrix-org/synapse/issues/6754))
+
+
 Synapse 1.8.0 (2020-01-09)
 ==========================
 
diff --git a/changelog.d/5742.feature b/changelog.d/5742.feature
deleted file mode 100644
index de10302275..0000000000
--- a/changelog.d/5742.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow admin to create or modify a user. Contributed by Awesome Technologies Innovationslabor GmbH.
diff --git a/changelog.d/6621.doc b/changelog.d/6621.doc
deleted file mode 100644
index 6722ccfda3..0000000000
--- a/changelog.d/6621.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix a typo in the configuration example for purge jobs in the sample configuration file.
diff --git a/changelog.d/6624.doc b/changelog.d/6624.doc
deleted file mode 100644
index bc9a022db2..0000000000
--- a/changelog.d/6624.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add complete documentation of the message retention policies support.
diff --git a/changelog.d/6654.bugfix b/changelog.d/6654.bugfix
deleted file mode 100644
index fed35252db..0000000000
--- a/changelog.d/6654.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Correctly proxy HTTP errors due to API calls to remote group servers.
diff --git a/changelog.d/6655.misc b/changelog.d/6655.misc
deleted file mode 100644
index 01e78bc84e..0000000000
--- a/changelog.d/6655.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add `local_current_membership` table for tracking local user membership state in rooms.
diff --git a/changelog.d/6656.doc b/changelog.d/6656.doc
deleted file mode 100644
index 9f32da1a88..0000000000
--- a/changelog.d/6656.doc
+++ /dev/null
@@ -1 +0,0 @@
-No more overriding the entire /etc folder of the container in docker-compose.yaml. Contributed by Fabian Meyer.
diff --git a/changelog.d/6663.doc b/changelog.d/6663.doc
deleted file mode 100644
index 83b9c1626a..0000000000
--- a/changelog.d/6663.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add some helpful tips about changelog entries to the github pull request template.
\ No newline at end of file
diff --git a/changelog.d/6664.bugfix b/changelog.d/6664.bugfix
deleted file mode 100644
index 8c6a6fa1c8..0000000000
--- a/changelog.d/6664.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix media repo admin APIs when using a media worker.
diff --git a/changelog.d/6665.doc b/changelog.d/6665.doc
deleted file mode 100644
index bc9a022db2..0000000000
--- a/changelog.d/6665.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add complete documentation of the message retention policies support.
diff --git a/changelog.d/6666.misc b/changelog.d/6666.misc
deleted file mode 100644
index e79c23d2d2..0000000000
--- a/changelog.d/6666.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port `synapse.replication.tcp` to async/await.
diff --git a/changelog.d/6667.misc b/changelog.d/6667.misc
deleted file mode 100644
index 227f80a508..0000000000
--- a/changelog.d/6667.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fixup `synapse.replication` to pass mypy checks.
diff --git a/changelog.d/6675.removal b/changelog.d/6675.removal
deleted file mode 100644
index 95df9a2d83..0000000000
--- a/changelog.d/6675.removal
+++ /dev/null
@@ -1 +0,0 @@
-Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)).
diff --git a/changelog.d/6681.feature b/changelog.d/6681.feature
deleted file mode 100644
index 5cf19a4e0e..0000000000
--- a/changelog.d/6681.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media.
diff --git a/changelog.d/6682.bugfix b/changelog.d/6682.bugfix
deleted file mode 100644
index d48ea31477..0000000000
--- a/changelog.d/6682.bugfix
+++ /dev/null
@@ -1,2 +0,0 @@
-Fix "CRITICAL" errors being logged when a request is received for a uri containing non-ascii characters.
-
diff --git a/changelog.d/6685.doc b/changelog.d/6685.doc
deleted file mode 100644
index 7cf750fe3f..0000000000
--- a/changelog.d/6685.doc
+++ /dev/null
@@ -1 +0,0 @@
-Clarify the `account_validity` and `email` sections of the sample configuration.
\ No newline at end of file
diff --git a/changelog.d/6686.misc b/changelog.d/6686.misc
deleted file mode 100644
index 4070f2e563..0000000000
--- a/changelog.d/6686.misc
+++ /dev/null
@@ -1 +0,0 @@
-Allow additional_resources to implement IResource directly.
diff --git a/changelog.d/6687.misc b/changelog.d/6687.misc
deleted file mode 100644
index deb0454602..0000000000
--- a/changelog.d/6687.misc
+++ /dev/null
@@ -1 +0,0 @@
-Allow REST endpoint implementations to raise a RedirectException, which will redirect the user's browser to a given location.
diff --git a/changelog.d/6688.misc b/changelog.d/6688.misc
deleted file mode 100644
index 2a9f28ce5c..0000000000
--- a/changelog.d/6688.misc
+++ /dev/null
@@ -1 +0,0 @@
-Updates and extensions to the module API.
\ No newline at end of file
diff --git a/changelog.d/6689.misc b/changelog.d/6689.misc
deleted file mode 100644
index 17f15e73a8..0000000000
--- a/changelog.d/6689.misc
+++ /dev/null
@@ -1 +0,0 @@
-Updates to the SAML mapping provider API.
diff --git a/changelog.d/6690.bugfix b/changelog.d/6690.bugfix
deleted file mode 100644
index 30ce1dc9f7..0000000000
--- a/changelog.d/6690.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug where we would assign a numeric userid if somebody tried registering with an empty username.
diff --git a/changelog.d/6691.misc b/changelog.d/6691.misc
deleted file mode 100644
index 104e9ce648..0000000000
--- a/changelog.d/6691.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove redundant RegistrationError class.
diff --git a/changelog.d/6697.misc b/changelog.d/6697.misc
deleted file mode 100644
index 5650387804..0000000000
--- a/changelog.d/6697.misc
+++ /dev/null
@@ -1 +0,0 @@
-Don't block processing of incoming EDUs behind processing PDUs in the same transaction.
diff --git a/changelog.d/6698.doc b/changelog.d/6698.doc
deleted file mode 100644
index 5aba51252d..0000000000
--- a/changelog.d/6698.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add more endpoints to the documentation for Synapse workers.
diff --git a/changelog.d/6702.misc b/changelog.d/6702.misc
deleted file mode 100644
index f7bc98409c..0000000000
--- a/changelog.d/6702.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove duplicate check for the `session` query parameter on the `/auth/xxx/fallback/web` Client-Server endpoint.
\ No newline at end of file
diff --git a/changelog.d/6706.misc b/changelog.d/6706.misc
deleted file mode 100644
index 1ac11cc04b..0000000000
--- a/changelog.d/6706.misc
+++ /dev/null
@@ -1 +0,0 @@
-Attempt to retry sending a transaction when we detect a remote server has come back online, rather than waiting for a transaction to be triggered by new data.
diff --git a/changelog.d/6711.bugfix b/changelog.d/6711.bugfix
deleted file mode 100644
index c70506bd88..0000000000
--- a/changelog.d/6711.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix `purge_room` admin API.
diff --git a/changelog.d/6712.feature b/changelog.d/6712.feature
deleted file mode 100644
index 2cce0ecf88..0000000000
--- a/changelog.d/6712.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add org.matrix.e2e_cross_signing to unstable_features in /versions as per [MSC1756](https://github.com/matrix-org/matrix-doc/pull/1756).
diff --git a/changelog.d/6714.bugfix b/changelog.d/6714.bugfix
deleted file mode 100644
index 410516694f..0000000000
--- a/changelog.d/6714.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies when running the automated purge jobs.
diff --git a/changelog.d/6715.misc b/changelog.d/6715.misc
deleted file mode 100644
index 8876b0446d..0000000000
--- a/changelog.d/6715.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add StateMap type alias to simplify types.
diff --git a/changelog.d/6716.misc b/changelog.d/6716.misc
deleted file mode 100644
index 319aaa4acb..0000000000
--- a/changelog.d/6716.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a `DeltaState` to track changes to be made to current state during event persistence.
diff --git a/changelog.d/6717.misc b/changelog.d/6717.misc
deleted file mode 100644
index a2a7776126..0000000000
--- a/changelog.d/6717.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add more logging around message retention policies support.
diff --git a/changelog.d/6718.bugfix b/changelog.d/6718.bugfix
deleted file mode 100644
index 23b23e3ed8..0000000000
--- a/changelog.d/6718.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug causing the `synapse_port_db` script to return 0 in a specific error case.
diff --git a/changelog.d/6720.feature b/changelog.d/6720.feature
deleted file mode 100644
index dfc1b74d62..0000000000
--- a/changelog.d/6720.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a new admin API to list and filter rooms on the server.
\ No newline at end of file
diff --git a/changelog.d/6723.misc b/changelog.d/6723.misc
deleted file mode 100644
index 17f15e73a8..0000000000
--- a/changelog.d/6723.misc
+++ /dev/null
@@ -1 +0,0 @@
-Updates to the SAML mapping provider API.
diff --git a/changelog.d/6724.misc b/changelog.d/6724.misc
deleted file mode 100644
index 5256be75fa..0000000000
--- a/changelog.d/6724.misc
+++ /dev/null
@@ -1 +0,0 @@
-When processing a SAML response, log the assertions for easier configuration.
diff --git a/changelog.d/6728.misc b/changelog.d/6728.misc
deleted file mode 100644
index 01e78bc84e..0000000000
--- a/changelog.d/6728.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add `local_current_membership` table for tracking local user membership state in rooms.
diff --git a/changelog.d/6730.bugfix b/changelog.d/6730.bugfix
deleted file mode 100644
index beb444ca66..0000000000
--- a/changelog.d/6730.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix changing password via user admin API.
diff --git a/changelog.d/6731.bugfix b/changelog.d/6731.bugfix
deleted file mode 100644
index 21f6e15cbd..0000000000
--- a/changelog.d/6731.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix `/events/:event_id` deprecated API.
diff --git a/changelog.d/6732.misc b/changelog.d/6732.misc
deleted file mode 100644
index 8edd767405..0000000000
--- a/changelog.d/6732.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fixup `synapse.rest` to pass mypy.
diff --git a/changelog.d/6733.misc b/changelog.d/6733.misc
deleted file mode 100644
index bf048c0be2..0000000000
--- a/changelog.d/6733.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fixup synapse.api to pass mypy.
diff --git a/changelog.d/6742.bugfix b/changelog.d/6742.bugfix
deleted file mode 100644
index ca2687c8bb..0000000000
--- a/changelog.d/6742.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix monthly active user limiting support for worker mode, fixes [#4639](https://github.com/matrix-org/synapse/issues/4639).
diff --git a/changelog.d/6747.bugfix b/changelog.d/6747.bugfix
deleted file mode 100644
index c98107e741..0000000000
--- a/changelog.d/6747.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug when setting `account_validity` to an empty block in the config. Thanks to @Sorunome for reporting.
diff --git a/changelog.d/6749.misc b/changelog.d/6749.misc
deleted file mode 100644
index 9fa13cb1d4..0000000000
--- a/changelog.d/6749.misc
+++ /dev/null
@@ -1 +0,0 @@
-Allow streaming cache 'invalidate all' to workers.
diff --git a/changelog.d/6753.bugfix b/changelog.d/6753.bugfix
deleted file mode 100644
index 5dfde793e1..0000000000
--- a/changelog.d/6753.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix `AttributeError: 'NoneType' object has no attribute 'get'` in `hash_password` when configuration has an empty `password_config`. Contributed by @ivilata.
diff --git a/changelog.d/6754.misc b/changelog.d/6754.misc
deleted file mode 100644
index 0a955e47e6..0000000000
--- a/changelog.d/6754.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove unused CI docker compose files.
diff --git a/changelog.d/6756.feature b/changelog.d/6756.feature
deleted file mode 100644
index 6328c868f2..0000000000
--- a/changelog.d/6756.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media.
\ No newline at end of file
diff --git a/changelog.d/6764.misc b/changelog.d/6764.misc
deleted file mode 100644
index 8edd767405..0000000000
--- a/changelog.d/6764.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fixup `synapse.rest` to pass mypy.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 17a6f691c8..1c44ca0999 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.9.0.dev2"
+__version__ = "1.9.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From ce84dd9e207d9ae88e4cf9ca8a9731fcac043969 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 22 Jan 2020 15:09:57 +0000
Subject: Remove unnecessary abstractions in admin handler (#6751)

---
 changelog.d/6751.misc                            |  1 +
 synapse/handlers/admin.py                        | 62 ------------------------
 synapse/rest/admin/users.py                      | 19 ++++----
 synapse/storage/data_stores/main/registration.py |  2 +-
 4 files changed, 11 insertions(+), 73 deletions(-)
 create mode 100644 changelog.d/6751.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6751.misc b/changelog.d/6751.misc
new file mode 100644
index 0000000000..7222520528
--- /dev/null
+++ b/changelog.d/6751.misc
@@ -0,0 +1 @@
+Remove some unnecessary admin handler abstraction methods.
\ No newline at end of file
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 60a7c938bc..9205865231 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -62,68 +62,6 @@ class AdminHandler(BaseHandler):
             ret["avatar_url"] = profile.avatar_url
         return ret
 
-    async def get_users(self):
-        """Function to retrieve a list of users in users table.
-
-        Args:
-        Returns:
-            defer.Deferred: resolves to list[dict[str, Any]]
-        """
-        ret = await self.store.get_users()
-
-        return ret
-
-    async def get_users_paginate(self, start, limit, name, guests, deactivated):
-        """Function to retrieve a paginated list of users from
-        users list. This will return a json list of users.
-
-        Args:
-            start (int): start number to begin the query from
-            limit (int): number of rows to retrieve
-            name (string): filter for user names
-            guests (bool): whether to in include guest users
-            deactivated (bool): whether to include deactivated users
-        Returns:
-            defer.Deferred: resolves to json list[dict[str, Any]]
-        """
-        ret = await self.store.get_users_paginate(
-            start, limit, name, guests, deactivated
-        )
-
-        return ret
-
-    async def search_users(self, term):
-        """Function to search users list for one or more users with
-        the matched term.
-
-        Args:
-            term (str): search term
-        Returns:
-            defer.Deferred: resolves to list[dict[str, Any]]
-        """
-        ret = await self.store.search_users(term)
-
-        return ret
-
-    def get_user_server_admin(self, user):
-        """
-        Get the admin bit on a user.
-
-        Args:
-            user_id (UserID): the (necessarily local) user to manipulate
-        """
-        return self.store.is_server_admin(user)
-
-    def set_user_server_admin(self, user, admin):
-        """
-        Set the admin bit on a user.
-
-        Args:
-            user_id (UserID): the (necessarily local) user to manipulate
-            admin (bool): whether or not the user should be an admin of this server
-        """
-        return self.store.set_server_admin(user, admin)
-
     async def export_user_data(self, user_id, writer):
         """Write all data we have on the user to the given writer.
 
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 52d27fa3e3..927e9ca9ee 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -45,6 +45,7 @@ class UsersRestServlet(RestServlet):
 
     def __init__(self, hs):
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
         self.admin_handler = hs.get_handlers().admin_handler
 
@@ -55,7 +56,7 @@ class UsersRestServlet(RestServlet):
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "Can only users a local user")
 
-        ret = await self.admin_handler.get_users()
+        ret = await self.store.get_users()
 
         return 200, ret
 
@@ -80,6 +81,7 @@ class UsersRestServletV2(RestServlet):
 
     def __init__(self, hs):
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
         self.admin_handler = hs.get_handlers().admin_handler
 
@@ -92,7 +94,7 @@ class UsersRestServletV2(RestServlet):
         guests = parse_boolean(request, "guests", default=True)
         deactivated = parse_boolean(request, "deactivated", default=False)
 
-        users = await self.admin_handler.get_users_paginate(
+        users = await self.store.get_users_paginate(
             start, limit, user_id, guests, deactivated
         )
         ret = {"users": users}
@@ -516,8 +518,8 @@ class SearchUsersRestServlet(RestServlet):
     PATTERNS = historical_admin_path_patterns("/search_users/(?P<target_user_id>[^/]*)")
 
     def __init__(self, hs):
-        self.store = hs.get_datastore()
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
         self.handlers = hs.get_handlers()
 
@@ -540,7 +542,7 @@ class SearchUsersRestServlet(RestServlet):
         term = parse_string(request, "term", required=True)
         logger.info("term: %s ", term)
 
-        ret = await self.handlers.admin_handler.search_users(term)
+        ret = await self.handlers.store.search_users(term)
         return 200, ret
 
 
@@ -574,8 +576,8 @@ class UserAdminServlet(RestServlet):
 
     def __init__(self, hs):
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
-        self.handlers = hs.get_handlers()
 
     async def on_GET(self, request, user_id):
         await assert_requester_is_admin(self.auth, request)
@@ -585,8 +587,7 @@ class UserAdminServlet(RestServlet):
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "Only local users can be admins of this homeserver")
 
-        is_admin = await self.handlers.admin_handler.get_user_server_admin(target_user)
-        is_admin = bool(is_admin)
+        is_admin = await self.store.is_server_admin(target_user)
 
         return 200, {"admin": is_admin}
 
@@ -609,8 +610,6 @@ class UserAdminServlet(RestServlet):
         if target_user == auth_user and not set_admin_to:
             raise SynapseError(400, "You may not demote yourself.")
 
-        await self.handlers.admin_handler.set_user_server_admin(
-            target_user, set_admin_to
-        )
+        await self.store.set_user_server_admin(target_user, set_admin_to)
 
         return 200, {}
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index cb4b2b39a0..49306642ed 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -291,7 +291,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="is_server_admin",
         )
 
-        return res if res else False
+        return bool(res) if res else False
 
     def set_server_admin(self, user, admin):
         """Sets whether a user is an admin of this homeserver.
-- 
cgit 1.4.1
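
The shape of the refactor above, as a minimal sketch: the one-line handler
wrappers added nothing over calling the datastore directly, so the servlets
now hold a store reference themselves. Everything below is an illustrative
stand-in, not Synapse code.

    class FakeStore:
        async def get_users(self):
            return [{"name": "@alice:example.com", "admin": 0}]

    # Before: the handler merely forwarded the call to the store.
    class AdminHandler:
        def __init__(self, store):
            self.store = store

        async def get_users(self):
            return await self.store.get_users()

    # After: the servlet keeps a store reference and calls it directly,
    # which is what UsersRestServlet.on_GET now does.
    class UsersRestServlet:
        def __init__(self, store):
            self.store = store

        async def on_GET(self):
            return 200, await self.store.get_users()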


From d31f5f4d89694a6e41b1c9af09ed6405ecb07376 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 23 Jan 2020 11:37:26 +0000
Subject: Update admin room docs with correct endpoints (#6770)

---
 changelog.d/6770.doc    | 1 +
 docs/admin_api/rooms.md | 8 ++++----
 2 files changed, 5 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6770.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6770.doc b/changelog.d/6770.doc
new file mode 100644
index 0000000000..a251b82238
--- /dev/null
+++ b/changelog.d/6770.doc
@@ -0,0 +1 @@
+Fix endpoint documentation for the List Rooms admin api.
\ No newline at end of file
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index 082721ea95..2db457c1b6 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -46,7 +46,7 @@ The following fields are possible in the JSON response body:
 A standard request with no filtering:
 
 ```
-GET /_synapse/admin/rooms
+GET /_synapse/admin/v1/rooms
 
 {}
 ```
@@ -78,7 +78,7 @@ Response:
 Filtering by room name:
 
 ```
-GET /_synapse/admin/rooms?search_term=TWIM
+GET /_synapse/admin/v1/rooms?search_term=TWIM
 
 {}
 ```
@@ -103,7 +103,7 @@ Response:
 Paginating through a list of rooms:
 
 ```
-GET /_synapse/admin/rooms?order_by=size
+GET /_synapse/admin/v1/rooms?order_by=size
 
 {}
 ```
@@ -139,7 +139,7 @@ To get the next batch of room results, we repeat our request, setting the `from`
 parameter to the value of `next_token`.
 
 ```
-GET /_synapse/admin/rooms?order_by=size&from=100
+GET /_synapse/admin/v1/rooms?order_by=size&from=100
 
 {}
 ```
-- 
cgit 1.4.1
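
As a hedged illustration of the corrected paths, a request against the
list-rooms admin API might look like the following. The homeserver URL and
access token are placeholders.

    import requests

    BASE = "https://homeserver.example.com"  # placeholder homeserver
    TOKEN = "<admin_access_token>"           # placeholder admin token

    # Note the /v1/ segment that the docs patch above restores.
    resp = requests.get(
        BASE + "/_synapse/admin/v1/rooms",
        params={"order_by": "size", "from": 0},
        headers={"Authorization": "Bearer " + TOKEN},
    )
    print(resp.json())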


From 5bd3cb7260984164c4c54eb2add1fa7821795360 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 23 Jan 2020 12:03:58 +0000
Subject: Minor fixes to user admin api (#6761)

* don't insist on a password (this is valid if you have an SSO login)
* fix reference to undefined `requester`
---
 changelog.d/6761.bugfix     |  1 +
 synapse/rest/admin/users.py | 14 +++++---------
 2 files changed, 6 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/6761.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6761.bugfix b/changelog.d/6761.bugfix
new file mode 100644
index 0000000000..1c664c02df
--- /dev/null
+++ b/changelog.d/6761.bugfix
@@ -0,0 +1 @@
+Minor fixes to `PUT /_synapse/admin/v2/users` admin api.
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 927e9ca9ee..3455741195 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -153,7 +153,8 @@ class UserRestServletV2(RestServlet):
         return 200, ret
 
     async def on_PUT(self, request, user_id):
-        await assert_requester_is_admin(self.auth, request)
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
 
         target_user = UserID.from_string(user_id)
         body = parse_json_object_from_request(request)
@@ -164,8 +165,6 @@ class UserRestServletV2(RestServlet):
         user = await self.admin_handler.get_user(target_user)
 
         if user:  # modify user
-            requester = await self.auth.get_user_by_req(request)
-
             if "displayname" in body:
                 await self.profile_handler.set_displayname(
                     target_user, requester, body["displayname"], True
@@ -212,11 +211,8 @@ class UserRestServletV2(RestServlet):
             return 200, user
 
         else:  # create user
-            if "password" not in body:
-                raise SynapseError(
-                    400, "password must be specified", errcode=Codes.BAD_JSON
-                )
-            elif (
+            password = body.get("password")
+            if password is not None and (
                 not isinstance(body["password"], text_type)
                 or len(body["password"]) > 512
             ):
@@ -231,7 +227,7 @@ class UserRestServletV2(RestServlet):
 
             user_id = await self.registration_handler.register_user(
                 localpart=target_user.localpart,
-                password=body["password"],
+                password=password,
                 admin=bool(admin),
                 default_display_name=displayname,
                 user_type=user_type,
-- 
cgit 1.4.1
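
A hedged sketch of what the password fix above permits: creating a user via
the v2 admin API without a "password" key, as needed for SSO-only accounts.
URL and token are placeholders.

    import requests

    BASE = "https://homeserver.example.com"  # placeholder homeserver
    TOKEN = "<admin_access_token>"           # placeholder admin token

    # Before this patch the request failed with "password must be specified";
    # afterwards a password-less (e.g. SSO-only) user can be created.
    resp = requests.put(
        BASE + "/_synapse/admin/v2/users/@alice:example.com",
        headers={"Authorization": "Bearer " + TOKEN},
        json={"displayname": "Alice", "admin": False},  # no "password" key
    )
    print(resp.status_code, resp.json())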


From 6b7462a13fff8f4e1dbad0bb4d81bbe0515af7c1 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 23 Jan 2020 12:11:44 +0000
Subject: a bit of debugging for media storage providers (#6757)

* a bit of debugging for media storage providers

* changelog
---
 changelog.d/6757.misc                     | 1 +
 synapse/rest/media/v1/media_storage.py    | 1 +
 synapse/rest/media/v1/storage_provider.py | 6 ++++++
 3 files changed, 8 insertions(+)
 create mode 100644 changelog.d/6757.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6757.misc b/changelog.d/6757.misc
new file mode 100644
index 0000000000..a50c5e974a
--- /dev/null
+++ b/changelog.d/6757.misc
@@ -0,0 +1 @@
+Add some debugging for media storage providers.
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index 3b87717a5a..683a79c966 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -148,6 +148,7 @@ class MediaStorage(object):
         for provider in self.storage_providers:
             res = yield provider.fetch(path, file_info)
             if res:
+                logger.debug("Streaming %s from %s", path, provider)
                 return res
 
         return None
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index 37687ea7f4..858680be26 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -77,6 +77,9 @@ class StorageProviderWrapper(StorageProvider):
         self.store_synchronous = store_synchronous
         self.store_remote = store_remote
 
+    def __str__(self):
+        return "StorageProviderWrapper[%s]" % (self.backend,)
+
     def store_file(self, path, file_info):
         if not file_info.server_name and not self.store_local:
             return defer.succeed(None)
@@ -114,6 +117,9 @@ class FileStorageProviderBackend(StorageProvider):
         self.cache_directory = hs.config.media_store_path
         self.base_directory = config
 
+    def __str__(self):
+        return "FileStorageProviderBackend[%s]" % (self.base_directory,)
+
     def store_file(self, path, file_info):
         """See StorageProvider.store_file"""
 
-- 
cgit 1.4.1
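
Why the __str__ methods help, in a self-contained sketch: the debug line
added to media_storage.py interpolates the provider object, and without a
__str__ it would log as an opaque default repr. The class below is a toy
stand-in, not the real backend.

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger(__name__)

    class FileStorageProviderBackend:
        """Toy stand-in mirroring the __str__ added in the patch above."""

        def __init__(self, base_directory):
            self.base_directory = base_directory

        def __str__(self):
            return "FileStorageProviderBackend[%s]" % (self.base_directory,)

    provider = FileStorageProviderBackend("/var/lib/synapse/media")
    # Logs: Streaming local_content/ab/cd from
    # FileStorageProviderBackend[/var/lib/synapse/media]
    logger.debug("Streaming %s from %s", "local_content/ab/cd", provider)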


From 1dc5a791cf0aad8d672ee4e2e84b544c6be94431 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Thu, 23 Jan 2020 12:59:29 +0000
Subject: Fixup changelog

---
 CHANGES.md           | 5 ++++-
 changelog.d/6770.doc | 1 -
 2 files changed, 4 insertions(+), 2 deletions(-)
 delete mode 100644 changelog.d/6770.doc

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index a582f0fb58..9a53e1872d 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,7 +1,10 @@
 Synapse 1.9.0 (2020-01-23)
 ==========================
 
-No significant changes.
+Improved Documentation
+----------------------
+
+- Fix endpoint documentation for the List Rooms admin api. ([\#6770](https://github.com/matrix-org/synapse/issues/6770))
 
 
 Synapse 1.9.0rc1 (2020-01-22)
diff --git a/changelog.d/6770.doc b/changelog.d/6770.doc
deleted file mode 100644
index a251b82238..0000000000
--- a/changelog.d/6770.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix endpoint documentation for the List Rooms admin api.
\ No newline at end of file
-- 
cgit 1.4.1


From fa4d609e20318821e2ffbeb35bfddbc86be81be0 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 23 Jan 2020 15:19:03 +0000
Subject: Make 'event.redacts' never raise. (#6771)

There are quite a few places where we assume that a redaction event has a
corresponding `redacts` key, which is not always the case. So let's
cheekily make it so that `event.redacts` just returns None instead.
---
 changelog.d/6771.bugfix                           |  1 +
 synapse/events/__init__.py                        | 28 +++++++++++++++---
 synapse/storage/data_stores/main/events.py        |  2 +-
 synapse/storage/data_stores/main/events_worker.py |  2 +-
 tests/storage/test_redaction.py                   | 35 +++++++++++++++++++++++
 5 files changed, 62 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/6771.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6771.bugfix b/changelog.d/6771.bugfix
new file mode 100644
index 0000000000..623ba24acb
--- /dev/null
+++ b/changelog.d/6771.bugfix
@@ -0,0 +1 @@
+Fix persisting redaction events that have been redacted (or otherwise don't have a redacts key).
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 88ed6d764f..72c09327f4 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -116,16 +116,32 @@ class _EventInternalMetadata(object):
         return getattr(self, "redacted", False)
 
 
-def _event_dict_property(key):
+_SENTINEL = object()
+
+
+def _event_dict_property(key, default=_SENTINEL):
+    """Creates a new property for the given key that delegates access to
+    `self._event_dict`.
+
+    The default is used if the key is missing from the `_event_dict`, if given,
+    otherwise an AttributeError will be raised.
+
+    Note: If a default is given then `hasattr` will always return true.
+    """
+
     # We want to be able to use hasattr with the event dict properties.
     # However, (on python3) hasattr expects AttributeError to be raised. Hence,
     # we need to transform the KeyError into an AttributeError
-    def getter(self):
+
+    def getter_raises(self):
         try:
             return self._event_dict[key]
         except KeyError:
             raise AttributeError(key)
 
+    def getter_default(self):
+        return self._event_dict.get(key, default)
+
     def setter(self, v):
         try:
             self._event_dict[key] = v
@@ -138,7 +154,11 @@ def _event_dict_property(key):
         except KeyError:
             raise AttributeError(key)
 
-    return property(getter, setter, delete)
+    if default is _SENTINEL:
+        # No default given, so use the getter that raises
+        return property(getter_raises, setter, delete)
+    else:
+        return property(getter_default, setter, delete)
 
 
 class EventBase(object):
@@ -165,7 +185,7 @@ class EventBase(object):
     origin = _event_dict_property("origin")
     origin_server_ts = _event_dict_property("origin_server_ts")
     prev_events = _event_dict_property("prev_events")
-    redacts = _event_dict_property("redacts")
+    redacts = _event_dict_property("redacts", None)
     room_id = _event_dict_property("room_id")
     sender = _event_dict_property("sender")
     user_id = _event_dict_property("sender")
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 596daf8909..ce553566a5 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -951,7 +951,7 @@ class EventsStore(
             elif event.type == EventTypes.Message:
                 # Insert into the event_search table.
                 self._store_room_message_txn(txn, event)
-            elif event.type == EventTypes.Redaction:
+            elif event.type == EventTypes.Redaction and event.redacts is not None:
                 # Insert into the redactions table.
                 self._store_redaction(txn, event)
             elif event.type == EventTypes.Retention:
diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py
index 3b93e0597a..7251e819f5 100644
--- a/synapse/storage/data_stores/main/events_worker.py
+++ b/synapse/storage/data_stores/main/events_worker.py
@@ -287,7 +287,7 @@ class EventsWorkerStore(SQLBaseStore):
             # we have to recheck auth now.
 
             if not allow_rejected and entry.event.type == EventTypes.Redaction:
-                if not hasattr(entry.event, "redacts"):
+                if entry.event.redacts is None:
                     # A redacted redaction doesn't have a `redacts` key, in
                     # which case lets just withhold the event.
                     #
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index dc45173355..feb1c07cb2 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -398,3 +398,38 @@ class RedactionTestCase(unittest.HomeserverTestCase):
         self.get_success(
             self.store.get_event(first_redact_event.event_id, allow_none=True)
         )
+
+    def test_store_redacted_redaction(self):
+        """Tests that we can store a redacted redaction.
+        """
+
+        self.get_success(
+            self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
+        )
+
+        builder = self.event_builder_factory.for_room_version(
+            RoomVersions.V1,
+            {
+                "type": EventTypes.Redaction,
+                "sender": self.u_alice.to_string(),
+                "room_id": self.room1.to_string(),
+                "content": {"reason": "foo"},
+            },
+        )
+
+        redaction_event, context = self.get_success(
+            self.event_creation_handler.create_new_client_event(builder)
+        )
+
+        self.get_success(
+            self.storage.persistence.persist_event(redaction_event, context)
+        )
+
+        # Now lets jump to the future where we have censored the redaction event
+        # in the DB.
+        self.reactor.advance(60 * 60 * 24 * 31)
+
+        # We just want to check that fetching the event doesn't raise an exception.
+        self.get_success(
+            self.store.get_event(redaction_event.event_id, allow_none=True)
+        )
-- 
cgit 1.4.1
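
A self-contained reduction of the property change above (setter and deleter
omitted for brevity); Event is a toy class standing in for EventBase.

    _SENTINEL = object()

    def _event_dict_property(key, default=_SENTINEL):
        # Raising getter: a missing key becomes AttributeError, so that
        # hasattr() works as expected on python3.
        def getter_raises(self):
            try:
                return self._event_dict[key]
            except KeyError:
                raise AttributeError(key)

        # Defaulting getter: a missing key returns the default, and
        # hasattr() is then always True.
        def getter_default(self):
            return self._event_dict.get(key, default)

        if default is _SENTINEL:
            return property(getter_raises)
        return property(getter_default)

    class Event:
        redacts = _event_dict_property("redacts", None)  # never raises
        room_id = _event_dict_property("room_id")        # raises if absent

        def __init__(self, event_dict):
            self._event_dict = event_dict

    e = Event({"room_id": "!abc:example.com"})
    assert e.redacts is None      # a redacted redaction: no AttributeError
    assert hasattr(e, "redacts")  # the caveat noted in the docstring above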


From aa6ad288f16ed263b2a94b480259639d52ff6cad Mon Sep 17 00:00:00 2001
From: Jason Robinson 
Date: Fri, 24 Jan 2020 11:01:57 +0200
Subject: Clarifications to the workers documentation

* Add note that user_dir requires disabling user dir
  updates from the main synapse process.
* Add note that federation_reader should have
  the federation listener resource.

Signed-off-by: Jason Robinson 
---
 changelog.d/6775.doc | 1 +
 docs/workers.md      | 7 +++++++
 2 files changed, 8 insertions(+)
 create mode 100644 changelog.d/6775.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6775.doc b/changelog.d/6775.doc
new file mode 100644
index 0000000000..9421250f8b
--- /dev/null
+++ b/changelog.d/6775.doc
@@ -0,0 +1 @@
+Clarify documentation related to user_dir and federation_reader workers.
diff --git a/docs/workers.md b/docs/workers.md
index 0ab269fd96..a5d6d18f23 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -185,6 +185,9 @@ reverse-proxy configuration.
 The `^/_matrix/federation/v1/send/` endpoint must only be handled by a single
 instance.
 
+Note that the `worker_listeners.resources.name` needs to be set to `federation`
+for this worker.
+
 ### `synapse.app.federation_sender`
 
 Handles sending federation traffic to other servers. Doesn't handle any
@@ -265,6 +268,10 @@ the following regular expressions:
 
     ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$
 
+When using this worker you must also set `update_user_directory: False` in the 
+shared configuration file to stop the main synapse running background 
+jobs related to updating the user directory.
+
 ### `synapse.app.frontend_proxy`
 
 Proxies some frequently-requested client endpoints to add caching and remove
-- 
cgit 1.4.1


From 9f7aaf90b5ef76416852f35201a851d45eccc0a1 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 24 Jan 2020 14:28:40 +0000
Subject: Validate client_secret parameter (#6767)

---
 changelog.d/6767.bugfix                  |  1 +
 synapse/handlers/identity.py             |  4 ++-
 synapse/rest/client/v2_alpha/account.py  | 23 ++++++++++----
 synapse/rest/client/v2_alpha/register.py |  3 ++
 synapse/util/stringutils.py              | 17 +++++++++++
 tests/util/test_stringutils.py           | 51 ++++++++++++++++++++++++++++++++
 6 files changed, 93 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/6767.bugfix
 create mode 100644 tests/util/test_stringutils.py

(limited to 'changelog.d')

diff --git a/changelog.d/6767.bugfix b/changelog.d/6767.bugfix
new file mode 100644
index 0000000000..63c7c63315
--- /dev/null
+++ b/changelog.d/6767.bugfix
@@ -0,0 +1 @@
+Validate `client_secret` parameter using the regex provided by the Client-Server API, temporarily allowing `:` characters for older clients. The `:` character will be removed in a future release.
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 000fbf090f..23f07832e7 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -38,7 +38,7 @@ from synapse.api.errors import (
 from synapse.config.emailconfig import ThreepidBehaviour
 from synapse.http.client import SimpleHttpClient
 from synapse.util.hash import sha256_and_url_safe_base64
-from synapse.util.stringutils import random_string
+from synapse.util.stringutils import assert_valid_client_secret, random_string
 
 from ._base import BaseHandler
 
@@ -84,6 +84,8 @@ class IdentityHandler(BaseHandler):
             raise SynapseError(
                 400, "Missing param client_secret in creds", errcode=Codes.MISSING_PARAM
             )
+        assert_valid_client_secret(client_secret)
+
         session_id = creds.get("sid")
         if not session_id:
             raise SynapseError(
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index fc240f5cf8..dc837d6c75 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -30,6 +30,7 @@ from synapse.http.servlet import (
 )
 from synapse.push.mailer import Mailer, load_jinja2_templates
 from synapse.util.msisdn import phone_number_to_msisdn
+from synapse.util.stringutils import assert_valid_client_secret
 from synapse.util.threepids import check_3pid_allowed
 
 from ._base import client_patterns, interactive_auth_handler
@@ -81,6 +82,8 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
 
         # Extract params from body
         client_secret = body["client_secret"]
+        assert_valid_client_secret(client_secret)
+
         email = body["email"]
         send_attempt = body["send_attempt"]
         next_link = body.get("next_link")  # Optional param
@@ -166,8 +169,9 @@ class PasswordResetSubmitTokenServlet(RestServlet):
             )
 
         sid = parse_string(request, "sid", required=True)
-        client_secret = parse_string(request, "client_secret", required=True)
         token = parse_string(request, "token", required=True)
+        client_secret = parse_string(request, "client_secret", required=True)
+        assert_valid_client_secret(client_secret)
 
         # Attempt to validate a 3PID session
         try:
@@ -353,6 +357,8 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
         body = parse_json_object_from_request(request)
         assert_params_in_dict(body, ["client_secret", "email", "send_attempt"])
         client_secret = body["client_secret"]
+        assert_valid_client_secret(client_secret)
+
         email = body["email"]
         send_attempt = body["send_attempt"]
         next_link = body.get("next_link")  # Optional param
@@ -413,6 +419,8 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
             body, ["client_secret", "country", "phone_number", "send_attempt"]
         )
         client_secret = body["client_secret"]
+        assert_valid_client_secret(client_secret)
+
         country = body["country"]
         phone_number = body["phone_number"]
         send_attempt = body["send_attempt"]
@@ -493,8 +501,9 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
             )
 
         sid = parse_string(request, "sid", required=True)
-        client_secret = parse_string(request, "client_secret", required=True)
         token = parse_string(request, "token", required=True)
+        client_secret = parse_string(request, "client_secret", required=True)
+        assert_valid_client_secret(client_secret)
 
         # Attempt to validate a 3PID session
         try:
@@ -559,6 +568,7 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
 
         body = parse_json_object_from_request(request)
         assert_params_in_dict(body, ["client_secret", "sid", "token"])
+        assert_valid_client_secret(body["client_secret"])
 
         # Proxy submit_token request to msisdn threepid delegate
         response = await self.identity_handler.proxy_msisdn_submit_token(
@@ -600,8 +610,9 @@ class ThreepidRestServlet(RestServlet):
             )
         assert_params_in_dict(threepid_creds, ["client_secret", "sid"])
 
-        client_secret = threepid_creds["client_secret"]
         sid = threepid_creds["sid"]
+        client_secret = threepid_creds["client_secret"]
+        assert_valid_client_secret(client_secret)
 
         validation_session = await self.identity_handler.validate_threepid_session(
             client_secret, sid
@@ -637,8 +648,9 @@ class ThreepidAddRestServlet(RestServlet):
         body = parse_json_object_from_request(request)
 
         assert_params_in_dict(body, ["client_secret", "sid"])
-        client_secret = body["client_secret"]
         sid = body["sid"]
+        client_secret = body["client_secret"]
+        assert_valid_client_secret(client_secret)
 
         await self.auth_handler.validate_user_via_ui_auth(
             requester, body, self.hs.get_ip_from_request(request)
@@ -676,8 +688,9 @@ class ThreepidBindRestServlet(RestServlet):
         assert_params_in_dict(body, ["id_server", "sid", "client_secret"])
         id_server = body["id_server"]
         sid = body["sid"]
-        client_secret = body["client_secret"]
         id_access_token = body.get("id_access_token")  # optional
+        client_secret = body["client_secret"]
+        assert_valid_client_secret(client_secret)
 
         requester = await self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 1bda9aec7e..a09189b1b4 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -49,6 +49,7 @@ from synapse.http.servlet import (
 from synapse.push.mailer import load_jinja2_templates
 from synapse.util.msisdn import phone_number_to_msisdn
 from synapse.util.ratelimitutils import FederationRateLimiter
+from synapse.util.stringutils import assert_valid_client_secret
 from synapse.util.threepids import check_3pid_allowed
 
 from ._base import client_patterns, interactive_auth_handler
@@ -116,6 +117,8 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
 
         # Extract params from body
         client_secret = body["client_secret"]
+        assert_valid_client_secret(client_secret)
+
         email = body["email"]
         send_attempt = body["send_attempt"]
         next_link = body.get("next_link")  # Optional param
diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
index 982c6d81ca..2c0dcb5208 100644
--- a/synapse/util/stringutils.py
+++ b/synapse/util/stringutils.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,14 +15,22 @@
 # limitations under the License.
 
 import random
+import re
 import string
 
 import six
 from six import PY2, PY3
 from six.moves import range
 
+from synapse.api.errors import Codes, SynapseError
+
 _string_with_symbols = string.digits + string.ascii_letters + ".,;:^&*-_+=#~@"
 
+# https://matrix.org/docs/spec/client_server/r0.6.0#post-matrix-client-r0-register-email-requesttoken
+# Note: The : character is allowed here for older clients, but will be removed in a
+# future release. Context: https://github.com/matrix-org/synapse/issues/6766
+client_secret_regex = re.compile(r"^[0-9a-zA-Z\.\=\_\-\:]+$")
+
 # random_string and random_string_with_symbols are used for a range of things,
 # some cryptographically important, some less so. We use SystemRandom to make sure
 # we get cryptographically-secure randoms.
@@ -109,3 +118,11 @@ def exception_to_unicode(e):
         return msg.decode("utf-8", errors="replace")
     else:
         return msg
+
+
+def assert_valid_client_secret(client_secret):
+    """Validate that a given string matches the client_secret regex defined by the spec"""
+    if client_secret_regex.match(client_secret) is None:
+        raise SynapseError(
+            400, "Invalid client_secret parameter", errcode=Codes.INVALID_PARAM
+        )
diff --git a/tests/util/test_stringutils.py b/tests/util/test_stringutils.py
new file mode 100644
index 0000000000..4f4da29a98
--- /dev/null
+++ b/tests/util/test_stringutils.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.api.errors import SynapseError
+from synapse.util.stringutils import assert_valid_client_secret
+
+from .. import unittest
+
+
+class StringUtilsTestCase(unittest.TestCase):
+    def test_client_secret_regex(self):
+        """Ensure that client_secret does not contain illegal characters"""
+        good = [
+            "abcde12345",
+            "ABCabc123",
+            "_--something==_",
+            "...--==-18913",
+            "8Dj2odd-e9asd.cd==_--ddas-secret-",
+            # We temporarily allow : characters: https://github.com/matrix-org/synapse/issues/6766
+            # To be removed in a future release
+            "SECRET:1234567890",
+        ]
+
+        bad = [
+            "--+-/secret",
+            "\\dx--dsa288",
+            "",
+            "AAS//",
+            "asdj**",
+            ">X>
Date: Mon, 27 Jan 2020 14:09:59 +0200
Subject: Formatting of changelog

Co-Authored-By: Brendan Abolivier 
---
 changelog.d/6775.doc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6775.doc b/changelog.d/6775.doc
index 9421250f8b..c6078ef82d 100644
--- a/changelog.d/6775.doc
+++ b/changelog.d/6775.doc
@@ -1 +1 @@
-Clarify documentation related to user_dir and federation_reader workers.
+Clarify documentation related to `user_dir` and `federation_reader` workers.
-- 
cgit 1.4.1
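
The validation added in #6767 above boils down to a single regular
expression; a quick standalone check, reusing the pattern verbatim:

    import re

    # Pattern from synapse/util/stringutils.py above; ":" is temporarily
    # allowed (https://github.com/matrix-org/synapse/issues/6766).
    client_secret_regex = re.compile(r"^[0-9a-zA-Z\.\=\_\-\:]+$")

    assert client_secret_regex.match("8Dj2odd-e9asd.cd==_--ddas-secret-")
    assert client_secret_regex.match("SECRET:1234567890")    # allowed for now
    assert client_secret_regex.match("--+-/secret") is None  # "+", "/" illegal
    assert client_secret_regex.match("") is None             # empty is illegal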


From d5275fc55f4edc42d1543825da2c13df63d96927 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 27 Jan 2020 13:47:50 +0000
Subject: Propagate cache invalidates from workers to other workers. (#6748)

Currently, if a worker invalidates a cache, the invalidation is streamed to master, which does not then forward it on to the other workers.
---
 changelog.d/6748.misc                     |  1 +
 synapse/replication/tcp/protocol.py       |  2 +-
 synapse/replication/tcp/resource.py       |  9 ++++++---
 synapse/storage/data_stores/main/cache.py | 22 +++++++++++++++++++++-
 4 files changed, 29 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/6748.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6748.misc b/changelog.d/6748.misc
new file mode 100644
index 0000000000..de320d4cd9
--- /dev/null
+++ b/changelog.d/6748.misc
@@ -0,0 +1 @@
+Propagate cache invalidates from workers to other workers.
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 131e5acb09..bc1482a9bb 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -459,7 +459,7 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
         await self.streamer.on_remove_pusher(cmd.app_id, cmd.push_key, cmd.user_id)
 
     async def on_INVALIDATE_CACHE(self, cmd):
-        self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys)
+        await self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys)
 
     async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand):
         self.streamer.on_remote_server_up(cmd.data)
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index 6ebf944f66..ce60ae2e07 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -17,7 +17,7 @@
 
 import logging
 import random
-from typing import List
+from typing import Any, List
 
 from six import itervalues
 
@@ -271,11 +271,14 @@ class ReplicationStreamer(object):
         self.notifier.on_new_replication_data()
 
     @measure_func("repl.on_invalidate_cache")
-    def on_invalidate_cache(self, cache_func, keys):
+    async def on_invalidate_cache(self, cache_func: str, keys: List[Any]):
         """The client has asked us to invalidate a cache
         """
         invalidate_cache_counter.inc()
-        getattr(self.store, cache_func).invalidate(tuple(keys))
+
+        # We invalidate the cache locally, but then also stream that to other
+        # workers.
+        await self.store.invalidate_cache_and_stream(cache_func, tuple(keys))
 
     @measure_func("repl.on_user_ip")
     async def on_user_ip(
diff --git a/synapse/storage/data_stores/main/cache.py b/synapse/storage/data_stores/main/cache.py
index afa2b41c98..d4c44dcc75 100644
--- a/synapse/storage/data_stores/main/cache.py
+++ b/synapse/storage/data_stores/main/cache.py
@@ -16,7 +16,7 @@
 
 import itertools
 import logging
-from typing import Any, Iterable, Optional
+from typing import Any, Iterable, Optional, Tuple
 
 from twisted.internet import defer
 
@@ -33,6 +33,26 @@ CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
 
 
 class CacheInvalidationStore(SQLBaseStore):
+    async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]):
+        """Invalidates the cache and adds it to the cache stream so slaves
+        will know to invalidate their caches.
+
+        This should only be used to invalidate caches where slaves won't
+        otherwise know from other replication streams that the cache should
+        be invalidated.
+        """
+        cache_func = getattr(self, cache_name, None)
+        if not cache_func:
+            return
+
+        cache_func.invalidate(keys)
+        await self.runInteraction(
+            "invalidate_cache_and_stream",
+            self._send_invalidation_to_replication,
+            cache_func.__name__,
+            keys,
+        )
+
     def _invalidate_cache_and_stream(self, txn, cache_func, keys):
         """Invalidates the cache and adds it to the cache stream so slaves
         will know to invalidate their caches.
-- 
cgit 1.4.1
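
To make the invalidate-then-stream flow above concrete, here is a minimal,
self-contained sketch of the pattern: invalidate the named cache locally, then
record the invalidation so it can be replicated to every other worker. The
registry class and all names in it are illustrative stand-ins, not Synapse's
actual interfaces.

    from typing import Any, Dict, List, Tuple

    class TinyCacheRegistry:
        """Named caches plus a log of invalidations to replicate to peers."""

        def __init__(self) -> None:
            self._caches: Dict[str, Dict[Tuple[Any, ...], Any]] = {}
            self.replication_log: List[Tuple[str, Tuple[Any, ...]]] = []

        def register(self, name: str) -> None:
            self._caches[name] = {}

        def invalidate_cache_and_stream(self, name: str, keys: Tuple[Any, ...]) -> None:
            cache = self._caches.get(name)
            if cache is None:
                return  # unknown cache name; mirrors the getattr(..., None) guard
            cache.pop(keys, None)  # invalidate locally...
            self.replication_log.append((name, keys))  # ...then stream to other workers

    registry = TinyCacheRegistry()
    registry.register("get_user_by_id")
    registry.invalidate_cache_and_stream("get_user_by_id", ("@alice:example.com",))
    print(registry.replication_log)  # [('get_user_by_id', ('@alice:example.com',))]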


From 8df862e45d9848c226399c8e39d31497461516ff Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 27 Jan 2020 14:30:57 +0000
Subject: Add `rooms.room_version` column (#6729)

This is so that we don't have to rely on pulling it out of the `current_state_events` table.
---
 changelog.d/6729.misc                              |  1 +
 synapse/federation/federation_client.py            | 50 ++++++++----
 synapse/handlers/federation.py                     | 65 +++++++++++----
 synapse/handlers/room.py                           | 52 +++++++-----
 .../client/v2_alpha/room_upgrade_rest_servlet.py   |  3 +-
 synapse/storage/data_stores/main/room.py           | 94 ++++++++++++++++++++--
 .../main/schema/delta/57/rooms_version_column.sql  | 24 ++++++
 synapse/storage/data_stores/main/state.py          | 34 +++++---
 tests/storage/test_room.py                         |  7 +-
 tests/storage/test_state.py                        |  5 +-
 tests/utils.py                                     |  8 ++
 11 files changed, 270 insertions(+), 73 deletions(-)
 create mode 100644 changelog.d/6729.misc
 create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column.sql

(limited to 'changelog.d')

diff --git a/changelog.d/6729.misc b/changelog.d/6729.misc
new file mode 100644
index 0000000000..5537355bea
--- /dev/null
+++ b/changelog.d/6729.misc
@@ -0,0 +1 @@
+Record room versions in the `rooms` table.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index af652a7659..d57e8ca7a2 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -17,6 +17,7 @@
 import copy
 import itertools
 import logging
+from typing import Dict, Iterable
 
 from prometheus_client import Counter
 
@@ -29,6 +30,7 @@ from synapse.api.errors import (
     FederationDeniedError,
     HttpResponseException,
     SynapseError,
+    UnsupportedRoomVersionError,
 )
 from synapse.api.room_versions import (
     KNOWN_ROOM_VERSIONS,
@@ -385,6 +387,8 @@ class FederationClient(FederationBase):
                 return res
             except InvalidResponseError as e:
                 logger.warning("Failed to %s via %s: %s", description, destination, e)
+            except UnsupportedRoomVersionError:
+                raise
             except HttpResponseException as e:
                 if not 500 <= e.code < 600:
                     raise e.to_synapse_error()
@@ -404,7 +408,13 @@ class FederationClient(FederationBase):
         raise SynapseError(502, "Failed to %s via any server" % (description,))
 
     def make_membership_event(
-        self, destinations, room_id, user_id, membership, content, params
+        self,
+        destinations: Iterable[str],
+        room_id: str,
+        user_id: str,
+        membership: str,
+        content: dict,
+        params: Dict[str, str],
     ):
         """
         Creates an m.room.member event, with context, without participating in the room.
@@ -417,21 +427,23 @@ class FederationClient(FederationBase):
         Note that this does not append any events to any graphs.
 
         Args:
-            destinations (Iterable[str]): Candidate homeservers which are probably
+            destinations: Candidate homeservers which are probably
                 participating in the room.
-            room_id (str): The room in which the event will happen.
-            user_id (str): The user whose membership is being evented.
-            membership (str): The "membership" property of the event. Must be
-                one of "join" or "leave".
-            content (dict): Any additional data to put into the content field
-                of the event.
-            params (dict[str, str|Iterable[str]]): Query parameters to include in the
-                request.
+            room_id: The room in which the event will happen.
+            user_id: The user whose membership is being evented.
+            membership: The "membership" property of the event. Must be one of
+                "join" or "leave".
+            content: Any additional data to put into the content field of the
+                event.
+            params: Query parameters to include in the request.
         Return:
-            Deferred[tuple[str, FrozenEvent, int]]: resolves to a tuple of
-            `(origin, event, event_format)` where origin is the remote
-            homeserver which generated the event, and event_format is one of
-            `synapse.api.room_versions.EventFormatVersions`.
+            Deferred[Tuple[str, FrozenEvent, RoomVersion]]: resolves to a tuple of
+            `(origin, event, room_version)` where origin is the remote
+            homeserver which generated the event, and room_version is the
+            version of the room.
+
+            Fails with an `UnsupportedRoomVersionError` if the remote
+            responds with a room version we don't understand.
 
             Fails with a ``SynapseError`` if the chosen remote server
             returns a 300/400 code.
@@ -453,8 +465,12 @@ class FederationClient(FederationBase):
 
             # Note: If not supplied, the room version may be either v1 or v2,
             # however either way the event format version will be v1.
-            room_version = ret.get("room_version", RoomVersions.V1.identifier)
-            event_format = room_version_to_event_format(room_version)
+            room_version_id = ret.get("room_version", RoomVersions.V1.identifier)
+            room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
+            if not room_version:
+                raise UnsupportedRoomVersionError()
+
+            event_format = room_version_to_event_format(room_version_id)
 
             pdu_dict = ret.get("event", None)
             if not isinstance(pdu_dict, dict):
@@ -478,7 +494,7 @@ class FederationClient(FederationBase):
                 event_dict=pdu_dict,
             )
 
-            return (destination, ev, event_format)
+            return (destination, ev, room_version)
 
         return self._try_destination_list(
             "make_" + membership, destinations, send_request
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index d4f9a792fc..f824ee79a0 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -44,10 +44,10 @@ from synapse.api.errors import (
     StoreError,
     SynapseError,
 )
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
 from synapse.crypto.event_signing import compute_event_signature
 from synapse.event_auth import auth_types_for_event
-from synapse.events import EventBase
+from synapse.events import EventBase, room_version_to_event_format
 from synapse.events.snapshot import EventContext
 from synapse.events.validator import EventValidator
 from synapse.logging.context import (
@@ -703,8 +703,20 @@ class FederationHandler(BaseHandler):
 
         if not room:
             try:
+                prev_state_ids = await context.get_prev_state_ids()
+                create_event = await self.store.get_event(
+                    prev_state_ids[(EventTypes.Create, "")]
+                )
+
+                room_version_id = create_event.content.get(
+                    "room_version", RoomVersions.V1.identifier
+                )
+
                 await self.store.store_room(
-                    room_id=room_id, room_creator_user_id="", is_public=False
+                    room_id=room_id,
+                    room_creator_user_id="",
+                    is_public=False,
+                    room_version=KNOWN_ROOM_VERSIONS[room_version_id],
                 )
             except StoreError:
                 logger.exception("Failed to store room.")
@@ -1186,7 +1198,7 @@ class FederationHandler(BaseHandler):
         """
         logger.debug("Joining %s to %s", joinee, room_id)
 
-        origin, event, event_format_version = yield self._make_and_verify_event(
+        origin, event, room_version = yield self._make_and_verify_event(
             target_hosts,
             room_id,
             joinee,
@@ -1214,6 +1226,8 @@ class FederationHandler(BaseHandler):
                 target_hosts.insert(0, origin)
             except ValueError:
                 pass
+
+            event_format_version = room_version_to_event_format(room_version.identifier)
             ret = yield self.federation_client.send_join(
                 target_hosts, event, event_format_version
             )
@@ -1234,13 +1248,18 @@ class FederationHandler(BaseHandler):
 
             try:
                 yield self.store.store_room(
-                    room_id=room_id, room_creator_user_id="", is_public=False
+                    room_id=room_id,
+                    room_creator_user_id="",
+                    is_public=False,
+                    room_version=room_version,
                 )
             except Exception:
                 # FIXME
                 pass
 
-            yield self._persist_auth_tree(origin, auth_chain, state, event)
+            yield self._persist_auth_tree(
+                origin, auth_chain, state, event, room_version
+            )
 
             # Check whether this room is the result of an upgrade of a room we already know
             # about. If so, migrate over user information
@@ -1486,7 +1505,7 @@ class FederationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def do_remotely_reject_invite(self, target_hosts, room_id, user_id, content):
-        origin, event, event_format_version = yield self._make_and_verify_event(
+        origin, event, room_version = yield self._make_and_verify_event(
             target_hosts, room_id, user_id, "leave", content=content
         )
         # Mark as outlier as we don't have any state for this event; we're not
@@ -1513,7 +1532,11 @@ class FederationHandler(BaseHandler):
     def _make_and_verify_event(
         self, target_hosts, room_id, user_id, membership, content={}, params=None
     ):
-        origin, event, format_ver = yield self.federation_client.make_membership_event(
+        (
+            origin,
+            event,
+            room_version,
+        ) = yield self.federation_client.make_membership_event(
             target_hosts, room_id, user_id, membership, content, params=params
         )
 
@@ -1525,7 +1548,7 @@ class FederationHandler(BaseHandler):
         assert event.user_id == user_id
         assert event.state_key == user_id
         assert event.room_id == room_id
-        return origin, event, format_ver
+        return origin, event, room_version
 
     @defer.inlineCallbacks
     @log_function
@@ -1810,7 +1833,14 @@ class FederationHandler(BaseHandler):
         )
 
     @defer.inlineCallbacks
-    def _persist_auth_tree(self, origin, auth_events, state, event):
+    def _persist_auth_tree(
+        self,
+        origin: str,
+        auth_events: List[EventBase],
+        state: List[EventBase],
+        event: EventBase,
+        room_version: RoomVersion,
+    ):
         """Checks the auth chain is valid (and passes auth checks) for the
         state and event. Then persists the auth chain and state atomically.
         Persists the event separately. Notifies about the persisted events
@@ -1819,10 +1849,12 @@ class FederationHandler(BaseHandler):
         Will attempt to fetch missing auth events.
 
         Args:
-            origin (str): Where the events came from
-            auth_events (list)
-            state (list)
-            event (Event)
+            origin: Where the events came from
+            auth_events
+            state
+            event
+            room_version: The room version we expect this room to have; we
+                will raise if it doesn't match the version in the create event.
 
         Returns:
             Deferred
@@ -1848,10 +1880,13 @@ class FederationHandler(BaseHandler):
             # invalid, and it would fail auth checks anyway.
             raise SynapseError(400, "No create event in state")
 
-        room_version = create_event.content.get(
+        room_version_id = create_event.content.get(
             "room_version", RoomVersions.V1.identifier
         )
 
+        if room_version.identifier != room_version_id:
+            raise SynapseError(400, "Room version mismatch")
+
         missing_auth_events = set()
         for e in itertools.chain(auth_events, state, [event]):
             for e_id in e.auth_event_ids():
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 9f50196ea7..a9490782b7 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -29,7 +29,7 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset
 from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.http.endpoint import parse_and_validate_server_name
 from synapse.storage.state import StateFilter
 from synapse.types import (
@@ -100,13 +100,15 @@ class RoomCreationHandler(BaseHandler):
         self.third_party_event_rules = hs.get_third_party_event_rules()
 
     @defer.inlineCallbacks
-    def upgrade_room(self, requester, old_room_id, new_version):
+    def upgrade_room(
+        self, requester: Requester, old_room_id: str, new_version: RoomVersion
+    ):
         """Replace a room with a new room with a different version
 
         Args:
-            requester (synapse.types.Requester): the user requesting the upgrade
-            old_room_id (unicode): the id of the room to be replaced
-            new_version (unicode): the new room version to use
+            requester: the user requesting the upgrade
+            old_room_id: the id of the room to be replaced
+            new_version: the new room version to use
 
         Returns:
             Deferred[unicode]: the new room id
@@ -151,7 +153,7 @@ class RoomCreationHandler(BaseHandler):
         if r is None:
             raise NotFoundError("Unknown room id %s" % (old_room_id,))
         new_room_id = yield self._generate_room_id(
-            creator_id=user_id, is_public=r["is_public"]
+            creator_id=user_id, is_public=r["is_public"], room_version=new_version,
         )
 
         logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
@@ -299,18 +301,22 @@ class RoomCreationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def clone_existing_room(
-        self, requester, old_room_id, new_room_id, new_room_version, tombstone_event_id
+        self,
+        requester: Requester,
+        old_room_id: str,
+        new_room_id: str,
+        new_room_version: RoomVersion,
+        tombstone_event_id: str,
     ):
         """Populate a new room based on an old room
 
         Args:
-            requester (synapse.types.Requester): the user requesting the upgrade
-            old_room_id (unicode): the id of the room to be replaced
-            new_room_id (unicode): the id to give the new room (should already have been
+            requester: the user requesting the upgrade
+            old_room_id: the id of the room to be replaced
+            new_room_id: the id to give the new room (should already have been
                 created with _generate_room_id())
-            new_room_version (unicode): the new room version to use
-            tombstone_event_id (unicode|str): the ID of the tombstone event in the old
-                room.
+            new_room_version: the new room version to use
+            tombstone_event_id: the ID of the tombstone event in the old room.
         Returns:
             Deferred
         """
@@ -320,7 +326,7 @@ class RoomCreationHandler(BaseHandler):
             raise SynapseError(403, "You are not permitted to create rooms")
 
         creation_content = {
-            "room_version": new_room_version,
+            "room_version": new_room_version.identifier,
             "predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id},
         }
 
@@ -577,14 +583,15 @@ class RoomCreationHandler(BaseHandler):
         if ratelimit:
             yield self.ratelimit(requester)
 
-        room_version = config.get(
+        room_version_id = config.get(
             "room_version", self.config.default_room_version.identifier
         )
 
-        if not isinstance(room_version, string_types):
+        if not isinstance(room_version_id, string_types):
             raise SynapseError(400, "room_version must be a string", Codes.BAD_JSON)
 
-        if room_version not in KNOWN_ROOM_VERSIONS:
+        room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
+        if room_version is None:
             raise SynapseError(
                 400,
                 "Your homeserver does not support this room version",
@@ -631,7 +638,9 @@ class RoomCreationHandler(BaseHandler):
         visibility = config.get("visibility", None)
         is_public = visibility == "public"
 
-        room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
+        room_id = yield self._generate_room_id(
+            creator_id=user_id, is_public=is_public, room_version=room_version,
+        )
 
         directory_handler = self.hs.get_handlers().directory_handler
         if room_alias:
@@ -660,7 +669,7 @@ class RoomCreationHandler(BaseHandler):
         creation_content = config.get("creation_content", {})
 
         # override any attempt to set room versions via the creation_content
-        creation_content["room_version"] = room_version
+        creation_content["room_version"] = room_version.identifier
 
         yield self._send_events_for_new_room(
             requester,
@@ -849,7 +858,9 @@ class RoomCreationHandler(BaseHandler):
             yield send(etype=etype, state_key=state_key, content=content)
 
     @defer.inlineCallbacks
-    def _generate_room_id(self, creator_id, is_public):
+    def _generate_room_id(
+        self, creator_id: str, is_public: bool, room_version: RoomVersion,
+    ):
         # autogen room IDs and try to create it. We may clash, so just
         # try a few times till one goes through, giving up eventually.
         attempts = 0
@@ -863,6 +874,7 @@ class RoomCreationHandler(BaseHandler):
                     room_id=gen_room_id,
                     room_creator_user_id=creator_id,
                     is_public=is_public,
+                    room_version=room_version,
                 )
                 return gen_room_id
             except StoreError:
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
index ca97330797..f357015a70 100644
--- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -64,7 +64,8 @@ class RoomUpgradeRestServlet(RestServlet):
         assert_params_in_dict(content, ("new_version",))
         new_version = content["new_version"]
 
-        if new_version not in KNOWN_ROOM_VERSIONS:
+        new_version = KNOWN_ROOM_VERSIONS.get(content["new_version"])
+        if new_version is None:
             raise SynapseError(
                 400,
                 "Your homeserver does not support this room version",
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index d968803ad2..9a17e336ba 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -29,9 +29,10 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
 from synapse.api.errors import StoreError
+from synapse.api.room_versions import RoomVersion, RoomVersions
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.data_stores.main.search import SearchStore
-from synapse.storage.database import Database
+from synapse.storage.database import Database, LoggingTransaction
 from synapse.types import ThirdPartyInstanceID
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 
@@ -734,6 +735,7 @@ class RoomWorkerStore(SQLBaseStore):
 
 class RoomBackgroundUpdateStore(SQLBaseStore):
     REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
+    ADD_ROOMS_ROOM_VERSION_COLUMN = "add_rooms_room_version_column"
 
     def __init__(self, database: Database, db_conn, hs):
         super(RoomBackgroundUpdateStore, self).__init__(database, db_conn, hs)
@@ -749,6 +751,11 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
             self._remove_tombstoned_rooms_from_directory,
         )
 
+        self.db.updates.register_background_update_handler(
+            self.ADD_ROOMS_ROOM_VERSION_COLUMN,
+            self._background_add_rooms_room_version_column,
+        )
+
     @defer.inlineCallbacks
     def _background_insert_retention(self, progress, batch_size):
         """Retrieves a list of all rooms within a range and inserts an entry for each of
@@ -817,6 +824,73 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
 
         defer.returnValue(batch_size)
 
+    async def _background_add_rooms_room_version_column(
+        self, progress: dict, batch_size: int
+    ):
+        """Background update to go and add room version inforamtion to `rooms`
+        table from `current_state_events` table.
+        """
+
+        last_room_id = progress.get("room_id", "")
+
+        def _background_add_rooms_room_version_column_txn(txn: LoggingTransaction):
+            sql = """
+                SELECT room_id, json FROM current_state_events
+                INNER JOIN event_json USING (room_id, event_id)
+                WHERE room_id > ? AND type = 'm.room.create' AND state_key = ''
+                ORDER BY room_id
+                LIMIT ?
+            """
+
+            txn.execute(sql, (last_room_id, batch_size))
+
+            updates = []
+            for room_id, event_json in txn:
+                event_dict = json.loads(event_json)
+                room_version_id = event_dict.get("content", {}).get(
+                    "room_version", RoomVersions.V1.identifier
+                )
+
+                creator = event_dict.get("content", {}).get("creator")
+
+                updates.append((room_id, creator, room_version_id))
+
+            if not updates:
+                return True
+
+            new_last_room_id = ""
+            for room_id, creator, room_version_id in updates:
+                # We upsert here just in case we don't already have a row,
+                # mainly out of paranoia: much badness would happen if the
+                # row were missing when we later try to get the room version
+                # for the room.
+                self.db.simple_upsert_txn(
+                    txn,
+                    table="rooms",
+                    keyvalues={"room_id": room_id},
+                    values={"room_version": room_version_id},
+                    insertion_values={"is_public": False, "creator": creator},
+                )
+                new_last_room_id = room_id
+
+            self.db.updates._background_update_progress_txn(
+                txn, self.ADD_ROOMS_ROOM_VERSION_COLUMN, {"room_id": new_last_room_id}
+            )
+
+            return False
+
+        end = await self.db.runInteraction(
+            "_background_add_rooms_room_version_column",
+            _background_add_rooms_room_version_column_txn,
+        )
+
+        if end:
+            await self.db.updates._end_background_update(
+                self.ADD_ROOMS_ROOM_VERSION_COLUMN
+            )
+
+        return batch_size
+
     async def _remove_tombstoned_rooms_from_directory(
         self, progress, batch_size
     ) -> int:
@@ -881,14 +955,21 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
         self.config = hs.config
 
     @defer.inlineCallbacks
-    def store_room(self, room_id, room_creator_user_id, is_public):
+    def store_room(
+        self,
+        room_id: str,
+        room_creator_user_id: str,
+        is_public: bool,
+        room_version: RoomVersion,
+    ):
         """Stores a room.
 
         Args:
-            room_id (str): The desired room ID, can be None.
-            room_creator_user_id (str): The user ID of the room creator.
-            is_public (bool): True to indicate that this room should appear in
-            public room lists.
+            room_id: The desired room ID, can be None.
+            room_creator_user_id: The user ID of the room creator.
+            is_public: True to indicate that this room should appear in
+                public room lists.
+            room_version: The version of the room
         Raises:
             StoreError if the room could not be stored.
         """
@@ -902,6 +983,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                         "room_id": room_id,
                         "creator": room_creator_user_id,
                         "is_public": is_public,
+                        "room_version": room_version.identifier,
                     },
                 )
                 if is_public:
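
The background update above follows the standard batched-migration shape: page
through rows ordered by a key, process one batch per transaction, record the
resume point, and signal completion from the handler. Here is a runnable,
self-contained sketch of that shape, using sqlite and a toy table rather than
Synapse's database layer.

    import sqlite3
    from typing import Optional

    def run_batch(conn: sqlite3.Connection, last_room_id: str, batch_size: int) -> Optional[str]:
        """Process one batch; return the resume key, or None when done."""
        rows = conn.execute(
            "SELECT room_id FROM rooms_todo WHERE room_id > ? ORDER BY room_id LIMIT ?",
            (last_room_id, batch_size),
        ).fetchall()
        if not rows:
            return None  # finished; the real code then ends the background update
        for (room_id,) in rows:
            # stand-in for the per-room upsert of the derived room_version
            conn.execute("UPDATE rooms_todo SET done = 1 WHERE room_id = ?", (room_id,))
        return rows[-1][0]  # progress marker, like {"room_id": new_last_room_id}

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE rooms_todo (room_id TEXT PRIMARY KEY, done INT DEFAULT 0)")
    conn.executemany(
        "INSERT INTO rooms_todo (room_id) VALUES (?)", [("!a:x",), ("!b:x",), ("!c:x",)]
    )
    cursor = ""
    while True:
        next_cursor = run_batch(conn, cursor, batch_size=2)
        if next_cursor is None:
            break
        cursor = next_cursor
    print(conn.execute("SELECT count(*) FROM rooms_todo WHERE done = 1").fetchone())  # (3,)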
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column.sql b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column.sql
new file mode 100644
index 0000000000..352a66f5b0
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column.sql
@@ -0,0 +1,24 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- We want to start storing the room version independently of
+-- `current_state_events` so that we can delete stale entries from it without
+-- losing the information.
+ALTER TABLE rooms ADD COLUMN room_version TEXT;
+
+
+INSERT into background_updates (update_name, progress_json)
+    VALUES ('add_rooms_room_version_column', '{}');
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index 33bebd1c48..bd7b0276f1 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -60,24 +60,34 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
     def __init__(self, database: Database, db_conn, hs):
         super(StateGroupWorkerStore, self).__init__(database, db_conn, hs)
 
-    @defer.inlineCallbacks
-    def get_room_version(self, room_id):
+    @cached(max_entries=10000)
+    async def get_room_version(self, room_id: str) -> str:
         """Get the room_version of a given room
 
-        Args:
-            room_id (str)
-
-        Returns:
-            Deferred[str]
-
         Raises:
-            NotFoundError if the room is unknown
+            NotFoundError: if the room is unknown
         """
-        # for now we do this by looking at the create event. We may want to cache this
-        # more intelligently in future.
+
+        # First we try looking up the room version in the database, but for
+        # old rooms we might not have added it yet, so we fall back to the
+        # previous behaviour of looking in the current state events.
+
+        # We really should have an entry in the rooms table for every room we
+        # care about, but let's be a bit paranoid (at least while the background
+        # update is happening) to avoid breaking existing rooms.
+        version = await self.db.simple_select_one_onecol(
+            table="rooms",
+            keyvalues={"room_id": room_id},
+            retcol="room_version",
+            desc="get_room_version",
+            allow_none=True,
+        )
+
+        if version is not None:
+            return version
 
         # Retrieve the room's create event
-        create_event = yield self.get_create_event_for_room(room_id)
+        create_event = await self.get_create_event_for_room(room_id)
         return create_event.content.get("room_version", "1")
 
     @defer.inlineCallbacks
diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py
index 3ddaa151fe..086adeb8fd 100644
--- a/tests/storage/test_room.py
+++ b/tests/storage/test_room.py
@@ -17,6 +17,7 @@
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
+from synapse.api.room_versions import RoomVersions
 from synapse.types import RoomAlias, RoomID, UserID
 
 from tests import unittest
@@ -40,6 +41,7 @@ class RoomStoreTestCase(unittest.TestCase):
             self.room.to_string(),
             room_creator_user_id=self.u_creator.to_string(),
             is_public=True,
+            room_version=RoomVersions.V1,
         )
 
     @defer.inlineCallbacks
@@ -68,7 +70,10 @@ class RoomEventsStoreTestCase(unittest.TestCase):
         self.room = RoomID.from_string("!abcde:test")
 
         yield self.store.store_room(
-            self.room.to_string(), room_creator_user_id="@creator:text", is_public=True
+            self.room.to_string(),
+            room_creator_user_id="@creator:text",
+            is_public=True,
+            room_version=RoomVersions.V1,
         )
 
     @defer.inlineCallbacks
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
index d6ecf102f8..04d58fbf24 100644
--- a/tests/storage/test_state.py
+++ b/tests/storage/test_state.py
@@ -45,7 +45,10 @@ class StateStoreTestCase(tests.unittest.TestCase):
         self.room = RoomID.from_string("!abc123:test")
 
         yield self.store.store_room(
-            self.room.to_string(), room_creator_user_id="@creator:text", is_public=True
+            self.room.to_string(),
+            room_creator_user_id="@creator:text",
+            is_public=True,
+            room_version=RoomVersions.V1,
         )
 
     @defer.inlineCallbacks
diff --git a/tests/utils.py b/tests/utils.py
index e2e9cafd79..513f358f4f 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -639,9 +639,17 @@ def create_room(hs, room_id, creator_id):
     """
 
     persistence_store = hs.get_storage().persistence
+    store = hs.get_datastore()
     event_builder_factory = hs.get_event_builder_factory()
     event_creation_handler = hs.get_event_creation_handler()
 
+    yield store.store_room(
+        room_id=room_id,
+        room_creator_user_id=creator_id,
+        is_public=False,
+        room_version=RoomVersions.V1,
+    )
+
     builder = event_builder_factory.for_room_version(
         RoomVersions.V1,
         {
-- 
cgit 1.4.1
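
The recurring shape in this commit is "resolve the version string once, at the
boundary, then pass the object around". A minimal sketch of that convention
follows; the types and table below are illustrative stand-ins for
synapse.api.room_versions, not the real definitions.

    from typing import Dict, NamedTuple

    class RoomVersion(NamedTuple):
        identifier: str
        event_format: int

    KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
        "1": RoomVersion("1", 1),
        "5": RoomVersion("5", 2),
    }

    def resolve_room_version(version_id: str) -> RoomVersion:
        room_version = KNOWN_ROOM_VERSIONS.get(version_id)
        if room_version is None:
            # mirrors the 400 "Your homeserver does not support this room version"
            raise ValueError("unsupported room version: %s" % (version_id,))
        return room_version

    room_version = resolve_room_version("5")
    print(room_version.identifier, room_version.event_format)  # 5 2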


From 02b44db922f01a35787d2535a834c9774b68020b Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 28 Jan 2020 13:44:21 +0000
Subject: Warn if postgres database has non-C locale. (#6734)

Using a non-C locale can cause issues when upgrading the OS.
---
 UPGRADE.rst                         |  9 ++++++++
 changelog.d/6734.bugfix             |  1 +
 docs/postgres.md                    | 20 +++++++++++++++++-
 synapse/storage/engines/postgres.py | 42 +++++++++++++++++++++++++++++++++++++
 synapse/storage/engines/sqlite.py   |  5 +++++
 synapse/storage/prepare_database.py |  5 +++++
 6 files changed, 81 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6734.bugfix

(limited to 'changelog.d')

diff --git a/UPGRADE.rst b/UPGRADE.rst
index a0202932b1..470246f128 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -76,6 +76,15 @@ for example:
      dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
 
+Upgrading to ****
+===============================
+
+Synapse will now log a warning on start up if used with a PostgreSQL database
+that has a non-recommended locale set.
+
+See ``docs/postgres.md`` for details.
+
+
 Upgrading to v1.8.0
 ===================
 
diff --git a/changelog.d/6734.bugfix b/changelog.d/6734.bugfix
new file mode 100644
index 0000000000..79c6bab4d1
--- /dev/null
+++ b/changelog.d/6734.bugfix
@@ -0,0 +1 @@
+Warn if postgres database has a non-C locale, as that can cause issues when upgrading locales (e.g. due to upgrading OS).
diff --git a/docs/postgres.md b/docs/postgres.md
index 7cb1ad18d4..e0793ecee8 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -32,7 +32,7 @@ Assuming your PostgreSQL database user is called `postgres`, first authenticate
     su - postgres
     # Or, if your system uses sudo to get administrative rights
     sudo -u postgres bash
-  
+
 Then, create a user ``synapse_user`` with:
 
     createuser --pwprompt synapse_user
@@ -63,6 +63,24 @@ You may need to enable password authentication so `synapse_user` can
 connect to the database. See
 .
 
+### Fixing incorrect `COLLATE` or `CTYPE`
+
+Synapse will refuse to set up a new database if it has the wrong values of
+`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
+different locales can cause issues if the locale library is updated from
+underneath the database, or if a different version of the locale is used on any
+replicas.
+
+The safest way to fix the issue is to take a dump and recreate the database with
+the correct `COLLATE` and `CTYPE` parameters (as per
+[docs/postgres.md](docs/postgres.md)). It is also possible to change the
+parameters on a live database and run a `REINDEX` on the entire database;
+however, extreme care must be taken to avoid database corruption.
+
+Note that the above may fail with an error about duplicate rows if corruption
+has already occurred, and such duplicate rows will need to be manually removed.
+
+
 ## Tuning Postgres
 
 The default settings should be fine for most deployments. For larger
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index c84cb452b0..a077345960 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -13,8 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from ._base import IncorrectDatabaseSetup
 
+logger = logging.getLogger(__name__)
+
 
 class PostgresEngine(object):
     single_threaded = False
@@ -52,6 +56,44 @@ class PostgresEngine(object):
                     "See docs/postgres.rst for more information." % (rows[0][0],)
                 )
 
+            txn.execute(
+                "SELECT datcollate, datctype FROM pg_database WHERE datname = current_database()"
+            )
+            collation, ctype = txn.fetchone()
+            if collation != "C":
+                logger.warning(
+                    "Database has incorrect collation of %r. Should be 'C'", collation
+                )
+
+            if ctype != "C":
+                logger.warning(
+                    "Database has incorrect ctype of %r. Should be 'C'", ctype
+                )
+
+    def check_new_database(self, txn):
+        """Gets called when setting up a brand new database. This allows us to
+        apply stricter checks on new databases versus existing databases.
+        """
+
+        txn.execute(
+            "SELECT datcollate, datctype FROM pg_database WHERE datname = current_database()"
+        )
+        collation, ctype = txn.fetchone()
+
+        errors = []
+
+        if collation != "C":
+            errors.append("    - 'COLLATE' is set to %r. Should be 'C'" % (collation,))
+
+        if ctype != "C":
+            errors.append("    - 'CTYPE' is set to %r. Should be 'C'" % (collation,))
+
+        if errors:
+            raise IncorrectDatabaseSetup(
+                "Database is incorrectly configured:\n\n%s\n\n"
+                "See docs/postgres.md for more information." % ("\n".join(errors))
+            )
+
     def convert_param_style(self, sql):
         return sql.replace("?", "%s")
 
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index cbf52f5191..641e490697 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -59,6 +59,11 @@ class Sqlite3Engine(object):
             if version < (3, 11, 0):
                 raise RuntimeError("Synapse requires sqlite 3.11 or above.")
 
+    def check_new_database(self, txn):
+        """Gets called when setting up a brand new database. This allows us to
+        apply stricter checks on new databases versus existing databases.
+        """
+
     def convert_param_style(self, sql):
         return sql
 
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index e86984cd50..c285ef52a0 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -136,6 +136,11 @@ def _setup_new_database(cur, database_engine, data_stores):
         data_stores (list[str]): The names of the data stores to instantiate
             on the given database.
     """
+
+    # We're about to set up a brand new database, so we check that it's
+    # configured to our liking.
+    database_engine.check_new_database(cur)
+
     current_dir = os.path.join(dir_path, "schema", "full_schemas")
     directory_entries = os.listdir(current_dir)
 
-- 
cgit 1.4.1
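
A hedged sketch of the new locale checks: query PostgreSQL for the current
database's collation and ctype, then either warn (existing databases) or
refuse to continue (new databases). `cur` is assumed to be any DB-API cursor
on the target database; the function name and `strict` flag are illustrative,
not Synapse's API.

    def check_locale(cur, strict: bool = False) -> None:
        cur.execute(
            "SELECT datcollate, datctype FROM pg_database"
            " WHERE datname = current_database()"
        )
        collation, ctype = cur.fetchone()
        problems = [
            "'%s' is set to %r. Should be 'C'" % (name, value)
            for name, value in (("COLLATE", collation), ("CTYPE", ctype))
            if value != "C"
        ]
        if strict and problems:
            # new databases: refuse to start, as check_new_database() does
            raise RuntimeError(
                "Database is incorrectly configured:\n" + "\n".join(problems)
            )
        for problem in problems:
            # existing databases: warn only
            print("WARNING: " + problem)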


From a8ce7aeb433e08f46306797a1252668c178a7825 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 28 Jan 2020 14:18:29 +0000
Subject: Pass room version object into event_auth.check and check_redaction
 (#6788)

These are easier to work with than the version strings, and we normally have
one around anyway.

This fixes `FederationHandler._persist_auth_tree`, which was passing a
RoomVersion object into event_auth.check instead of a string.
---
 changelog.d/6788.misc          |  1 +
 synapse/api/auth.py            |  7 +++++--
 synapse/event_auth.py          | 34 +++++++++++++++++++++-------------
 synapse/handlers/federation.py | 18 +++++++++++-------
 synapse/handlers/message.py    |  8 ++++++--
 synapse/state/v1.py            |  4 ++--
 synapse/state/v2.py            |  4 +++-
 tests/test_event_auth.py       | 11 ++++-------
 8 files changed, 53 insertions(+), 34 deletions(-)
 create mode 100644 changelog.d/6788.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6788.misc b/changelog.d/6788.misc
new file mode 100644
index 0000000000..5537355bea
--- /dev/null
+++ b/changelog.d/6788.misc
@@ -0,0 +1 @@
+Record room versions in the `rooms` table.
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 2cbfab2569..8b1277ad02 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -33,6 +33,7 @@ from synapse.api.errors import (
     MissingClientTokenError,
     ResourceLimitError,
 )
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.config.server import is_threepid_reserved
 from synapse.types import StateMap, UserID
 from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
@@ -77,15 +78,17 @@ class Auth(object):
         self._account_validity = hs.config.account_validity
 
     @defer.inlineCallbacks
-    def check_from_context(self, room_version, event, context, do_sig_check=True):
+    def check_from_context(self, room_version: str, event, context, do_sig_check=True):
         prev_state_ids = yield context.get_prev_state_ids()
         auth_events_ids = yield self.compute_auth_events(
             event, prev_state_ids, for_verification=True
         )
         auth_events = yield self.store.get_events(auth_events_ids)
         auth_events = {(e.type, e.state_key): e for e in itervalues(auth_events)}
+
+        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
         event_auth.check(
-            room_version, event, auth_events=auth_events, do_sig_check=do_sig_check
+            room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
         )
 
     @defer.inlineCallbacks
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index e3a1ba47a0..016d5678e5 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014 - 2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,17 +24,27 @@ from unpaddedbase64 import decode_base64
 
 from synapse.api.constants import EventTypes, JoinRules, Membership
 from synapse.api.errors import AuthError, EventSizeError, SynapseError
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
+from synapse.api.room_versions import (
+    KNOWN_ROOM_VERSIONS,
+    EventFormatVersions,
+    RoomVersion,
+)
 from synapse.types import UserID, get_domain_from_id
 
 logger = logging.getLogger(__name__)
 
 
-def check(room_version, event, auth_events, do_sig_check=True, do_size_check=True):
+def check(
+    room_version_obj: RoomVersion,
+    event,
+    auth_events,
+    do_sig_check=True,
+    do_size_check=True,
+):
     """ Checks if this event is correctly authed.
 
     Args:
-        room_version (str): the version of the room
+        room_version_obj: the version of the room
         event: the event being checked.
         auth_events (dict: event-key -> event): the existing room state.
 
@@ -97,10 +108,11 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
                 403, "Creation event's room_id domain does not match sender's"
             )
 
-        room_version = event.content.get("room_version", "1")
-        if room_version not in KNOWN_ROOM_VERSIONS:
+        room_version_prop = event.content.get("room_version", "1")
+        if room_version_prop not in KNOWN_ROOM_VERSIONS:
             raise AuthError(
-                403, "room appears to have unsupported version %s" % (room_version,)
+                403,
+                "room appears to have unsupported version %s" % (room_version_prop,),
             )
         # FIXME
         logger.debug("Allowing! %s", event)
@@ -160,7 +172,7 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
         _check_power_levels(event, auth_events)
 
     if event.type == EventTypes.Redaction:
-        check_redaction(room_version, event, auth_events)
+        check_redaction(room_version_obj, event, auth_events)
 
     logger.debug("Allowing! %s", event)
 
@@ -386,7 +398,7 @@ def _can_send_event(event, auth_events):
     return True
 
 
-def check_redaction(room_version, event, auth_events):
+def check_redaction(room_version_obj: RoomVersion, event, auth_events):
     """Check whether the event sender is allowed to redact the target event.
 
     Returns:
@@ -406,11 +418,7 @@ def check_redaction(room_version, event, auth_events):
     if user_level >= redact_level:
         return False
 
-    v = KNOWN_ROOM_VERSIONS.get(room_version)
-    if not v:
-        raise RuntimeError("Unrecognized room version %r" % (room_version,))
-
-    if v.event_format == EventFormatVersions.V1:
+    if room_version_obj.event_format == EventFormatVersions.V1:
         redacter_domain = get_domain_from_id(event.event_id)
         redactee_domain = get_domain_from_id(event.redacts)
         if redacter_domain == redactee_domain:
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index f824ee79a0..180f165a7a 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -47,7 +47,7 @@ from synapse.api.errors import (
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
 from synapse.crypto.event_signing import compute_event_signature
 from synapse.event_auth import auth_types_for_event
-from synapse.events import EventBase, room_version_to_event_format
+from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.events.validator import EventValidator
 from synapse.logging.context import (
@@ -1198,7 +1198,7 @@ class FederationHandler(BaseHandler):
         """
         logger.debug("Joining %s to %s", joinee, room_id)
 
-        origin, event, room_version = yield self._make_and_verify_event(
+        origin, event, room_version_obj = yield self._make_and_verify_event(
             target_hosts,
             room_id,
             joinee,
@@ -1227,7 +1227,7 @@ class FederationHandler(BaseHandler):
             except ValueError:
                 pass
 
-            event_format_version = room_version_to_event_format(room_version.identifier)
+            event_format_version = room_version_obj.event_format
             ret = yield self.federation_client.send_join(
                 target_hosts, event, event_format_version
             )
@@ -1251,14 +1251,14 @@ class FederationHandler(BaseHandler):
                     room_id=room_id,
                     room_creator_user_id="",
                     is_public=False,
-                    room_version=room_version,
+                    room_version=room_version_obj,
                 )
             except Exception:
                 # FIXME
                 pass
 
             yield self._persist_auth_tree(
-                origin, auth_chain, state, event, room_version
+                origin, auth_chain, state, event, room_version_obj
             )
 
             # Check whether this room is the result of an upgrade of a room we already know
@@ -2022,6 +2022,7 @@ class FederationHandler(BaseHandler):
 
         if do_soft_fail_check:
             room_version = yield self.store.get_room_version(event.room_id)
+            room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
 
             # Calculate the "current state".
             if state is not None:
@@ -2071,7 +2072,9 @@ class FederationHandler(BaseHandler):
             }
 
             try:
-                event_auth.check(room_version, event, auth_events=current_auth_events)
+                event_auth.check(
+                    room_version_obj, event, auth_events=current_auth_events
+                )
             except AuthError as e:
                 logger.warning("Soft-failing %r because %s", event, e)
                 event.internal_metadata.soft_failed = True
@@ -2155,6 +2158,7 @@ class FederationHandler(BaseHandler):
             defer.Deferred[EventContext]: updated context object
         """
         room_version = yield self.store.get_room_version(event.room_id)
+        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
 
         try:
             context = yield self._update_auth_events_and_context_for_auth(
@@ -2172,7 +2176,7 @@ class FederationHandler(BaseHandler):
             )
 
         try:
-            event_auth.check(room_version, event, auth_events=auth_events)
+            event_auth.check(room_version_obj, event, auth_events=auth_events)
         except AuthError as e:
             logger.warning("Failed auth resolution for %r because %s", event, e)
             context.rejected = RejectedReason.AUTH_ERROR
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 8ea3aca2f4..9a0f661b9b 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -40,7 +40,7 @@ from synapse.api.errors import (
     NotFoundError,
     SynapseError,
 )
-from synapse.api.room_versions import RoomVersions
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
 from synapse.api.urls import ConsentURIBuilder
 from synapse.events.validator import EventValidator
 from synapse.logging.context import run_in_background
@@ -962,9 +962,13 @@ class EventCreationHandler(object):
             )
             auth_events = yield self.store.get_events(auth_events_ids)
             auth_events = {(e.type, e.state_key): e for e in auth_events.values()}
+
             room_version = yield self.store.get_room_version(event.room_id)
+            room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
 
-            if event_auth.check_redaction(room_version, event, auth_events=auth_events):
+            if event_auth.check_redaction(
+                room_version_obj, event, auth_events=auth_events
+            ):
                 # this user doesn't have 'redact' rights, so we need to do some more
                 # checks on the original event. Let's start by checking the original
                 # event exists.
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index d6c34ce3b7..24b7c0faef 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -281,7 +281,7 @@ def _resolve_auth_events(events, auth_events):
         try:
             # The signatures have already been checked at this point
             event_auth.check(
-                RoomVersions.V1.identifier,
+                RoomVersions.V1,
                 event,
                 auth_events,
                 do_sig_check=False,
@@ -299,7 +299,7 @@ def _resolve_normal_events(events, auth_events):
         try:
             # The signatures have already been checked at this point
             event_auth.check(
-                RoomVersions.V1.identifier,
+                RoomVersions.V1,
                 event,
                 auth_events,
                 do_sig_check=False,
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 6216fdd204..531018c6a5 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -26,6 +26,7 @@ import synapse.state
 from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import EventBase
 from synapse.types import StateMap
 
@@ -402,6 +403,7 @@ def _iterative_auth_checks(
         Deferred[StateMap[str]]: Returns the final updated state
     """
     resolved_state = base_state.copy()
+    room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
 
     for event_id in event_ids:
         event = event_map[event_id]
@@ -430,7 +432,7 @@ def _iterative_auth_checks(
 
         try:
             event_auth.check(
-                room_version,
+                room_version_obj,
                 event,
                 auth_events,
                 do_sig_check=False,
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index 8b2741d277..ca20b085a2 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -37,7 +37,7 @@ class EventAuthTestCase(unittest.TestCase):
 
         # creator should be able to send state
         event_auth.check(
-            RoomVersions.V1.identifier,
+            RoomVersions.V1,
             _random_state_event(creator),
             auth_events,
             do_sig_check=False,
@@ -47,7 +47,7 @@ class EventAuthTestCase(unittest.TestCase):
         self.assertRaises(
             AuthError,
             event_auth.check,
-            RoomVersions.V1.identifier,
+            RoomVersions.V1,
             _random_state_event(joiner),
             auth_events,
             do_sig_check=False,
@@ -76,7 +76,7 @@ class EventAuthTestCase(unittest.TestCase):
         self.assertRaises(
             AuthError,
             event_auth.check,
-            RoomVersions.V1.identifier,
+            RoomVersions.V1,
             _random_state_event(pleb),
             auth_events,
             do_sig_check=False,
@@ -84,10 +84,7 @@ class EventAuthTestCase(unittest.TestCase):
 
         # king should be able to send state
         event_auth.check(
-            RoomVersions.V1.identifier,
-            _random_state_event(king),
-            auth_events,
-            do_sig_check=False,
+            RoomVersions.V1, _random_state_event(king), auth_events, do_sig_check=False,
         )
 
 
-- 
cgit 1.4.1
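
The payoff of passing the RoomVersion object rather than its identifier is
that version-dependent behaviour becomes a plain attribute lookup, with no
per-call-site KNOWN_ROOM_VERSIONS.get() and no "Unrecognized room version"
RuntimeError to handle. A tiny sketch using stand-in types (not Synapse's):

    from typing import NamedTuple

    class RoomVersion(NamedTuple):
        identifier: str
        event_format: int  # stand-in for EventFormatVersions

    def redaction_needs_domain_check(room_version: RoomVersion) -> bool:
        # check_redaction's V1-format special case becomes a simple attribute test
        return room_version.event_format == 1

    print(redaction_needs_domain_check(RoomVersion("1", 1)))  # True
    print(redaction_needs_domain_check(RoomVersion("5", 2)))  # False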


From 99e205fc214a65d307d4f5484321bcbb32a60b5f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Mon, 27 Jan 2020 16:16:16 +0000
Subject: changelog

---
 changelog.d/6787.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6787.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6787.misc b/changelog.d/6787.misc
new file mode 100644
index 0000000000..82fe636173
--- /dev/null
+++ b/changelog.d/6787.misc
@@ -0,0 +1 @@
+Implement updated auth rules from MSC2260.
-- 
cgit 1.4.1


From fbe0a82c0d603b12d8c1d9a2a1121dafb5616213 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Tue, 28 Jan 2020 09:43:57 +0000
Subject: update changelog

---
 changelog.d/6787.feature | 1 +
 changelog.d/6787.misc    | 1 -
 2 files changed, 1 insertion(+), 1 deletion(-)
 create mode 100644 changelog.d/6787.feature
 delete mode 100644 changelog.d/6787.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6787.feature b/changelog.d/6787.feature
new file mode 100644
index 0000000000..df9e4b77ab
--- /dev/null
+++ b/changelog.d/6787.feature
@@ -0,0 +1 @@
+Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260).
diff --git a/changelog.d/6787.misc b/changelog.d/6787.misc
deleted file mode 100644
index 82fe636173..0000000000
--- a/changelog.d/6787.misc
+++ /dev/null
@@ -1 +0,0 @@
-Implement updated auth rules from MSC2260.
-- 
cgit 1.4.1


From e17a11066192354f6c6144135a14e7abe524f44c Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 28 Jan 2020 14:43:21 +0000
Subject: Detect unknown remote devices and mark cache as stale (#6776)

For now we just record in the database that the cache may be stale.
---
 changelog.d/6776.misc                              |  1 +
 synapse/handlers/devicemessage.py                  | 57 +++++++++++++++++++++-
 synapse/handlers/federation.py                     | 20 ++++++++
 synapse/replication/slave/storage/devices.py       |  2 +-
 synapse/storage/data_stores/main/devices.py        | 29 +++++++++--
 .../delta/57/device_list_remote_cache_stale.sql    | 25 ++++++++++
 6 files changed, 126 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/6776.misc
 create mode 100644 synapse/storage/data_stores/main/schema/delta/57/device_list_remote_cache_stale.sql

(limited to 'changelog.d')

diff --git a/changelog.d/6776.misc b/changelog.d/6776.misc
new file mode 100644
index 0000000000..4f9a4ac7a5
--- /dev/null
+++ b/changelog.d/6776.misc
@@ -0,0 +1 @@
+Detect unknown remote devices and mark cache as stale.
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index 73b9e120f5..5c5fe77be2 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Any, Dict
 
 from canonicaljson import json
 
@@ -65,6 +66,9 @@ class DeviceMessageHandler(object):
                 logger.warning("Request for keys for non-local user %s", user_id)
                 raise SynapseError(400, "Not a user here")
 
+            if not by_device:
+                continue
+
             messages_by_device = {
                 device_id: {
                     "content": message_content,
@@ -73,8 +77,11 @@ class DeviceMessageHandler(object):
                 }
                 for device_id, message_content in by_device.items()
             }
-            if messages_by_device:
-                local_messages[user_id] = messages_by_device
+            local_messages[user_id] = messages_by_device
+
+            yield self._check_for_unknown_devices(
+                message_type, sender_user_id, by_device
+            )
 
         stream_id = yield self.store.add_messages_from_remote_to_device_inbox(
             origin, message_id, local_messages
@@ -84,6 +91,52 @@ class DeviceMessageHandler(object):
             "to_device_key", stream_id, users=local_messages.keys()
         )
 
+    @defer.inlineCallbacks
+    def _check_for_unknown_devices(
+        self,
+        message_type: str,
+        sender_user_id: str,
+        by_device: Dict[str, Dict[str, Any]],
+    ):
+        """Checks inbound device messages for unkown remote devices, and if
+        found marks the remote cache for the user as stale.
+        """
+
+        if message_type != "m.room_key_request":
+            return
+
+        # Get the sending device IDs
+        requesting_device_ids = set()
+        for message_content in by_device.values():
+            device_id = message_content.get("requesting_device_id")
+            requesting_device_ids.add(device_id)
+
+        # Check if we are tracking the devices of the remote user.
+        room_ids = yield self.store.get_rooms_for_user(sender_user_id)
+        if not room_ids:
+            logger.info(
+                "Received device message from remote device we don't"
+                " share a room with: %s %s",
+                sender_user_id,
+                requesting_device_ids,
+            )
+            return
+
+        # If we are tracking, check that we know about the sending
+        # devices.
+        cached_devices = yield self.store.get_cached_devices_for_user(sender_user_id)
+
+        unknown_devices = requesting_device_ids - set(cached_devices)
+        if unknown_devices:
+            logger.info(
+                "Received device message from remote device not in our cache: %s %s",
+                sender_user_id,
+                unknown_devices,
+            )
+            yield self.store.mark_remote_user_device_cache_as_stale(sender_user_id)
+            # TODO: Poke something to start trying to refetch user's
+            # keys.
+
     @defer.inlineCallbacks
     def send_device_message(self, sender_user_id, message_type, messages):
         set_tag("number_of_messages", len(messages))
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 180f165a7a..a67020a259 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -742,6 +742,26 @@ class FederationHandler(BaseHandler):
                     user = UserID.from_string(event.state_key)
                     await self.user_joined_room(user, room_id)
 
+        # For encrypted messages we check that we know about the sending device,
+        # if we don't then we mark the device cache for that user as stale.
+        if event.type == EventTypes.Encryption:
+            device_id = event.content.get("device_id")
+            if device_id is not None:
+                cached_devices = await self.store.get_cached_devices_for_user(
+                    event.sender
+                )
+                if device_id not in cached_devices:
+                    logger.info(
+                        "Received event from remote device not in our cache: %s %s",
+                        event.sender,
+                        device_id,
+                    )
+                    await self.store.mark_remote_user_device_cache_as_stale(
+                        event.sender
+                    )
+                    # TODO: Poke something to start trying to refetch user's
+                    # keys.
+
     @log_function
     async def backfill(self, dest, room_id, limit, extremities):
         """ Trigger a backfill request to `dest` for the given `room_id`
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
index dc625e0d7a..1c77687eea 100644
--- a/synapse/replication/slave/storage/devices.py
+++ b/synapse/replication/slave/storage/devices.py
@@ -72,6 +72,6 @@ class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedSto
                 destination, token
             )
 
-        self._get_cached_devices_for_user.invalidate((user_id,))
+        self.get_cached_devices_for_user.invalidate((user_id,))
         self._get_cached_user_device.invalidate_many((user_id,))
         self.get_device_list_last_stream_id_for_remote.invalidate((user_id,))
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py
index f0a7962dd0..30bf66b2b6 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/data_stores/main/devices.py
@@ -457,7 +457,7 @@ class DeviceWorkerStore(SQLBaseStore):
                 device = yield self._get_cached_user_device(user_id, device_id)
                 results.setdefault(user_id, {})[device_id] = device
             else:
-                results[user_id] = yield self._get_cached_devices_for_user(user_id)
+                results[user_id] = yield self.get_cached_devices_for_user(user_id)
 
         set_tag("in_cache", results)
         set_tag("not_in_cache", user_ids_not_in_cache)
@@ -475,12 +475,12 @@ class DeviceWorkerStore(SQLBaseStore):
         return db_to_json(content)
 
     @cachedInlineCallbacks()
-    def _get_cached_devices_for_user(self, user_id):
+    def get_cached_devices_for_user(self, user_id):
         devices = yield self.db.simple_select_list(
             table="device_lists_remote_cache",
             keyvalues={"user_id": user_id},
             retcols=("device_id", "content"),
-            desc="_get_cached_devices_for_user",
+            desc="get_cached_devices_for_user",
         )
         return {
             device["device_id"]: db_to_json(device["content"]) for device in devices
@@ -641,6 +641,18 @@ class DeviceWorkerStore(SQLBaseStore):
 
         return results
 
+    def mark_remote_user_device_cache_as_stale(self, user_id: str):
+        """Records that the server has reason to believe the cache of the devices
+        for the remote users is out of date.
+        """
+        return self.db.simple_upsert(
+            table="device_lists_remote_resync",
+            keyvalues={"user_id": user_id},
+            values={},
+            insertion_values={"added_ts": self._clock.time_msec()},
+            desc="make_remote_user_device_cache_as_stale",
+        )
+
 
 class DeviceBackgroundUpdateStore(SQLBaseStore):
     def __init__(self, database: Database, db_conn, hs):
@@ -887,7 +899,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             )
 
         txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id))
-        txn.call_after(self._get_cached_devices_for_user.invalidate, (user_id,))
+        txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
         txn.call_after(
             self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
         )
@@ -902,6 +914,13 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             lock=False,
         )
 
+        # If we're replacing the remote user's device list cache presumably
+        # we've done a full resync, so we remove the entry that says we need
+        # to resync
+        self.db.simple_delete_txn(
+            txn, table="device_lists_remote_resync", keyvalues={"user_id": user_id},
+        )
+
     def update_remote_device_list_cache(self, user_id, devices, stream_id):
         """Replace the entire cache of the remote user's devices.
 
@@ -942,7 +961,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             ],
         )
 
-        txn.call_after(self._get_cached_devices_for_user.invalidate, (user_id,))
+        txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
         txn.call_after(self._get_cached_user_device.invalidate_many, (user_id,))
         txn.call_after(
             self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
diff --git a/synapse/storage/data_stores/main/schema/delta/57/device_list_remote_cache_stale.sql b/synapse/storage/data_stores/main/schema/delta/57/device_list_remote_cache_stale.sql
new file mode 100644
index 0000000000..c3b6de2099
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/device_list_remote_cache_stale.sql
@@ -0,0 +1,25 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Records whether the server thinks that a remote user's cached device list
+-- may be out of date (e.g. if we have received a to-device message from a
+-- device we don't know about).
+CREATE TABLE IF NOT EXISTS device_lists_remote_resync (
+    user_id TEXT NOT NULL,
+    added_ts BIGINT NOT NULL
+);
+
+CREATE UNIQUE INDEX device_lists_remote_resync_idx ON device_lists_remote_resync (user_id);
+CREATE INDEX device_lists_remote_resync_ts_idx ON device_lists_remote_resync (added_ts);
-- 
cgit 1.4.1


From fcfb591b312d6ec124c67aef2136a2d5948cadbe Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 28 Jan 2020 18:59:48 +0000
Subject: Fix outbound federation request metrics (#6795)

---
 changelog.d/6795.bugfix                | 1 +
 synapse/http/matrixfederationclient.py | 4 ++++
 2 files changed, 5 insertions(+)
 create mode 100644 changelog.d/6795.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6795.bugfix b/changelog.d/6795.bugfix
new file mode 100644
index 0000000000..d1585653b1
--- /dev/null
+++ b/changelog.d/6795.bugfix
@@ -0,0 +1 @@
+Fix outbound federation request metrics.
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 16765d54e0..6f1bb04d8b 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -408,6 +408,8 @@ class MatrixFederationHttpClient(object):
                         _sec_timeout,
                     )
 
+                    outgoing_requests_counter.labels(method_bytes).inc()
+
                     try:
                         with Measure(self.clock, "outbound_request"):
                             # we don't want all the fancy cookie and redirect handling
@@ -440,6 +442,8 @@ class MatrixFederationHttpClient(object):
                         response.phrase.decode("ascii", errors="replace"),
                     )
 
+                    incoming_responses_counter.labels(method_bytes, response.code).inc()
+
                     set_tag(tags.HTTP_STATUS_CODE, response.code)
 
                     if 200 <= response.code < 300:
-- 
cgit 1.4.1


From 2cad8baa7030a86efc103599d79412741654dc15 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 29 Jan 2020 09:56:41 +0000
Subject: Fix bug when querying remote user keys that require a resync. (#6796)

We ended up only returning a single device, rather than all of them.
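
Roughly, the bug and the fix look like this (illustrative Python with made-up
keys, not the handler code itself):

    devices = [
        {"device_id": "A", "keys": {"ed25519:A": "keyA"}},
        {"device_id": "B", "keys": {"ed25519:B": "keyB"}},
    ]

    # Before: each iteration rebuilt the user's entry, so only the last
    # device survived.
    results = {}
    for device in devices:
        results["@user:remote"] = {device["device_id"]: device["keys"]}
    assert list(results["@user:remote"]) == ["B"]

    # After: accumulate into a single per-user dict so every device is kept.
    results = {}
    user_results = results.setdefault("@user:remote", {})
    for device in devices:
        user_results[device["device_id"]] = device["keys"]
    assert sorted(results["@user:remote"]) == ["A", "B"]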
---
 changelog.d/6796.bugfix      | 1 +
 synapse/handlers/e2e_keys.py | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6796.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6796.bugfix b/changelog.d/6796.bugfix
new file mode 100644
index 0000000000..206a157311
--- /dev/null
+++ b/changelog.d/6796.bugfix
@@ -0,0 +1 @@
+Fix bug where querying a remote user's device keys that weren't cached resulted in only returning a single device.
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 2d889364d4..95a9d71f41 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -208,8 +208,9 @@ class E2eKeysHandler(object):
                         )
 
                     user_devices = user_devices["devices"]
+                    user_results = results.setdefault(user_id, {})
                     for device in user_devices:
-                        results[user_id] = {device["device_id"]: device["keys"]}
+                        user_results[device["device_id"]] = device["keys"]
                     user_ids_updated.append(user_id)
                 except Exception as e:
                     failures[destination] = _exception_to_failure(e)
-- 
cgit 1.4.1


From 611215a49cedf8d5f63c53168173763731d02260 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 29 Jan 2020 11:01:32 +0000
Subject: Delete current state when server leaves a room (#6792)

Otherwise it's just stale data, which may get deleted later anyway so
can't be relied on. It's also a bit of a footgun if we're trying to get
the current state of a room we're not in.
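
Conceptually the change adds a second branch, something like this toy model
(plain Python dicts standing in for the current_state_events table; the real
patch does this in SQL):

    from typing import Dict, Iterable, Tuple

    StateMap = Dict[Tuple[str, str], str]  # (type, state_key) -> event_id

    def apply_delta(
        current_state: StateMap,
        to_delete: Iterable[Tuple[str, str]],
        to_insert: StateMap,
        no_longer_in_room: bool,
    ) -> StateMap:
        if no_longer_in_room:
            # Analogous to DELETE FROM current_state_events WHERE room_id = ?
            return {}
        for key in to_delete:
            current_state.pop(key, None)
        current_state.update(to_insert)
        return current_state

    state = {("m.room.member", "@a:remote"): "$ev1"}
    assert apply_delta(state, [], {}, no_longer_in_room=True) == {}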
---
 changelog.d/6792.misc                      |   1 +
 synapse/storage/data_stores/main/events.py | 183 +++++++++++++++++------------
 synapse/storage/persist_events.py          |  89 +++++++++++++-
 3 files changed, 198 insertions(+), 75 deletions(-)
 create mode 100644 changelog.d/6792.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6792.misc b/changelog.d/6792.misc
new file mode 100644
index 0000000000..fa31d509b3
--- /dev/null
+++ b/changelog.d/6792.misc
@@ -0,0 +1 @@
+Delete current state from the database when server leaves a room.
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index ce553566a5..c9d0d68c3a 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -32,6 +32,7 @@ from twisted.internet import defer
 import synapse.metrics
 from synapse.api.constants import EventContentFields, EventTypes
 from synapse.api.errors import SynapseError
+from synapse.api.room_versions import RoomVersions
 from synapse.events import EventBase  # noqa: F401
 from synapse.events.snapshot import EventContext  # noqa: F401
 from synapse.events.utils import prune_event_dict
@@ -468,84 +469,93 @@ class EventsStore(
             to_delete = delta_state.to_delete
             to_insert = delta_state.to_insert
 
-            # First we add entries to the current_state_delta_stream. We
-            # do this before updating the current_state_events table so
-            # that we can use it to calculate the `prev_event_id`. (This
-            # allows us to not have to pull out the existing state
-            # unnecessarily).
-            #
-            # The stream_id for the update is chosen to be the minimum of the stream_ids
-            # for the batch of the events that we are persisting; that means we do not
-            # end up in a situation where workers see events before the
-            # current_state_delta updates.
-            #
-            sql = """
-                INSERT INTO current_state_delta_stream
-                (stream_id, room_id, type, state_key, event_id, prev_event_id)
-                SELECT ?, ?, ?, ?, ?, (
-                    SELECT event_id FROM current_state_events
-                    WHERE room_id = ? AND type = ? AND state_key = ?
+            if delta_state.no_longer_in_room:
+                # Server is no longer in the room, so we delete the room from
+                # current_state_events, first making sure we've updated the
+                # rooms.room_version column (which gets populated in a
+                # background task).
+                self._upsert_room_version_txn(txn, room_id)
+
+                # Before deleting we populate the current_state_delta_stream
+                # so that async background tasks get told what happened.
+                sql = """
+                    INSERT INTO current_state_delta_stream
+                        (stream_id, room_id, type, state_key, event_id, prev_event_id)
+                    SELECT ?, room_id, type, state_key, null, event_id
+                        FROM current_state_events
+                        WHERE room_id = ?
+                """
+                txn.execute(sql, (stream_id, room_id))
+
+                self.db.simple_delete_txn(
+                    txn, table="current_state_events", keyvalues={"room_id": room_id},
                 )
-            """
-            txn.executemany(
-                sql,
-                (
-                    (
-                        stream_id,
-                        room_id,
-                        etype,
-                        state_key,
-                        None,
-                        room_id,
-                        etype,
-                        state_key,
+            else:
+                # We're still in the room, so we update the current state as normal.
+
+                # First we add entries to the current_state_delta_stream. We
+                # do this before updating the current_state_events table so
+                # that we can use it to calculate the `prev_event_id`. (This
+                # allows us to not have to pull out the existing state
+                # unnecessarily).
+                #
+                # The stream_id for the update is chosen to be the minimum of the stream_ids
+                # for the batch of the events that we are persisting; that means we do not
+                # end up in a situation where workers see events before the
+                # current_state_delta updates.
+                #
+                sql = """
+                    INSERT INTO current_state_delta_stream
+                    (stream_id, room_id, type, state_key, event_id, prev_event_id)
+                    SELECT ?, ?, ?, ?, ?, (
+                        SELECT event_id FROM current_state_events
+                        WHERE room_id = ? AND type = ? AND state_key = ?
                     )
-                    for etype, state_key in to_delete
-                    # We sanity check that we're deleting rather than updating
-                    if (etype, state_key) not in to_insert
-                ),
-            )
-            txn.executemany(
-                sql,
-                (
+                """
+                txn.executemany(
+                    sql,
                     (
-                        stream_id,
-                        room_id,
-                        etype,
-                        state_key,
-                        ev_id,
-                        room_id,
-                        etype,
-                        state_key,
-                    )
-                    for (etype, state_key), ev_id in iteritems(to_insert)
-                ),
-            )
+                        (
+                            stream_id,
+                            room_id,
+                            etype,
+                            state_key,
+                            to_insert.get((etype, state_key)),
+                            room_id,
+                            etype,
+                            state_key,
+                        )
+                        for etype, state_key in itertools.chain(to_delete, to_insert)
+                    ),
+                )
+                # Now we actually update the current_state_events table
 
-            # Now we actually update the current_state_events table
+                txn.executemany(
+                    "DELETE FROM current_state_events"
+                    " WHERE room_id = ? AND type = ? AND state_key = ?",
+                    (
+                        (room_id, etype, state_key)
+                        for etype, state_key in itertools.chain(to_delete, to_insert)
+                    ),
+                )
 
-            txn.executemany(
-                "DELETE FROM current_state_events"
-                " WHERE room_id = ? AND type = ? AND state_key = ?",
-                (
-                    (room_id, etype, state_key)
-                    for etype, state_key in itertools.chain(to_delete, to_insert)
-                ),
-            )
+                # We include the membership in the current state table, hence we do
+                # a lookup when we insert. This assumes that all events have already
+                # been inserted into room_memberships.
+                txn.executemany(
+                    """INSERT INTO current_state_events
+                        (room_id, type, state_key, event_id, membership)
+                    VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
+                    """,
+                    [
+                        (room_id, key[0], key[1], ev_id, ev_id)
+                        for key, ev_id in iteritems(to_insert)
+                    ],
+                )
 
-            # We include the membership in the current state table, hence we do
-            # a lookup when we insert. This assumes that all events have already
-            # been inserted into room_memberships.
-            txn.executemany(
-                """INSERT INTO current_state_events
-                    (room_id, type, state_key, event_id, membership)
-                VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
-                """,
-                [
-                    (room_id, key[0], key[1], ev_id, ev_id)
-                    for key, ev_id in iteritems(to_insert)
-                ],
-            )
+            # We now update `local_current_membership`. We do this regardless
+            # of whether we're still in the room or not to handle the case where
+            # e.g. we just got banned (where we need to record that fact here).
 
             # Note: Do we really want to delete rows here (that we do not
             # subsequently reinsert below)? While technically correct it means
@@ -601,6 +611,35 @@ class EventsStore(
 
             self._invalidate_state_caches_and_stream(txn, room_id, members_changed)
 
+    def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str):
+        """Update the room version in the database based off current state
+        events.
+
+        This is used when we're about to delete current state and we want to
+        ensure that the `rooms.room_version` column is up to date.
+        """
+
+        sql = """
+            SELECT json FROM event_json
+            INNER JOIN current_state_events USING (room_id, event_id)
+            WHERE room_id = ? AND type = ? AND state_key = ?
+        """
+        txn.execute(sql, (room_id, EventTypes.Create, ""))
+        row = txn.fetchone()
+        if row:
+            event_json = json.loads(row[0])
+            content = event_json.get("content", {})
+            creator = content.get("creator")
+            room_version_id = content.get("room_version", RoomVersions.V1.identifier)
+
+            self.db.simple_upsert_txn(
+                txn,
+                table="rooms",
+                keyvalues={"room_id": room_id},
+                values={"room_version": room_version_id},
+                insertion_values={"is_public": False, "creator": creator},
+            )
+
     def _update_forward_extremities_txn(
         self, txn, new_forward_extremities, max_stream_order
     ):
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index 368c457321..d060c8b992 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import itertools
 import logging
 from collections import deque, namedtuple
 from typing import Iterable, List, Optional, Tuple
@@ -27,7 +28,7 @@ from prometheus_client import Counter, Histogram
 
 from twisted.internet import defer
 
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, Membership
 from synapse.events import FrozenEvent
 from synapse.events.snapshot import EventContext
 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
@@ -72,17 +73,20 @@ stale_forward_extremities_counter = Histogram(
 )
 
 
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True)
 class DeltaState:
     """Deltas to use to update the `current_state_events` table.
 
     Attributes:
         to_delete: List of type/state_keys to delete from current state
         to_insert: Map of state to upsert into current state
+        no_longer_in_room: The server is no longer in the room, so the room
+            should e.g. be removed from the `current_state_events` table.
     """
 
     to_delete = attr.ib(type=List[Tuple[str, str]])
     to_insert = attr.ib(type=StateMap[str])
+    no_longer_in_room = attr.ib(type=bool, default=False)
 
 
 class _EventPeristenceQueue(object):
@@ -396,11 +400,12 @@ class EventsPersistenceStorage(object):
                         # If either are not None then there has been a change,
                         # and we need to work out the delta (or use that
                         # given)
+                        delta = None
                         if delta_ids is not None:
                             # If there is a delta we know that we've
                             # only added or replaced state, never
                             # removed keys entirely.
-                            state_delta_for_room[room_id] = DeltaState([], delta_ids)
+                            delta = DeltaState([], delta_ids)
                         elif current_state is not None:
                             with Measure(
                                 self._clock, "persist_events.calculate_state_delta"
@@ -408,6 +413,22 @@ class EventsPersistenceStorage(object):
                                 delta = await self._calculate_state_delta(
                                     room_id, current_state
                                 )
+
+                        if delta:
+                            # If we have a change of state then lets check
+                            # whether we're actually still a member of the room,
+                            # or if our last user left. If we're no longer in
+                            # the room then we delete the current state and
+                            # extremities.
+                            is_still_joined = await self._is_server_still_joined(
+                                room_id, ev_ctx_rm, delta, current_state
+                            )
+                            if not is_still_joined:
+                                logger.info("Server no longer in room %s", room_id)
+                                latest_event_ids = []
+                                current_state = {}
+                                delta.no_longer_in_room = True
+
                             state_delta_for_room[room_id] = delta
 
                         # If we have the current_state then lets prefill
@@ -660,3 +681,65 @@ class EventsPersistenceStorage(object):
         }
 
         return DeltaState(to_delete=to_delete, to_insert=to_insert)
+
+    async def _is_server_still_joined(
+        self,
+        room_id: str,
+        ev_ctx_rm: List[Tuple[FrozenEvent, EventContext]],
+        delta: DeltaState,
+        current_state: Optional[StateMap[str]],
+    ) -> bool:
+        """Check if the server will still be joined after the given events have
+        been persisted.
+
+        Args:
+            room_id
+            ev_ctx_rm
+            delta: The delta of current state between what is in the database
+                and what the new current state will be.
+            current_state: The new current state if it has already been calculated,
+                otherwise None.
+        """
+
+        if not any(
+            self.is_mine_id(state_key)
+            for typ, state_key in itertools.chain(delta.to_delete, delta.to_insert)
+            if typ == EventTypes.Member
+        ):
+            # There have been no changes to membership of our users, so nothing
+            # has changed and we assume we're still in the room.
+            return True
+
+        # Check if any of the given events are a local join that appear in the
+        # current state
+        for (typ, state_key), event_id in delta.to_insert.items():
+            if typ != EventTypes.Member or not self.is_mine_id(state_key):
+                continue
+
+            for event, _ in ev_ctx_rm:
+                if event_id == event.event_id:
+                    if event.membership == Membership.JOIN:
+                        return True
+
+        # There's been a change of membership but we don't have a local join
+        # event in the new events, so we need to check the full state.
+        if current_state is None:
+            current_state = await self.main_store.get_current_state_ids(room_id)
+            current_state = dict(current_state)
+            for key in delta.to_delete:
+                current_state.pop(key, None)
+
+            current_state.update(delta.to_insert)
+
+        event_ids = [
+            event_id
+            for (typ, state_key,), event_id in current_state.items()
+            if typ == EventTypes.Member and self.is_mine_id(state_key)
+        ]
+
+        rows = await self.main_store.get_membership_from_event_ids(event_ids)
+        is_still_joined = any(row["membership"] == Membership.JOIN for row in rows)
+        if is_still_joined:
+            return True
+        else:
+            return False
-- 
cgit 1.4.1


From 6b9e1014cf9c107f3198999159fbc935376fdcc9 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 29 Jan 2020 11:23:01 +0000
Subject: Fix race in federation sender that delayed device updates. (#6799)

We were sending device updates down both the federation stream and
device streams. This meant there was a race if the federation sender
worker processed the federation stream first, as when the sender checked
if there were new device updates the slaved ID generator hadn't been
updated with the new stream IDs and so returned nothing.

This situation is correctly handled by events/receipts/etc by not
sending updates down the federation stream and instead having the
federation sender worker listen on the other streams and poke the
transaction queues as appropriate.
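
A sketch of the worker-side dispatch (simplified; stream names are written as
plain strings and send_device_messages stands in for the federation sender
method):

    from collections import namedtuple

    def on_stream_rows(stream_name, rows, send_device_messages):
        # Device list rows name the destination directly; to-device rows
        # carry an "entity" that is either a local user ("@...") or a
        # remote server name, and only the servers need poking.
        if stream_name == "device_lists":
            hosts = {row.destination for row in rows}
        elif stream_name == "to_device":
            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
        else:
            return
        for host in hosts:
            send_device_messages(host)

    DeviceListsRow = namedtuple("DeviceListsRow", ["destination"])
    on_stream_rows("device_lists", [DeviceListsRow("remote.example")], print)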
---
 changelog.d/6799.bugfix          |  1 +
 synapse/app/federation_sender.py | 20 +++++++++++++++++++-
 synapse/federation/send_queue.py | 32 +++-----------------------------
 3 files changed, 23 insertions(+), 30 deletions(-)
 create mode 100644 changelog.d/6799.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6799.bugfix b/changelog.d/6799.bugfix
new file mode 100644
index 0000000000..322a2758af
--- /dev/null
+++ b/changelog.d/6799.bugfix
@@ -0,0 +1 @@
+Fix race in federation sender worker that delayed sending of device updates.
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index 38d11fdd0f..63a91f1177 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -38,7 +38,11 @@ from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.replication.tcp.streams._base import ReceiptsStream
+from synapse.replication.tcp.streams._base import (
+    DeviceListsStream,
+    ReceiptsStream,
+    ToDeviceStream,
+)
 from synapse.server import HomeServer
 from synapse.storage.database import Database
 from synapse.types import ReadReceipt
@@ -256,6 +260,20 @@ class FederationSenderHandler(object):
                 "process_receipts_for_federation", self._on_new_receipts, rows
             )
 
+        # ... as well as device updates and messages
+        elif stream_name == DeviceListsStream.NAME:
+            hosts = set(row.destination for row in rows)
+            for host in hosts:
+                self.federation_sender.send_device_messages(host)
+
+        elif stream_name == ToDeviceStream.NAME:
+            # The to_device stream includes stuff to be pushed to both local
+            # clients and remote servers, so we ignore entities that start with
+            # '@' (since they'll be local users rather than destinations).
+            hosts = set(row.entity for row in rows if not row.entity.startswith("@"))
+            for host in hosts:
+                self.federation_sender.send_device_messages(host)
+
     @defer.inlineCallbacks
     def _on_new_receipts(self, rows):
         """
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 174f6e42be..0bb82a6bb3 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -69,8 +69,6 @@ class FederationRemoteSendQueue(object):
 
         self.edus = SortedDict()  # stream position -> Edu
 
-        self.device_messages = SortedDict()  # stream position -> destination
-
         self.pos = 1
         self.pos_time = SortedDict()
 
@@ -92,7 +90,6 @@ class FederationRemoteSendQueue(object):
             "keyed_edu",
             "keyed_edu_changed",
             "edus",
-            "device_messages",
             "pos_time",
             "presence_destinations",
         ]:
@@ -171,12 +168,6 @@ class FederationRemoteSendQueue(object):
             for key in keys[:i]:
                 del self.edus[key]
 
-            # Delete things out of device map
-            keys = self.device_messages.keys()
-            i = self.device_messages.bisect_left(position_to_delete)
-            for key in keys[:i]:
-                del self.device_messages[key]
-
     def notify_new_events(self, current_id):
         """As per FederationSender"""
         # We don't need to replicate this as it gets sent down a different
@@ -249,9 +240,8 @@ class FederationRemoteSendQueue(object):
 
     def send_device_messages(self, destination):
         """As per FederationSender"""
-        pos = self._next_pos()
-        self.device_messages[pos] = destination
-        self.notifier.on_new_replication_data()
+        # We don't need to replicate this as it gets sent down a different
+        # stream.
 
     def get_current_token(self):
         return self.pos - 1
@@ -339,14 +329,6 @@ class FederationRemoteSendQueue(object):
         for (pos, edu) in edus:
             rows.append((pos, EduRow(edu)))
 
-        # Fetch changed device messages
-        i = self.device_messages.bisect_right(from_token)
-        j = self.device_messages.bisect_right(to_token) + 1
-        device_messages = {v: k for k, v in self.device_messages.items()[i:j]}
-
-        for (destination, pos) in iteritems(device_messages):
-            rows.append((pos, DeviceRow(destination=destination)))
-
         # Sort rows based on pos
         rows.sort()
 
@@ -504,7 +486,6 @@ ParsedFederationStreamData = namedtuple(
         "presence_destinations",  # list of tuples of UserPresenceState and destinations
         "keyed_edus",  # dict of destination -> { key -> Edu }
         "edus",  # dict of destination -> [Edu]
-        "device_destinations",  # set of destinations
     ),
 )
 
@@ -523,11 +504,7 @@ def process_rows_for_federation(transaction_queue, rows):
     # them into the appropriate collection and then send them off.
 
     buff = ParsedFederationStreamData(
-        presence=[],
-        presence_destinations=[],
-        keyed_edus={},
-        edus={},
-        device_destinations=set(),
+        presence=[], presence_destinations=[], keyed_edus={}, edus={},
     )
 
     # Parse the rows in the stream and add to the buffer
@@ -555,6 +532,3 @@ def process_rows_for_federation(transaction_queue, rows):
     for destination, edu_list in iteritems(buff.edus):
         for edu in edu_list:
             transaction_queue.send_edu(edu, None)
-
-    for destination in buff.device_destinations:
-        transaction_queue.send_device_messages(destination)
-- 
cgit 1.4.1


From 750d4d7599d1985bc262853494b21e9fee34c637 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Tue, 28 Jan 2020 11:09:14 +0000
Subject: changelog

---
 changelog.d/6790.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6790.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6790.feature b/changelog.d/6790.feature
new file mode 100644
index 0000000000..df9e4b77ab
--- /dev/null
+++ b/changelog.d/6790.feature
@@ -0,0 +1 @@
+Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260).
-- 
cgit 1.4.1


From a855b7c3a82458602bd62ed00bffed269f2acfec Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 29 Jan 2020 12:06:31 +0000
Subject: Remove unused DeviceRow class (#6800)

---
 changelog.d/6800.bugfix          |  1 +
 synapse/federation/send_queue.py | 21 +--------------------
 2 files changed, 2 insertions(+), 20 deletions(-)
 create mode 100644 changelog.d/6800.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6800.bugfix b/changelog.d/6800.bugfix
new file mode 100644
index 0000000000..322a2758af
--- /dev/null
+++ b/changelog.d/6800.bugfix
@@ -0,0 +1 @@
+Fix race in federation sender worker that delayed sending of device updates.
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 0bb82a6bb3..001bb304ae 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -454,28 +454,9 @@ class EduRow(BaseFederationRow, namedtuple("EduRow", ("edu",))):  # Edu
         buff.edus.setdefault(self.edu.destination, []).append(self.edu)
 
 
-class DeviceRow(BaseFederationRow, namedtuple("DeviceRow", ("destination",))):  # str
-    """Streams the fact that either a) there is pending to device messages for
-    users on the remote, or b) a local users device has changed and needs to
-    be sent to the remote.
-    """
-
-    TypeId = "d"
-
-    @staticmethod
-    def from_data(data):
-        return DeviceRow(destination=data["destination"])
-
-    def to_data(self):
-        return {"destination": self.destination}
-
-    def add_to_buffer(self, buff):
-        buff.device_destinations.add(self.destination)
-
-
 TypeToRow = {
     Row.TypeId: Row
-    for Row in (PresenceRow, PresenceDestinationsRow, KeyedEduRow, EduRow, DeviceRow)
+    for Row in (PresenceRow, PresenceDestinationsRow, KeyedEduRow, EduRow,)
 }
 
 
-- 
cgit 1.4.1


From 5a246611e3cbf27cf1dd7e4453adc8040cddd6a2 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 30 Jan 2020 11:25:59 +0000
Subject: Type definitions for use in refactoring for redaction changes (#6803)

* Bump signedjson to 1.1

... so that we can use the type definitions

* Fix breakage caused by upgrade to signedjson 1.1

Thanks, @illicitonion...
---
 changelog.d/6803.misc          |  1 +
 synapse/events/__init__.py     |  5 +++--
 synapse/python_dependencies.py |  4 +++-
 synapse/types.py               |  7 ++++++-
 tests/storage/test_keys.py     | 15 +++++++++++----
 5 files changed, 24 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/6803.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6803.misc b/changelog.d/6803.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6803.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 72c09327f4..f813fa2fe7 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -23,6 +23,7 @@ from unpaddedbase64 import encode_base64
 
 from synapse.api.errors import UnsupportedRoomVersionError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
+from synapse.types import JsonDict
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze
 
@@ -197,7 +198,7 @@ class EventBase(object):
     def is_state(self):
         return hasattr(self, "state_key") and self.state_key is not None
 
-    def get_dict(self):
+    def get_dict(self) -> JsonDict:
         d = dict(self._event_dict)
         d.update({"signatures": self.signatures, "unsigned": dict(self.unsigned)})
 
@@ -209,7 +210,7 @@ class EventBase(object):
     def get_internal_metadata_dict(self):
         return self.internal_metadata.get_dict()
 
-    def get_pdu_json(self, time_now=None):
+    def get_pdu_json(self, time_now=None) -> JsonDict:
         pdu_json = self.get_dict()
 
         if time_now is not None and "age_ts" in pdu_json["unsigned"]:
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 5871feaafd..8de8cb2c12 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -1,6 +1,7 @@
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2018 New Vector Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -43,7 +44,8 @@ REQUIREMENTS = [
     "frozendict>=1",
     "unpaddedbase64>=1.1.0",
     "canonicaljson>=1.1.3",
-    "signedjson>=1.0.0",
+    # we use the type definitions added in signedjson 1.1.
+    "signedjson>=1.1.0",
     "pynacl>=1.2.1",
     "idna>=2.5",
     # validating SSL certs for IP addresses requires service_identity 18.1.
diff --git a/synapse/types.py b/synapse/types.py
index 65e4d8c181..f3cd465735 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -17,7 +17,7 @@ import re
 import string
 import sys
 from collections import namedtuple
-from typing import Dict, Tuple, TypeVar
+from typing import Any, Dict, Tuple, TypeVar
 
 import attr
 from signedjson.key import decode_verify_key_bytes
@@ -43,6 +43,11 @@ T = TypeVar("T")
 StateMap = Dict[Tuple[str, str], T]
 
 
+# the type of a JSON-serialisable dict. This could be made stronger, but it will
+# do for now.
+JsonDict = Dict[str, Any]
+
+
 class Requester(
     namedtuple(
         "Requester", ["user", "access_token_id", "is_guest", "device_id", "app_service"]
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py
index e07ff01201..95f309fbbc 100644
--- a/tests/storage/test_keys.py
+++ b/tests/storage/test_keys.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import signedjson.key
+import unpaddedbase64
 
 from twisted.internet.defer import Deferred
 
@@ -21,11 +22,17 @@ from synapse.storage.keys import FetchKeyResult
 
 import tests.unittest
 
-KEY_1 = signedjson.key.decode_verify_key_base64(
-    "ed25519", "key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw"
+
+def decode_verify_key_base64(key_id: str, key_base64: str):
+    key_bytes = unpaddedbase64.decode_base64(key_base64)
+    return signedjson.key.decode_verify_key_bytes(key_id, key_bytes)
+
+
+KEY_1 = decode_verify_key_base64(
+    "ed25519:key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw"
 )
-KEY_2 = signedjson.key.decode_verify_key_base64(
-    "ed25519", "key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
+KEY_2 = decode_verify_key_base64(
+    "ed25519:key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
 )
 
 
-- 
cgit 1.4.1


From c80a9fe13dc098037a37bb0920a00b2c8cb53174 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 30 Jan 2020 15:06:58 +0000
Subject: When a client asks for remote keys check if should resync. (#6797)

If we detect that the remote users' keys may have changed then we should
attempt to resync against the remote server rather than using the
(potentially) stale local cache.
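
A minimal sketch of the resulting cache split (assuming user_map maps user ID
to the last-seen stream ID, with a falsy value meaning "not cached"):

    def split_cached_users(user_map, users_needing_resync):
        # Users with a stream ID are cached, unless they've been flagged
        # as needing a resync, in which case we treat them as uncached.
        in_cache = {
            user_id for user_id, stream_id in user_map.items() if stream_id
        } - users_needing_resync
        return in_cache, set(user_map) - in_cache

    cached, fetch = split_cached_users(
        {"@a:remote": 5, "@b:remote": 7}, {"@b:remote"}
    )
    assert cached == {"@a:remote"} and fetch == {"@b:remote"}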
---
 changelog.d/6797.misc                       |  1 +
 synapse/storage/data_stores/main/devices.py | 32 ++++++++++++++++++++++++++---
 2 files changed, 30 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6797.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6797.misc b/changelog.d/6797.misc
new file mode 100644
index 0000000000..e9127bac51
--- /dev/null
+++ b/changelog.d/6797.misc
@@ -0,0 +1 @@
+When a client asks for a remote user's device keys check if the local cache for that user has been marked as potentially stale.
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py
index 30bf66b2b6..a34415ff14 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/data_stores/main/devices.py
@@ -32,7 +32,7 @@ from synapse.logging.opentracing import (
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.database import Database
-from synapse.types import get_verify_key_from_cross_signing_key
+from synapse.types import Collection, get_verify_key_from_cross_signing_key
 from synapse.util.caches.descriptors import (
     Cache,
     cached,
@@ -443,8 +443,15 @@ class DeviceWorkerStore(SQLBaseStore):
         """
         user_ids = set(user_id for user_id, _ in query_list)
         user_map = yield self.get_device_list_last_stream_id_for_remotes(list(user_ids))
-        user_ids_in_cache = set(
-            user_id for user_id, stream_id in user_map.items() if stream_id
+
+        # We go and check if any of the users need to have their device lists
+        # resynced. If they do then we remove them from the cached list.
+        users_needing_resync = yield self.get_user_ids_requiring_device_list_resync(
+            user_ids
+        )
+        user_ids_in_cache = (
+            set(user_id for user_id, stream_id in user_map.items() if stream_id)
+            - users_needing_resync
         )
         user_ids_not_in_cache = user_ids - user_ids_in_cache
 
@@ -641,6 +648,25 @@ class DeviceWorkerStore(SQLBaseStore):
 
         return results
 
+    @defer.inlineCallbacks
+    def get_user_ids_requiring_device_list_resync(self, user_ids: Collection[str]):
+        """Given a list of remote users return the list of users that we
+        should resync the device lists for.
+
+        Returns:
+            Deferred[Set[str]]
+        """
+
+        rows = yield self.db.simple_select_many_batch(
+            table="device_lists_remote_resync",
+            column="user_id",
+            iterable=user_ids,
+            retcols=("user_id",),
+            desc="get_user_ids_requiring_device_list_resync",
+        )
+
+        return {row["user_id"] for row in rows}
+
     def mark_remote_user_device_cache_as_stale(self, user_id: str):
         """Records that the server has reason to believe the cache of the devices
         for the remote users is out of date.
-- 
cgit 1.4.1


From a5bab2d058747eb7165b20808b34c970e34a4b11 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 30 Jan 2020 16:10:30 +0000
Subject: When server leaves room check for stale device lists. (#6801)

When a server leaves a room it may stop sharing a room with remote
users, and thus not get any updates to their device lists. So we need to
check for this case and delete those device lists from the cache.

We don't need to do this if we stop sharing a room because the remote
user leaves the room, because we track that case via looking at
membership changes.
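
In outline (still_shares_room_with and unsubscribe are stand-ins for the two
store methods the patch uses):

    def handle_potentially_left_users(user_ids, still_shares_room_with, unsubscribe):
        # Anyone we no longer share any room with can have their cached
        # device list dropped; we'll refetch it if we ever meet them again.
        if not user_ids:
            return
        for user_id in user_ids - still_shares_room_with(user_ids):
            unsubscribe(user_id)

    handle_potentially_left_users(
        {"@a:remote", "@b:remote"},
        still_shares_room_with=lambda ids: {"@a:remote"},
        unsubscribe=lambda u: print("dropping cached devices for", u),
    )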
---
 changelog.d/6801.bugfix                        |  1 +
 synapse/storage/data_stores/main/roommember.py | 37 ++++++++++++++++++-
 synapse/storage/persist_events.py              | 51 ++++++++++++++++++++++++--
 3 files changed, 83 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/6801.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6801.bugfix b/changelog.d/6801.bugfix
new file mode 100644
index 0000000000..f401fa5d69
--- /dev/null
+++ b/changelog.d/6801.bugfix
@@ -0,0 +1 @@
+Fix bug where Synapse didn't invalidate cache of remote users' devices when Synapse left a room.
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index 9acef7c950..042289f0e0 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 
 import logging
-from typing import Iterable, List
+from typing import Iterable, List, Set
 
 from six import iteritems, itervalues
 
@@ -40,7 +40,7 @@ from synapse.storage.roommember import (
     ProfileInfo,
     RoomsForUser,
 )
-from synapse.types import get_domain_from_id
+from synapse.types import Collection, get_domain_from_id
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches import intern_string
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
@@ -439,6 +439,39 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
         return results
 
+    async def get_users_server_still_shares_room_with(
+        self, user_ids: Collection[str]
+    ) -> Set[str]:
+        """Given a list of users return the set that the server still share a
+        room with.
+        """
+
+        if not user_ids:
+            return set()
+
+        def _get_users_server_still_shares_room_with_txn(txn):
+            sql = """
+                SELECT state_key FROM current_state_events
+                WHERE
+                    type = 'm.room.member'
+                    AND membership = 'join'
+                    AND %s
+                GROUP BY state_key
+            """
+
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "state_key", user_ids
+            )
+
+            txn.execute(sql % (clause,), args)
+
+            return set(row[0] for row in txn)
+
+        return await self.db.runInteraction(
+            "get_users_server_still_shares_room_with",
+            _get_users_server_still_shares_room_with_txn,
+        )
+
     @defer.inlineCallbacks
     def get_rooms_for_user(self, user_id, on_invalidate=None):
         """Returns a set of room_ids the user is currently joined to.
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index d060c8b992..86166fd4c1 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -18,7 +18,7 @@
 import itertools
 import logging
 from collections import deque, namedtuple
-from typing import Iterable, List, Optional, Tuple
+from typing import Iterable, List, Optional, Set, Tuple
 
 from six import iteritems
 from six.moves import range
@@ -318,6 +318,11 @@ class EventsPersistenceStorage(object):
             # room
             state_delta_for_room = {}
 
+            # Set of remote users which were in rooms the server has left. We
+            # should check if we still share any rooms and, if not, mark their
+            # device lists as stale.
+            potentially_left_users = set()  # type: Set[str]
+
             if not backfilled:
                 with Measure(self._clock, "_calculate_state_and_extrem"):
                     # Work out the new "current state" for each room.
@@ -421,7 +426,11 @@ class EventsPersistenceStorage(object):
                             # the room then we delete the current state and
                             # extremities.
                             is_still_joined = await self._is_server_still_joined(
-                                room_id, ev_ctx_rm, delta, current_state
+                                room_id,
+                                ev_ctx_rm,
+                                delta,
+                                current_state,
+                                potentially_left_users,
                             )
                             if not is_still_joined:
                                 logger.info("Server no longer in room %s", room_id)
@@ -444,6 +453,8 @@ class EventsPersistenceStorage(object):
                 backfilled=backfilled,
             )
 
+            await self._handle_potentially_left_users(potentially_left_users)
+
     async def _calculate_new_extremities(
         self,
         room_id: str,
@@ -688,6 +699,7 @@ class EventsPersistenceStorage(object):
         ev_ctx_rm: List[Tuple[FrozenEvent, EventContext]],
         delta: DeltaState,
         current_state: Optional[StateMap[str]],
+        potentially_left_users: Set[str],
     ) -> bool:
         """Check if the server will still be joined after the given events have
         been persisted.
@@ -699,6 +711,9 @@ class EventsPersistenceStorage(object):
                 and what the new current state will be.
             current_state: The new current state if it has already been calculated,
                 otherwise None.
+            potentially_left_users: If the server has left the room, then joined
+                remote users will be added to this set to indicate that the
+                server may no longer be sharing a room with them.
         """
 
         if not any(
@@ -741,5 +756,33 @@ class EventsPersistenceStorage(object):
         is_still_joined = any(row["membership"] == Membership.JOIN for row in rows)
         if is_still_joined:
             return True
-        else:
-            return False
+
+        # The server will leave the room, so we go and find out which remote
+        # users will still be joined when we leave.
+        remote_event_ids = [
+            event_id
+            for (typ, state_key,), event_id in current_state.items()
+            if typ == EventTypes.Member and not self.is_mine_id(state_key)
+        ]
+        rows = await self.main_store.get_membership_from_event_ids(remote_event_ids)
+        potentially_left_users.update(
+            row["user_id"] for row in rows if row["membership"] == Membership.JOIN
+        )
+
+        return False
+
+    async def _handle_potentially_left_users(self, user_ids: Set[str]):
+        """Given a set of remote users check if the server still shares a room with
+        them. If not then mark those users' device cache as stale.
+        """
+
+        if not user_ids:
+            return
+
+        joined_users = await self.main_store.get_users_server_still_shares_room_with(
+            user_ids
+        )
+        left_users = user_ids - joined_users
+
+        for user_id in left_users:
+            await self.main_store.mark_remote_user_device_list_as_unsubscribed(user_id)
-- 
cgit 1.4.1


From c3d4ad8afdbe181707451410100dec4817c2c01a Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 30 Jan 2020 16:42:11 +0000
Subject: Fix sending server up commands from workers (#6811)

Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
 changelog.d/6811.bugfix                |  1 +
 synapse/federation/transport/client.py |  5 ++++-
 synapse/federation/transport/server.py | 26 +++++++++++++++-----------
 synapse/replication/tcp/client.py      |  4 ++++
 synapse/server.pyi                     | 12 +++++++++++-
 tox.ini                                |  1 +
 6 files changed, 36 insertions(+), 13 deletions(-)
 create mode 100644 changelog.d/6811.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6811.bugfix b/changelog.d/6811.bugfix
new file mode 100644
index 0000000000..361f2fc2e8
--- /dev/null
+++ b/changelog.d/6811.bugfix
@@ -0,0 +1 @@
+Fix waking up other workers when remote server is detected to have come back online.
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 198257414b..dc563538de 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 
 import logging
+from typing import Any, Dict
 
 from six.moves import urllib
 
@@ -352,7 +353,9 @@ class TransportLayerClient(object):
         else:
             path = _create_v1_path("/publicRooms")
 
-            args = {"include_all_networks": "true" if include_all_networks else "false"}
+            args = {
+                "include_all_networks": "true" if include_all_networks else "false"
+            }  # type: Dict[str, Any]
             if third_party_instance_id:
                 args["third_party_instance_id"] = (third_party_instance_id,)
             if limit:
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index d8cf9ed299..125eadd796 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -18,6 +18,7 @@
 import functools
 import logging
 import re
+from typing import Optional, Tuple, Type
 
 from twisted.internet.defer import maybeDeferred
 
@@ -267,6 +268,8 @@ class BaseFederationServlet(object):
                 returned.
     """
 
+    PATH = ""  # Overridden in subclasses, the regex to match against the path.
+
     REQUIRE_AUTH = True
 
     PREFIX = FEDERATION_V1_PREFIX  # Allows specifying the API version
@@ -347,9 +350,6 @@ class BaseFederationServlet(object):
 
             return response
 
-        # Extra logic that functools.wraps() doesn't finish
-        new_func.__self__ = func.__self__
-
         return new_func
 
     def register(self, server):
@@ -824,7 +824,7 @@ class PublicRoomList(BaseFederationServlet):
         if not self.allow_access:
             raise FederationDeniedError(origin)
 
-        limit = int(content.get("limit", 100))
+        limit = int(content.get("limit", 100))  # type: Optional[int]
         since_token = content.get("since", None)
         search_filter = content.get("filter", None)
 
@@ -971,7 +971,7 @@ class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet):
         if get_domain_from_id(requester_user_id) != origin:
             raise SynapseError(403, "requester_user_id doesn't match origin")
 
-        result = await self.groups_handler.update_room_in_group(
+        result = await self.handler.update_room_in_group(
             group_id, requester_user_id, room_id, config_key, content
         )
 
@@ -1422,11 +1422,13 @@ FEDERATION_SERVLET_CLASSES = (
     On3pidBindServlet,
     FederationVersionServlet,
     RoomComplexityServlet,
-)
+)  # type: Tuple[Type[BaseFederationServlet], ...]
 
-OPENID_SERVLET_CLASSES = (OpenIdUserInfo,)
+OPENID_SERVLET_CLASSES = (
+    OpenIdUserInfo,
+)  # type: Tuple[Type[BaseFederationServlet], ...]
 
-ROOM_LIST_CLASSES = (PublicRoomList,)
+ROOM_LIST_CLASSES = (PublicRoomList,)  # type: Tuple[Type[PublicRoomList], ...]
 
 GROUP_SERVER_SERVLET_CLASSES = (
     FederationGroupsProfileServlet,
@@ -1447,17 +1449,19 @@ GROUP_SERVER_SERVLET_CLASSES = (
     FederationGroupsAddRoomsServlet,
     FederationGroupsAddRoomsConfigServlet,
     FederationGroupsSettingJoinPolicyServlet,
-)
+)  # type: Tuple[Type[BaseFederationServlet], ...]
 
 
 GROUP_LOCAL_SERVLET_CLASSES = (
     FederationGroupsLocalInviteServlet,
     FederationGroupsRemoveLocalUserServlet,
     FederationGroupsBulkPublicisedServlet,
-)
+)  # type: Tuple[Type[BaseFederationServlet], ...]
 
 
-GROUP_ATTESTATION_SERVLET_CLASSES = (FederationGroupsRenewAttestaionServlet,)
+GROUP_ATTESTATION_SERVLET_CLASSES = (
+    FederationGroupsRenewAttestaionServlet,
+)  # type: Tuple[Type[BaseFederationServlet], ...]
 
 DEFAULT_SERVLET_GROUPS = (
     "federation",
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index fc06a7b053..02ab5b66ea 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -31,6 +31,7 @@ from .commands import (
     Command,
     FederationAckCommand,
     InvalidateCacheCommand,
+    RemoteServerUpCommand,
     RemovePusherCommand,
     UserIpCommand,
     UserSyncCommand,
@@ -210,6 +211,9 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
         cmd = UserIpCommand(user_id, access_token, ip, user_agent, device_id, last_seen)
         self.send_command(cmd)
 
+    def send_remote_server_up(self, server: str):
+        self.send_command(RemoteServerUpCommand(server))
+
     def await_sync(self, data):
         """Returns a deferred that is resolved when we receive a SYNC command
         with given data.
diff --git a/synapse/server.pyi b/synapse/server.pyi
index 0731403047..90347ac23e 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -2,8 +2,8 @@ import twisted.internet
 
 import synapse.api.auth
 import synapse.config.homeserver
+import synapse.crypto.keyring
 import synapse.federation.sender
-import synapse.federation.transaction_queue
 import synapse.federation.transport.client
 import synapse.handlers
 import synapse.handlers.auth
@@ -17,6 +17,7 @@ import synapse.handlers.room_member
 import synapse.handlers.set_password
 import synapse.http.client
 import synapse.notifier
+import synapse.replication.tcp.client
 import synapse.rest.media.v1.media_repository
 import synapse.server_notices.server_notices_manager
 import synapse.server_notices.server_notices_sender
@@ -27,6 +28,9 @@ class HomeServer(object):
     @property
     def config(self) -> synapse.config.homeserver.HomeServerConfig:
         pass
+    @property
+    def hostname(self) -> str:
+        pass
     def get_auth(self) -> synapse.api.auth.Auth:
         pass
     def get_auth_handler(self) -> synapse.handlers.auth.AuthHandler:
@@ -97,3 +101,9 @@ class HomeServer(object):
         pass
     def get_reactor(self) -> twisted.internet.base.ReactorBase:
         pass
+    def get_keyring(self) -> synapse.crypto.keyring.Keyring:
+        pass
+    def get_tcp_replication(
+        self,
+    ) -> synapse.replication.tcp.client.ReplicationClientHandler:
+        pass
diff --git a/tox.ini b/tox.ini
index 1d946a02ba..88ef12bebd 100644
--- a/tox.ini
+++ b/tox.ini
@@ -179,6 +179,7 @@ extras = all
 commands = mypy \
             synapse/api \
             synapse/config/ \
+            synapse/federation/transport \
             synapse/handlers/ui_auth \
             synapse/logging/ \
             synapse/module_api \
-- 
cgit 1.4.1
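
For context, `send_remote_server_up` is the piece that was missing on workers: it forwards a REMOTE_SERVER_UP notification over the TCP replication connection so that other processes stop treating the remote homeserver as dead. A hedged sketch of a caller, assuming an `hs` HomeServer object as typed in the `server.pyi` stub above:

    def on_remote_server_seen(hs, server_name: str) -> None:
        # Sketch only: the method names come from the server.pyi stub and
        # the replication client change in this commit.
        hs.get_tcp_replication().send_remote_server_up(server_name)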


From b660327056cdced860d532ab2404a26946da7ef5 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 30 Jan 2020 17:06:38 +0000
Subject: Resync remote device list when detected as stale. (#6786)

---
 changelog.d/6786.misc             |  1 +
 synapse/handlers/devicemessage.py | 10 ++++++++--
 synapse/handlers/federation.py    | 18 ++++++++++++++++--
 tests/handlers/test_typing.py     |  6 +++---
 4 files changed, 28 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6786.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6786.misc b/changelog.d/6786.misc
new file mode 100644
index 0000000000..94c692e53a
--- /dev/null
+++ b/changelog.d/6786.misc
@@ -0,0 +1 @@
+Attempt to resync remote users' devices when detected as stale.
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index 5c5fe77be2..05c4b3eec0 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -21,6 +21,7 @@ from canonicaljson import json
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
+from synapse.logging.context import run_in_background
 from synapse.logging.opentracing import (
     get_active_span_text_map,
     log_kv,
@@ -48,6 +49,8 @@ class DeviceMessageHandler(object):
             "m.direct_to_device", self.on_direct_to_device_edu
         )
 
+        self._device_list_updater = hs.get_device_handler().device_list_updater
+
     @defer.inlineCallbacks
     def on_direct_to_device_edu(self, origin, content):
         local_messages = {}
@@ -134,8 +137,11 @@ class DeviceMessageHandler(object):
                 unknown_devices,
             )
             yield self.store.mark_remote_user_device_cache_as_stale(sender_user_id)
-            # TODO: Poke something to start trying to refetch user's
-            # keys.
+
+            # Immediately attempt a resync in the background
+            run_in_background(
+                self._device_list_updater.user_device_resync, sender_user_id
+            )
 
     @defer.inlineCallbacks
     def send_device_message(self, sender_user_id, message_type, messages):
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index a67020a259..ca484e5458 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -57,6 +57,7 @@ from synapse.logging.context import (
     run_in_background,
 )
 from synapse.logging.utils import log_function
+from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
 from synapse.replication.http.federation import (
     ReplicationCleanRoomRestServlet,
     ReplicationFederationSendEventsRestServlet,
@@ -156,6 +157,13 @@ class FederationHandler(BaseHandler):
             hs
         )
 
+        if hs.config.worker_app:
+            self._user_device_resync = ReplicationUserDevicesResyncRestServlet.make_client(
+                hs
+            )
+        else:
+            self._device_list_updater = hs.get_device_handler().device_list_updater
+
         # When joining a room we need to queue any events for that room up
         self.room_queues = {}
         self._room_pdu_linearizer = Linearizer("fed_room_pdu")
@@ -759,8 +767,14 @@ class FederationHandler(BaseHandler):
                     await self.store.mark_remote_user_device_cache_as_stale(
                         event.sender
                     )
-                    # TODO: Poke something to start trying to refetch user's
-                    # keys.
+
+                    # Immediately attempt a resync in the background
+                    if self.config.worker_app:
+                        return run_in_background(self._user_device_resync, event.sender)
+                    else:
+                        return run_in_background(
+                            self._device_list_updater.user_device_resync, event.sender
+                        )
 
     @log_function
     async def backfill(self, dest, room_id, limit, extremities):
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 596ddc6970..68b9847bd2 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -81,6 +81,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             ]
         )
 
+        # the tests assume that we are starting at unix time 1000
+        reactor.pump((1000,))
+
         hs = self.setup_test_homeserver(
             notifier=Mock(), http_client=mock_federation_client, keyring=mock_keyring
         )
@@ -90,9 +93,6 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         return hs
 
     def prepare(self, reactor, clock, hs):
-        # the tests assume that we are starting at unix time 1000
-        reactor.pump((1000,))
-
         mock_notifier = hs.get_notifier()
         self.on_new_event = mock_notifier.on_new_event
 
-- 
cgit 1.4.1
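
Both call sites follow the same two-step pattern: persist the stale marker first, so that a crash before the resync completes still leaves evidence that a retry is needed, then kick off the resync without blocking the caller on a federation round-trip. A condensed sketch using the names from the diff above:

    from synapse.logging.context import run_in_background

    async def on_suspect_device_list(store, device_list_updater, user_id: str):
        # Record that our cached view of this user's devices may be wrong;
        # this is durable, so the resync can be retried after a restart.
        await store.mark_remote_user_device_cache_as_stale(user_id)

        # Fire-and-forget the actual resync in the background.
        run_in_background(device_list_updater.user_device_resync, user_id)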


From 57ad702af0511aff36ca69fb2e9fc3399cce3a8d Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 30 Jan 2020 17:17:44 +0000
Subject: Background update to clean out rooms from current state (#6802)

---
 changelog.d/6802.misc                              |   1 +
 .../delta/57/delete_old_current_state_events.sql   |  19 ++++
 synapse/storage/data_stores/main/state.py          | 108 ++++++++++++++++++++-
 3 files changed, 126 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6802.misc
 create mode 100644 synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql

(limited to 'changelog.d')

diff --git a/changelog.d/6802.misc b/changelog.d/6802.misc
new file mode 100644
index 0000000000..a77ba1d7a5
--- /dev/null
+++ b/changelog.d/6802.misc
@@ -0,0 +1 @@
+Add background update to clean out left rooms from current state.
diff --git a/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql b/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql
new file mode 100644
index 0000000000..a133d87a19
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql
@@ -0,0 +1,19 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add background update to go and delete current state events for rooms the
+-- server is no longer in.
+INSERT into background_updates (update_name, progress_json)
+    VALUES ('delete_old_current_state_events', '{}');
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index bd7b0276f1..9b6f68e777 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -21,12 +21,13 @@ from six import iteritems
 
 from twisted.internet import defer
 
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import NotFoundError
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
+from synapse.storage.data_stores.main.roommember import RoomMemberWorkerStore
 from synapse.storage.database import Database
 from synapse.storage.state import StateFilter
 from synapse.util.caches import intern_string
@@ -300,14 +301,17 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         return set(row["state_group"] for row in rows)
 
 
-class MainStateBackgroundUpdateStore(SQLBaseStore):
+class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
 
     CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
     EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"
+    DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events"
 
     def __init__(self, database: Database, db_conn, hs):
         super(MainStateBackgroundUpdateStore, self).__init__(database, db_conn, hs)
 
+        self.server_name = hs.hostname
+
         self.db.updates.register_background_index_update(
             self.CURRENT_STATE_INDEX_UPDATE_NAME,
             index_name="current_state_events_member_index",
@@ -321,6 +325,106 @@ class MainStateBackgroundUpdateStore(SQLBaseStore):
             table="event_to_state_groups",
             columns=["state_group"],
         )
+        self.db.updates.register_background_update_handler(
+            self.DELETE_CURRENT_STATE_UPDATE_NAME, self._background_remove_left_rooms,
+        )
+
+    async def _background_remove_left_rooms(self, progress, batch_size):
+        """Background update to delete rows from `current_state_events` and
+        `event_forward_extremities` tables of rooms that the server is no
+        longer joined to.
+        """
+
+        last_room_id = progress.get("last_room_id", "")
+
+        def _background_remove_left_rooms_txn(txn):
+            sql = """
+                SELECT DISTINCT room_id FROM current_state_events
+                WHERE room_id > ? ORDER BY room_id LIMIT ?
+            """
+
+            txn.execute(sql, (last_room_id, batch_size))
+            room_ids = list(row[0] for row in txn)
+            if not room_ids:
+                return True, set()
+
+            sql = """
+                SELECT room_id
+                FROM current_state_events
+                WHERE
+                    room_id > ? AND room_id <= ?
+                    AND type = 'm.room.member'
+                    AND membership = 'join'
+                    AND state_key LIKE ?
+                GROUP BY room_id
+            """
+
+            txn.execute(sql, (last_room_id, room_ids[-1], "%:" + self.server_name))
+
+            joined_room_ids = set(row[0] for row in txn)
+
+            left_rooms = set(room_ids) - joined_room_ids
+
+            # First we get all users that we still think were joined to the
+            # room. This is so that we can mark those device lists as
+            # potentially stale, since there may have been a period where the
+            # server didn't share a room with the remote user and therefore may
+            # have missed any device updates.
+            rows = self.db.simple_select_many_txn(
+                txn,
+                table="current_state_events",
+                column="room_id",
+                iterable=left_rooms,
+                keyvalues={"type": EventTypes.Member, "membership": Membership.JOIN},
+                retcols=("state_key",),
+            )
+
+            potentially_left_users = set(row["state_key"] for row in rows)
+
+            # Now lets actually delete the rooms from the DB.
+            self.db.simple_delete_many_txn(
+                txn,
+                table="current_state_events",
+                column="room_id",
+                iterable=left_rooms,
+                keyvalues={},
+            )
+
+            self.db.simple_delete_many_txn(
+                txn,
+                table="event_forward_extremities",
+                column="room_id",
+                iterable=left_rooms,
+                keyvalues={},
+            )
+
+            self.db.updates._background_update_progress_txn(
+                txn,
+                self.DELETE_CURRENT_STATE_UPDATE_NAME,
+                {"last_room_id": room_ids[-1]},
+            )
+
+            return False, potentially_left_users
+
+        finished, potentially_left_users = await self.db.runInteraction(
+            "_background_remove_left_rooms", _background_remove_left_rooms_txn
+        )
+
+        if finished:
+            await self.db.updates._end_background_update(
+                self.DELETE_CURRENT_STATE_UPDATE_NAME
+            )
+
+        # Now go and check if we still share a room with the remote users in
+        # the deleted rooms. If not mark their device lists as stale.
+        joined_users = await self.get_users_server_still_shares_room_with(
+            potentially_left_users
+        )
+
+        for user_id in potentially_left_users - joined_users:
+            await self.mark_remote_user_device_list_as_unsubscribed(user_id)
+
+        return batch_size
 
 
 class StateStore(StateGroupWorkerStore, MainStateBackgroundUpdateStore):
-- 
cgit 1.4.1
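
The background update registered here follows the standard Synapse shape: each invocation resumes from a stored progress marker, processes up to `batch_size` items inside one transaction, persists the new marker, and deregisters itself once a batch comes back empty. A stripped-down sketch of that skeleton (names follow the diff; the per-batch work and the return of extra values are elided):

    async def _example_batched_update(self, progress, batch_size):
        last_room_id = progress.get("last_room_id", "")

        def _txn(txn):
            txn.execute(
                "SELECT DISTINCT room_id FROM current_state_events"
                " WHERE room_id > ? ORDER BY room_id LIMIT ?",
                (last_room_id, batch_size),
            )
            room_ids = [row[0] for row in txn]
            if not room_ids:
                return True  # nothing left; we are finished

            # ... per-batch work on room_ids goes here ...

            self.db.updates._background_update_progress_txn(
                txn, "example_update", {"last_room_id": room_ids[-1]}
            )
            return False

        finished = await self.db.runInteraction("_example_batched_update", _txn)
        if finished:
            await self.db.updates._end_background_update("example_update")
        return batch_size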


From 184303b8650a90256f84bc9801b749a5b81b6d4b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 30 Jan 2020 17:20:55 +0000
Subject: MSC2260: Block direct sends of m.room.aliases events (#6794)

as per MSC2260
---
 changelog.d/6794.feature               |  1 +
 synapse/rest/client/v1/room.py         | 12 ++++++++++
 tests/rest/admin/test_admin.py         |  7 ------
 tests/rest/client/v1/test_directory.py | 41 +++++++++++++---------------------
 4 files changed, 28 insertions(+), 33 deletions(-)
 create mode 100644 changelog.d/6794.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6794.feature b/changelog.d/6794.feature
new file mode 100644
index 0000000000..df9e4b77ab
--- /dev/null
+++ b/changelog.d/6794.feature
@@ -0,0 +1 @@
+Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260).
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 5aef8238b8..6f31584c51 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -184,6 +184,12 @@ class RoomStateEventRestServlet(TransactionRestServlet):
 
         content = parse_json_object_from_request(request)
 
+        if event_type == EventTypes.Aliases:
+            # MSC2260
+            raise SynapseError(
+                400, "Cannot send m.room.aliases events via /rooms/{room_id}/state"
+            )
+
         event_dict = {
             "type": event_type,
             "content": content,
@@ -231,6 +237,12 @@ class RoomSendEventRestServlet(TransactionRestServlet):
         requester = await self.auth.get_user_by_req(request, allow_guest=True)
         content = parse_json_object_from_request(request)
 
+        if event_type == EventTypes.Aliases:
+            # MSC2260
+            raise SynapseError(
+                400, "Cannot send m.room.aliases events via /rooms/{room_id}/send"
+            )
+
         event_dict = {
             "type": event_type,
             "content": content,
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 0342aed416..e5984aaad8 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -868,13 +868,6 @@ class RoomTestCase(unittest.HomeserverTestCase):
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
 
         # Set this new alias as the canonical alias for this room
-        self.helper.send_state(
-            room_id,
-            "m.room.aliases",
-            {"aliases": [test_alias]},
-            tok=self.admin_user_tok,
-            state_key="test",
-        )
         self.helper.send_state(
             room_id,
             "m.room.canonical_alias",
diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py
index 633b7dbda0..914cf54927 100644
--- a/tests/rest/client/v1/test_directory.py
+++ b/tests/rest/client/v1/test_directory.py
@@ -51,26 +51,30 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         self.user = self.register_user("user", "test")
         self.user_tok = self.login("user", "test")
 
-    def test_state_event_not_in_room(self):
-        self.ensure_user_left_room()
-        self.set_alias_via_state_event(403)
+    def test_cannot_set_alias_via_state_event(self):
+        self.ensure_user_joined_room()
+        url = "/_matrix/client/r0/rooms/%s/state/m.room.aliases/%s" % (
+            self.room_id,
+            self.hs.hostname,
+        )
+
+        data = {"aliases": [self.random_alias(5)]}
+        request_data = json.dumps(data)
+
+        request, channel = self.make_request(
+            "PUT", url, request_data, access_token=self.user_tok
+        )
+        self.render(request)
+        self.assertEqual(channel.code, 400, channel.result)
 
     def test_directory_endpoint_not_in_room(self):
         self.ensure_user_left_room()
         self.set_alias_via_directory(403)
 
-    def test_state_event_in_room_too_long(self):
-        self.ensure_user_joined_room()
-        self.set_alias_via_state_event(400, alias_length=256)
-
     def test_directory_in_room_too_long(self):
         self.ensure_user_joined_room()
         self.set_alias_via_directory(400, alias_length=256)
 
-    def test_state_event_in_room(self):
-        self.ensure_user_joined_room()
-        self.set_alias_via_state_event(200)
-
     def test_directory_in_room(self):
         self.ensure_user_joined_room()
         self.set_alias_via_directory(200)
@@ -102,21 +106,6 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         self.render(request)
         self.assertEqual(channel.code, 200, channel.result)
 
-    def set_alias_via_state_event(self, expected_code, alias_length=5):
-        url = "/_matrix/client/r0/rooms/%s/state/m.room.aliases/%s" % (
-            self.room_id,
-            self.hs.hostname,
-        )
-
-        data = {"aliases": [self.random_alias(alias_length)]}
-        request_data = json.dumps(data)
-
-        request, channel = self.make_request(
-            "PUT", url, request_data, access_token=self.user_tok
-        )
-        self.render(request)
-        self.assertEqual(channel.code, expected_code, channel.result)
-
     def set_alias_via_directory(self, expected_code, alias_length=5):
         url = "/_matrix/client/r0/directory/room/%s" % self.random_alias(alias_length)
         data = {"room_id": self.room_id}
-- 
cgit 1.4.1
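
From a client's perspective the change means that a direct state send of m.room.aliases now fails with HTTP 400 rather than being accepted. A hypothetical request (homeserver URL, room ID, alias and access token are all placeholders, and `requests` is just a convenient HTTP client, not part of Synapse):

    import requests

    url = (
        "https://homeserver.example/_matrix/client/r0/rooms/"
        "!room:homeserver.example/state/m.room.aliases/homeserver.example"
    )
    resp = requests.put(
        url,
        params={"access_token": "ACCESS_TOKEN"},
        json={"aliases": ["#alias:homeserver.example"]},
    )
    assert resp.status_code == 400  # rejected per MSC2260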


From e0992fcc5be9e850a5007d1d09fea79bea949cf6 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 30 Jan 2020 17:55:34 +0000
Subject: Log when we delete room in bg update (#6816)

---
 changelog.d/6816.misc                     | 1 +
 synapse/storage/data_stores/main/state.py | 2 ++
 2 files changed, 3 insertions(+)
 create mode 100644 changelog.d/6816.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6816.misc b/changelog.d/6816.misc
new file mode 100644
index 0000000000..a77ba1d7a5
--- /dev/null
+++ b/changelog.d/6816.misc
@@ -0,0 +1 @@
+Add background update to clean out left rooms from current state.
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index 9b6f68e777..4167f83c9b 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -365,6 +365,8 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
 
             left_rooms = set(room_ids) - joined_room_ids
 
+            logger.info("Deleting current state left rooms: %r", left_rooms)
+
             # First we get all users that we still think were joined to the
             # room. This is so that we can mark those device lists as
             # potentially stale, since there may have been a period where the
-- 
cgit 1.4.1


From 46a446828d1b4b1ca2d9b0dcae97323a1bbc0c0b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 30 Jan 2020 22:13:02 +0000
Subject: pass room version into FederationHandler.on_invite_request (#6805)

---
 changelog.d/6805.misc                   | 1 +
 synapse/federation/federation_server.py | 2 +-
 synapse/handlers/federation.py          | 6 +++---
 3 files changed, 5 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6805.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6805.misc b/changelog.d/6805.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6805.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 8eddb3bf2c..9562faa3ee 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -410,7 +410,7 @@ class FederationServer(FederationBase):
         origin_host, _ = parse_server_name(origin)
         await self.check_server_matches_acl(origin_host, pdu.room_id)
         pdu = await self._check_sigs_and_hash(room_version, pdu)
-        ret_pdu = await self.handler.on_invite_request(origin, pdu)
+        ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
         time_now = self._clock.time_msec()
         return {"event": ret_pdu.get_pdu_json(time_now)}
 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index ca484e5458..01372f6d47 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1482,13 +1482,13 @@ class FederationHandler(BaseHandler):
         return {"state": list(state.values()), "auth_chain": auth_chain}
 
     @defer.inlineCallbacks
-    def on_invite_request(self, origin, pdu):
+    def on_invite_request(
+        self, origin: str, event: EventBase, room_version: RoomVersion
+    ):
         """ We've got an invite event. Process and persist it. Sign it.
 
         Respond with the now signed event.
         """
-        event = pdu
-
         if event.state_key is None:
             raise SynapseError(400, "The invite event did not have a state key")
 
-- 
cgit 1.4.1


From 540c5e168b3f7f22d7af905d6d01dcf2a615dff3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Wed, 29 Jan 2020 18:19:06 +0000
Subject: changelog

---
 changelog.d/6806.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6806.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6806.misc b/changelog.d/6806.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6806.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
-- 
cgit 1.4.1


From 7d846e870422c65f3fb436e5b0e543dae17719fc Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 31 Jan 2020 09:49:13 +0000
Subject: Fix bug where getting a missing auth event during join 500'ed (#6810)

---
 changelog.d/6810.misc          | 1 +
 synapse/handlers/federation.py | 6 +++++-
 2 files changed, 6 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6810.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6810.misc b/changelog.d/6810.misc
new file mode 100644
index 0000000000..5537355bea
--- /dev/null
+++ b/changelog.d/6810.misc
@@ -0,0 +1 @@
+Record room versions in the `rooms` table.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 01372f6d47..1f92640f86 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1929,7 +1929,11 @@ class FederationHandler(BaseHandler):
 
         for e_id in missing_auth_events:
             m_ev = yield self.federation_client.get_pdu(
-                [origin], e_id, room_version=room_version, outlier=True, timeout=10000
+                [origin],
+                e_id,
+                room_version=room_version.identifier,
+                outlier=True,
+                timeout=10000,
             )
             if m_ev and m_ev.event_id == e_id:
                 event_map[e_id] = m_ev
-- 
cgit 1.4.1


From f6fa2c0b31e2b3695a91f34a06974f428bd5d45c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Fri, 31 Jan 2020 10:30:29 +0000
Subject: newsfile

---
 changelog.d/6820.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6820.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6820.misc b/changelog.d/6820.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6820.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
-- 
cgit 1.4.1


From 7f93eb190301024d373a573fc75a58f592469e9f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 31 Jan 2020 13:47:43 +0000
Subject: pass room_version into compute_event_signature (#6807)

---
 changelog.d/6807.misc           |  1 +
 synapse/crypto/event_signing.py | 28 ++++++++++++++++++++--------
 synapse/handlers/federation.py  |  5 ++++-
 3 files changed, 25 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/6807.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6807.misc b/changelog.d/6807.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6807.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index 1f2bccf700..5f733c1cf5 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
-
+#
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
 import collections.abc
 import hashlib
 import logging
+from typing import Dict
 
 from canonicaljson import encode_canonical_json
 from signedjson.sign import sign_json
@@ -115,18 +117,28 @@ def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
     return hashed.name, hashed.digest()
 
 
-def compute_event_signature(event_dict, signature_name, signing_key):
+def compute_event_signature(
+    room_version: RoomVersion,
+    event_dict: JsonDict,
+    signature_name: str,
+    signing_key: SigningKey,
+) -> Dict[str, Dict[str, str]]:
     """Compute the signature of the event for the given name and key.
 
     Args:
-        event_dict (dict): The event as a dict
-        signature_name (str): The name of the entity signing the event
+        room_version: the version of the room that this event is in.
+            (the room version determines the redaction algorithm and hence the
+            json to be signed)
+
+        event_dict: The event as a dict
+
+        signature_name: The name of the entity signing the event
             (typically the server's hostname).
-        signing_key (syutil.crypto.SigningKey): The key to sign with
+
+        signing_key: The key to sign with
 
     Returns:
-        dict[str, dict[str, str]]: Returns a dictionary in the same format of
-        an event's signatures field.
+        a dictionary in the same format of an event's signatures field.
     """
     redact_json = prune_event_dict(event_dict)
     redact_json.pop("age_ts", None)
@@ -161,5 +173,5 @@ def add_hashes_and_signatures(
     event_dict.setdefault("hashes", {})[name] = encode_base64(digest)
 
     event_dict["signatures"] = compute_event_signature(
-        event_dict, signature_name=signature_name, signing_key=signing_key
+        room_version, event_dict, signature_name=signature_name, signing_key=signing_key
     )
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 0f10c3e9b1..c86d3177e9 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1528,7 +1528,10 @@ class FederationHandler(BaseHandler):
 
         event.signatures.update(
             compute_event_signature(
-                event.get_pdu_json(), self.hs.hostname, self.hs.config.signing_key[0]
+                room_version,
+                event.get_pdu_json(),
+                self.hs.hostname,
+                self.hs.config.signing_key[0],
             )
         )
 
-- 
cgit 1.4.1
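
The signing flow itself is deliberately thin: prune the event to its redacted form for the given room version (so signatures survive redaction), strip transient fields like `age_ts`, then sign the canonical JSON. An illustrative, self-contained sketch using the signedjson library directly; the dict here is a toy, not a valid PDU, and in Synapse the pruning step would come first:

    from signedjson.key import generate_signing_key
    from signedjson.sign import sign_json

    event_dict = {"type": "m.room.message", "content": {"body": "hi"}}

    signing_key = generate_signing_key("key1")
    signed = sign_json(dict(event_dict), "homeserver.example", signing_key)

    # sign_json adds a "signatures" mapping keyed by entity name and key ID.
    print(signed["signatures"]["homeserver.example"])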


From 83b0ea047b355ade44985af123f4807faa7892ab Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 31 Jan 2020 14:04:15 +0000
Subject: Fix deleting of stale marker for device lists (#6819)

We were in fact only deleting the stale marker when we got an incremental
update, rather than when we did a full resync.
---
 changelog.d/6819.misc                       |  1 +
 synapse/storage/data_stores/main/devices.py | 14 +++++++-------
 2 files changed, 8 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6819.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6819.misc b/changelog.d/6819.misc
new file mode 100644
index 0000000000..4f9a4ac7a5
--- /dev/null
+++ b/changelog.d/6819.misc
@@ -0,0 +1 @@
+Detect unknown remote devices and mark cache as stale.
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py
index a34415ff14..ea0503476f 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/data_stores/main/devices.py
@@ -940,13 +940,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             lock=False,
         )
 
-        # If we're replacing the remote user's device list cache presumably
-        # we've done a full resync, so we remove the entry that says we need
-        # to resync
-        self.db.simple_delete_txn(
-            txn, table="device_lists_remote_resync", keyvalues={"user_id": user_id},
-        )
-
     def update_remote_device_list_cache(self, user_id, devices, stream_id):
         """Replace the entire cache of the remote user's devices.
 
@@ -1003,6 +996,13 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             lock=False,
         )
 
+        # If we're replacing the remote user's device list cache presumably
+        # we've done a full resync, so we remove the entry that says we need
+        # to resync
+        self.db.simple_delete_txn(
+            txn, table="device_lists_remote_resync", keyvalues={"user_id": user_id},
+        )
+
     @defer.inlineCallbacks
     def add_device_change_to_streams(self, user_id, device_ids, hosts):
         """Persist that a user's devices have been updated, and which hosts
-- 
cgit 1.4.1


From ac0d45b78b647f6744b5850da88a4ca8c76666b9 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 31 Jan 2020 15:35:37 +0000
Subject: 1.10.0rc1

---
 CHANGES.md               | 44 ++++++++++++++++++++++++++++++++++++++++++++
 UPGRADE.rst              |  4 ++--
 changelog.d/6729.misc    |  1 -
 changelog.d/6734.bugfix  |  1 -
 changelog.d/6748.misc    |  1 -
 changelog.d/6751.misc    |  1 -
 changelog.d/6757.misc    |  1 -
 changelog.d/6761.bugfix  |  1 -
 changelog.d/6767.bugfix  |  1 -
 changelog.d/6771.bugfix  |  1 -
 changelog.d/6775.doc     |  1 -
 changelog.d/6776.misc    |  1 -
 changelog.d/6786.misc    |  1 -
 changelog.d/6787.feature |  1 -
 changelog.d/6788.misc    |  1 -
 changelog.d/6790.feature |  1 -
 changelog.d/6792.misc    |  1 -
 changelog.d/6794.feature |  1 -
 changelog.d/6795.bugfix  |  1 -
 changelog.d/6796.bugfix  |  1 -
 changelog.d/6797.misc    |  1 -
 changelog.d/6799.bugfix  |  1 -
 changelog.d/6800.bugfix  |  1 -
 changelog.d/6801.bugfix  |  1 -
 changelog.d/6802.misc    |  1 -
 changelog.d/6803.misc    |  1 -
 changelog.d/6805.misc    |  1 -
 changelog.d/6806.misc    |  1 -
 changelog.d/6807.misc    |  1 -
 changelog.d/6810.misc    |  1 -
 changelog.d/6811.bugfix  |  1 -
 changelog.d/6816.misc    |  1 -
 changelog.d/6819.misc    |  1 -
 changelog.d/6820.misc    |  1 -
 synapse/__init__.py      |  2 +-
 35 files changed, 47 insertions(+), 35 deletions(-)
 delete mode 100644 changelog.d/6729.misc
 delete mode 100644 changelog.d/6734.bugfix
 delete mode 100644 changelog.d/6748.misc
 delete mode 100644 changelog.d/6751.misc
 delete mode 100644 changelog.d/6757.misc
 delete mode 100644 changelog.d/6761.bugfix
 delete mode 100644 changelog.d/6767.bugfix
 delete mode 100644 changelog.d/6771.bugfix
 delete mode 100644 changelog.d/6775.doc
 delete mode 100644 changelog.d/6776.misc
 delete mode 100644 changelog.d/6786.misc
 delete mode 100644 changelog.d/6787.feature
 delete mode 100644 changelog.d/6788.misc
 delete mode 100644 changelog.d/6790.feature
 delete mode 100644 changelog.d/6792.misc
 delete mode 100644 changelog.d/6794.feature
 delete mode 100644 changelog.d/6795.bugfix
 delete mode 100644 changelog.d/6796.bugfix
 delete mode 100644 changelog.d/6797.misc
 delete mode 100644 changelog.d/6799.bugfix
 delete mode 100644 changelog.d/6800.bugfix
 delete mode 100644 changelog.d/6801.bugfix
 delete mode 100644 changelog.d/6802.misc
 delete mode 100644 changelog.d/6803.misc
 delete mode 100644 changelog.d/6805.misc
 delete mode 100644 changelog.d/6806.misc
 delete mode 100644 changelog.d/6807.misc
 delete mode 100644 changelog.d/6810.misc
 delete mode 100644 changelog.d/6811.bugfix
 delete mode 100644 changelog.d/6816.misc
 delete mode 100644 changelog.d/6819.misc
 delete mode 100644 changelog.d/6820.misc

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index 4c413b72ee..6686cafa5b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,47 @@
+Synapse 1.10.0rc1 (2020-01-31)
+==============================
+
+Features
+--------
+
+- Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260). ([\#6787](https://github.com/matrix-org/synapse/issues/6787), [\#6790](https://github.com/matrix-org/synapse/issues/6790), [\#6794](https://github.com/matrix-org/synapse/issues/6794))
+
+
+Bugfixes
+--------
+
+- Warn if postgres database has a non-C locale, as that can cause issues when upgrading locales (e.g. due to upgrading OS). ([\#6734](https://github.com/matrix-org/synapse/issues/6734))
+- Minor fixes to `PUT /_synapse/admin/v2/users` admin api. ([\#6761](https://github.com/matrix-org/synapse/issues/6761))
+- Validate `client_secret` parameter using the regex provided by the Client-Server API, temporarily allowing `:` characters for older clients. The `:` character will be removed in a future release. ([\#6767](https://github.com/matrix-org/synapse/issues/6767))
+- Fix persisting redaction events that have been redacted (or otherwise don't have a redacts key). ([\#6771](https://github.com/matrix-org/synapse/issues/6771))
+- Fix outbound federation request metrics. ([\#6795](https://github.com/matrix-org/synapse/issues/6795))
+- Fix bug where querying a remote user's device keys that weren't cached resulted in only returning a single device. ([\#6796](https://github.com/matrix-org/synapse/issues/6796))
+- Fix race in federation sender worker that delayed sending of device updates. ([\#6799](https://github.com/matrix-org/synapse/issues/6799), [\#6800](https://github.com/matrix-org/synapse/issues/6800))
+- Fix bug where Synapse didn't invalidate cache of remote users' devices when Synapse left a room. ([\#6801](https://github.com/matrix-org/synapse/issues/6801))
+- Fix waking up other workers when remote server is detected to have come back online. ([\#6811](https://github.com/matrix-org/synapse/issues/6811))
+
+
+Improved Documentation
+----------------------
+
+- Clarify documentation related to `user_dir` and `federation_reader` workers. ([\#6775](https://github.com/matrix-org/synapse/issues/6775))
+
+
+Internal Changes
+----------------
+
+- Record room versions in the `rooms` table. ([\#6729](https://github.com/matrix-org/synapse/issues/6729), [\#6788](https://github.com/matrix-org/synapse/issues/6788), [\#6810](https://github.com/matrix-org/synapse/issues/6810))
+- Propagate cache invalidates from workers to other workers. ([\#6748](https://github.com/matrix-org/synapse/issues/6748))
+- Remove some unnecessary admin handler abstraction methods. ([\#6751](https://github.com/matrix-org/synapse/issues/6751))
+- Add some debugging for media storage providers. ([\#6757](https://github.com/matrix-org/synapse/issues/6757))
+- Detect unknown remote devices and mark cache as stale. ([\#6776](https://github.com/matrix-org/synapse/issues/6776), [\#6819](https://github.com/matrix-org/synapse/issues/6819))
+- Attempt to resync remote users' devices when detected as stale. ([\#6786](https://github.com/matrix-org/synapse/issues/6786))
+- Delete current state from the database when server leaves a room. ([\#6792](https://github.com/matrix-org/synapse/issues/6792))
+- When a client asks for a remote user's device keys check if the local cache for that user has been marked as potentially stale. ([\#6797](https://github.com/matrix-org/synapse/issues/6797))
+- Add background update to clean out left rooms from current state. ([\#6802](https://github.com/matrix-org/synapse/issues/6802), [\#6816](https://github.com/matrix-org/synapse/issues/6816))
+- Refactoring work in preparation for changing the event redaction algorithm. ([\#6803](https://github.com/matrix-org/synapse/issues/6803), [\#6805](https://github.com/matrix-org/synapse/issues/6805), [\#6806](https://github.com/matrix-org/synapse/issues/6806), [\#6807](https://github.com/matrix-org/synapse/issues/6807), [\#6820](https://github.com/matrix-org/synapse/issues/6820))
+
+
 Synapse 1.9.1 (2020-01-28)
 ==========================
 
diff --git a/UPGRADE.rst b/UPGRADE.rst
index 470246f128..1c5db1c4a8 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -76,8 +76,8 @@ for example:
      dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
 
-Upgrading to ****
-===============================
+Upgrading to v1.10.0
+====================
 
 Synapse will now log a warning on start up if used with a PostgreSQL database
 that has a non-recommended locale set.
diff --git a/changelog.d/6729.misc b/changelog.d/6729.misc
deleted file mode 100644
index 5537355bea..0000000000
--- a/changelog.d/6729.misc
+++ /dev/null
@@ -1 +0,0 @@
-Record room versions in the `rooms` table.
diff --git a/changelog.d/6734.bugfix b/changelog.d/6734.bugfix
deleted file mode 100644
index 79c6bab4d1..0000000000
--- a/changelog.d/6734.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Warn if postgres database has a non-C locale, as that can cause issues when upgrading locales (e.g. due to upgrading OS).
diff --git a/changelog.d/6748.misc b/changelog.d/6748.misc
deleted file mode 100644
index de320d4cd9..0000000000
--- a/changelog.d/6748.misc
+++ /dev/null
@@ -1 +0,0 @@
-Propagate cache invalidates from workers to other workers.
diff --git a/changelog.d/6751.misc b/changelog.d/6751.misc
deleted file mode 100644
index 7222520528..0000000000
--- a/changelog.d/6751.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove some unnecessary admin handler abstraction methods.
\ No newline at end of file
diff --git a/changelog.d/6757.misc b/changelog.d/6757.misc
deleted file mode 100644
index a50c5e974a..0000000000
--- a/changelog.d/6757.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some debugging for media storage providers.
diff --git a/changelog.d/6761.bugfix b/changelog.d/6761.bugfix
deleted file mode 100644
index 1c664c02df..0000000000
--- a/changelog.d/6761.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Minor fixes to `PUT /_synapse/admin/v2/users` admin api.
diff --git a/changelog.d/6767.bugfix b/changelog.d/6767.bugfix
deleted file mode 100644
index 63c7c63315..0000000000
--- a/changelog.d/6767.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Validate `client_secret` parameter using the regex provided by the Client-Server API, temporarily allowing `:` characters for older clients. The `:` character will be removed in a future release.
diff --git a/changelog.d/6771.bugfix b/changelog.d/6771.bugfix
deleted file mode 100644
index 623ba24acb..0000000000
--- a/changelog.d/6771.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix persisting redaction events that have been redacted (or otherwise don't have a redacts key).
diff --git a/changelog.d/6775.doc b/changelog.d/6775.doc
deleted file mode 100644
index c6078ef82d..0000000000
--- a/changelog.d/6775.doc
+++ /dev/null
@@ -1 +0,0 @@
-Clarify documentation related to `user_dir` and `federation_reader` workers.
diff --git a/changelog.d/6776.misc b/changelog.d/6776.misc
deleted file mode 100644
index 4f9a4ac7a5..0000000000
--- a/changelog.d/6776.misc
+++ /dev/null
@@ -1 +0,0 @@
-Detect unknown remote devices and mark cache as stale.
diff --git a/changelog.d/6786.misc b/changelog.d/6786.misc
deleted file mode 100644
index 94c692e53a..0000000000
--- a/changelog.d/6786.misc
+++ /dev/null
@@ -1 +0,0 @@
-Attempt to resync remote users' devices when detected as stale.
diff --git a/changelog.d/6787.feature b/changelog.d/6787.feature
deleted file mode 100644
index df9e4b77ab..0000000000
--- a/changelog.d/6787.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260).
diff --git a/changelog.d/6788.misc b/changelog.d/6788.misc
deleted file mode 100644
index 5537355bea..0000000000
--- a/changelog.d/6788.misc
+++ /dev/null
@@ -1 +0,0 @@
-Record room versions in the `rooms` table.
diff --git a/changelog.d/6790.feature b/changelog.d/6790.feature
deleted file mode 100644
index df9e4b77ab..0000000000
--- a/changelog.d/6790.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260).
diff --git a/changelog.d/6792.misc b/changelog.d/6792.misc
deleted file mode 100644
index fa31d509b3..0000000000
--- a/changelog.d/6792.misc
+++ /dev/null
@@ -1 +0,0 @@
-Delete current state from the database when server leaves a room.
diff --git a/changelog.d/6794.feature b/changelog.d/6794.feature
deleted file mode 100644
index df9e4b77ab..0000000000
--- a/changelog.d/6794.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260).
diff --git a/changelog.d/6795.bugfix b/changelog.d/6795.bugfix
deleted file mode 100644
index d1585653b1..0000000000
--- a/changelog.d/6795.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix outbound federation request metrics.
diff --git a/changelog.d/6796.bugfix b/changelog.d/6796.bugfix
deleted file mode 100644
index 206a157311..0000000000
--- a/changelog.d/6796.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where querying a remote user's device keys that weren't cached resulted in only returning a single device.
diff --git a/changelog.d/6797.misc b/changelog.d/6797.misc
deleted file mode 100644
index e9127bac51..0000000000
--- a/changelog.d/6797.misc
+++ /dev/null
@@ -1 +0,0 @@
-When a client asks for a remote user's device keys check if the local cache for that user has been marked as potentially stale.
diff --git a/changelog.d/6799.bugfix b/changelog.d/6799.bugfix
deleted file mode 100644
index 322a2758af..0000000000
--- a/changelog.d/6799.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix race in federation sender worker that delayed sending of device updates.
diff --git a/changelog.d/6800.bugfix b/changelog.d/6800.bugfix
deleted file mode 100644
index 322a2758af..0000000000
--- a/changelog.d/6800.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix race in federation sender worker that delayed sending of device updates.
diff --git a/changelog.d/6801.bugfix b/changelog.d/6801.bugfix
deleted file mode 100644
index f401fa5d69..0000000000
--- a/changelog.d/6801.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where Synapse didn't invalidate cache of remote users' devices when Synapse left a room.
diff --git a/changelog.d/6802.misc b/changelog.d/6802.misc
deleted file mode 100644
index a77ba1d7a5..0000000000
--- a/changelog.d/6802.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add background update to clean out left rooms from current state.
diff --git a/changelog.d/6803.misc b/changelog.d/6803.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6803.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6805.misc b/changelog.d/6805.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6805.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6806.misc b/changelog.d/6806.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6806.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6807.misc b/changelog.d/6807.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6807.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6810.misc b/changelog.d/6810.misc
deleted file mode 100644
index 5537355bea..0000000000
--- a/changelog.d/6810.misc
+++ /dev/null
@@ -1 +0,0 @@
-Record room versions in the `rooms` table.
diff --git a/changelog.d/6811.bugfix b/changelog.d/6811.bugfix
deleted file mode 100644
index 361f2fc2e8..0000000000
--- a/changelog.d/6811.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix waking up other workers when remote server is detected to have come back online.
diff --git a/changelog.d/6816.misc b/changelog.d/6816.misc
deleted file mode 100644
index a77ba1d7a5..0000000000
--- a/changelog.d/6816.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add background update to clean out left rooms from current state.
diff --git a/changelog.d/6819.misc b/changelog.d/6819.misc
deleted file mode 100644
index 4f9a4ac7a5..0000000000
--- a/changelog.d/6819.misc
+++ /dev/null
@@ -1 +0,0 @@
-Detect unknown remote devices and mark cache as stale.
diff --git a/changelog.d/6820.misc b/changelog.d/6820.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6820.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index a236888d3c..bd942d3e1c 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.9.1"
+__version__ = "1.10.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From b0d112e78b96168852260b3986d348ae3a98292f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 3 Feb 2020 13:15:24 +0000
Subject: Fix `room_version` in `on_invite_request` flow (#6827)

I messed this up a bit in #6805, but fortunately we weren't actually doing
anything with the room_version so it didn't matter that it was a str not a RoomVersion.
---
 changelog.d/6827.misc                   |  1 +
 synapse/federation/federation_server.py | 13 ++++++++-----
 synapse/federation/transport/server.py  |  4 ++--
 3 files changed, 11 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6827.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6827.misc b/changelog.d/6827.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6827.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index a4c97ed458..d92d5e8064 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -54,7 +54,7 @@ from synapse.replication.http.federation import (
     ReplicationFederationSendEduRestServlet,
     ReplicationGetQueryRestServlet,
 )
-from synapse.types import get_domain_from_id
+from synapse.types import JsonDict, get_domain_from_id
 from synapse.util import glob_to_regex, unwrapFirstError
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.caches.response_cache import ResponseCache
@@ -396,20 +396,23 @@ class FederationServer(FederationBase):
         time_now = self._clock.time_msec()
         return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
 
-    async def on_invite_request(self, origin, content, room_version):
-        if room_version not in KNOWN_ROOM_VERSIONS:
+    async def on_invite_request(
+        self, origin: str, content: JsonDict, room_version_id: str
+    ):
+        room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
+        if not room_version:
             raise SynapseError(
                 400,
                 "Homeserver does not support this room version",
                 Codes.UNSUPPORTED_ROOM_VERSION,
             )
 
-        format_ver = room_version_to_event_format(room_version)
+        format_ver = room_version.event_format
 
         pdu = event_from_pdu_json(content, format_ver)
         origin_host, _ = parse_server_name(origin)
         await self.check_server_matches_acl(origin_host, pdu.room_id)
-        pdu = await self._check_sigs_and_hash(room_version, pdu)
+        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
         ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
         time_now = self._clock.time_msec()
         return {"event": ret_pdu.get_pdu_json(time_now)}
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 125eadd796..ae48ba8157 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -579,7 +579,7 @@ class FederationV1InviteServlet(BaseFederationServlet):
         # state resolution algorithm, and we don't use that for processing
         # invites
         content = await self.handler.on_invite_request(
-            origin, content, room_version=RoomVersions.V1.identifier
+            origin, content, room_version_id=RoomVersions.V1.identifier
         )
 
         # V1 federation API is defined to return a content of `[200, {...}]`
@@ -606,7 +606,7 @@ class FederationV2InviteServlet(BaseFederationServlet):
         event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
 
         content = await self.handler.on_invite_request(
-            origin, event, room_version=room_version
+            origin, event, room_version_id=room_version
         )
         return 200, content
 
-- 
cgit 1.4.1
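
The corrected convention is to convert the wire-format version string into a structured RoomVersion exactly once, at the edge, and pass the object around internally. A sketch of that guard, reusing the error class and constants from the diff:

    from synapse.api.errors import Codes, SynapseError
    from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

    def parse_room_version(room_version_id: str):
        # Map the untrusted string from the wire to a RoomVersion object,
        # rejecting anything this homeserver does not implement.
        room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
        if not room_version:
            raise SynapseError(
                400,
                "Homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )
        return room_version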


From 370080531ef7ae3f075ff8f577f42c2b6e25295c Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 3 Feb 2020 13:18:42 +0000
Subject: Allow URL-encoded user IDs on user admin api paths (#6825)

---
 changelog.d/6825.bugfix     | 1 +
 synapse/rest/admin/users.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6825.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6825.bugfix b/changelog.d/6825.bugfix
new file mode 100644
index 0000000000..d3cacd6d9a
--- /dev/null
+++ b/changelog.d/6825.bugfix
@@ -0,0 +1 @@
+Allow URL-encoded User IDs on `/_synapse/admin/v2/users/<user_id>[/admin]` endpoints. Thanks to @NHAS for reporting.
\ No newline at end of file
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 3455741195..f1c4434f5c 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -105,7 +105,7 @@ class UsersRestServletV2(RestServlet):
 
 
 class UserRestServletV2(RestServlet):
-    PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P<user_id>@[^/]+)$"),)
+    PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P<user_id>[^/]+)$"),)
 
     """Get request to list user details.
     This needs user to have administrator access in Synapse.
@@ -568,7 +568,7 @@ class UserAdminServlet(RestServlet):
                 {}
     """
 
-    PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P<user_id>@[^/]*)/admin$"),)
+    PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P<user_id>[^/]*)/admin$"),)
 
     def __init__(self, hs):
         self.hs = hs
-- 
cgit 1.4.1
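
The relaxed patterns accept percent-encoded user IDs, which the servlet then URL-decodes. A small stdlib-only demonstration of why the old pattern, which required a literal leading `@`, rejected such requests:

    import re
    from urllib.parse import quote, unquote

    user_id = "@alice:example.com"
    encoded = quote(user_id, safe="")  # '%40alice%3Aexample.com'

    old = re.compile("^/_synapse/admin/v2/users/(?P<user_id>@[^/]+)$")
    new = re.compile("^/_synapse/admin/v2/users/(?P<user_id>[^/]+)$")

    path = "/_synapse/admin/v2/users/" + encoded
    assert old.match(path) is None  # '%40...' has no literal '@'
    assert unquote(new.match(path).group("user_id")) == user_id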


From e49eb1a886c6f139887b1e71f8234e02e738a84a Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Mon, 3 Feb 2020 16:30:21 +0000
Subject: changelog

---
 changelog.d/6837.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6837.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6837.misc b/changelog.d/6837.misc
new file mode 100644
index 0000000000..0496f12de8
--- /dev/null
+++ b/changelog.d/6837.misc
@@ -0,0 +1 @@
+Port much of `synapse.handlers.federation` to async/await.
-- 
cgit 1.4.1


From ae5b3104f0023171b2bb89f08a066e5974ee7666 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 3 Feb 2020 17:10:54 +0000
Subject: Fix stacktraces when using ObservableDeferred and async/await (#6836)

---
 changelog.d/6836.misc         | 1 +
 synapse/util/async_helpers.py | 4 ++++
 2 files changed, 5 insertions(+)
 create mode 100644 changelog.d/6836.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6836.misc b/changelog.d/6836.misc
new file mode 100644
index 0000000000..232488e1e5
--- /dev/null
+++ b/changelog.d/6836.misc
@@ -0,0 +1 @@
+Fix stacktraces when using `ObservableDeferred` and async/await.
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 04b6abdc24..581dffd8a0 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -73,6 +73,10 @@ class ObservableDeferred(object):
         def errback(f):
             object.__setattr__(self, "_result", (False, f))
             while self._observers:
+                # This is a little bit of magic to correctly propagate stack
+                # traces when we `await` on one of the observer deferreds.
+                f.value.__failure__ = f
+
                 try:
                     # TODO: Handle errors here.
                     self._observers.pop().errback(f)
-- 
cgit 1.4.1
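
A rough, self-contained sketch of the failure path the new comment describes:
each caller awaits its own observer deferred, and stashing the original
Failure on the exception lets Twisted's coroutine machinery re-raise it with
the stack trace of the code that actually failed. (Usage below is
illustrative; `ObservableDeferred` is the real class from
`synapse.util.async_helpers`.)

    from twisted.internet import defer
    from synapse.util.async_helpers import ObservableDeferred

    async def wait_for(observable):
        # if the underlying deferred errbacks, the ValueError raised here now
        # carries __failure__, so the traceback keeps the original frames
        return await observable.observe()

    underlying = defer.Deferred()
    observable = ObservableDeferred(underlying, consumeErrors=True)
    waiter = defer.ensureDeferred(wait_for(observable))
    underlying.errback(ValueError("boom"))
    waiter.addErrback(lambda f: f.trap(ValueError))  # handled after inspection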


From b9391c957572224c3a7c22870102fcbd24dea4e0 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 3 Feb 2020 18:05:44 +0000
Subject: Add typing to SyncHandler (#6821)

Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/6821.misc           |   1 +
 synapse/events/__init__.py      |  18 +-
 synapse/handlers/sync.py        | 705 +++++++++++++++++++++-------------------
 tests/storage/test_redaction.py |   5 +-
 tox.ini                         |   1 +
 5 files changed, 381 insertions(+), 349 deletions(-)
 create mode 100644 changelog.d/6821.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6821.misc b/changelog.d/6821.misc
new file mode 100644
index 0000000000..1d5265d5e2
--- /dev/null
+++ b/changelog.d/6821.misc
@@ -0,0 +1 @@
+Add type hints to `SyncHandler`.
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index f813fa2fe7..92f76703b3 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -189,8 +189,14 @@ class EventBase(object):
     redacts = _event_dict_property("redacts", None)
     room_id = _event_dict_property("room_id")
     sender = _event_dict_property("sender")
+    state_key = _event_dict_property("state_key")
+    type = _event_dict_property("type")
     user_id = _event_dict_property("sender")
 
+    @property
+    def event_id(self) -> str:
+        raise NotImplementedError()
+
     @property
     def membership(self):
         return self.content["membership"]
@@ -281,10 +287,7 @@ class FrozenEvent(EventBase):
         else:
             frozen_dict = event_dict
 
-        self.event_id = event_dict["event_id"]
-        self.type = event_dict["type"]
-        if "state_key" in event_dict:
-            self.state_key = event_dict["state_key"]
+        self._event_id = event_dict["event_id"]
 
         super(FrozenEvent, self).__init__(
             frozen_dict,
@@ -294,6 +297,10 @@ class FrozenEvent(EventBase):
             rejected_reason=rejected_reason,
         )
 
+    @property
+    def event_id(self) -> str:
+        return self._event_id
+
     def __str__(self):
         return self.__repr__()
 
@@ -332,9 +339,6 @@ class FrozenEventV2(EventBase):
             frozen_dict = event_dict
 
         self._event_id = None
-        self.type = event_dict["type"]
-        if "state_key" in event_dict:
-            self.state_key = event_dict["state_key"]
 
         super(FrozenEventV2, self).__init__(
             frozen_dict,
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index cd95f85e3f..5f060241b4 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -14,20 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import collections
 import itertools
 import logging
+from typing import Any, Dict, FrozenSet, List, Optional, Set, Tuple
 
 from six import iteritems, itervalues
 
+import attr
 from prometheus_client import Counter
 
 from synapse.api.constants import EventTypes, Membership
+from synapse.api.filtering import FilterCollection
+from synapse.events import EventBase
 from synapse.logging.context import LoggingContext
 from synapse.push.clientformat import format_push_rules_for_user
 from synapse.storage.roommember import MemberSummary
 from synapse.storage.state import StateFilter
-from synapse.types import RoomStreamToken
+from synapse.types import (
+    Collection,
+    JsonDict,
+    RoomStreamToken,
+    StateMap,
+    StreamToken,
+    UserID,
+)
 from synapse.util.async_helpers import concurrently_execute
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.caches.lrucache import LruCache
@@ -62,17 +72,22 @@ LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000
 LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100
 
 
-SyncConfig = collections.namedtuple(
-    "SyncConfig", ["user", "filter_collection", "is_guest", "request_key", "device_id"]
-)
+@attr.s(slots=True, frozen=True)
+class SyncConfig:
+    user = attr.ib(type=UserID)
+    filter_collection = attr.ib(type=FilterCollection)
+    is_guest = attr.ib(type=bool)
+    request_key = attr.ib(type=Tuple[Any, ...])
+    device_id = attr.ib(type=str)
 
 
-class TimelineBatch(
-    collections.namedtuple("TimelineBatch", ["prev_batch", "events", "limited"])
-):
-    __slots__ = []
+@attr.s(slots=True, frozen=True)
+class TimelineBatch:
+    prev_batch = attr.ib(type=StreamToken)
+    events = attr.ib(type=List[EventBase])
+    limited = attr.ib(type=bool)
 
-    def __nonzero__(self):
+    def __nonzero__(self) -> bool:
         """Make the result appear empty if there are no updates. This is used
         to tell if room needs to be part of the sync result.
         """
@@ -81,23 +96,17 @@ class TimelineBatch(
     __bool__ = __nonzero__  # python3
 
 
-class JoinedSyncResult(
-    collections.namedtuple(
-        "JoinedSyncResult",
-        [
-            "room_id",  # str
-            "timeline",  # TimelineBatch
-            "state",  # dict[(str, str), FrozenEvent]
-            "ephemeral",
-            "account_data",
-            "unread_notifications",
-            "summary",
-        ],
-    )
-):
-    __slots__ = []
-
-    def __nonzero__(self):
+@attr.s(slots=True, frozen=True)
+class JoinedSyncResult:
+    room_id = attr.ib(type=str)
+    timeline = attr.ib(type=TimelineBatch)
+    state = attr.ib(type=StateMap[EventBase])
+    ephemeral = attr.ib(type=List[JsonDict])
+    account_data = attr.ib(type=List[JsonDict])
+    unread_notifications = attr.ib(type=JsonDict)
+    summary = attr.ib(type=Optional[JsonDict])
+
+    def __nonzero__(self) -> bool:
         """Make the result appear empty if there are no updates. This is used
         to tell if room needs to be part of the sync result.
         """
@@ -113,20 +122,14 @@ class JoinedSyncResult(
     __bool__ = __nonzero__  # python3
 
 
-class ArchivedSyncResult(
-    collections.namedtuple(
-        "ArchivedSyncResult",
-        [
-            "room_id",  # str
-            "timeline",  # TimelineBatch
-            "state",  # dict[(str, str), FrozenEvent]
-            "account_data",
-        ],
-    )
-):
-    __slots__ = []
-
-    def __nonzero__(self):
+@attr.s(slots=True, frozen=True)
+class ArchivedSyncResult:
+    room_id = attr.ib(type=str)
+    timeline = attr.ib(type=TimelineBatch)
+    state = attr.ib(type=StateMap[EventBase])
+    account_data = attr.ib(type=List[JsonDict])
+
+    def __nonzero__(self) -> bool:
         """Make the result appear empty if there are no updates. This is used
         to tell if room needs to be part of the sync result.
         """
@@ -135,70 +138,88 @@ class ArchivedSyncResult(
     __bool__ = __nonzero__  # python3
 
 
-class InvitedSyncResult(
-    collections.namedtuple(
-        "InvitedSyncResult",
-        ["room_id", "invite"],  # str  # FrozenEvent: the invite event
-    )
-):
-    __slots__ = []
+@attr.s(slots=True, frozen=True)
+class InvitedSyncResult:
+    room_id = attr.ib(type=str)
+    invite = attr.ib(type=EventBase)
 
-    def __nonzero__(self):
+    def __nonzero__(self) -> bool:
         """Invited rooms should always be reported to the client"""
         return True
 
     __bool__ = __nonzero__  # python3
 
 
-class GroupsSyncResult(
-    collections.namedtuple("GroupsSyncResult", ["join", "invite", "leave"])
-):
-    __slots__ = []
+@attr.s(slots=True, frozen=True)
+class GroupsSyncResult:
+    join = attr.ib(type=JsonDict)
+    invite = attr.ib(type=JsonDict)
+    leave = attr.ib(type=JsonDict)
 
-    def __nonzero__(self):
+    def __nonzero__(self) -> bool:
         return bool(self.join or self.invite or self.leave)
 
     __bool__ = __nonzero__  # python3
 
 
-class DeviceLists(
-    collections.namedtuple(
-        "DeviceLists",
-        [
-            "changed",  # list of user_ids whose devices may have changed
-            "left",  # list of user_ids whose devices we no longer track
-        ],
-    )
-):
-    __slots__ = []
+@attr.s(slots=True, frozen=True)
+class DeviceLists:
+    """
+    Attributes:
+        changed: List of user_ids whose devices may have changed
+        left: List of user_ids whose devices we no longer track
+    """
+
+    changed = attr.ib(type=Collection[str])
+    left = attr.ib(type=Collection[str])
 
-    def __nonzero__(self):
+    def __nonzero__(self) -> bool:
         return bool(self.changed or self.left)
 
     __bool__ = __nonzero__  # python3
 
 
-class SyncResult(
-    collections.namedtuple(
-        "SyncResult",
-        [
-            "next_batch",  # Token for the next sync
-            "presence",  # List of presence events for the user.
-            "account_data",  # List of account_data events for the user.
-            "joined",  # JoinedSyncResult for each joined room.
-            "invited",  # InvitedSyncResult for each invited room.
-            "archived",  # ArchivedSyncResult for each archived room.
-            "to_device",  # List of direct messages for the device.
-            "device_lists",  # List of user_ids whose devices have changed
-            "device_one_time_keys_count",  # Dict of algorithm to count for one time keys
-            # for this device
-            "groups",
-        ],
-    )
-):
-    __slots__ = []
-
-    def __nonzero__(self):
+@attr.s
+class _RoomChanges:
+    """The set of room entries to include in the sync, plus the set of joined
+    and left room IDs since last sync.
+    """
+
+    room_entries = attr.ib(type=List["RoomSyncResultBuilder"])
+    invited = attr.ib(type=List[InvitedSyncResult])
+    newly_joined_rooms = attr.ib(type=List[str])
+    newly_left_rooms = attr.ib(type=List[str])
+
+
+@attr.s(slots=True, frozen=True)
+class SyncResult:
+    """
+    Attributes:
+        next_batch: Token for the next sync
+        presence: List of presence events for the user.
+        account_data: List of account_data events for the user.
+        joined: JoinedSyncResult for each joined room.
+        invited: InvitedSyncResult for each invited room.
+        archived: ArchivedSyncResult for each archived room.
+        to_device: List of direct messages for the device.
+        device_lists: List of user_ids whose devices have changed
+        device_one_time_keys_count: Dict of algorithm to count for one time keys
+            for this device
+        groups: Group updates, if any
+    """
+
+    next_batch = attr.ib(type=StreamToken)
+    presence = attr.ib(type=List[JsonDict])
+    account_data = attr.ib(type=List[JsonDict])
+    joined = attr.ib(type=List[JoinedSyncResult])
+    invited = attr.ib(type=List[InvitedSyncResult])
+    archived = attr.ib(type=List[ArchivedSyncResult])
+    to_device = attr.ib(type=List[JsonDict])
+    device_lists = attr.ib(type=DeviceLists)
+    device_one_time_keys_count = attr.ib(type=JsonDict)
+    groups = attr.ib(type=Optional[GroupsSyncResult])
+
+    def __nonzero__(self) -> bool:
         """Make the result appear empty if there are no updates. This is used
         to tell if the notifier needs to wait for more events when polling for
         events.
@@ -240,13 +261,15 @@ class SyncHandler(object):
         )
 
     async def wait_for_sync_for_user(
-        self, sync_config, since_token=None, timeout=0, full_state=False
-    ):
+        self,
+        sync_config: SyncConfig,
+        since_token: Optional[StreamToken] = None,
+        timeout: int = 0,
+        full_state: bool = False,
+    ) -> SyncResult:
         """Get the sync for a client if we have new data for it now. Otherwise
         wait for new data to arrive on the server. If the timeout expires, then
         return an empty sync result.
-        Returns:
-            Deferred[SyncResult]
         """
         # If the user is not part of the mau group, then check that limits have
         # not been exceeded (if not part of the group by this point, almost certain
@@ -265,8 +288,12 @@ class SyncHandler(object):
         return res
 
     async def _wait_for_sync_for_user(
-        self, sync_config, since_token, timeout, full_state
-    ):
+        self,
+        sync_config: SyncConfig,
+        since_token: Optional[StreamToken] = None,
+        timeout: int = 0,
+        full_state: bool = False,
+    ) -> SyncResult:
         if since_token is None:
             sync_type = "initial_sync"
         elif full_state:
@@ -305,25 +332,33 @@ class SyncHandler(object):
 
         return result
 
-    def current_sync_for_user(self, sync_config, since_token=None, full_state=False):
+    async def current_sync_for_user(
+        self,
+        sync_config: SyncConfig,
+        since_token: Optional[StreamToken] = None,
+        full_state: bool = False,
+    ) -> SyncResult:
         """Get the sync for client needed to match what the server has now.
-        Returns:
-            A Deferred SyncResult.
         """
-        return self.generate_sync_result(sync_config, since_token, full_state)
+        return await self.generate_sync_result(sync_config, since_token, full_state)
 
-    async def push_rules_for_user(self, user):
+    async def push_rules_for_user(self, user: UserID) -> JsonDict:
         user_id = user.to_string()
         rules = await self.store.get_push_rules_for_user(user_id)
         rules = format_push_rules_for_user(user, rules)
         return rules
 
-    async def ephemeral_by_room(self, sync_result_builder, now_token, since_token=None):
+    async def ephemeral_by_room(
+        self,
+        sync_result_builder: "SyncResultBuilder",
+        now_token: StreamToken,
+        since_token: Optional[StreamToken] = None,
+    ) -> Tuple[StreamToken, Dict[str, List[JsonDict]]]:
         """Get the ephemeral events for each room the user is in
         Args:
-            sync_result_builder(SyncResultBuilder)
-            now_token (StreamToken): Where the server is currently up to.
-            since_token (StreamToken): Where the server was when the client
+            sync_result_builder
+            now_token: Where the server is currently up to.
+            since_token: Where the server was when the client
                 last synced.
         Returns:
             A tuple of the now StreamToken, updated to reflect which typing
@@ -348,7 +383,7 @@ class SyncHandler(object):
             )
             now_token = now_token.copy_and_replace("typing_key", typing_key)
 
-            ephemeral_by_room = {}
+            ephemeral_by_room = {}  # type: JsonDict
 
             for event in typing:
                 # we want to exclude the room_id from the event, but modifying the
@@ -380,13 +415,13 @@ class SyncHandler(object):
 
     async def _load_filtered_recents(
         self,
-        room_id,
-        sync_config,
-        now_token,
-        since_token=None,
-        recents=None,
-        newly_joined_room=False,
-    ):
+        room_id: str,
+        sync_config: SyncConfig,
+        now_token: StreamToken,
+        since_token: Optional[StreamToken] = None,
+        potential_recents: Optional[List[EventBase]] = None,
+        newly_joined_room: bool = False,
+    ) -> TimelineBatch:
         """
         Returns:
             a TimelineBatch
@@ -397,21 +432,29 @@ class SyncHandler(object):
                 sync_config.filter_collection.blocks_all_room_timeline()
             )
 
-            if recents is None or newly_joined_room or timeline_limit < len(recents):
+            if (
+                potential_recents is None
+                or newly_joined_room
+                or timeline_limit < len(potential_recents)
+            ):
                 limited = True
             else:
                 limited = False
 
-            if recents:
-                recents = sync_config.filter_collection.filter_room_timeline(recents)
+            if potential_recents:
+                recents = sync_config.filter_collection.filter_room_timeline(
+                    potential_recents
+                )
 
                 # We check if there are any state events, if there are then we pass
                 # all current state events to the filter_events function. This is to
                 # ensure that we always include current state in the timeline
-                current_state_ids = frozenset()
+                current_state_ids = frozenset()  # type: FrozenSet[str]
                 if any(e.is_state() for e in recents):
-                    current_state_ids = await self.state.get_current_state_ids(room_id)
-                    current_state_ids = frozenset(itervalues(current_state_ids))
+                    current_state_ids_map = await self.state.get_current_state_ids(
+                        room_id
+                    )
+                    current_state_ids = frozenset(itervalues(current_state_ids_map))
 
                 recents = await filter_events_for_client(
                     self.storage,
@@ -463,8 +506,10 @@ class SyncHandler(object):
                 # ensure that we always include current state in the timeline
                 current_state_ids = frozenset()
                 if any(e.is_state() for e in loaded_recents):
-                    current_state_ids = await self.state.get_current_state_ids(room_id)
-                    current_state_ids = frozenset(itervalues(current_state_ids))
+                    current_state_ids_map = await self.state.get_current_state_ids(
+                        room_id
+                    )
+                    current_state_ids = frozenset(itervalues(current_state_ids_map))
 
                 loaded_recents = await filter_events_for_client(
                     self.storage,
@@ -493,17 +538,15 @@ class SyncHandler(object):
             limited=limited or newly_joined_room,
         )
 
-    async def get_state_after_event(self, event, state_filter=StateFilter.all()):
+    async def get_state_after_event(
+        self, event: EventBase, state_filter: StateFilter = StateFilter.all()
+    ) -> StateMap[str]:
         """
         Get the room state after the given event
 
         Args:
-            event(synapse.events.EventBase): event of interest
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            A Deferred map from ((type, state_key)->Event)
+            event: event of interest
+            state_filter: The state filter used to fetch state from the database.
         """
         state_ids = await self.state_store.get_state_ids_for_event(
             event.event_id, state_filter=state_filter
@@ -514,18 +557,17 @@ class SyncHandler(object):
         return state_ids
 
     async def get_state_at(
-        self, room_id, stream_position, state_filter=StateFilter.all()
-    ):
+        self,
+        room_id: str,
+        stream_position: StreamToken,
+        state_filter: StateFilter = StateFilter.all(),
+    ) -> StateMap[str]:
         """ Get the room state at a particular stream position
 
         Args:
-            room_id(str): room for which to get state
-            stream_position(StreamToken): point at which to get state
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            A Deferred map from ((type, state_key)->Event)
+            room_id: room for which to get state
+            stream_position: point at which to get state
+            state_filter: The state filter used to fetch state from the database.
         """
         # FIXME this claims to get the state at a stream position, but
         # get_recent_events_for_room operates by topo ordering. This therefore
@@ -546,23 +588,25 @@ class SyncHandler(object):
             state = {}
         return state
 
-    async def compute_summary(self, room_id, sync_config, batch, state, now_token):
+    async def compute_summary(
+        self,
+        room_id: str,
+        sync_config: SyncConfig,
+        batch: TimelineBatch,
+        state: StateMap[EventBase],
+        now_token: StreamToken,
+    ) -> Optional[JsonDict]:
         """ Works out a room summary block for this room, summarising the number
         of joined members in the room, and providing the 'hero' members if the
         room has no name so clients can consistently name rooms.  Also adds
         state events to 'state' if needed to describe the heroes.
 
-        Args:
-            room_id(str):
-            sync_config(synapse.handlers.sync.SyncConfig):
-            batch(synapse.handlers.sync.TimelineBatch): The timeline batch for
-                the room that will be sent to the user.
-            state(dict): dict of (type, state_key) -> Event as returned by
-                compute_state_delta
-            now_token(str): Token of the end of the current batch.
-
-        Returns:
-             A deferred dict describing the room summary
+        Args:
+            room_id
+            sync_config
+            batch: The timeline batch for the room that will be sent to the user.
+            state: State as returned by compute_state_delta
+            now_token: Token of the end of the current batch.
         """
 
         # FIXME: we could/should get this from room_stats when matthew/stats lands
@@ -681,7 +725,7 @@ class SyncHandler(object):
 
         return summary
 
-    def get_lazy_loaded_members_cache(self, cache_key):
+    def get_lazy_loaded_members_cache(self, cache_key: Tuple[str, str]) -> LruCache:
         cache = self.lazy_loaded_members_cache.get(cache_key)
         if cache is None:
             logger.debug("creating LruCache for %r", cache_key)
@@ -692,23 +736,24 @@ class SyncHandler(object):
         return cache
 
     async def compute_state_delta(
-        self, room_id, batch, sync_config, since_token, now_token, full_state
-    ):
+        self,
+        room_id: str,
+        batch: TimelineBatch,
+        sync_config: SyncConfig,
+        since_token: Optional[StreamToken],
+        now_token: StreamToken,
+        full_state: bool,
+    ) -> StateMap[EventBase]:
         """ Works out the difference in state between the start of the timeline
         and the previous sync.
 
         Args:
-            room_id(str):
-            batch(synapse.handlers.sync.TimelineBatch): The timeline batch for
-                the room that will be sent to the user.
-            sync_config(synapse.handlers.sync.SyncConfig):
-            since_token(str|None): Token of the end of the previous batch. May
-                be None.
-            now_token(str): Token of the end of the current batch.
-            full_state(bool): Whether to force returning the full state.
-
-        Returns:
-             A deferred dict of (type, state_key) -> Event
+            room_id:
+            batch: The timeline batch for the room that will be sent to the user.
+            sync_config:
+            since_token: Token of the end of the previous batch. May be None.
+            now_token: Token of the end of the current batch.
+            full_state: Whether to force returning the full state.
         """
         # TODO(mjark) Check if the state events were received by the server
         # after the previous sync, since we need to include those state
@@ -800,6 +845,10 @@ class SyncHandler(object):
                 # about them).
                 state_filter = StateFilter.all()
 
+                # If this is an initial sync then full_state should be set, and
+                # that case is handled above. We assert here to ensure that this
+                # is indeed the case.
+                assert since_token is not None
                 state_at_previous_sync = await self.get_state_at(
                     room_id, stream_position=since_token, state_filter=state_filter
                 )
@@ -874,7 +923,7 @@ class SyncHandler(object):
                     if t[0] == EventTypes.Member:
                         cache.set(t[1], event_id)
 
-        state = {}
+        state = {}  # type: Dict[str, EventBase]
         if state_ids:
             state = await self.store.get_events(list(state_ids.values()))
 
@@ -885,7 +934,9 @@ class SyncHandler(object):
             )
         }
 
-    async def unread_notifs_for_room_id(self, room_id, sync_config):
+    async def unread_notifs_for_room_id(
+        self, room_id: str, sync_config: SyncConfig
+    ) -> Optional[Dict[str, str]]:
         with Measure(self.clock, "unread_notifs_for_room_id"):
             last_unread_event_id = await self.store.get_last_receipt_event_id_for_user(
                 user_id=sync_config.user.to_string(),
@@ -893,7 +944,6 @@ class SyncHandler(object):
                 receipt_type="m.read",
             )
 
-            notifs = []
             if last_unread_event_id:
                 notifs = await self.store.get_unread_event_push_actions_by_room_for_user(
                     room_id, sync_config.user.to_string(), last_unread_event_id
@@ -905,17 +955,12 @@ class SyncHandler(object):
         return None
 
     async def generate_sync_result(
-        self, sync_config, since_token=None, full_state=False
-    ):
+        self,
+        sync_config: SyncConfig,
+        since_token: Optional[StreamToken] = None,
+        full_state: bool = False,
+    ) -> SyncResult:
         """Generates a sync result.
-
-        Args:
-            sync_config (SyncConfig)
-            since_token (StreamToken)
-            full_state (bool)
-
-        Returns:
-            Deferred(SyncResult)
         """
         # NB: The now_token gets changed by some of the generate_sync_* methods,
         # this is due to some of the underlying streams not supporting the ability
@@ -977,7 +1022,7 @@ class SyncHandler(object):
         )
 
         device_id = sync_config.device_id
-        one_time_key_counts = {}
+        one_time_key_counts = {}  # type: JsonDict
         if device_id:
             one_time_key_counts = await self.store.count_e2e_one_time_keys(
                 user_id, device_id
@@ -1007,7 +1052,9 @@ class SyncHandler(object):
         )
 
     @measure_func("_generate_sync_entry_for_groups")
-    async def _generate_sync_entry_for_groups(self, sync_result_builder):
+    async def _generate_sync_entry_for_groups(
+        self, sync_result_builder: "SyncResultBuilder"
+    ) -> None:
         user_id = sync_result_builder.sync_config.user.to_string()
         since_token = sync_result_builder.since_token
         now_token = sync_result_builder.now_token
@@ -1052,27 +1099,22 @@ class SyncHandler(object):
     @measure_func("_generate_sync_entry_for_device_list")
     async def _generate_sync_entry_for_device_list(
         self,
-        sync_result_builder,
-        newly_joined_rooms,
-        newly_joined_or_invited_users,
-        newly_left_rooms,
-        newly_left_users,
-    ):
+        sync_result_builder: "SyncResultBuilder",
+        newly_joined_rooms: Set[str],
+        newly_joined_or_invited_users: Set[str],
+        newly_left_rooms: Set[str],
+        newly_left_users: Set[str],
+    ) -> DeviceLists:
         """Generate the DeviceLists section of sync
 
         Args:
-            sync_result_builder (SyncResultBuilder)
-            newly_joined_rooms (set[str]): Set of rooms user has joined since
-                previous sync
-            newly_joined_or_invited_users (set[str]): Set of users that have
-                joined or been invited to a room since previous sync.
-            newly_left_rooms (set[str]): Set of rooms user has left since
+            sync_result_builder
+            newly_joined_rooms: Set of rooms user has joined since previous sync
+            newly_joined_or_invited_users: Set of users that have joined or
+                been invited to a room since previous sync.
+            newly_left_rooms: Set of rooms user has left since previous sync
+            newly_left_users: Set of users that have left a room we're in since
                 previous sync
-            newly_left_users (set[str]): Set of users that have left a room
-                we're in since previous sync
-
-        Returns:
-            Deferred[DeviceLists]
         """
 
         user_id = sync_result_builder.sync_config.user.to_string()
@@ -1133,15 +1175,11 @@ class SyncHandler(object):
         else:
             return DeviceLists(changed=[], left=[])
 
-    async def _generate_sync_entry_for_to_device(self, sync_result_builder):
+    async def _generate_sync_entry_for_to_device(
+        self, sync_result_builder: "SyncResultBuilder"
+    ) -> None:
         """Generates the portion of the sync response. Populates
         `sync_result_builder` with the result.
-
-        Args:
-            sync_result_builder(SyncResultBuilder)
-
-        Returns:
-            Deferred(dict): A dictionary containing the per room account data.
         """
         user_id = sync_result_builder.sync_config.user.to_string()
         device_id = sync_result_builder.sync_config.device_id
@@ -1179,15 +1217,17 @@ class SyncHandler(object):
         else:
             sync_result_builder.to_device = []
 
-    async def _generate_sync_entry_for_account_data(self, sync_result_builder):
+    async def _generate_sync_entry_for_account_data(
+        self, sync_result_builder: "SyncResultBuilder"
+    ) -> Dict[str, Dict[str, JsonDict]]:
         """Generates the account data portion of the sync response. Populates
         `sync_result_builder` with the result.
 
         Args:
-            sync_result_builder(SyncResultBuilder)
+            sync_result_builder
 
         Returns:
-            Deferred(dict): A dictionary containing the per room account data.
+            A dictionary containing the per room account data.
         """
         sync_config = sync_result_builder.sync_config
         user_id = sync_result_builder.sync_config.user.to_string()
@@ -1231,18 +1271,21 @@ class SyncHandler(object):
         return account_data_by_room
 
     async def _generate_sync_entry_for_presence(
-        self, sync_result_builder, newly_joined_rooms, newly_joined_or_invited_users
-    ):
+        self,
+        sync_result_builder: "SyncResultBuilder",
+        newly_joined_rooms: Set[str],
+        newly_joined_or_invited_users: Set[str],
+    ) -> None:
         """Generates the presence portion of the sync response. Populates the
         `sync_result_builder` with the result.
 
         Args:
-            sync_result_builder(SyncResultBuilder)
-            newly_joined_rooms(list): List of rooms that the user has joined
-                since the last sync (or empty if an initial sync)
-            newly_joined_or_invited_users(list): List of users that have joined
-                or been invited to rooms since the last sync (or empty if an initial
-                sync)
+            sync_result_builder
+            newly_joined_rooms: Set of rooms that the user has joined since
+                the last sync (or empty if an initial sync)
+            newly_joined_or_invited_users: Set of users that have joined or
+                been invited to rooms since the last sync (or empty if an
+                initial sync)
         """
         now_token = sync_result_builder.now_token
         sync_config = sync_result_builder.sync_config
@@ -1286,17 +1329,19 @@ class SyncHandler(object):
         sync_result_builder.presence = presence
 
     async def _generate_sync_entry_for_rooms(
-        self, sync_result_builder, account_data_by_room
-    ):
+        self,
+        sync_result_builder: "SyncResultBuilder",
+        account_data_by_room: Dict[str, Dict[str, JsonDict]],
+    ) -> Tuple[Set[str], Set[str], Set[str], Set[str]]:
         """Generates the rooms portion of the sync response. Populates the
         `sync_result_builder` with the result.
 
         Args:
-            sync_result_builder(SyncResultBuilder)
-            account_data_by_room(dict): Dictionary of per room account data
+            sync_result_builder
+            account_data_by_room: Dictionary of per room account data
 
         Returns:
-            Deferred(tuple): Returns a 4-tuple of
+            Returns a 4-tuple of
             `(newly_joined_rooms, newly_joined_or_invited_users,
             newly_left_rooms, newly_left_users)`
         """
@@ -1307,7 +1352,7 @@ class SyncHandler(object):
         )
 
         if block_all_room_ephemeral:
-            ephemeral_by_room = {}
+            ephemeral_by_room = {}  # type: Dict[str, List[JsonDict]]
         else:
             now_token, ephemeral_by_room = await self.ephemeral_by_room(
                 sync_result_builder,
@@ -1328,7 +1373,7 @@ class SyncHandler(object):
                     )
                     if not tags_by_room:
                         logger.debug("no-oping sync")
-                        return [], [], [], []
+                        return set(), set(), set(), set()
 
         ignored_account_data = await self.store.get_global_account_data_by_type_for_user(
             "m.ignored_user_list", user_id=user_id
@@ -1340,19 +1385,22 @@ class SyncHandler(object):
             ignored_users = frozenset()
 
         if since_token:
-            res = await self._get_rooms_changed(sync_result_builder, ignored_users)
-            room_entries, invited, newly_joined_rooms, newly_left_rooms = res
-
+            room_changes = await self._get_rooms_changed(
+                sync_result_builder, ignored_users
+            )
             tags_by_room = await self.store.get_updated_tags(
                 user_id, since_token.account_data_key
             )
         else:
-            res = await self._get_all_rooms(sync_result_builder, ignored_users)
-            room_entries, invited, newly_joined_rooms = res
-            newly_left_rooms = []
+            room_changes = await self._get_all_rooms(sync_result_builder, ignored_users)
 
             tags_by_room = await self.store.get_tags_for_user(user_id)
 
+        room_entries = room_changes.room_entries
+        invited = room_changes.invited
+        newly_joined_rooms = room_changes.newly_joined_rooms
+        newly_left_rooms = room_changes.newly_left_rooms
+
         def handle_room_entries(room_entry):
             return self._generate_room_entry(
                 sync_result_builder,
@@ -1392,13 +1440,15 @@ class SyncHandler(object):
         newly_left_users -= newly_joined_or_invited_users
 
         return (
-            newly_joined_rooms,
+            set(newly_joined_rooms),
             newly_joined_or_invited_users,
-            newly_left_rooms,
+            set(newly_left_rooms),
             newly_left_users,
         )
 
-    async def _have_rooms_changed(self, sync_result_builder):
+    async def _have_rooms_changed(
+        self, sync_result_builder: "SyncResultBuilder"
+    ) -> bool:
         """Returns whether there may be any new events that should be sent down
         the sync. Returns True if there are.
         """
@@ -1422,22 +1472,10 @@ class SyncHandler(object):
                 return True
         return False
 
-    async def _get_rooms_changed(self, sync_result_builder, ignored_users):
+    async def _get_rooms_changed(
+        self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str]
+    ) -> _RoomChanges:
         """Gets the the changes that have happened since the last sync.
-
-        Args:
-            sync_result_builder(SyncResultBuilder)
-            ignored_users(set(str)): Set of users ignored by user.
-
-        Returns:
-            Deferred(tuple): Returns a tuple of the form:
-            `(room_entries, invited_rooms, newly_joined_rooms, newly_left_rooms)`
-
-            where:
-                room_entries is a list [RoomSyncResultBuilder]
-                invited_rooms is a list [InvitedSyncResult]
-                newly_joined_rooms is a list[str] of room ids
-                newly_left_rooms is a list[str] of room ids
         """
         user_id = sync_result_builder.sync_config.user.to_string()
         since_token = sync_result_builder.since_token
@@ -1451,7 +1489,7 @@ class SyncHandler(object):
             user_id, since_token.room_key, now_token.room_key
         )
 
-        mem_change_events_by_room_id = {}
+        mem_change_events_by_room_id = {}  # type: Dict[str, List[EventBase]]
         for event in rooms_changed:
             mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)
 
@@ -1570,7 +1608,7 @@ class SyncHandler(object):
                 # This is all screaming out for a refactor, as the logic here is
                 # subtle and the moving parts numerous.
                 if leave_event.internal_metadata.is_out_of_band_membership():
-                    batch_events = [leave_event]
+                    batch_events = [leave_event]  # type: Optional[List[EventBase]]
                 else:
                     batch_events = None
 
@@ -1636,18 +1674,17 @@ class SyncHandler(object):
                 )
             room_entries.append(entry)
 
-        return room_entries, invited, newly_joined_rooms, newly_left_rooms
+        return _RoomChanges(room_entries, invited, newly_joined_rooms, newly_left_rooms)
 
-    async def _get_all_rooms(self, sync_result_builder, ignored_users):
+    async def _get_all_rooms(
+        self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str]
+    ) -> _RoomChanges:
         """Returns entries for all rooms for the user.
 
         Args:
-            sync_result_builder(SyncResultBuilder)
-            ignored_users(set(str)): Set of users ignored by user.
+            sync_result_builder
+            ignored_users: Set of users ignored by the user.
 
-        Returns:
-            Deferred(tuple): Returns a tuple of the form:
-            `([RoomSyncResultBuilder], [InvitedSyncResult], [])`
         """
 
         user_id = sync_result_builder.sync_config.user.to_string()
@@ -1709,30 +1746,30 @@ class SyncHandler(object):
                     )
                 )
 
-        return room_entries, invited, []
+        return _RoomChanges(room_entries, invited, [], [])
 
     async def _generate_room_entry(
         self,
-        sync_result_builder,
-        ignored_users,
-        room_builder,
-        ephemeral,
-        tags,
-        account_data,
-        always_include=False,
+        sync_result_builder: "SyncResultBuilder",
+        ignored_users: Set[str],
+        room_builder: "RoomSyncResultBuilder",
+        ephemeral: List[JsonDict],
+        tags: Optional[List[JsonDict]],
+        account_data: Dict[str, JsonDict],
+        always_include: bool = False,
     ):
         """Populates the `joined` and `archived` section of `sync_result_builder`
         based on the `room_builder`.
 
         Args:
-            sync_result_builder(SyncResultBuilder)
-            ignored_users(set(str)): Set of users ignored by user.
-            room_builder(RoomSyncResultBuilder)
-            ephemeral(list): List of new ephemeral events for room
-            tags(list): List of *all* tags for room, or None if there has been
+            sync_result_builder
+            ignored_users: Set of users ignored by the user.
+            room_builder
+            ephemeral: List of new ephemeral events for room
+            tags: List of *all* tags for room, or None if there has been
                 no change.
-            account_data(list): List of new account data for room
-            always_include(bool): Always include this room in the sync response,
+            account_data: List of new account data for room
+            always_include: Always include this room in the sync response,
                 even if empty.
         """
         newly_joined = room_builder.newly_joined
@@ -1758,7 +1795,7 @@ class SyncHandler(object):
             sync_config,
             now_token=upto_token,
             since_token=since_token,
-            recents=events,
+            potential_recents=events,
             newly_joined_room=newly_joined,
         )
 
@@ -1809,7 +1846,7 @@ class SyncHandler(object):
             room_id, batch, sync_config, since_token, now_token, full_state=full_state
         )
 
-        summary = {}
+        summary = {}  # type: Optional[JsonDict]
 
         # we include a summary in room responses when we're lazy loading
         # members (as the client otherwise doesn't have enough info to form
@@ -1833,7 +1870,7 @@ class SyncHandler(object):
             )
 
         if room_builder.rtype == "joined":
-            unread_notifications = {}
+            unread_notifications = {}  # type: Dict[str, str]
             room_sync = JoinedSyncResult(
                 room_id=room_id,
                 timeline=batch,
@@ -1860,18 +1897,20 @@ class SyncHandler(object):
                     % (room_id, user_id, len(state))
                 )
         elif room_builder.rtype == "archived":
-            room_sync = ArchivedSyncResult(
+            archived_room_sync = ArchivedSyncResult(
                 room_id=room_id,
                 timeline=batch,
                 state=state,
                 account_data=account_data_events,
             )
-            if room_sync or always_include:
-                sync_result_builder.archived.append(room_sync)
+            if archived_room_sync or always_include:
+                sync_result_builder.archived.append(archived_room_sync)
         else:
             raise Exception("Unrecognized rtype: %r" % (room_builder.rtype,))
 
-    async def get_rooms_for_user_at(self, user_id, stream_ordering):
+    async def get_rooms_for_user_at(
+        self, user_id: str, stream_ordering: int
+    ) -> FrozenSet[str]:
         """Get set of joined rooms for a user at the given stream ordering.
 
         The stream ordering *must* be recent, otherwise this may throw an
@@ -1879,12 +1918,11 @@ class SyncHandler(object):
         current token, which should be perfectly fine).
 
         Args:
-            user_id (str)
-            stream_ordering (int)
+            user_id
+            stream_ordering
 
         Returns:
-            Deferred[frozenset[str]]: Set of room_ids the user is in at given
-            stream_ordering.
+            Set of room_ids the user is in at given stream_ordering.
         """
         joined_rooms = await self.store.get_rooms_for_user_with_stream_ordering(user_id)
 
@@ -1911,11 +1949,10 @@ class SyncHandler(object):
             if user_id in users_in_room:
                 joined_room_ids.add(room_id)
 
-        joined_room_ids = frozenset(joined_room_ids)
-        return joined_room_ids
+        return frozenset(joined_room_ids)
 
 
-def _action_has_highlight(actions):
+def _action_has_highlight(actions: List[JsonDict]) -> bool:
     for action in actions:
         try:
             if action.get("set_tweak", None) == "highlight":
@@ -1927,22 +1964,23 @@ def _action_has_highlight(actions):
 
 
 def _calculate_state(
-    timeline_contains, timeline_start, previous, current, lazy_load_members
-):
+    timeline_contains: StateMap[str],
+    timeline_start: StateMap[str],
+    previous: StateMap[str],
+    current: StateMap[str],
+    lazy_load_members: bool,
+) -> StateMap[str]:
     """Works out what state to include in a sync response.
 
     Args:
-        timeline_contains (dict): state in the timeline
-        timeline_start (dict): state at the start of the timeline
-        previous (dict): state at the end of the previous sync (or empty dict
+        timeline_contains: state in the timeline
+        timeline_start: state at the start of the timeline
+        previous: state at the end of the previous sync (or empty dict
             if this is an initial sync)
-        current (dict): state at the end of the timeline
-        lazy_load_members (bool): whether to return members from timeline_start
+        current: state at the end of the timeline
+        lazy_load_members: whether to return members from timeline_start
             or not.  assumes that timeline_start has already been filtered to
             include only the members the client needs to know about.
-
-    Returns:
-        dict
     """
     event_id_to_key = {
         e: key
@@ -1979,15 +2017,16 @@ def _calculate_state(
     return {event_id_to_key[e]: e for e in state_ids}
 
 
-class SyncResultBuilder(object):
+@attr.s
+class SyncResultBuilder:
     """Used to help build up a new SyncResult for a user
 
     Attributes:
-        sync_config (SyncConfig)
-        full_state (bool)
-        since_token (StreamToken)
-        now_token (StreamToken)
-        joined_room_ids (list[str])
+        sync_config
+        full_state: The full_state flag as specified by user
+        since_token: The token supplied by user, or None.
+        now_token: The token to sync up to.
+        joined_room_ids: List of rooms the user is joined to
 
         # The following mirror the fields in a sync response
         presence (list)
@@ -1995,61 +2034,45 @@ class SyncResultBuilder(object):
         joined (list[JoinedSyncResult])
         invited (list[InvitedSyncResult])
         archived (list[ArchivedSyncResult])
-        device (list)
         groups (GroupsSyncResult|None)
         to_device (list)
     """
 
-    def __init__(
-        self, sync_config, full_state, since_token, now_token, joined_room_ids
-    ):
-        """
-        Args:
-            sync_config (SyncConfig)
-            full_state (bool): The full_state flag as specified by user
-            since_token (StreamToken): The token supplied by user, or None.
-            now_token (StreamToken): The token to sync up to.
-            joined_room_ids (list[str]): List of rooms the user is joined to
-        """
-        self.sync_config = sync_config
-        self.full_state = full_state
-        self.since_token = since_token
-        self.now_token = now_token
-        self.joined_room_ids = joined_room_ids
-
-        self.presence = []
-        self.account_data = []
-        self.joined = []
-        self.invited = []
-        self.archived = []
-        self.device = []
-        self.groups = None
-        self.to_device = []
+    sync_config = attr.ib(type=SyncConfig)
+    full_state = attr.ib(type=bool)
+    since_token = attr.ib(type=Optional[StreamToken])
+    now_token = attr.ib(type=StreamToken)
+    joined_room_ids = attr.ib(type=FrozenSet[str])
+
+    presence = attr.ib(type=List[JsonDict], default=attr.Factory(list))
+    account_data = attr.ib(type=List[JsonDict], default=attr.Factory(list))
+    joined = attr.ib(type=List[JoinedSyncResult], default=attr.Factory(list))
+    invited = attr.ib(type=List[InvitedSyncResult], default=attr.Factory(list))
+    archived = attr.ib(type=List[ArchivedSyncResult], default=attr.Factory(list))
+    groups = attr.ib(type=Optional[GroupsSyncResult], default=None)
+    to_device = attr.ib(type=List[JsonDict], default=attr.Factory(list))
 
 
+@attr.s
 class RoomSyncResultBuilder(object):
     """Stores information needed to create either a `JoinedSyncResult` or
     `ArchivedSyncResult`.
+
+    Attributes:
+        room_id
+        rtype: One of `"joined"` or `"archived"`
+        events: List of events to include in the room (more events may be added
+            when generating result).
+        newly_joined: If the user has newly joined the room
+        full_state: Whether the full state should be sent in result
+        since_token: Earliest point to return events from, or None
+        upto_token: Latest point to return events from.
     """
 
-    def __init__(
-        self, room_id, rtype, events, newly_joined, full_state, since_token, upto_token
-    ):
-        """
-        Args:
-            room_id(str)
-            rtype(str): One of `"joined"` or `"archived"`
-            events(list[FrozenEvent]): List of events to include in the room
-                (more events may be added when generating result).
-            newly_joined(bool): If the user has newly joined the room
-            full_state(bool): Whether the full state should be sent in result
-            since_token(StreamToken): Earliest point to return events from, or None
-            upto_token(StreamToken): Latest point to return events from.
-        """
-        self.room_id = room_id
-        self.rtype = rtype
-        self.events = events
-        self.newly_joined = newly_joined
-        self.full_state = full_state
-        self.since_token = since_token
-        self.upto_token = upto_token
+    room_id = attr.ib(type=str)
+    rtype = attr.ib(type=str)
+    events = attr.ib(type=Optional[List[EventBase]])
+    newly_joined = attr.ib(type=bool)
+    full_state = attr.ib(type=bool)
+    since_token = attr.ib(type=Optional[StreamToken])
+    upto_token = attr.ib(type=StreamToken)
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index feb1c07cb2..b9ee6ec1ec 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -238,8 +238,11 @@ class RedactionTestCase(unittest.HomeserverTestCase):
             @defer.inlineCallbacks
             def build(self, prev_event_ids):
                 built_event = yield self._base_builder.build(prev_event_ids)
-                built_event.event_id = self._event_id
+
+                built_event._event_id = self._event_id
                 built_event._event_dict["event_id"] = self._event_id
+                assert built_event.event_id == self._event_id
+
                 return built_event
 
             @property
diff --git a/tox.ini b/tox.ini
index 88ef12bebd..ef22368cf1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -180,6 +180,7 @@ commands = mypy \
             synapse/api \
             synapse/config/ \
             synapse/federation/transport \
+            synapse/handlers/sync.py \
             synapse/handlers/ui_auth \
             synapse/logging/ \
             synapse/module_api \
-- 
cgit 1.4.1
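
The conversion pattern this commit applies throughout sync.py, in miniature:
each namedtuple becomes a frozen attrs class whose fields carry types that
mypy can check, while the truthiness hook is kept so sync results can still
signal "nothing to send". (A minimal sketch; names are illustrative.)

    import attr
    from typing import List, Optional

    @attr.s(slots=True, frozen=True)
    class ExampleSyncBatch:
        events = attr.ib(type=List[str])
        prev_batch = attr.ib(type=Optional[str], default=None)

        def __nonzero__(self) -> bool:
            # an empty batch is falsey, so callers can cheaply skip it
            return bool(self.events)

        __bool__ = __nonzero__  # python3

    assert not ExampleSyncBatch(events=[])
    assert ExampleSyncBatch(events=["$event1"])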


From 4b4536dd02cc1128335383b6cc36afc1f0f6d71c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Mon, 3 Feb 2020 21:15:08 +0000
Subject: newsfile

---
 changelog.d/6840.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6840.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6840.misc b/changelog.d/6840.misc
new file mode 100644
index 0000000000..0496f12de8
--- /dev/null
+++ b/changelog.d/6840.misc
@@ -0,0 +1 @@
+Port much of `synapse.handlers.federation` to async/await.
-- 
cgit 1.4.1


From 245ee142209f634a51942681bb141e617e1ecd55 Mon Sep 17 00:00:00 2001
From: Hubert Chathi 
Date: Tue, 4 Feb 2020 00:21:07 -0500
Subject: add changelog

---
 changelog.d/6844.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6844.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6844.bugfix b/changelog.d/6844.bugfix
new file mode 100644
index 0000000000..e84aa1029f
--- /dev/null
+++ b/changelog.d/6844.bugfix
@@ -0,0 +1 @@
+Fix an issue with cross-signing where device signatures were not sent to remote servers.
-- 
cgit 1.4.1


From c87572d6e426099fa36e2cd8260319531ec0fbb8 Mon Sep 17 00:00:00 2001
From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com>
Date: Tue, 4 Feb 2020 16:21:09 +0000
Subject: Update CONTRIBUTING.md about merging PRs. (#6846)

---
 CONTRIBUTING.md      | 14 ++++++++++++++
 changelog.d/6846.doc |  1 +
 2 files changed, 15 insertions(+)
 create mode 100644 changelog.d/6846.doc

(limited to 'changelog.d')

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5736ede6c4..4b01b6ac8c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -200,6 +200,20 @@ Git allows you to add this signoff automatically when using the `-s`
 flag to `git commit`, which uses the name and email set in your
 `user.name` and `user.email` git configs.
 
+## Merge Strategy
+
+We use the commit history of develop/master extensively to identify
+when regressions were introduced and what changes have been made.
+
+We aim to have a clean merge history, which means we normally squash-merge
+changes into develop. For small changes this means there is no need to rebase
+to clean up your PR before merging. Larger changes with an organised set of
+commits may be merged as-is, if the history is judged to be useful.
+
+This use of squash-merging will mean PRs built on each other will be hard to 
+merge. We suggest avoiding these where possible, and if required, ensuring
+each PR has a tidy set of commits to ease merging.
+
 ## Conclusion
 
 That's it! Matrix is a very open and collaborative project as you might expect
diff --git a/changelog.d/6846.doc b/changelog.d/6846.doc
new file mode 100644
index 0000000000..ad69d608c0
--- /dev/null
+++ b/changelog.d/6846.doc
@@ -0,0 +1 @@
+Add details of PR merge strategy to contributing docs.
\ No newline at end of file
-- 
cgit 1.4.1


From 6475382d807e1fed095d1e3fbd04884799ebd612 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 4 Feb 2020 17:25:54 +0000
Subject: Fix detecting unknown devices from remote encrypted events. (#6848)

We were looking at the wrong event type (`m.room.encryption` vs
`m.room.encrypted`).

Also fix up the duplicate `EventTypes` entries.

Introduced in #6776.
---
 changelog.d/6848.bugfix                   | 1 +
 synapse/api/constants.py                  | 3 +--
 synapse/handlers/federation.py            | 2 +-
 synapse/handlers/room.py                  | 2 +-
 synapse/handlers/stats.py                 | 2 +-
 synapse/storage/data_stores/main/stats.py | 4 ++--
 6 files changed, 7 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6848.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6848.bugfix b/changelog.d/6848.bugfix
new file mode 100644
index 0000000000..65688e5d57
--- /dev/null
+++ b/changelog.d/6848.bugfix
@@ -0,0 +1 @@
+Fix detecting unknown devices from remote encrypted events.
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 0ade47e624..cc8577552b 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -77,12 +77,11 @@ class EventTypes(object):
     Aliases = "m.room.aliases"
     Redaction = "m.room.redaction"
     ThirdPartyInvite = "m.room.third_party_invite"
-    Encryption = "m.room.encryption"
     RelatedGroups = "m.room.related_groups"
 
     RoomHistoryVisibility = "m.room.history_visibility"
     CanonicalAlias = "m.room.canonical_alias"
-    Encryption = "m.room.encryption"
+    Encrypted = "m.room.encrypted"
     RoomAvatar = "m.room.avatar"
     RoomEncryption = "m.room.encryption"
     GuestAccess = "m.room.guest_access"
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index c86d3177e9..488200a2d1 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -752,7 +752,7 @@ class FederationHandler(BaseHandler):
 
         # For encrypted messages we check that we know about the sending device,
         # if we don't then we mark the device cache for that user as stale.
-        if event.type == EventTypes.Encryption:
+        if event.type == EventTypes.Encrypted:
             device_id = event.content.get("device_id")
             if device_id is not None:
                 cached_devices = await self.store.get_cached_devices_for_user(
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 1382399557..b609a65f47 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -360,7 +360,7 @@ class RoomCreationHandler(BaseHandler):
             (EventTypes.RoomHistoryVisibility, ""),
             (EventTypes.GuestAccess, ""),
             (EventTypes.RoomAvatar, ""),
-            (EventTypes.Encryption, ""),
+            (EventTypes.RoomEncryption, ""),
             (EventTypes.ServerACL, ""),
             (EventTypes.RelatedGroups, ""),
             (EventTypes.PowerLevels, ""),
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 7f7d56390e..68e6edace5 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -286,7 +286,7 @@ class StatsHandler(StateDeltasHandler):
                 room_state["history_visibility"] = event_content.get(
                     "history_visibility"
                 )
-            elif typ == EventTypes.Encryption:
+            elif typ == EventTypes.RoomEncryption:
                 room_state["encryption"] = event_content.get("algorithm")
             elif typ == EventTypes.Name:
                 room_state["name"] = event_content.get("name")
diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py
index 7bc186e9a1..7af1495e47 100644
--- a/synapse/storage/data_stores/main/stats.py
+++ b/synapse/storage/data_stores/main/stats.py
@@ -744,7 +744,7 @@ class StatsStore(StateDeltasStore):
                     EventTypes.Create,
                     EventTypes.JoinRules,
                     EventTypes.RoomHistoryVisibility,
-                    EventTypes.Encryption,
+                    EventTypes.RoomEncryption,
                     EventTypes.Name,
                     EventTypes.Topic,
                     EventTypes.RoomAvatar,
@@ -816,7 +816,7 @@ class StatsStore(StateDeltasStore):
                 room_state["history_visibility"] = event.content.get(
                     "history_visibility"
                 )
-            elif event.type == EventTypes.Encryption:
+            elif event.type == EventTypes.RoomEncryption:
                 room_state["encryption"] = event.content.get("algorithm")
             elif event.type == EventTypes.Name:
                 room_state["name"] = event.content.get("name")
-- 
cgit 1.4.1


From d88e0ec0802b3b0a49853fb6b777a35b7c195ea6 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 4 Feb 2020 21:31:08 +0000
Subject: Database updates to populate rooms.room_version (#6847)

We're going to need this so that we can figure out how to handle redactions when fetching events from the database.
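
For reference, the value the migration extracts is what a Python reader
would pull from the create event's JSON (a sketch, assuming `event_json`
is the serialised create event):

    import json

    def room_version_from_create_event(event_json: str) -> str:
        # Rooms which predate versioning omit room_version from the
        # create event and are implicitly version "1".
        content = json.loads(event_json).get("content", {})
        return content.get("room_version", "1")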
---
 changelog.d/6847.misc                              |  1 +
 .../delta/57/rooms_version_column_2.sql.postgres   | 35 ++++++++++++++++++++++
 .../delta/57/rooms_version_column_2.sql.sqlite     | 22 ++++++++++++++
 3 files changed, 58 insertions(+)
 create mode 100644 changelog.d/6847.misc
 create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres
 create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite

(limited to 'changelog.d')

diff --git a/changelog.d/6847.misc b/changelog.d/6847.misc
new file mode 100644
index 0000000000..094e911adb
--- /dev/null
+++ b/changelog.d/6847.misc
@@ -0,0 +1 @@
+Populate `rooms.room_version` database column at startup, rather than in a background update.
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres
new file mode 100644
index 0000000000..c601cff6de
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres
@@ -0,0 +1,35 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- when we first added the room_version column, it was populated via a background
+-- update. We now need it to be populated before synapse starts, so we populate
+-- any rows which still have a NULL room version now. For servers which have
+-- completed the background update, this will be pretty quick.
+
+-- the following query will set room_version to NULL if no create event is found for
+-- the room in current_state_events, and will set it to '1' if a create event with no
+-- room_version is found.
+
+UPDATE rooms SET room_version=(
+    SELECT COALESCE(json::json->'content'->>'room_version','1')
+    FROM current_state_events cse INNER JOIN event_json ej USING (event_id)
+    WHERE cse.room_id=rooms.room_id AND cse.type='m.room.create' AND cse.state_key=''
+) WHERE rooms.room_version IS NULL;
+
+-- we still allow the background update to complete: it has the useful side-effect of
+-- populating `rooms` with any missing rooms (based on the current_state_events table).
+
+-- see also rooms_version_column_2.sql.sqlite which has a copy of the above query, using
+-- sqlite syntax for the json extraction.
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite
new file mode 100644
index 0000000000..335c6f2074
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite
@@ -0,0 +1,22 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- see rooms_version_column_2.sql.postgres for details of what's going on here.
+
+UPDATE rooms SET room_version=(
+    SELECT COALESCE(json_extract(ej.json, '$.content.room_version'), '1')
+    FROM current_state_events cse INNER JOIN event_json ej USING (event_id)
+    WHERE cse.room_id=rooms.room_id AND cse.type='m.room.create' AND cse.state_key=''
+) WHERE rooms.room_version IS NULL;
-- 
cgit 1.4.1


From a831d2e4e3c424fb54f186bfa7d83a17965f933e Mon Sep 17 00:00:00 2001
From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com>
Date: Wed, 5 Feb 2020 08:57:38 +0000
Subject: Reduce performance logging to DEBUG (#6833)

* Reduce txn performance logging to DEBUG
* Changelog.d
---
 changelog.d/6833.misc       | 1 +
 synapse/storage/database.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6833.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6833.misc b/changelog.d/6833.misc
new file mode 100644
index 0000000000..8a0605f90b
--- /dev/null
+++ b/changelog.d/6833.misc
@@ -0,0 +1 @@
+Reduce log level to DEBUG for `synapse.storage.TIME`.
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 1003dd84a5..3eeb2f7c04 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -343,7 +343,7 @@ class Database(object):
 
             top_three_counters = self._txn_perf_counters.interval(duration, limit=3)
 
-            perf_logger.info(
+            perf_logger.debug(
                 "Total database time: %.3f%% {%s}", ratio * 100, top_three_counters
             )
 
-- 
cgit 1.4.1


From 60d06724268891ad3b1e9dc6fe7cd080f9ba21b7 Mon Sep 17 00:00:00 2001
From: Hubert Chathi 
Date: Tue, 4 Feb 2020 12:03:54 -0500
Subject: Merge pull request #6844 from
 matrix-org/uhoreg/cross_signing_fix_device_fed

add device signatures to device key query results
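
The merge is a nested-dict update; a standalone sketch of the pattern
used in the diff below:

    def merge_signatures(result_keys: dict, device: dict) -> None:
        # Fold each signing user's signatures into the outgoing key dict
        # without clobbering any signatures already present.
        for sig_user_id, sigs in device.get("signatures", {}).items():
            result_keys.setdefault("signatures", {}).setdefault(
                sig_user_id, {}
            ).update(sigs)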
---
 changelog.d/6844.bugfix                     |  1 +
 synapse/storage/data_stores/main/devices.py | 10 ++++++++++
 2 files changed, 11 insertions(+)
 create mode 100644 changelog.d/6844.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6844.bugfix b/changelog.d/6844.bugfix
new file mode 100644
index 0000000000..e84aa1029f
--- /dev/null
+++ b/changelog.d/6844.bugfix
@@ -0,0 +1 @@
+Fix an issue with cross-signing where device signatures were not sent to remote servers.
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py
index ea0503476f..b7617efb80 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/data_stores/main/devices.py
@@ -320,6 +320,11 @@ class DeviceWorkerStore(SQLBaseStore):
                     device_display_name = device.get("device_display_name", None)
                     if device_display_name:
                         result["device_display_name"] = device_display_name
+                    if "signatures" in device:
+                        for sig_user_id, sigs in device["signatures"].items():
+                            result["keys"].setdefault("signatures", {}).setdefault(
+                                sig_user_id, {}
+                            ).update(sigs)
                 else:
                     result["deleted"] = True
 
@@ -524,6 +529,11 @@ class DeviceWorkerStore(SQLBaseStore):
                 device_display_name = device.get("device_display_name", None)
                 if device_display_name:
                     result["device_display_name"] = device_display_name
+                if "signatures" in device:
+                    for sig_user_id, sigs in device["signatures"].items():
+                        result["keys"].setdefault("signatures", {}).setdefault(
+                            sig_user_id, {}
+                        ).update(sigs)
 
                 results.append(result)
 
-- 
cgit 1.4.1


From a58860e4802c31680ba43e59ec537984af9f5637 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 5 Feb 2020 14:02:39 +0000
Subject: Check sender_key matches on inbound encrypted events. (#6850)

If they don't then the device lists are probably out of sync.
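
A rough sketch of the new check (simplified; the real handler also
copes with events that omit the device_id entirely):

    def sender_key_matches(device: dict, device_id: str, content: dict) -> bool:
        keys = device.get("keys", {}).get("keys", {})
        if content.get("algorithm") == "m.megolm.v1.aes-sha2":
            # Megolm events should use the device's curve25519 identity key.
            expected = [keys.get("curve25519:%s" % (device_id,))]
        else:
            # Unknown algorithm: accept any key we hold for the device.
            expected = list(keys.values())
        return content.get("sender_key") in expected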
---
 changelog.d/6850.misc          |  1 +
 synapse/handlers/device.py     |  8 ++++-
 synapse/handlers/federation.py | 72 ++++++++++++++++++++++++++++++++++--------
 3 files changed, 67 insertions(+), 14 deletions(-)
 create mode 100644 changelog.d/6850.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6850.misc b/changelog.d/6850.misc
new file mode 100644
index 0000000000..418569113f
--- /dev/null
+++ b/changelog.d/6850.misc
@@ -0,0 +1 @@
+Detect unexpected sender keys on inbound encrypted events and resync device lists.
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 26ef5e150c..a9bd431486 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -598,7 +598,13 @@ class DeviceListUpdater(object):
             # happens if we've missed updates.
             resync = yield self._need_to_do_resync(user_id, pending_updates)
 
-            logger.debug("Need to re-sync devices for %r? %r", user_id, resync)
+            if logger.isEnabledFor(logging.INFO):
+                logger.info(
+                    "Received device list update for %s, requiring resync: %s. Devices: %s",
+                    user_id,
+                    resync,
+                    ", ".join(u[0] for u in pending_updates),
+                )
 
             if resync:
                 yield self.user_device_resync(user_id)
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 488200a2d1..e9441bbeff 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -754,27 +754,73 @@ class FederationHandler(BaseHandler):
         # if we don't then we mark the device cache for that user as stale.
         if event.type == EventTypes.Encrypted:
             device_id = event.content.get("device_id")
+            sender_key = event.content.get("sender_key")
+
+            cached_devices = await self.store.get_cached_devices_for_user(event.sender)
+
+            resync = False  # Whether we should resync device lists.
+
+            device = None
             if device_id is not None:
-                cached_devices = await self.store.get_cached_devices_for_user(
-                    event.sender
-                )
-                if device_id not in cached_devices:
+                device = cached_devices.get(device_id)
+                if device is None:
                     logger.info(
                         "Received event from remote device not in our cache: %s %s",
                         event.sender,
                         device_id,
                     )
-                    await self.store.mark_remote_user_device_cache_as_stale(
-                        event.sender
+                    resync = True
+
+            # We also check if the `sender_key` matches what we expect.
+            if sender_key is not None:
+                # Figure out what sender key we're expecting. If we know the
+                # device and recognize the algorithm then we can work out the
+                # exact key to expect. Otherwise check it matches any key we
+                # have for that device.
+                if device:
+                    keys = device.get("keys", {}).get("keys", {})
+
+                    if event.content.get("algorithm") == "m.megolm.v1.aes-sha2":
+                        # For this algorithm we expect a curve25519 key.
+                        key_name = "curve25519:%s" % (device_id,)
+                        current_keys = [keys.get(key_name)]
+                    else:
+                        # We don't understand the algorithm, so we just
+                        # check it matches a key for the device.
+                        current_keys = keys.values()
+                elif device_id:
+                    # We don't have any keys for the device ID.
+                    current_keys = []
+                else:
+                    # The event didn't include a device ID, so we just look for
+                    # keys across all devices.
+                    current_keys = (
+                        key
+                        for device in cached_devices.values()
+                        for key in device.get("keys", {}).get("keys", {}).values()
                     )
 
-                    # Immediately attempt a resync in the background
-                    if self.config.worker_app:
-                        return run_in_background(self._user_device_resync, event.sender)
-                    else:
-                        return run_in_background(
-                            self._device_list_updater.user_device_resync, event.sender
-                        )
+                # We now check that the sender key matches (one of) the expected
+                # keys.
+                if sender_key not in current_keys:
+                    logger.info(
+                        "Received event from remote device with unexpected sender key: %s %s: %s",
+                        event.sender,
+                        device_id or "",
+                        sender_key,
+                    )
+                    resync = True
+
+            if resync:
+                await self.store.mark_remote_user_device_cache_as_stale(event.sender)
+
+                # Immediately attempt a resync in the background
+                if self.config.worker_app:
+                    return run_in_background(self._user_device_resync, event.sender)
+                else:
+                    return run_in_background(
+                        self._device_list_updater.user_device_resync, event.sender
+                    )
 
     @log_function
     async def backfill(self, dest, room_id, limit, extremities):
-- 
cgit 1.4.1


From ff70ec0a00bd65d819ce42fdb46d67160c197202 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Fri, 31 Jan 2020 15:30:02 +0000
Subject: Newsfile

---
 changelog.d/6823.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6823.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6823.misc b/changelog.d/6823.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6823.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
-- 
cgit 1.4.1


From 39c2d26e0b80ee0cd5589fc577327f2d3d80a446 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Wed, 5 Feb 2020 12:41:33 -0500
Subject: Add quotes around pip install target (my shell complained without
 them).

---
 README.rst            | 2 +-
 changelog.d/6855.misc | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6855.misc

(limited to 'changelog.d')

diff --git a/README.rst b/README.rst
index 2691dfc23d..4db7d17e94 100644
--- a/README.rst
+++ b/README.rst
@@ -272,7 +272,7 @@ to install using pip and a virtualenv::
 
     virtualenv -p python3 env
     source env/bin/activate
-    python -m pip install --no-use-pep517 -e .[all]
+    python -m pip install --no-use-pep517 -e ".[all]"
 
 This will run a process of downloading and installing all the needed
 dependencies into a virtual env.
diff --git a/changelog.d/6855.misc b/changelog.d/6855.misc
new file mode 100644
index 0000000000..904361ddfb
--- /dev/null
+++ b/changelog.d/6855.misc
@@ -0,0 +1 @@
+Update pip install directions in the README to avoid an error when using zsh.
-- 
cgit 1.4.1


From 6a7e90ad782bddce95fa0c7d93e56291aa31c33d Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 6 Feb 2020 10:40:08 +0000
Subject: 1.10.0rc2

---
 CHANGES.md              | 16 ++++++++++++++++
 changelog.d/6844.bugfix |  1 -
 changelog.d/6848.bugfix |  1 -
 changelog.d/6850.misc   |  1 -
 synapse/__init__.py     |  2 +-
 5 files changed, 17 insertions(+), 4 deletions(-)
 delete mode 100644 changelog.d/6844.bugfix
 delete mode 100644 changelog.d/6848.bugfix
 delete mode 100644 changelog.d/6850.misc

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index ab6fce3e7d..ee0e5d25e4 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,19 @@
+Synapse 1.10.0rc2 (2020-02-06)
+==============================
+
+Bugfixes
+--------
+
+- Fix an issue with cross-signing where device signatures were not sent to remote servers. ([\#6844](https://github.com/matrix-org/synapse/issues/6844))
+- Fix detecting unknown devices from remote encrypted events. ([\#6848](https://github.com/matrix-org/synapse/issues/6848))
+
+
+Internal Changes
+----------------
+
+- Detect unexpected sender keys on inbound encrypted events and resync device lists. ([\#6850](https://github.com/matrix-org/synapse/issues/6850))
+
+
 Synapse 1.10.0rc1 (2020-01-31)
 ==============================
 
diff --git a/changelog.d/6844.bugfix b/changelog.d/6844.bugfix
deleted file mode 100644
index e84aa1029f..0000000000
--- a/changelog.d/6844.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix an issue with cross-signing where device signatures were not sent to remote servers.
diff --git a/changelog.d/6848.bugfix b/changelog.d/6848.bugfix
deleted file mode 100644
index 65688e5d57..0000000000
--- a/changelog.d/6848.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix detecting unknown devices from remote encrypted events.
diff --git a/changelog.d/6850.misc b/changelog.d/6850.misc
deleted file mode 100644
index 418569113f..0000000000
--- a/changelog.d/6850.misc
+++ /dev/null
@@ -1 +0,0 @@
-Detect unexpected sender keys on inbound encrypted events and resync device lists.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index bd942d3e1c..4f1859bd57 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.10.0rc1"
+__version__ = "1.10.0rc2"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From ed630ea17c40d328cc0796e35d37287768c7140d Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 6 Feb 2020 13:31:05 +0000
Subject: Reduce amount of logging at INFO level. (#6862)

A lot of the things we log at INFO are now a bit superfluous, so let's
make them DEBUG logs to reduce the amount we log by default.
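
Anyone who still wants these messages can turn the relevant loggers up
to DEBUG in their logging config; for example (an illustration, not a
recommended production setting):

    import logging

    # Re-enable the downgraded state-delta logs for one subsystem only.
    logging.getLogger("synapse.storage.persist_events").setLevel(logging.DEBUG)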

Co-Authored-By: Brendan Abolivier 
Co-authored-by: Brendan Abolivier 
---
 changelog.d/6862.misc                              |  1 +
 synapse/federation/federation_server.py            |  6 +++---
 synapse/federation/transport/server.py             |  2 +-
 synapse/handlers/room.py                           | 10 +++++-----
 synapse/handlers/stats.py                          |  2 +-
 synapse/handlers/sync.py                           |  6 +++---
 synapse/handlers/user_directory.py                 |  4 ++--
 synapse/http/site.py                               |  2 +-
 synapse/push/httppusher.py                         |  2 +-
 synapse/storage/data_stores/main/user_directory.py |  4 ++--
 synapse/storage/persist_events.py                  |  2 +-
 synapse/util/caches/response_cache.py              |  2 +-
 12 files changed, 22 insertions(+), 21 deletions(-)
 create mode 100644 changelog.d/6862.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6862.misc b/changelog.d/6862.misc
new file mode 100644
index 0000000000..83626d2939
--- /dev/null
+++ b/changelog.d/6862.misc
@@ -0,0 +1 @@
+Reduce the amount we log at `INFO` level.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index d92d5e8064..8e3933b6c5 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -573,7 +573,7 @@ class FederationServer(FederationBase):
             origin_host, _ = parse_server_name(origin)
             await self.check_server_matches_acl(origin_host, room_id)
 
-            logger.info(
+            logger.debug(
                 "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                 " limit: %d",
                 earliest_events,
@@ -586,11 +586,11 @@ class FederationServer(FederationBase):
             )
 
             if len(missing_events) < 5:
-                logger.info(
+                logger.debug(
                     "Returning %d events: %r", len(missing_events), missing_events
                 )
             else:
-                logger.info("Returning %d events", len(missing_events))
+                logger.debug("Returning %d events", len(missing_events))
 
             time_now = self._clock.time_msec()
 
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index ae48ba8157..92a9ae2320 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -158,7 +158,7 @@ class Authenticator(object):
             origin, json_request, now, "Incoming request"
         )
 
-        logger.info("Request from %s", origin)
+        logger.debug("Request from %s", origin)
         request.authenticated_entity = origin
 
         # If we get a valid signed request from the other side, its probably
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index b609a65f47..559e3399b8 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -259,7 +259,7 @@ class RoomCreationHandler(BaseHandler):
         for v in ("invite", "events_default"):
             current = int(pl_content.get(v, 0))
             if current < restricted_level:
-                logger.info(
+                logger.debug(
                     "Setting level for %s in %s to %i (was %i)",
                     v,
                     old_room_id,
@@ -269,7 +269,7 @@ class RoomCreationHandler(BaseHandler):
                 pl_content[v] = restricted_level
                 updated = True
             else:
-                logger.info("Not setting level for %s (already %i)", v, current)
+                logger.debug("Not setting level for %s (already %i)", v, current)
 
         if updated:
             try:
@@ -296,7 +296,7 @@ class RoomCreationHandler(BaseHandler):
             EventTypes.Aliases, events_default
         )
 
-        logger.info("Setting correct PLs in new room to %s", new_pl_content)
+        logger.debug("Setting correct PLs in new room to %s", new_pl_content)
         yield self.event_creation_handler.create_and_send_nonmember_event(
             requester,
             {
@@ -782,7 +782,7 @@ class RoomCreationHandler(BaseHandler):
         @defer.inlineCallbacks
         def send(etype, content, **kwargs):
             event = create(etype, content, **kwargs)
-            logger.info("Sending %s in new room", etype)
+            logger.debug("Sending %s in new room", etype)
             yield self.event_creation_handler.create_and_send_nonmember_event(
                 creator, event, ratelimit=False
             )
@@ -796,7 +796,7 @@ class RoomCreationHandler(BaseHandler):
         creation_content.update({"creator": creator_id})
         yield send(etype=EventTypes.Create, content=creation_content)
 
-        logger.info("Sending %s in new room", EventTypes.Member)
+        logger.debug("Sending %s in new room", EventTypes.Member)
         yield self.room_member_handler.update_membership(
             creator,
             creator.user,
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 68e6edace5..d93a276693 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -300,7 +300,7 @@ class StatsHandler(StateDeltasHandler):
                 room_state["guest_access"] = event_content.get("guest_access")
 
         for room_id, state in room_to_state_updates.items():
-            logger.info("Updating room_stats_state for %s: %s", room_id, state)
+            logger.debug("Updating room_stats_state for %s: %s", room_id, state)
             yield self.store.update_room_state(room_id, state)
 
         return room_to_stats_deltas, user_to_stats_deltas
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 5f060241b4..f8d60d32ba 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -968,7 +968,7 @@ class SyncHandler(object):
         # Always use the `now_token` in `SyncResultBuilder`
         now_token = await self.event_sources.get_current_token()
 
-        logger.info(
+        logger.debug(
             "Calculating sync response for %r between %s and %s",
             sync_config.user,
             since_token,
@@ -1498,7 +1498,7 @@ class SyncHandler(object):
         room_entries = []
         invited = []
         for room_id, events in iteritems(mem_change_events_by_room_id):
-            logger.info(
+            logger.debug(
                 "Membership changes in %s: [%s]",
                 room_id,
                 ", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)),
@@ -1892,7 +1892,7 @@ class SyncHandler(object):
 
             if batch.limited and since_token:
                 user_id = sync_result_builder.sync_config.user.to_string()
-                logger.info(
+                logger.debug(
                     "Incremental gappy sync of %s for user %s with %d state events"
                     % (room_id, user_id, len(state))
                 )
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 624f05ab5b..81aa58dc8c 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -149,7 +149,7 @@ class UserDirectoryHandler(StateDeltasHandler):
                     self.pos, room_max_stream_ordering
                 )
 
-                logger.info("Handling %d state deltas", len(deltas))
+                logger.debug("Handling %d state deltas", len(deltas))
                 yield self._handle_deltas(deltas)
 
                 self.pos = max_pos
@@ -195,7 +195,7 @@ class UserDirectoryHandler(StateDeltasHandler):
                         room_id, self.server_name
                     )
                     if not is_in_room:
-                        logger.info("Server left room: %r", room_id)
+                        logger.debug("Server left room: %r", room_id)
                         # Fetch all the users that we marked as being in user
                         # directory due to being in the room and then check if
                         # need to remove those users or not
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 911251c0bc..e092193c9c 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -225,7 +225,7 @@ class SynapseRequest(Request):
             self.start_time, name=servlet_name, method=self.get_method()
         )
 
-        self.site.access_logger.info(
+        self.site.access_logger.debug(
             "%s - %s - Received request: %s %s",
             self.getClientIP(),
             self.site.site_tag,
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index d0879b0490..5bb17d1228 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -398,7 +398,7 @@ class HttpPusher(object):
         Args:
             badge (int): number of unread messages
         """
-        logger.info("Sending updated badge count %d to %s", badge, self.name)
+        logger.debug("Sending updated badge count %d to %s", badge, self.name)
         d = {
             "notification": {
                 "id": "",
diff --git a/synapse/storage/data_stores/main/user_directory.py b/synapse/storage/data_stores/main/user_directory.py
index 90c180ec6d..6b8130bf0f 100644
--- a/synapse/storage/data_stores/main/user_directory.py
+++ b/synapse/storage/data_stores/main/user_directory.py
@@ -183,7 +183,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             )
             return 1
 
-        logger.info(
+        logger.debug(
             "Processing the next %d rooms of %d remaining"
             % (len(rooms_to_work_on), progress["remaining"])
         )
@@ -308,7 +308,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             )
             return 1
 
-        logger.info(
+        logger.debug(
             "Processing the next %d users of %d remaining"
             % (len(users_to_work_on), progress["remaining"])
         )
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index af3fd67ab9..a5370ed527 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -390,7 +390,7 @@ class EventsPersistenceStorage(object):
                                     state_delta_reuse_delta_counter.inc()
                                     break
 
-                        logger.info("Calculating state delta for room %s", room_id)
+                        logger.debug("Calculating state delta for room %s", room_id)
                         with Measure(
                             self._clock, "persist_events.get_new_state_after_events"
                         ):
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py
index 82d3eefe0e..b68f9fe0d4 100644
--- a/synapse/util/caches/response_cache.py
+++ b/synapse/util/caches/response_cache.py
@@ -144,7 +144,7 @@ class ResponseCache(object):
         """
         result = self.get(key)
         if not result:
-            logger.info(
+            logger.debug(
                 "[%s]: no cached result for [%s], calculating new one", self._name, key
             )
             d = run_in_background(callback, *args, **kwargs)
-- 
cgit 1.4.1


From 99fcc96289f673f96f2d180a84df84f6b8a85521 Mon Sep 17 00:00:00 2001
From: PeerD 
Date: Thu, 6 Feb 2020 15:15:29 +0100
Subject: Third party event rules Update (#6781)
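
Previously ThirdPartyEventRules.on_create_room invoked the module
callback but discarded its boolean verdict, so a module could never
actually veto room creation. A sketch of the contract a rules module
is now held to (an illustrative module, not shipped code):

    class ExampleRules:
        def on_create_room(self, requester, config, is_requester_admin):
            # Return True to allow the room, False to deny it; the caller
            # now turns a False return into a 403 error (see the diff below).
            return is_requester_admin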

---
 changelog.d/6781.bugfix             | 1 +
 synapse/events/third_party_rules.py | 7 ++++---
 synapse/handlers/room.py            | 6 +++++-
 3 files changed, 10 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6781.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6781.bugfix b/changelog.d/6781.bugfix
new file mode 100644
index 0000000000..47cd671bff
--- /dev/null
+++ b/changelog.d/6781.bugfix
@@ -0,0 +1 @@
+Fix third party event rules function `on_create_room`'s return value being ignored.
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 86f7e5f8aa..459132d388 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -74,15 +74,16 @@ class ThirdPartyEventRules(object):
             is_requester_admin (bool): If the requester is an admin
 
         Returns:
-            defer.Deferred
+            defer.Deferred[bool]: Whether room creation is allowed or denied.
         """
 
         if self.third_party_rules is None:
-            return
+            return True
 
-        yield self.third_party_rules.on_create_room(
+        ret = yield self.third_party_rules.on_create_room(
             requester, config, is_requester_admin
         )
+        return ret
 
     @defer.inlineCallbacks
     def check_threepid_can_be_invited(self, medium, address, room_id):
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 559e3399b8..ab07edd2fc 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -579,9 +579,13 @@ class RoomCreationHandler(BaseHandler):
 
         # Check whether the third party rules allows/changes the room create
         # request.
-        yield self.third_party_event_rules.on_create_room(
+        event_allowed = yield self.third_party_event_rules.on_create_room(
             requester, config, is_requester_admin=is_requester_admin
         )
+        if not event_allowed:
+            raise SynapseError(
+                403, "You are not permitted to create rooms", Codes.FORBIDDEN
+            )
 
         if not is_requester_admin and not self.spam_checker.user_may_create_room(
             user_id
-- 
cgit 1.4.1


From bce557175bad82889d303b349e6575636c41b702 Mon Sep 17 00:00:00 2001
From: timfi 
Date: Thu, 6 Feb 2020 15:45:01 +0100
Subject: Allow empty federation_certificate_verification_whitelist (#6849)
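
In YAML a key given with no value parses as null, so `config.get(...)`
with a default still returns None when the option is present but blank.
A small illustration of the failure mode being fixed:

    import yaml

    config = yaml.safe_load("federation_certificate_verification_whitelist:\n")
    entries = config.get("federation_certificate_verification_whitelist", [])
    assert entries is None   # the key exists, so the default is never used
    entries = entries or []  # hence the explicit None guard in the fix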

---
 changelog.d/6849.bugfix | 1 +
 synapse/config/tls.py   | 2 ++
 2 files changed, 3 insertions(+)
 create mode 100644 changelog.d/6849.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6849.bugfix b/changelog.d/6849.bugfix
new file mode 100644
index 0000000000..d928a26ec6
--- /dev/null
+++ b/changelog.d/6849.bugfix
@@ -0,0 +1 @@
+Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank.
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 2e9e478a2a..2514b0713d 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -109,6 +109,8 @@ class TlsConfig(Config):
         fed_whitelist_entries = config.get(
             "federation_certificate_verification_whitelist", []
         )
+        if fed_whitelist_entries is None:
+            fed_whitelist_entries = []
 
         # Support globs (*) in whitelist values
         self.federation_certificate_verification_whitelist = []  # type: List[str]
-- 
cgit 1.4.1


From b0c8bdd49dd416ed066b12daee95cf5f4828f03b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 6 Feb 2020 15:50:39 +0000
Subject: pass room version into FederationClient.send_join (#6854)

... which allows us to sanity-check the create event.
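
The sanity check boils down to comparing the version claimed by the
create event in the returned state with the version reported during
make_join (a sketch, with "1" as the implicit default for unversioned
rooms):

    def check_create_event_version(create_event, expected_version: str) -> None:
        claimed = create_event.content.get("room_version", "1")
        if claimed != expected_version:
            # Either the make_join server or the send_join server is lying.
            raise ValueError(
                "Unexpected room version %s in create event" % (claimed,)
            )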
---
 changelog.d/6854.misc                   |  1 +
 synapse/federation/federation_client.py | 60 ++++++++++++++++++---------------
 synapse/handlers/federation.py          |  3 +-
 3 files changed, 34 insertions(+), 30 deletions(-)
 create mode 100644 changelog.d/6854.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6854.misc b/changelog.d/6854.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6854.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 110e42b9ed..5fb4bd414c 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -516,7 +516,7 @@ class FederationClient(FederationBase):
         )
 
     async def send_join(
-        self, destinations: Iterable[str], pdu: EventBase, event_format_version: int
+        self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion
     ) -> Dict[str, Any]:
         """Sends a join event to one of a list of homeservers.
 
@@ -527,7 +527,8 @@ class FederationClient(FederationBase):
             destinations: Candidate homeservers which are probably
                 participating in the room.
             pdu: event to be sent
-            event_format_version: The event format version
+            room_version: the version of the room (according to the server that
+                did the make_join)
 
         Returns:
             a dict with members ``origin`` (a string
@@ -540,58 +541,51 @@ class FederationClient(FederationBase):
             RuntimeError: if no servers were reachable.
         """
 
-        def check_authchain_validity(signed_auth_chain):
-            for e in signed_auth_chain:
-                if e.type == EventTypes.Create:
-                    create_event = e
-                    break
-            else:
-                raise InvalidResponseError("no %s in auth chain" % (EventTypes.Create,))
-
-            # the room version should be sane.
-            room_version = create_event.content.get("room_version", "1")
-            if room_version not in KNOWN_ROOM_VERSIONS:
-                # This shouldn't be possible, because the remote server should have
-                # rejected the join attempt during make_join.
-                raise InvalidResponseError(
-                    "room appears to have unsupported version %s" % (room_version,)
-                )
-
         async def send_request(destination) -> Dict[str, Any]:
             content = await self._do_send_join(destination, pdu)
 
             logger.debug("Got content: %s", content)
 
             state = [
-                event_from_pdu_json(p, event_format_version, outlier=True)
+                event_from_pdu_json(p, room_version.event_format, outlier=True)
                 for p in content.get("state", [])
             ]
 
             auth_chain = [
-                event_from_pdu_json(p, event_format_version, outlier=True)
+                event_from_pdu_json(p, room_version.event_format, outlier=True)
                 for p in content.get("auth_chain", [])
             ]
 
             pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}
 
-            room_version = None
+            create_event = None
             for e in state:
                 if (e.type, e.state_key) == (EventTypes.Create, ""):
-                    room_version = e.content.get(
-                        "room_version", RoomVersions.V1.identifier
-                    )
+                    create_event = e
                     break
 
-            if room_version is None:
+            if create_event is None:
                 # If the state doesn't have a create event then the room is
                 # invalid, and it would fail auth checks anyway.
                 raise SynapseError(400, "No create event in state")
 
+            # the room version should be sane.
+            create_room_version = create_event.content.get(
+                "room_version", RoomVersions.V1.identifier
+            )
+            if create_room_version != room_version.identifier:
+                # either the server that fulfilled the make_join, or the server that is
+                # handling the send_join, is lying.
+                raise InvalidResponseError(
+                    "Unexpected room version %s in create event"
+                    % (create_room_version,)
+                )
+
             valid_pdus = await self._check_sigs_and_hash_and_fetch(
                 destination,
                 list(pdus.values()),
                 outlier=True,
-                room_version=room_version,
+                room_version=room_version.identifier,
             )
 
             valid_pdus_map = {p.event_id: p for p in valid_pdus}
@@ -615,7 +609,17 @@ class FederationClient(FederationBase):
             for s in signed_state:
                 s.internal_metadata = copy.deepcopy(s.internal_metadata)
 
-            check_authchain_validity(signed_auth)
+            # double-check that the same create event has ended up in the auth chain
+            auth_chain_create_events = [
+                e.event_id
+                for e in signed_auth
+                if (e.type, e.state_key) == (EventTypes.Create, "")
+            ]
+            if auth_chain_create_events != [create_event.event_id]:
+                raise InvalidResponseError(
+                    "Unexpected create event(s) in auth chain: %s"
+                    % (auth_chain_create_events,)
+                )
 
             return {
                 "state": signed_state,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index ef3cc264b7..10e8b6ea4c 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1305,9 +1305,8 @@ class FederationHandler(BaseHandler):
             except ValueError:
                 pass
 
-            event_format_version = room_version_obj.event_format
             ret = await self.federation_client.send_join(
-                target_hosts, event, event_format_version
+                target_hosts, event, room_version_obj
             )
 
             origin = ret["origin"]
-- 
cgit 1.4.1


From 928edef9793bf10fa6156a42c4babbfaaaa17f88 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Fri, 31 Jan 2020 16:50:13 +0000
Subject: Pass room_version into `event_from_pdu_json`

It's called from all over the shop, so this one's a bit messy.
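
The shape of the change at each call site is the same: fetch the full
RoomVersion object once and hand it to event_from_pdu_json, rather than
deriving the bare event format first (illustrative, not the exact code):

    # before
    room_version_id = await store.get_room_version_id(room_id)
    format_ver = room_version_to_event_format(room_version_id)
    event = event_from_pdu_json(pdu, format_ver, outlier=False)

    # after
    room_version = await store.get_room_version(room_id)
    event = event_from_pdu_json(pdu, room_version, outlier=False)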
---
 changelog.d/6856.misc                   |  1 +
 synapse/federation/federation_base.py   | 28 ++++++++++++----------
 synapse/federation/federation_client.py | 35 +++++++++++++---------------
 synapse/federation/federation_server.py | 41 +++++++++++----------------------
 tests/handlers/test_federation.py       |  6 +++--
 5 files changed, 51 insertions(+), 60 deletions(-)
 create mode 100644 changelog.d/6856.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6856.misc b/changelog.d/6856.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6856.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 0e22183280..ebe8b8e9fe 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,9 +23,13 @@ from twisted.internet.defer import DeferredList
 
 from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
 from synapse.api.errors import Codes, SynapseError
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
+from synapse.api.room_versions import (
+    KNOWN_ROOM_VERSIONS,
+    EventFormatVersions,
+    RoomVersion,
+)
 from synapse.crypto.event_signing import check_event_content_hash
-from synapse.events import event_type_from_format_version
+from synapse.events import EventBase, event_type_from_format_version
 from synapse.events.utils import prune_event
 from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import (
@@ -33,7 +38,7 @@ from synapse.logging.context import (
     make_deferred_yieldable,
     preserve_fn,
 )
-from synapse.types import get_domain_from_id
+from synapse.types import JsonDict, get_domain_from_id
 from synapse.util import unwrapFirstError
 
 logger = logging.getLogger(__name__)
@@ -342,16 +347,15 @@ def _is_invite_via_3pid(event):
     )
 
 
-def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
-    """Construct a FrozenEvent from an event json received over federation
+def event_from_pdu_json(
+    pdu_json: JsonDict, room_version: RoomVersion, outlier: bool = False
+) -> EventBase:
+    """Construct an EventBase from an event json received over federation
 
     Args:
-        pdu_json (object): pdu as received over federation
-        event_format_version (int): The event format version
-        outlier (bool): True to mark this event as an outlier
-
-    Returns:
-        FrozenEvent
+        pdu_json: pdu as received over federation
+        room_version: The version of the room this event belongs to
+        outlier: True to mark this event as an outlier
 
     Raises:
         SynapseError: if the pdu is missing required fields or is otherwise
@@ -370,7 +374,7 @@ def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
     elif depth > MAX_DEPTH:
         raise SynapseError(400, "Depth too large", Codes.BAD_JSON)
 
-    event = event_type_from_format_version(event_format_version)(pdu_json)
+    event = event_type_from_format_version(room_version.event_format)(pdu_json)
 
     event.internal_metadata.outlier = outlier
 
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 5fb4bd414c..4870e39652 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -49,7 +49,7 @@ from synapse.api.room_versions import (
     RoomVersion,
     RoomVersions,
 )
-from synapse.events import EventBase, builder, room_version_to_event_format
+from synapse.events import EventBase, builder
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.logging.context import make_deferred_yieldable
 from synapse.logging.utils import log_function
@@ -209,18 +209,18 @@ class FederationClient(FederationBase):
 
         logger.debug("backfill transaction_data=%r", transaction_data)
 
-        room_version = await self.store.get_room_version_id(room_id)
-        format_ver = room_version_to_event_format(room_version)
+        room_version = await self.store.get_room_version(room_id)
 
         pdus = [
-            event_from_pdu_json(p, format_ver, outlier=False)
+            event_from_pdu_json(p, room_version, outlier=False)
             for p in transaction_data["pdus"]
         ]
 
         # FIXME: We should handle signature failures more gracefully.
         pdus[:] = await make_deferred_yieldable(
             defer.gatherResults(
-                self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
+                self._check_sigs_and_hashes(room_version.identifier, pdus),
+                consumeErrors=True,
             ).addErrback(unwrapFirstError)
         )
 
@@ -262,8 +262,6 @@ class FederationClient(FederationBase):
 
         pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
 
-        format_ver = room_version.event_format
-
         signed_pdu = None
         for destination in destinations:
             now = self._clock.time_msec()
@@ -284,7 +282,7 @@ class FederationClient(FederationBase):
                 )
 
                 pdu_list = [
-                    event_from_pdu_json(p, format_ver, outlier=outlier)
+                    event_from_pdu_json(p, room_version, outlier=outlier)
                     for p in transaction_data["pdus"]
                 ]
 
@@ -350,15 +348,15 @@ class FederationClient(FederationBase):
     async def get_event_auth(self, destination, room_id, event_id):
         res = await self.transport_layer.get_event_auth(destination, room_id, event_id)
 
-        room_version = await self.store.get_room_version_id(room_id)
-        format_ver = room_version_to_event_format(room_version)
+        room_version = await self.store.get_room_version(room_id)
 
         auth_chain = [
-            event_from_pdu_json(p, format_ver, outlier=True) for p in res["auth_chain"]
+            event_from_pdu_json(p, room_version, outlier=True)
+            for p in res["auth_chain"]
         ]
 
         signed_auth = await self._check_sigs_and_hash_and_fetch(
-            destination, auth_chain, outlier=True, room_version=room_version
+            destination, auth_chain, outlier=True, room_version=room_version.identifier
         )
 
         signed_auth.sort(key=lambda e: e.depth)
@@ -547,12 +545,12 @@ class FederationClient(FederationBase):
             logger.debug("Got content: %s", content)
 
             state = [
-                event_from_pdu_json(p, room_version.event_format, outlier=True)
+                event_from_pdu_json(p, room_version, outlier=True)
                 for p in content.get("state", [])
             ]
 
             auth_chain = [
-                event_from_pdu_json(p, room_version.event_format, outlier=True)
+                event_from_pdu_json(p, room_version, outlier=True)
                 for p in content.get("auth_chain", [])
             ]
 
@@ -677,7 +675,7 @@ class FederationClient(FederationBase):
 
         logger.debug("Got response to send_invite: %s", pdu_dict)
 
-        pdu = event_from_pdu_json(pdu_dict, room_version.event_format)
+        pdu = event_from_pdu_json(pdu_dict, room_version)
 
         # Check signatures are correct.
         pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
@@ -865,15 +863,14 @@ class FederationClient(FederationBase):
                 timeout=timeout,
             )
 
-            room_version = await self.store.get_room_version_id(room_id)
-            format_ver = room_version_to_event_format(room_version)
+            room_version = await self.store.get_room_version(room_id)
 
             events = [
-                event_from_pdu_json(e, format_ver) for e in content.get("events", [])
+                event_from_pdu_json(e, room_version) for e in content.get("events", [])
             ]
 
             signed_events = await self._check_sigs_and_hash_and_fetch(
-                destination, events, outlier=False, room_version=room_version
+                destination, events, outlier=False, room_version=room_version.identifier
             )
         except HttpResponseException as e:
             if not e.code == 400:
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 8e3933b6c5..2489832a11 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -38,7 +38,6 @@ from synapse.api.errors import (
     UnsupportedRoomVersionError,
 )
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
-from synapse.events import room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.federation.persistence import TransactionActions
 from synapse.federation.units import Edu, Transaction
@@ -234,24 +233,17 @@ class FederationServer(FederationBase):
                 continue
 
             try:
-                room_version = await self.store.get_room_version_id(room_id)
+                room_version = await self.store.get_room_version(room_id)
             except NotFoundError:
                 logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                 continue
-
-            try:
-                format_ver = room_version_to_event_format(room_version)
-            except UnsupportedRoomVersionError:
+            except UnsupportedRoomVersionError as e:
                 # this can happen if support for a given room version is withdrawn,
                 # so that we still get events for said room.
-                logger.info(
-                    "Ignoring PDU for room %s with unknown version %s",
-                    room_id,
-                    room_version,
-                )
+                logger.info("Ignoring PDU: %s", e)
                 continue
 
-            event = event_from_pdu_json(p, format_ver)
+            event = event_from_pdu_json(p, room_version)
             pdus_by_room.setdefault(room_id, []).append(event)
 
         pdu_results = {}
@@ -407,9 +399,7 @@ class FederationServer(FederationBase):
                 Codes.UNSUPPORTED_ROOM_VERSION,
             )
 
-        format_ver = room_version.event_format
-
-        pdu = event_from_pdu_json(content, format_ver)
+        pdu = event_from_pdu_json(content, room_version)
         origin_host, _ = parse_server_name(origin)
         await self.check_server_matches_acl(origin_host, pdu.room_id)
         pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
@@ -420,16 +410,15 @@ class FederationServer(FederationBase):
     async def on_send_join_request(self, origin, content, room_id):
         logger.debug("on_send_join_request: content: %s", content)
 
-        room_version = await self.store.get_room_version_id(room_id)
-        format_ver = room_version_to_event_format(room_version)
-        pdu = event_from_pdu_json(content, format_ver)
+        room_version = await self.store.get_room_version(room_id)
+        pdu = event_from_pdu_json(content, room_version)
 
         origin_host, _ = parse_server_name(origin)
         await self.check_server_matches_acl(origin_host, pdu.room_id)
 
         logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
 
-        pdu = await self._check_sigs_and_hash(room_version, pdu)
+        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
 
         res_pdus = await self.handler.on_send_join_request(origin, pdu)
         time_now = self._clock.time_msec()
@@ -451,16 +440,15 @@ class FederationServer(FederationBase):
     async def on_send_leave_request(self, origin, content, room_id):
         logger.debug("on_send_leave_request: content: %s", content)
 
-        room_version = await self.store.get_room_version_id(room_id)
-        format_ver = room_version_to_event_format(room_version)
-        pdu = event_from_pdu_json(content, format_ver)
+        room_version = await self.store.get_room_version(room_id)
+        pdu = event_from_pdu_json(content, room_version)
 
         origin_host, _ = parse_server_name(origin)
         await self.check_server_matches_acl(origin_host, pdu.room_id)
 
         logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
 
-        pdu = await self._check_sigs_and_hash(room_version, pdu)
+        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
 
         await self.handler.on_send_leave_request(origin, pdu)
         return {}
@@ -498,15 +486,14 @@ class FederationServer(FederationBase):
             origin_host, _ = parse_server_name(origin)
             await self.check_server_matches_acl(origin_host, room_id)
 
-            room_version = await self.store.get_room_version_id(room_id)
-            format_ver = room_version_to_event_format(room_version)
+            room_version = await self.store.get_room_version(room_id)
 
             auth_chain = [
-                event_from_pdu_json(e, format_ver) for e in content["auth_chain"]
+                event_from_pdu_json(e, room_version) for e in content["auth_chain"]
             ]
 
             signed_auth = await self._check_sigs_and_hash_and_fetch(
-                origin, auth_chain, outlier=True, room_version=room_version
+                origin, auth_chain, outlier=True, room_version=room_version.identifier
             )
 
             ret = await self.handler.on_query_auth(
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index b4d92cf732..132e35651d 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -99,6 +99,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
         user_id = self.register_user("kermit", "test")
         tok = self.login("kermit", "test")
         room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+        room_version = self.get_success(self.store.get_room_version(room_id))
 
         # pretend that another server has joined
         join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id)
@@ -120,7 +121,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
                 "auth_events": [],
                 "origin_server_ts": self.clock.time_msec(),
             },
-            join_event.format_version,
+            room_version,
         )
 
         with LoggingContext(request="send_rejected"):
@@ -149,6 +150,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
         user_id = self.register_user("kermit", "test")
         tok = self.login("kermit", "test")
         room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+        room_version = self.get_success(self.store.get_room_version(room_id))
 
         # pretend that another server has joined
         join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id)
@@ -171,7 +173,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
                 "auth_events": [],
                 "origin_server_ts": self.clock.time_msec(),
             },
-            join_event.format_version,
+            room_version,
         )
 
         with LoggingContext(request="send_rejected"):
-- 
cgit 1.4.1

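Aside (illustration, not part of any patch): the refactor above, and the helper removal two commits below, hinge on handlers carrying a full RoomVersion object instead of a bare event-format integer. The toy, self-contained sketch below shows that shape; the dataclass, the registry contents, and the body of event_from_pdu_json are simplified stand-ins for Synapse's real synapse.api.room_versions types, not the actual implementation.

    # Toy model (not Synapse's real code): a RoomVersion object carries
    # both the wire identifier and the event format, so callers no longer
    # convert a version string to a format integer by hand.
    from dataclasses import dataclass

    @dataclass(frozen=True)
    class RoomVersion:
        identifier: str    # e.g. "5"; what _check_sigs_and_hash wants
        event_format: int  # selects which event class to construct

    KNOWN_ROOM_VERSIONS = {
        "1": RoomVersion("1", event_format=1),
        "5": RoomVersion("5", event_format=2),
    }

    def event_from_pdu_json(pdu_json, room_version):
        # The real function picks an event class by
        # room_version.event_format; here we just tag the parsed event.
        return {"format": room_version.event_format, **pdu_json}

    room_version = KNOWN_ROOM_VERSIONS["5"]
    event = event_from_pdu_json({"type": "m.room.member"}, room_version)
    assert event["format"] == 2
    assert room_version.identifier == "5"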

From 7765bf398996002ee461904915de9d8bc2ea951a Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Thu, 6 Feb 2020 13:25:24 -0500
Subject: Limit the number of events that can be requested when backfilling
 events (#6864)

Limit the maximum number of events requested when backfilling events.
---
 changelog.d/6864.misc          | 1 +
 synapse/handlers/federation.py | 4 ++++
 2 files changed, 5 insertions(+)
 create mode 100644 changelog.d/6864.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6864.misc b/changelog.d/6864.misc
new file mode 100644
index 0000000000..d24eb68460
--- /dev/null
+++ b/changelog.d/6864.misc
@@ -0,0 +1 @@
+Limit the number of events that can be requested by the backfill federation API to 100.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 10e8b6ea4c..eb20ef4aec 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1788,6 +1788,9 @@ class FederationHandler(BaseHandler):
         if not in_room:
             raise AuthError(403, "Host not in room.")
 
+        # Synapse asks for 100 events per backfill request. Do not allow more.
+        limit = min(limit, 100)
+
         events = yield self.store.get_backfill_events(room_id, pdu_list, limit)
 
         events = yield filter_events_for_server(self.storage, origin, events)
@@ -2168,6 +2171,7 @@ class FederationHandler(BaseHandler):
         if not in_room:
             raise AuthError(403, "Host not in room.")
 
+        # Only allow up to 20 events to be retrieved per request.
         limit = min(limit, 20)
 
         missing_events = await self.store.get_missing_events(
-- 
cgit 1.4.1

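Aside (illustration, not part of the patch): the change above is a one-line clamp; the sketch below restates it as a standalone helper. The constant name and the max(0, ...) guard are additions for the sketch only; the patch itself applies just min(limit, 100).

    # Illustrative clamp for a remote-supplied backfill limit.
    MAX_BACKFILL_EVENTS = 100  # the cap chosen in this commit

    def clamp_backfill_limit(requested_limit):
        # Never trust the remote's number: huge values are capped, and
        # (an extra guard not in the patch) negatives collapse to zero.
        return max(0, min(requested_limit, MAX_BACKFILL_EVENTS))

    assert clamp_backfill_limit(10_000) == 100
    assert clamp_backfill_limit(50) == 50
    assert clamp_backfill_limit(-5) == 0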

From f4884444c36d92659b9d7a2a90d42324ab786873 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 7 Feb 2020 09:26:57 +0000
Subject: remove unused room_version_to_event_format (#6857)

---
 changelog.d/6857.misc      |  1 +
 synapse/events/__init__.py | 24 +-----------------------
 2 files changed, 2 insertions(+), 23 deletions(-)
 create mode 100644 changelog.d/6857.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6857.misc b/changelog.d/6857.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6857.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 92f76703b3..89d41d82b6 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -21,8 +21,7 @@ import six
 
 from unpaddedbase64 import encode_base64
 
-from synapse.api.errors import UnsupportedRoomVersionError
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
+from synapse.api.room_versions import EventFormatVersions
 from synapse.types import JsonDict
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze
@@ -408,27 +407,6 @@ class FrozenEventV3(FrozenEventV2):
         return self._event_id
 
 
-def room_version_to_event_format(room_version):
-    """Converts a room version string to the event format
-
-    Args:
-        room_version (str)
-
-    Returns:
-        int
-
-    Raises:
-        UnsupportedRoomVersionError if the room version is unknown
-    """
-    v = KNOWN_ROOM_VERSIONS.get(room_version)
-
-    if not v:
-        # this can happen if support is withdrawn for a room version
-        raise UnsupportedRoomVersionError()
-
-    return v.event_format
-
-
 def event_type_from_format_version(format_version):
     """Returns the python type to use to construct an Event object for the
     given event format version.
-- 
cgit 1.4.1

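Aside (illustration, not part of the patch): after this removal, the version-to-class dispatch left in synapse/events/__init__.py is event_type_from_format_version, sketched below as a self-contained toy. The class bodies, the registry, and the exception type are placeholders; only the dispatch shape mirrors the real function.

    # Toy version of the dispatch that survives this commit: an event
    # *format* integer (already looked up via a RoomVersion object, as in
    # the sketch after the first commit above) maps straight to a class.
    class FrozenEvent: pass
    class FrozenEventV2: pass
    class FrozenEventV3(FrozenEventV2): pass

    _FORMAT_TO_TYPE = {1: FrozenEvent, 2: FrozenEventV2, 3: FrozenEventV3}

    def event_type_from_format_version(format_version):
        try:
            return _FORMAT_TO_TYPE[format_version]
        except KeyError:
            raise Exception("No event format %r" % (format_version,))

    assert event_type_from_format_version(3) is FrozenEventV3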

From 56ca93ef5941b5dfcda368f373a6bcd80d177acd Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Fri, 7 Feb 2020 11:29:36 +0100
Subject: Admin api to add an email address (#6769)

---
 changelog.d/6769.feature          |  1 +
 docs/admin_api/user_admin_api.rst | 11 +++++++++++
 synapse/handlers/admin.py         |  2 ++
 synapse/handlers/auth.py          |  8 ++++++++
 synapse/rest/admin/users.py       | 39 +++++++++++++++++++++++++++++++++++++++
 tests/rest/admin/test_user.py     | 19 +++++++++++++++++--
 6 files changed, 78 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6769.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6769.feature b/changelog.d/6769.feature
new file mode 100644
index 0000000000..8a60e12907
--- /dev/null
+++ b/changelog.d/6769.feature
@@ -0,0 +1 @@
+Admin API to add or modify threepids of user accounts.
\ No newline at end of file
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 0b3d09d694..eb146095de 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -15,6 +15,16 @@ with a body of:
     {
         "password": "user_password",
         "displayname": "User",
+        "threepids": [
+            {
+                "medium": "email",
+                "address": ""
+            },
+            {
+                "medium": "email",
+                "address": ""
+            }
+        ],
         "avatar_url": "",
         "admin": false,
         "deactivated": false
@@ -23,6 +33,7 @@ with a body of:
 including an ``access_token`` of a server admin.
 
 The parameter ``displayname`` is optional and defaults to ``user_id``.
+The parameter ``threepids`` is optional.
 The parameter ``avatar_url`` is optional.
 The parameter ``admin`` is optional and defaults to 'false'.
 The parameter ``deactivated`` is optional and defaults to 'false'.
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 9205865231..f3c0aeceb6 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -58,8 +58,10 @@ class AdminHandler(BaseHandler):
         ret = await self.store.get_user_by_id(user.to_string())
         if ret:
             profile = await self.store.get_profileinfo(user.localpart)
+            threepids = await self.store.user_get_threepids(user.to_string())
             ret["displayname"] = profile.display_name
             ret["avatar_url"] = profile.avatar_url
+            ret["threepids"] = threepids
         return ret
 
     async def export_user_data(self, user_id, writer):
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 54a71c49d2..48a88d3c2a 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -816,6 +816,14 @@ class AuthHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def add_threepid(self, user_id, medium, address, validated_at):
+        # check if medium has a valid value
+        if medium not in ["email", "msisdn"]:
+            raise SynapseError(
+                code=400,
+                msg=("'%s' is not a valid value for 'medium'" % (medium,)),
+                errcode=Codes.INVALID_PARAM,
+            )
+
         # 'Canonicalise' email addresses down to lower case.
         # We're now moving towards the homeserver being the entity that
         # is responsible for validating threepids used for resetting passwords
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index f1c4434f5c..e75c5f1370 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -136,6 +136,8 @@ class UserRestServletV2(RestServlet):
         self.hs = hs
         self.auth = hs.get_auth()
         self.admin_handler = hs.get_handlers().admin_handler
+        self.store = hs.get_datastore()
+        self.auth_handler = hs.get_auth_handler()
         self.profile_handler = hs.get_profile_handler()
         self.set_password_handler = hs.get_set_password_handler()
         self.deactivate_account_handler = hs.get_deactivate_account_handler()
@@ -163,6 +165,7 @@ class UserRestServletV2(RestServlet):
             raise SynapseError(400, "This endpoint can only be used with local users")
 
         user = await self.admin_handler.get_user(target_user)
+        user_id = target_user.to_string()
 
         if user:  # modify user
             if "displayname" in body:
@@ -170,6 +173,29 @@ class UserRestServletV2(RestServlet):
                     target_user, requester, body["displayname"], True
                 )
 
+            if "threepids" in body:
+                # check for required parameters for each threepid
+                for threepid in body["threepids"]:
+                    assert_params_in_dict(threepid, ["medium", "address"])
+
+                # remove old threepids from user
+                threepids = await self.store.user_get_threepids(user_id)
+                for threepid in threepids:
+                    try:
+                        await self.auth_handler.delete_threepid(
+                            user_id, threepid["medium"], threepid["address"], None
+                        )
+                    except Exception:
+                        logger.exception("Failed to remove threepids")
+                        raise SynapseError(500, "Failed to remove threepids")
+
+                # add new threepids to user
+                current_time = self.hs.get_clock().time_msec()
+                for threepid in body["threepids"]:
+                    await self.auth_handler.add_threepid(
+                        user_id, threepid["medium"], threepid["address"], current_time
+                    )
+
             if "avatar_url" in body:
                 await self.profile_handler.set_avatar_url(
                     target_user, requester, body["avatar_url"], True
@@ -221,6 +247,7 @@ class UserRestServletV2(RestServlet):
             admin = body.get("admin", None)
             user_type = body.get("user_type", None)
             displayname = body.get("displayname", None)
+            threepids = body.get("threepids", None)
 
             if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
                 raise SynapseError(400, "Invalid user type")
@@ -232,6 +259,18 @@ class UserRestServletV2(RestServlet):
                 default_display_name=displayname,
                 user_type=user_type,
             )
+
+            if "threepids" in body:
+                # check for required parameters for each threepid
+                for threepid in body["threepids"]:
+                    assert_params_in_dict(threepid, ["medium", "address"])
+
+                current_time = self.hs.get_clock().time_msec()
+                for threepid in body["threepids"]:
+                    await self.auth_handler.add_threepid(
+                        user_id, threepid["medium"], threepid["address"], current_time
+                    )
+
             if "avatar_url" in body:
                 await self.profile_handler.set_avatar_url(
                     user_id, requester, body["avatar_url"], True
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 8f09f51c61..3b5169b38d 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -407,7 +407,13 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         """
         self.hs.config.registration_shared_secret = None
 
-        body = json.dumps({"password": "abc123", "admin": True})
+        body = json.dumps(
+            {
+                "password": "abc123",
+                "admin": True,
+                "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
+            }
+        )
 
         # Create user
         request, channel = self.make_request(
@@ -421,6 +427,8 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual("@bob:test", channel.json_body["name"])
         self.assertEqual("bob", channel.json_body["displayname"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
 
         # Get user
         request, channel = self.make_request(
@@ -449,7 +457,13 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
 
         # Modify user
-        body = json.dumps({"displayname": "foobar", "deactivated": True})
+        body = json.dumps(
+            {
+                "displayname": "foobar",
+                "deactivated": True,
+                "threepids": [{"medium": "email", "address": "bob2@bob.bob"}],
+            }
+        )
 
         request, channel = self.make_request(
             "PUT",
@@ -463,6 +477,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual("@bob:test", channel.json_body["name"])
         self.assertEqual("foobar", channel.json_body["displayname"])
         self.assertEqual(True, channel.json_body["deactivated"])
+        # the user is deactivated, so the threepid will be deleted
 
         # Get user
         request, channel = self.make_request(
-- 
cgit 1.4.1

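Aside (illustration, not part of the patch): a hypothetical call against the endpoint this commit extends, matching the request the tests above make. The homeserver URL and the token are made up; the endpoint path, the body shape, and the 201-on-create / 200-on-modify behaviour come from the diff and its tests.

    # Hypothetical client for the v2 admin user API with threepids.
    import json
    import urllib.request

    body = {
        "password": "abc123",
        "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
    }
    req = urllib.request.Request(
        "https://homeserver.example/_synapse/admin/v2/users/@bob:test",
        data=json.dumps(body).encode("utf-8"),
        method="PUT",
        headers={
            "Authorization": "Bearer <admin_access_token>",  # placeholder
            "Content-Type": "application/json",
        },
    )
    # urllib.request.urlopen(req) would return 201 when creating the user
    # or 200 when modifying it; existing threepids are removed and the
    # supplied ones added (medium must be "email" or "msisdn").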

From de2d267375069c2d22bceb0d6ef9c6f5a77380e3 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 7 Feb 2020 11:14:19 +0000
Subject: Allow moving group read APIs to workers (#6866)

---
 changelog.d/6866.feature                         |   1 +
 docs/workers.md                                  |   8 +
 synapse/app/client_reader.py                     |   3 +
 synapse/app/federation_reader.py                 |   2 +
 synapse/groups/groups_server.py                  | 377 +++++-----
 synapse/handlers/groups_local.py                 | 270 +++----
 synapse/replication/slave/storage/groups.py      |  14 +-
 synapse/server.py                                |  14 +-
 synapse/storage/data_stores/main/group_server.py | 880 ++++++++++++-----------
 9 files changed, 802 insertions(+), 767 deletions(-)
 create mode 100644 changelog.d/6866.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6866.feature b/changelog.d/6866.feature
new file mode 100644
index 0000000000..256feab6ff
--- /dev/null
+++ b/changelog.d/6866.feature
@@ -0,0 +1 @@
+Add ability to run some group APIs on workers.
diff --git a/docs/workers.md b/docs/workers.md
index 09a9d8a7b8..82442d6a0a 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -177,8 +177,13 @@ endpoints matching the following regular expressions:
     ^/_matrix/federation/v1/event_auth/
     ^/_matrix/federation/v1/exchange_third_party_invite/
     ^/_matrix/federation/v1/send/
+    ^/_matrix/federation/v1/get_groups_publicised$
     ^/_matrix/key/v2/query
 
+Additionally, the following REST endpoints can be handled for GET requests:
+
+    ^/_matrix/federation/v1/groups/
+
 The above endpoints should all be routed to the federation_reader worker by the
 reverse-proxy configuration.
 
@@ -254,10 +259,13 @@ following regular expressions:
     ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
     ^/_matrix/client/versions$
     ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
+    ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
+    ^/_matrix/client/(api/v1|r0|unstable)/get_groups_publicised$
 
 Additionally, the following REST endpoints can be handled for GET requests:
 
     ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$
+    ^/_matrix/client/(api/v1|r0|unstable)/groups/.*$
 
 Additionally, the following REST endpoints can be handled, but all requests must
 be routed to the same instance:
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index ca96da6a4a..7fa91a3b11 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -57,6 +57,7 @@ from synapse.rest.client.v1.room import (
     RoomStateRestServlet,
 )
 from synapse.rest.client.v1.voip import VoipRestServlet
+from synapse.rest.client.v2_alpha import groups
 from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
 from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
@@ -124,6 +125,8 @@ class ClientReaderServer(HomeServer):
                     PushRuleRestServlet(self).register(resource)
                     VersionsRestServlet(self).register(resource)
 
+                    groups.register_servlets(self, resource)
+
                     resources.update({"/_matrix/client": resource})
 
         root_resource = create_resource_tree(resources, NoResource())
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 1f1cea1416..5e17ef1396 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -35,6 +35,7 @@ from synapse.replication.slave.storage.account_data import SlavedAccountDataStor
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.groups import SlavedGroupServerStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
 from synapse.replication.slave.storage.profile import SlavedProfileStore
 from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
@@ -66,6 +67,7 @@ class FederationReaderSlavedStore(
     SlavedEventStore,
     SlavedKeyStore,
     SlavedRegistrationStore,
+    SlavedGroupServerStore,
     RoomStore,
     DirectoryStore,
     SlavedTransactionStore,
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 0ec9be3cb5..c106abae21 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -36,7 +36,7 @@ logger = logging.getLogger(__name__)
 # TODO: Flairs
 
 
-class GroupsServerHandler(object):
+class GroupsServerWorkerHandler(object):
     def __init__(self, hs):
         self.hs = hs
         self.store = hs.get_datastore()
@@ -51,9 +51,6 @@ class GroupsServerHandler(object):
         self.transport_client = hs.get_federation_transport_client()
         self.profile_handler = hs.get_profile_handler()
 
-        # Ensure attestations get renewed
-        hs.get_groups_attestation_renewer()
-
     @defer.inlineCallbacks
     def check_group_is_ours(
         self, group_id, requester_user_id, and_exists=False, and_is_admin=None
@@ -167,6 +164,197 @@ class GroupsServerHandler(object):
             "user": membership_info,
         }
 
+    @defer.inlineCallbacks
+    def get_group_categories(self, group_id, requester_user_id):
+        """Get all categories in a group (as seen by user)
+        """
+        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+        categories = yield self.store.get_group_categories(group_id=group_id)
+        return {"categories": categories}
+
+    @defer.inlineCallbacks
+    def get_group_category(self, group_id, requester_user_id, category_id):
+        """Get a specific category in a group (as seen by user)
+        """
+        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+        res = yield self.store.get_group_category(
+            group_id=group_id, category_id=category_id
+        )
+
+        logger.info("group %s", res)
+
+        return res
+
+    @defer.inlineCallbacks
+    def get_group_roles(self, group_id, requester_user_id):
+        """Get all roles in a group (as seen by user)
+        """
+        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+        roles = yield self.store.get_group_roles(group_id=group_id)
+        return {"roles": roles}
+
+    @defer.inlineCallbacks
+    def get_group_role(self, group_id, requester_user_id, role_id):
+        """Get a specific role in a group (as seen by user)
+        """
+        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+        res = yield self.store.get_group_role(group_id=group_id, role_id=role_id)
+        return res
+
+    @defer.inlineCallbacks
+    def get_group_profile(self, group_id, requester_user_id):
+        """Get the group profile as seen by requester_user_id
+        """
+
+        yield self.check_group_is_ours(group_id, requester_user_id)
+
+        group = yield self.store.get_group(group_id)
+
+        if group:
+            cols = [
+                "name",
+                "short_description",
+                "long_description",
+                "avatar_url",
+                "is_public",
+            ]
+            group_description = {key: group[key] for key in cols}
+            group_description["is_openly_joinable"] = group["join_policy"] == "open"
+
+            return group_description
+        else:
+            raise SynapseError(404, "Unknown group")
+
+    @defer.inlineCallbacks
+    def get_users_in_group(self, group_id, requester_user_id):
+        """Get the users in group as seen by requester_user_id.
+
+        The ordering is arbitrary at the moment
+        """
+
+        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+        is_user_in_group = yield self.store.is_user_in_group(
+            requester_user_id, group_id
+        )
+
+        user_results = yield self.store.get_users_in_group(
+            group_id, include_private=is_user_in_group
+        )
+
+        chunk = []
+        for user_result in user_results:
+            g_user_id = user_result["user_id"]
+            is_public = user_result["is_public"]
+            is_privileged = user_result["is_admin"]
+
+            entry = {"user_id": g_user_id}
+
+            profile = yield self.profile_handler.get_profile_from_cache(g_user_id)
+            entry.update(profile)
+
+            entry["is_public"] = bool(is_public)
+            entry["is_privileged"] = bool(is_privileged)
+
+            if not self.is_mine_id(g_user_id):
+                attestation = yield self.store.get_remote_attestation(
+                    group_id, g_user_id
+                )
+                if not attestation:
+                    continue
+
+                entry["attestation"] = attestation
+            else:
+                entry["attestation"] = self.attestations.create_attestation(
+                    group_id, g_user_id
+                )
+
+            chunk.append(entry)
+
+        # TODO: If admin add lists of users whose attestations have timed out
+
+        return {"chunk": chunk, "total_user_count_estimate": len(user_results)}
+
+    @defer.inlineCallbacks
+    def get_invited_users_in_group(self, group_id, requester_user_id):
+        """Get the users that have been invited to a group as seen by requester_user_id.
+
+        The ordering is arbitrary at the moment
+        """
+
+        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+        is_user_in_group = yield self.store.is_user_in_group(
+            requester_user_id, group_id
+        )
+
+        if not is_user_in_group:
+            raise SynapseError(403, "User not in group")
+
+        invited_users = yield self.store.get_invited_users_in_group(group_id)
+
+        user_profiles = []
+
+        for user_id in invited_users:
+            user_profile = {"user_id": user_id}
+            try:
+                profile = yield self.profile_handler.get_profile_from_cache(user_id)
+                user_profile.update(profile)
+            except Exception as e:
+                logger.warning("Error getting profile for %s: %s", user_id, e)
+            user_profiles.append(user_profile)
+
+        return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
+
+    @defer.inlineCallbacks
+    def get_rooms_in_group(self, group_id, requester_user_id):
+        """Get the rooms in group as seen by requester_user_id
+
+        This returns rooms in order of decreasing number of joined users
+        """
+
+        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+        is_user_in_group = yield self.store.is_user_in_group(
+            requester_user_id, group_id
+        )
+
+        room_results = yield self.store.get_rooms_in_group(
+            group_id, include_private=is_user_in_group
+        )
+
+        chunk = []
+        for room_result in room_results:
+            room_id = room_result["room_id"]
+
+            joined_users = yield self.store.get_users_in_room(room_id)
+            entry = yield self.room_list_handler.generate_room_entry(
+                room_id, len(joined_users), with_alias=False, allow_private=True
+            )
+
+            if not entry:
+                continue
+
+            entry["is_public"] = bool(room_result["is_public"])
+
+            chunk.append(entry)
+
+        chunk.sort(key=lambda e: -e["num_joined_members"])
+
+        return {"chunk": chunk, "total_room_count_estimate": len(room_results)}
+
+
+class GroupsServerHandler(GroupsServerWorkerHandler):
+    def __init__(self, hs):
+        super(GroupsServerHandler, self).__init__(hs)
+
+        # Ensure attestations get renewed
+        hs.get_groups_attestation_renewer()
+
     @defer.inlineCallbacks
     def update_group_summary_room(
         self, group_id, requester_user_id, room_id, category_id, content
@@ -229,27 +417,6 @@ class GroupsServerHandler(object):
 
         return {}
 
-    @defer.inlineCallbacks
-    def get_group_categories(self, group_id, requester_user_id):
-        """Get all categories in a group (as seen by user)
-        """
-        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        categories = yield self.store.get_group_categories(group_id=group_id)
-        return {"categories": categories}
-
-    @defer.inlineCallbacks
-    def get_group_category(self, group_id, requester_user_id, category_id):
-        """Get a specific category in a group (as seen by user)
-        """
-        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        res = yield self.store.get_group_category(
-            group_id=group_id, category_id=category_id
-        )
-
-        return res
-
     @defer.inlineCallbacks
     def update_group_category(self, group_id, requester_user_id, category_id, content):
         """Add/Update a group category
@@ -284,24 +451,6 @@ class GroupsServerHandler(object):
 
         return {}
 
-    @defer.inlineCallbacks
-    def get_group_roles(self, group_id, requester_user_id):
-        """Get all roles in a group (as seen by user)
-        """
-        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        roles = yield self.store.get_group_roles(group_id=group_id)
-        return {"roles": roles}
-
-    @defer.inlineCallbacks
-    def get_group_role(self, group_id, requester_user_id, role_id):
-        """Get a specific role in a group (as seen by user)
-        """
-        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        res = yield self.store.get_group_role(group_id=group_id, role_id=role_id)
-        return res
-
     @defer.inlineCallbacks
     def update_group_role(self, group_id, requester_user_id, role_id, content):
         """Add/update a role in a group
@@ -370,30 +519,6 @@ class GroupsServerHandler(object):
 
         return {}
 
-    @defer.inlineCallbacks
-    def get_group_profile(self, group_id, requester_user_id):
-        """Get the group profile as seen by requester_user_id
-        """
-
-        yield self.check_group_is_ours(group_id, requester_user_id)
-
-        group = yield self.store.get_group(group_id)
-
-        if group:
-            cols = [
-                "name",
-                "short_description",
-                "long_description",
-                "avatar_url",
-                "is_public",
-            ]
-            group_description = {key: group[key] for key in cols}
-            group_description["is_openly_joinable"] = group["join_policy"] == "open"
-
-            return group_description
-        else:
-            raise SynapseError(404, "Unknown group")
-
     @defer.inlineCallbacks
     def update_group_profile(self, group_id, requester_user_id, content):
         """Update the group profile
@@ -412,124 +537,6 @@ class GroupsServerHandler(object):
 
         yield self.store.update_group_profile(group_id, profile)
 
-    @defer.inlineCallbacks
-    def get_users_in_group(self, group_id, requester_user_id):
-        """Get the users in group as seen by requester_user_id.
-
-        The ordering is arbitrary at the moment
-        """
-
-        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        is_user_in_group = yield self.store.is_user_in_group(
-            requester_user_id, group_id
-        )
-
-        user_results = yield self.store.get_users_in_group(
-            group_id, include_private=is_user_in_group
-        )
-
-        chunk = []
-        for user_result in user_results:
-            g_user_id = user_result["user_id"]
-            is_public = user_result["is_public"]
-            is_privileged = user_result["is_admin"]
-
-            entry = {"user_id": g_user_id}
-
-            profile = yield self.profile_handler.get_profile_from_cache(g_user_id)
-            entry.update(profile)
-
-            entry["is_public"] = bool(is_public)
-            entry["is_privileged"] = bool(is_privileged)
-
-            if not self.is_mine_id(g_user_id):
-                attestation = yield self.store.get_remote_attestation(
-                    group_id, g_user_id
-                )
-                if not attestation:
-                    continue
-
-                entry["attestation"] = attestation
-            else:
-                entry["attestation"] = self.attestations.create_attestation(
-                    group_id, g_user_id
-                )
-
-            chunk.append(entry)
-
-        # TODO: If admin add lists of users whose attestations have timed out
-
-        return {"chunk": chunk, "total_user_count_estimate": len(user_results)}
-
-    @defer.inlineCallbacks
-    def get_invited_users_in_group(self, group_id, requester_user_id):
-        """Get the users that have been invited to a group as seen by requester_user_id.
-
-        The ordering is arbitrary at the moment
-        """
-
-        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        is_user_in_group = yield self.store.is_user_in_group(
-            requester_user_id, group_id
-        )
-
-        if not is_user_in_group:
-            raise SynapseError(403, "User not in group")
-
-        invited_users = yield self.store.get_invited_users_in_group(group_id)
-
-        user_profiles = []
-
-        for user_id in invited_users:
-            user_profile = {"user_id": user_id}
-            try:
-                profile = yield self.profile_handler.get_profile_from_cache(user_id)
-                user_profile.update(profile)
-            except Exception as e:
-                logger.warning("Error getting profile for %s: %s", user_id, e)
-            user_profiles.append(user_profile)
-
-        return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
-
-    @defer.inlineCallbacks
-    def get_rooms_in_group(self, group_id, requester_user_id):
-        """Get the rooms in group as seen by requester_user_id
-
-        This returns rooms in order of decreasing number of joined users
-        """
-
-        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        is_user_in_group = yield self.store.is_user_in_group(
-            requester_user_id, group_id
-        )
-
-        room_results = yield self.store.get_rooms_in_group(
-            group_id, include_private=is_user_in_group
-        )
-
-        chunk = []
-        for room_result in room_results:
-            room_id = room_result["room_id"]
-
-            joined_users = yield self.store.get_users_in_room(room_id)
-            entry = yield self.room_list_handler.generate_room_entry(
-                room_id, len(joined_users), with_alias=False, allow_private=True
-            )
-
-            if not entry:
-                continue
-
-            entry["is_public"] = bool(room_result["is_public"])
-
-            chunk.append(entry)
-
-        chunk.sort(key=lambda e: -e["num_joined_members"])
-
-        return {"chunk": chunk, "total_room_count_estimate": len(room_results)}
-
     @defer.inlineCallbacks
     def add_room_to_group(self, group_id, requester_user_id, room_id, content):
         """Add room to group
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 319565510f..ad22415782 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -63,7 +63,7 @@ def _create_rerouter(func_name):
     return f
 
 
-class GroupsLocalHandler(object):
+class GroupsLocalWorkerHandler(object):
     def __init__(self, hs):
         self.hs = hs
         self.store = hs.get_datastore()
@@ -81,40 +81,17 @@ class GroupsLocalHandler(object):
 
         self.profile_handler = hs.get_profile_handler()
 
-        # Ensure attestations get renewed
-        hs.get_groups_attestation_renewer()
-
     # The following functions merely route the query to the local groups server
     # or federation depending on whether the group is local or remote
 
     get_group_profile = _create_rerouter("get_group_profile")
-    update_group_profile = _create_rerouter("update_group_profile")
     get_rooms_in_group = _create_rerouter("get_rooms_in_group")
-
     get_invited_users_in_group = _create_rerouter("get_invited_users_in_group")
-
-    add_room_to_group = _create_rerouter("add_room_to_group")
-    update_room_in_group = _create_rerouter("update_room_in_group")
-    remove_room_from_group = _create_rerouter("remove_room_from_group")
-
-    update_group_summary_room = _create_rerouter("update_group_summary_room")
-    delete_group_summary_room = _create_rerouter("delete_group_summary_room")
-
-    update_group_category = _create_rerouter("update_group_category")
-    delete_group_category = _create_rerouter("delete_group_category")
     get_group_category = _create_rerouter("get_group_category")
     get_group_categories = _create_rerouter("get_group_categories")
-
-    update_group_summary_user = _create_rerouter("update_group_summary_user")
-    delete_group_summary_user = _create_rerouter("delete_group_summary_user")
-
-    update_group_role = _create_rerouter("update_group_role")
-    delete_group_role = _create_rerouter("delete_group_role")
     get_group_role = _create_rerouter("get_group_role")
     get_group_roles = _create_rerouter("get_group_roles")
 
-    set_group_join_policy = _create_rerouter("set_group_join_policy")
-
     @defer.inlineCallbacks
     def get_group_summary(self, group_id, requester_user_id):
         """Get the group summary for a group.
@@ -169,6 +146,144 @@ class GroupsLocalHandler(object):
 
         return res
 
+    @defer.inlineCallbacks
+    def get_users_in_group(self, group_id, requester_user_id):
+        """Get users in a group
+        """
+        if self.is_mine_id(group_id):
+            res = yield self.groups_server_handler.get_users_in_group(
+                group_id, requester_user_id
+            )
+            return res
+
+        group_server_name = get_domain_from_id(group_id)
+
+        try:
+            res = yield self.transport_client.get_users_in_group(
+                get_domain_from_id(group_id), group_id, requester_user_id
+            )
+        except HttpResponseException as e:
+            raise e.to_synapse_error()
+        except RequestSendFailed:
+            raise SynapseError(502, "Failed to contact group server")
+
+        chunk = res["chunk"]
+        valid_entries = []
+        for entry in chunk:
+            g_user_id = entry["user_id"]
+            attestation = entry.pop("attestation", {})
+            try:
+                if get_domain_from_id(g_user_id) != group_server_name:
+                    yield self.attestations.verify_attestation(
+                        attestation,
+                        group_id=group_id,
+                        user_id=g_user_id,
+                        server_name=get_domain_from_id(g_user_id),
+                    )
+                valid_entries.append(entry)
+            except Exception as e:
+                logger.info("Failed to verify user is in group: %s", e)
+
+        res["chunk"] = valid_entries
+
+        return res
+
+    @defer.inlineCallbacks
+    def get_joined_groups(self, user_id):
+        group_ids = yield self.store.get_joined_groups(user_id)
+        return {"groups": group_ids}
+
+    @defer.inlineCallbacks
+    def get_publicised_groups_for_user(self, user_id):
+        if self.hs.is_mine_id(user_id):
+            result = yield self.store.get_publicised_groups_for_user(user_id)
+
+            # Check AS associated groups for this user - this depends on the
+            # RegExps in the AS registration file (under `users`)
+            for app_service in self.store.get_app_services():
+                result.extend(app_service.get_groups_for_user(user_id))
+
+            return {"groups": result}
+        else:
+            try:
+                bulk_result = yield self.transport_client.bulk_get_publicised_groups(
+                    get_domain_from_id(user_id), [user_id]
+                )
+            except HttpResponseException as e:
+                raise e.to_synapse_error()
+            except RequestSendFailed:
+                raise SynapseError(502, "Failed to contact group server")
+
+            result = bulk_result.get("users", {}).get(user_id)
+            # TODO: Verify attestations
+            return {"groups": result}
+
+    @defer.inlineCallbacks
+    def bulk_get_publicised_groups(self, user_ids, proxy=True):
+        destinations = {}
+        local_users = set()
+
+        for user_id in user_ids:
+            if self.hs.is_mine_id(user_id):
+                local_users.add(user_id)
+            else:
+                destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id)
+
+        if not proxy and destinations:
+            raise SynapseError(400, "Some user_ids are not local")
+
+        results = {}
+        failed_results = []
+        for destination, dest_user_ids in iteritems(destinations):
+            try:
+                r = yield self.transport_client.bulk_get_publicised_groups(
+                    destination, list(dest_user_ids)
+                )
+                results.update(r["users"])
+            except Exception:
+                failed_results.extend(dest_user_ids)
+
+        for uid in local_users:
+            results[uid] = yield self.store.get_publicised_groups_for_user(uid)
+
+            # Check AS associated groups for this user - this depends on the
+            # RegExps in the AS registration file (under `users`)
+            for app_service in self.store.get_app_services():
+                results[uid].extend(app_service.get_groups_for_user(uid))
+
+        return {"users": results}
+
+
+class GroupsLocalHandler(GroupsLocalWorkerHandler):
+    def __init__(self, hs):
+        super(GroupsLocalHandler, self).__init__(hs)
+
+        # Ensure attestations get renewed
+        hs.get_groups_attestation_renewer()
+
+    # The following functions merely route the query to the local groups server
+    # or federation depending on whether the group is local or remote
+
+    update_group_profile = _create_rerouter("update_group_profile")
+
+    add_room_to_group = _create_rerouter("add_room_to_group")
+    update_room_in_group = _create_rerouter("update_room_in_group")
+    remove_room_from_group = _create_rerouter("remove_room_from_group")
+
+    update_group_summary_room = _create_rerouter("update_group_summary_room")
+    delete_group_summary_room = _create_rerouter("delete_group_summary_room")
+
+    update_group_category = _create_rerouter("update_group_category")
+    delete_group_category = _create_rerouter("delete_group_category")
+
+    update_group_summary_user = _create_rerouter("update_group_summary_user")
+    delete_group_summary_user = _create_rerouter("delete_group_summary_user")
+
+    update_group_role = _create_rerouter("update_group_role")
+    delete_group_role = _create_rerouter("delete_group_role")
+
+    set_group_join_policy = _create_rerouter("set_group_join_policy")
+
     @defer.inlineCallbacks
     def create_group(self, group_id, user_id, content):
         """Create a group
@@ -219,48 +334,6 @@ class GroupsLocalHandler(object):
 
         return res
 
-    @defer.inlineCallbacks
-    def get_users_in_group(self, group_id, requester_user_id):
-        """Get users in a group
-        """
-        if self.is_mine_id(group_id):
-            res = yield self.groups_server_handler.get_users_in_group(
-                group_id, requester_user_id
-            )
-            return res
-
-        group_server_name = get_domain_from_id(group_id)
-
-        try:
-            res = yield self.transport_client.get_users_in_group(
-                get_domain_from_id(group_id), group_id, requester_user_id
-            )
-        except HttpResponseException as e:
-            raise e.to_synapse_error()
-        except RequestSendFailed:
-            raise SynapseError(502, "Failed to contact group server")
-
-        chunk = res["chunk"]
-        valid_entries = []
-        for entry in chunk:
-            g_user_id = entry["user_id"]
-            attestation = entry.pop("attestation", {})
-            try:
-                if get_domain_from_id(g_user_id) != group_server_name:
-                    yield self.attestations.verify_attestation(
-                        attestation,
-                        group_id=group_id,
-                        user_id=g_user_id,
-                        server_name=get_domain_from_id(g_user_id),
-                    )
-                valid_entries.append(entry)
-            except Exception as e:
-                logger.info("Failed to verify user is in group: %s", e)
-
-        res["chunk"] = valid_entries
-
-        return res
-
     @defer.inlineCallbacks
     def join_group(self, group_id, user_id, content):
         """Request to join a group
@@ -452,68 +525,3 @@ class GroupsLocalHandler(object):
             group_id, user_id, membership="leave"
         )
         self.notifier.on_new_event("groups_key", token, users=[user_id])
-
-    @defer.inlineCallbacks
-    def get_joined_groups(self, user_id):
-        group_ids = yield self.store.get_joined_groups(user_id)
-        return {"groups": group_ids}
-
-    @defer.inlineCallbacks
-    def get_publicised_groups_for_user(self, user_id):
-        if self.hs.is_mine_id(user_id):
-            result = yield self.store.get_publicised_groups_for_user(user_id)
-
-            # Check AS associated groups for this user - this depends on the
-            # RegExps in the AS registration file (under `users`)
-            for app_service in self.store.get_app_services():
-                result.extend(app_service.get_groups_for_user(user_id))
-
-            return {"groups": result}
-        else:
-            try:
-                bulk_result = yield self.transport_client.bulk_get_publicised_groups(
-                    get_domain_from_id(user_id), [user_id]
-                )
-            except HttpResponseException as e:
-                raise e.to_synapse_error()
-            except RequestSendFailed:
-                raise SynapseError(502, "Failed to contact group server")
-
-            result = bulk_result.get("users", {}).get(user_id)
-            # TODO: Verify attestations
-            return {"groups": result}
-
-    @defer.inlineCallbacks
-    def bulk_get_publicised_groups(self, user_ids, proxy=True):
-        destinations = {}
-        local_users = set()
-
-        for user_id in user_ids:
-            if self.hs.is_mine_id(user_id):
-                local_users.add(user_id)
-            else:
-                destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id)
-
-        if not proxy and destinations:
-            raise SynapseError(400, "Some user_ids are not local")
-
-        results = {}
-        failed_results = []
-        for destination, dest_user_ids in iteritems(destinations):
-            try:
-                r = yield self.transport_client.bulk_get_publicised_groups(
-                    destination, list(dest_user_ids)
-                )
-                results.update(r["users"])
-            except Exception:
-                failed_results.extend(dest_user_ids)
-
-        for uid in local_users:
-            results[uid] = yield self.store.get_publicised_groups_for_user(uid)
-
-            # Check AS associated groups for this user - this depends on the
-            # RegExps in the AS registration file (under `users`)
-            for app_service in self.store.get_app_services():
-                results[uid].extend(app_service.get_groups_for_user(uid))
-
-        return {"users": results}
diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py
index 69a4ae42f9..2d4fd08cf5 100644
--- a/synapse/replication/slave/storage/groups.py
+++ b/synapse/replication/slave/storage/groups.py
@@ -13,15 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage import DataStore
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
+from synapse.storage.data_stores.main.group_server import GroupServerWorkerStore
 from synapse.storage.database import Database
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
-from ._base import BaseSlavedStore, __func__
-from ._slaved_id_tracker import SlavedIdTracker
 
-
-class SlavedGroupServerStore(BaseSlavedStore):
+class SlavedGroupServerStore(GroupServerWorkerStore, BaseSlavedStore):
     def __init__(self, database: Database, db_conn, hs):
         super(SlavedGroupServerStore, self).__init__(database, db_conn, hs)
 
@@ -35,9 +34,8 @@ class SlavedGroupServerStore(BaseSlavedStore):
             self._group_updates_id_gen.get_current_token(),
         )
 
-    get_groups_changes_for_user = __func__(DataStore.get_groups_changes_for_user)
-    get_group_stream_token = __func__(DataStore.get_group_stream_token)
-    get_all_groups_for_user = __func__(DataStore.get_all_groups_for_user)
+    def get_group_stream_token(self):
+        return self._group_updates_id_gen.get_current_token()
 
     def stream_positions(self):
         result = super(SlavedGroupServerStore, self).stream_positions()
diff --git a/synapse/server.py b/synapse/server.py
index 7926867b77..fd2f69e928 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -50,7 +50,7 @@ from synapse.federation.send_queue import FederationRemoteSendQueue
 from synapse.federation.sender import FederationSender
 from synapse.federation.transport.client import TransportLayerClient
 from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer
-from synapse.groups.groups_server import GroupsServerHandler
+from synapse.groups.groups_server import GroupsServerHandler, GroupsServerWorkerHandler
 from synapse.handlers import Handlers
 from synapse.handlers.account_validity import AccountValidityHandler
 from synapse.handlers.acme import AcmeHandler
@@ -62,7 +62,7 @@ from synapse.handlers.devicemessage import DeviceMessageHandler
 from synapse.handlers.e2e_keys import E2eKeysHandler
 from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler
 from synapse.handlers.events import EventHandler, EventStreamHandler
-from synapse.handlers.groups_local import GroupsLocalHandler
+from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler
 from synapse.handlers.initial_sync import InitialSyncHandler
 from synapse.handlers.message import EventCreationHandler, MessageHandler
 from synapse.handlers.pagination import PaginationHandler
@@ -460,10 +460,16 @@ class HomeServer(object):
         return UserDirectoryHandler(self)
 
     def build_groups_local_handler(self):
-        return GroupsLocalHandler(self)
+        if self.config.worker_app:
+            return GroupsLocalWorkerHandler(self)
+        else:
+            return GroupsLocalHandler(self)
 
     def build_groups_server_handler(self):
-        return GroupsServerHandler(self)
+        if self.config.worker_app:
+            return GroupsServerWorkerHandler(self)
+        else:
+            return GroupsServerHandler(self)
 
     def build_groups_attestation_signing(self):
         return GroupAttestationSigning(self)
diff --git a/synapse/storage/data_stores/main/group_server.py b/synapse/storage/data_stores/main/group_server.py
index 6acd45e9f3..0963e6c250 100644
--- a/synapse/storage/data_stores/main/group_server.py
+++ b/synapse/storage/data_stores/main/group_server.py
@@ -27,21 +27,7 @@ _DEFAULT_CATEGORY_ID = ""
 _DEFAULT_ROLE_ID = ""
 
 
-class GroupServerStore(SQLBaseStore):
-    def set_group_join_policy(self, group_id, join_policy):
-        """Set the join policy of a group.
-
-        join_policy can be one of:
-         * "invite"
-         * "open"
-        """
-        return self.db.simple_update_one(
-            table="groups",
-            keyvalues={"group_id": group_id},
-            updatevalues={"join_policy": join_policy},
-            desc="set_group_join_policy",
-        )
-
+class GroupServerWorkerStore(SQLBaseStore):
     def get_group(self, group_id):
         return self.db.simple_select_one(
             table="groups",
@@ -157,6 +143,366 @@ class GroupServerStore(SQLBaseStore):
             "get_rooms_for_summary", _get_rooms_for_summary_txn
         )
 
+    @defer.inlineCallbacks
+    def get_group_categories(self, group_id):
+        rows = yield self.db.simple_select_list(
+            table="group_room_categories",
+            keyvalues={"group_id": group_id},
+            retcols=("category_id", "is_public", "profile"),
+            desc="get_group_categories",
+        )
+
+        return {
+            row["category_id"]: {
+                "is_public": row["is_public"],
+                "profile": json.loads(row["profile"]),
+            }
+            for row in rows
+        }
+
+    @defer.inlineCallbacks
+    def get_group_category(self, group_id, category_id):
+        category = yield self.db.simple_select_one(
+            table="group_room_categories",
+            keyvalues={"group_id": group_id, "category_id": category_id},
+            retcols=("is_public", "profile"),
+            desc="get_group_category",
+        )
+
+        category["profile"] = json.loads(category["profile"])
+
+        return category
+
+    @defer.inlineCallbacks
+    def get_group_roles(self, group_id):
+        rows = yield self.db.simple_select_list(
+            table="group_roles",
+            keyvalues={"group_id": group_id},
+            retcols=("role_id", "is_public", "profile"),
+            desc="get_group_roles",
+        )
+
+        return {
+            row["role_id"]: {
+                "is_public": row["is_public"],
+                "profile": json.loads(row["profile"]),
+            }
+            for row in rows
+        }
+
+    @defer.inlineCallbacks
+    def get_group_role(self, group_id, role_id):
+        role = yield self.db.simple_select_one(
+            table="group_roles",
+            keyvalues={"group_id": group_id, "role_id": role_id},
+            retcols=("is_public", "profile"),
+            desc="get_group_role",
+        )
+
+        role["profile"] = json.loads(role["profile"])
+
+        return role
+
+    def get_local_groups_for_room(self, room_id):
+        """Get all of the local group that contain a given room
+        Args:
+            room_id (str): The ID of a room
+        Returns:
+            Deferred[list[str]]: A twisted.Deferred containing a list of group ids
+                containing this room
+        """
+        return self.db.simple_select_onecol(
+            table="group_rooms",
+            keyvalues={"room_id": room_id},
+            retcol="group_id",
+            desc="get_local_groups_for_room",
+        )
+
+    def get_users_for_summary_by_role(self, group_id, include_private=False):
+        """Get the users and roles that should be included in a summary request
+
+        Returns ([users], [roles])
+        """
+
+        def _get_users_for_summary_txn(txn):
+            keyvalues = {"group_id": group_id}
+            if not include_private:
+                keyvalues["is_public"] = True
+
+            sql = """
+                SELECT user_id, is_public, role_id, user_order
+                FROM group_summary_users
+                WHERE group_id = ?
+            """
+
+            if not include_private:
+                sql += " AND is_public = ?"
+                txn.execute(sql, (group_id, True))
+            else:
+                txn.execute(sql, (group_id,))
+
+            users = [
+                {
+                    "user_id": row[0],
+                    "is_public": row[1],
+                    "role_id": row[2] if row[2] != _DEFAULT_ROLE_ID else None,
+                    "order": row[3],
+                }
+                for row in txn
+            ]
+
+            sql = """
+                SELECT role_id, is_public, profile, role_order
+                FROM group_summary_roles
+                INNER JOIN group_roles USING (group_id, role_id)
+                WHERE group_id = ?
+            """
+
+            if not include_private:
+                sql += " AND is_public = ?"
+                txn.execute(sql, (group_id, True))
+            else:
+                txn.execute(sql, (group_id,))
+
+            roles = {
+                row[0]: {
+                    "is_public": row[1],
+                    "profile": json.loads(row[2]),
+                    "order": row[3],
+                }
+                for row in txn
+            }
+
+            return users, roles
+
+        return self.db.runInteraction(
+            "get_users_for_summary_by_role", _get_users_for_summary_txn
+        )
+
+    def is_user_in_group(self, user_id, group_id):
+        return self.db.simple_select_one_onecol(
+            table="group_users",
+            keyvalues={"group_id": group_id, "user_id": user_id},
+            retcol="user_id",
+            allow_none=True,
+            desc="is_user_in_group",
+        ).addCallback(lambda r: bool(r))
+
+    def is_user_admin_in_group(self, group_id, user_id):
+        return self.db.simple_select_one_onecol(
+            table="group_users",
+            keyvalues={"group_id": group_id, "user_id": user_id},
+            retcol="is_admin",
+            allow_none=True,
+            desc="is_user_admin_in_group",
+        )
+
+    def is_user_invited_to_local_group(self, group_id, user_id):
+        """Has the group server invited a user?
+        """
+        return self.db.simple_select_one_onecol(
+            table="group_invites",
+            keyvalues={"group_id": group_id, "user_id": user_id},
+            retcol="user_id",
+            desc="is_user_invited_to_local_group",
+            allow_none=True,
+        )
+
+    def get_users_membership_info_in_group(self, group_id, user_id):
+        """Get a dict describing the membership of a user in a group.
+
+        Example if joined:
+
+            {
+                "membership": "join",
+                "is_public": True,
+                "is_privileged": False,
+            }
+
+        Returns an empty dict if the user is neither joined nor invited
+        """
+
+        def _get_users_membership_in_group_txn(txn):
+            row = self.db.simple_select_one_txn(
+                txn,
+                table="group_users",
+                keyvalues={"group_id": group_id, "user_id": user_id},
+                retcols=("is_admin", "is_public"),
+                allow_none=True,
+            )
+
+            if row:
+                return {
+                    "membership": "join",
+                    "is_public": row["is_public"],
+                    "is_privileged": row["is_admin"],
+                }
+
+            row = self.db.simple_select_one_onecol_txn(
+                txn,
+                table="group_invites",
+                keyvalues={"group_id": group_id, "user_id": user_id},
+                retcol="user_id",
+                allow_none=True,
+            )
+
+            if row:
+                return {"membership": "invite"}
+
+            return {}
+
+        return self.db.runInteraction(
+            "get_users_membership_info_in_group", _get_users_membership_in_group_txn
+        )
+
+    def get_publicised_groups_for_user(self, user_id):
+        """Get all groups a user is publicising
+        """
+        return self.db.simple_select_onecol(
+            table="local_group_membership",
+            keyvalues={"user_id": user_id, "membership": "join", "is_publicised": True},
+            retcol="group_id",
+            desc="get_publicised_groups_for_user",
+        )
+
+    def get_attestations_need_renewals(self, valid_until_ms):
+        """Get all attestations that need to be renewed until givent time
+        """
+
+        def _get_attestations_need_renewals_txn(txn):
+            sql = """
+                SELECT group_id, user_id FROM group_attestations_renewals
+                WHERE valid_until_ms <= ?
+            """
+            txn.execute(sql, (valid_until_ms,))
+            return self.db.cursor_to_dict(txn)
+
+        return self.db.runInteraction(
+            "get_attestations_need_renewals", _get_attestations_need_renewals_txn
+        )
+
+    @defer.inlineCallbacks
+    def get_remote_attestation(self, group_id, user_id):
+        """Get the attestation that proves the remote agrees that the user is
+        in the group.
+        """
+        row = yield self.db.simple_select_one(
+            table="group_attestations_remote",
+            keyvalues={"group_id": group_id, "user_id": user_id},
+            retcols=("valid_until_ms", "attestation_json"),
+            desc="get_remote_attestation",
+            allow_none=True,
+        )
+
+        now = int(self._clock.time_msec())
+        if row and now < row["valid_until_ms"]:
+            return json.loads(row["attestation_json"])
+
+        return None
+
+    def get_joined_groups(self, user_id):
+        return self.db.simple_select_onecol(
+            table="local_group_membership",
+            keyvalues={"user_id": user_id, "membership": "join"},
+            retcol="group_id",
+            desc="get_joined_groups",
+        )
+
+    def get_all_groups_for_user(self, user_id, now_token):
+        def _get_all_groups_for_user_txn(txn):
+            sql = """
+                SELECT group_id, type, membership, u.content
+                FROM local_group_updates AS u
+                INNER JOIN local_group_membership USING (group_id, user_id)
+                WHERE user_id = ? AND membership != 'leave'
+                    AND stream_id <= ?
+            """
+            txn.execute(sql, (user_id, now_token))
+            return [
+                {
+                    "group_id": row[0],
+                    "type": row[1],
+                    "membership": row[2],
+                    "content": json.loads(row[3]),
+                }
+                for row in txn
+            ]
+
+        return self.db.runInteraction(
+            "get_all_groups_for_user", _get_all_groups_for_user_txn
+        )
+
+    def get_groups_changes_for_user(self, user_id, from_token, to_token):
+        from_token = int(from_token)
+        has_changed = self._group_updates_stream_cache.has_entity_changed(
+            user_id, from_token
+        )
+        if not has_changed:
+            return defer.succeed([])
+
+        def _get_groups_changes_for_user_txn(txn):
+            sql = """
+                SELECT group_id, membership, type, u.content
+                FROM local_group_updates AS u
+                INNER JOIN local_group_membership USING (group_id, user_id)
+                WHERE user_id = ? AND ? < stream_id AND stream_id <= ?
+            """
+            txn.execute(sql, (user_id, from_token, to_token))
+            return [
+                {
+                    "group_id": group_id,
+                    "membership": membership,
+                    "type": gtype,
+                    "content": json.loads(content_json),
+                }
+                for group_id, membership, gtype, content_json in txn
+            ]
+
+        return self.db.runInteraction(
+            "get_groups_changes_for_user", _get_groups_changes_for_user_txn
+        )
+
+    def get_all_groups_changes(self, from_token, to_token, limit):
+        from_token = int(from_token)
+        has_changed = self._group_updates_stream_cache.has_any_entity_changed(
+            from_token
+        )
+        if not has_changed:
+            return defer.succeed([])
+
+        def _get_all_groups_changes_txn(txn):
+            sql = """
+                SELECT stream_id, group_id, user_id, type, content
+                FROM local_group_updates
+                WHERE ? < stream_id AND stream_id <= ?
+                LIMIT ?
+            """
+            txn.execute(sql, (from_token, to_token, limit))
+            return [
+                (stream_id, group_id, user_id, gtype, json.loads(content_json))
+                for stream_id, group_id, user_id, gtype, content_json in txn
+            ]
+
+        return self.db.runInteraction(
+            "get_all_groups_changes", _get_all_groups_changes_txn
+        )
+
+
+class GroupServerStore(GroupServerWorkerStore):
+    def set_group_join_policy(self, group_id, join_policy):
+        """Set the join policy of a group.
+
+        join_policy can be one of:
+         * "invite"
+         * "open"
+        """
+        return self.db.simple_update_one(
+            table="groups",
+            keyvalues={"group_id": group_id},
+            updatevalues={"join_policy": join_policy},
+            desc="set_group_join_policy",
+        )
+
     def add_room_to_summary(self, group_id, room_id, category_id, order, is_public):
         return self.db.runInteraction(
             "add_room_to_summary",
@@ -299,36 +645,6 @@ class GroupServerStore(SQLBaseStore):
             desc="remove_room_from_summary",
         )
 
-    @defer.inlineCallbacks
-    def get_group_categories(self, group_id):
-        rows = yield self.db.simple_select_list(
-            table="group_room_categories",
-            keyvalues={"group_id": group_id},
-            retcols=("category_id", "is_public", "profile"),
-            desc="get_group_categories",
-        )
-
-        return {
-            row["category_id"]: {
-                "is_public": row["is_public"],
-                "profile": json.loads(row["profile"]),
-            }
-            for row in rows
-        }
-
-    @defer.inlineCallbacks
-    def get_group_category(self, group_id, category_id):
-        category = yield self.db.simple_select_one(
-            table="group_room_categories",
-            keyvalues={"group_id": group_id, "category_id": category_id},
-            retcols=("is_public", "profile"),
-            desc="get_group_category",
-        )
-
-        category["profile"] = json.loads(category["profile"])
-
-        return category
-
     def upsert_group_category(self, group_id, category_id, profile, is_public):
         """Add/update room category for group
         """
@@ -360,36 +676,6 @@ class GroupServerStore(SQLBaseStore):
             desc="remove_group_category",
         )
 
-    @defer.inlineCallbacks
-    def get_group_roles(self, group_id):
-        rows = yield self.db.simple_select_list(
-            table="group_roles",
-            keyvalues={"group_id": group_id},
-            retcols=("role_id", "is_public", "profile"),
-            desc="get_group_roles",
-        )
-
-        return {
-            row["role_id"]: {
-                "is_public": row["is_public"],
-                "profile": json.loads(row["profile"]),
-            }
-            for row in rows
-        }
-
-    @defer.inlineCallbacks
-    def get_group_role(self, group_id, role_id):
-        role = yield self.db.simple_select_one(
-            table="group_roles",
-            keyvalues={"group_id": group_id, "role_id": role_id},
-            retcols=("is_public", "profile"),
-            desc="get_group_role",
-        )
-
-        role["profile"] = json.loads(role["profile"])
-
-        return role
-
     def upsert_group_role(self, group_id, role_id, profile, is_public):
         """Add/remove user role
         """
@@ -469,251 +755,99 @@ class GroupServerStore(SQLBaseStore):
             if not role_exists:
                 raise SynapseError(400, "Role doesn't exist")
 
-            # TODO: Check role is part of the summary already
-            role_exists = self.db.simple_select_one_onecol_txn(
-                txn,
-                table="group_summary_roles",
-                keyvalues={"group_id": group_id, "role_id": role_id},
-                retcol="group_id",
-                allow_none=True,
-            )
-            if not role_exists:
-                # If not, add it with an order larger than all others
-                txn.execute(
-                    """
-                    INSERT INTO group_summary_roles
-                    (group_id, role_id, role_order)
-                    SELECT ?, ?, COALESCE(MAX(role_order), 0) + 1
-                    FROM group_summary_roles
-                    WHERE group_id = ? AND role_id = ?
-                """,
-                    (group_id, role_id, group_id, role_id),
-                )
-
-        existing = self.db.simple_select_one_txn(
-            txn,
-            table="group_summary_users",
-            keyvalues={"group_id": group_id, "user_id": user_id, "role_id": role_id},
-            retcols=("user_order", "is_public"),
-            allow_none=True,
-        )
-
-        if order is not None:
-            # Shuffle other users orders that come after the given order
-            sql = """
-                UPDATE group_summary_users SET user_order = user_order + 1
-                WHERE group_id = ? AND role_id = ? AND user_order >= ?
-            """
-            txn.execute(sql, (group_id, role_id, order))
-        elif not existing:
-            sql = """
-                SELECT COALESCE(MAX(user_order), 0) + 1 FROM group_summary_users
-                WHERE group_id = ? AND role_id = ?
-            """
-            txn.execute(sql, (group_id, role_id))
-            (order,) = txn.fetchone()
-
-        if existing:
-            to_update = {}
-            if order is not None:
-                to_update["user_order"] = order
-            if is_public is not None:
-                to_update["is_public"] = is_public
-            self.db.simple_update_txn(
-                txn,
-                table="group_summary_users",
-                keyvalues={
-                    "group_id": group_id,
-                    "role_id": role_id,
-                    "user_id": user_id,
-                },
-                values=to_update,
-            )
-        else:
-            if is_public is None:
-                is_public = True
-
-            self.db.simple_insert_txn(
-                txn,
-                table="group_summary_users",
-                values={
-                    "group_id": group_id,
-                    "role_id": role_id,
-                    "user_id": user_id,
-                    "user_order": order,
-                    "is_public": is_public,
-                },
-            )
-
-    def remove_user_from_summary(self, group_id, user_id, role_id):
-        if role_id is None:
-            role_id = _DEFAULT_ROLE_ID
-
-        return self.db.simple_delete(
-            table="group_summary_users",
-            keyvalues={"group_id": group_id, "role_id": role_id, "user_id": user_id},
-            desc="remove_user_from_summary",
-        )
-
-    def get_local_groups_for_room(self, room_id):
-        """Get all of the local group that contain a given room
-        Args:
-            room_id (str): The ID of a room
-        Returns:
-            Deferred[list[str]]: A twisted.Deferred containing a list of group ids
-                containing this room
-        """
-        return self.db.simple_select_onecol(
-            table="group_rooms",
-            keyvalues={"room_id": room_id},
-            retcol="group_id",
-            desc="get_local_groups_for_room",
-        )
-
-    def get_users_for_summary_by_role(self, group_id, include_private=False):
-        """Get the users and roles that should be included in a summary request
-
-        Returns ([users], [roles])
-        """
-
-        def _get_users_for_summary_txn(txn):
-            keyvalues = {"group_id": group_id}
-            if not include_private:
-                keyvalues["is_public"] = True
-
-            sql = """
-                SELECT user_id, is_public, role_id, user_order
-                FROM group_summary_users
-                WHERE group_id = ?
-            """
-
-            if not include_private:
-                sql += " AND is_public = ?"
-                txn.execute(sql, (group_id, True))
-            else:
-                txn.execute(sql, (group_id,))
-
-            users = [
-                {
-                    "user_id": row[0],
-                    "is_public": row[1],
-                    "role_id": row[2] if row[2] != _DEFAULT_ROLE_ID else None,
-                    "order": row[3],
-                }
-                for row in txn
-            ]
-
-            sql = """
-                SELECT role_id, is_public, profile, role_order
-                FROM group_summary_roles
-                INNER JOIN group_roles USING (group_id, role_id)
-                WHERE group_id = ?
-            """
-
-            if not include_private:
-                sql += " AND is_public = ?"
-                txn.execute(sql, (group_id, True))
-            else:
-                txn.execute(sql, (group_id,))
-
-            roles = {
-                row[0]: {
-                    "is_public": row[1],
-                    "profile": json.loads(row[2]),
-                    "order": row[3],
-                }
-                for row in txn
-            }
-
-            return users, roles
-
-        return self.db.runInteraction(
-            "get_users_for_summary_by_role", _get_users_for_summary_txn
-        )
-
-    def is_user_in_group(self, user_id, group_id):
-        return self.db.simple_select_one_onecol(
-            table="group_users",
-            keyvalues={"group_id": group_id, "user_id": user_id},
-            retcol="user_id",
-            allow_none=True,
-            desc="is_user_in_group",
-        ).addCallback(lambda r: bool(r))
-
-    def is_user_admin_in_group(self, group_id, user_id):
-        return self.db.simple_select_one_onecol(
-            table="group_users",
-            keyvalues={"group_id": group_id, "user_id": user_id},
-            retcol="is_admin",
-            allow_none=True,
-            desc="is_user_admin_in_group",
-        )
-
-    def add_group_invite(self, group_id, user_id):
-        """Record that the group server has invited a user
-        """
-        return self.db.simple_insert(
-            table="group_invites",
-            values={"group_id": group_id, "user_id": user_id},
-            desc="add_group_invite",
-        )
+            # TODO: Check role is part of the summary already
+            role_exists = self.db.simple_select_one_onecol_txn(
+                txn,
+                table="group_summary_roles",
+                keyvalues={"group_id": group_id, "role_id": role_id},
+                retcol="group_id",
+                allow_none=True,
+            )
+            if not role_exists:
+                # If not, add it with an order larger than all others
+                txn.execute(
+                    """
+                    INSERT INTO group_summary_roles
+                    (group_id, role_id, role_order)
+                    SELECT ?, ?, COALESCE(MAX(role_order), 0) + 1
+                    FROM group_summary_roles
+                    WHERE group_id = ? AND role_id = ?
+                """,
+                    (group_id, role_id, group_id, role_id),
+                )
 
-    def is_user_invited_to_local_group(self, group_id, user_id):
-        """Has the group server invited a user?
-        """
-        return self.db.simple_select_one_onecol(
-            table="group_invites",
-            keyvalues={"group_id": group_id, "user_id": user_id},
-            retcol="user_id",
-            desc="is_user_invited_to_local_group",
+        existing = self.db.simple_select_one_txn(
+            txn,
+            table="group_summary_users",
+            keyvalues={"group_id": group_id, "user_id": user_id, "role_id": role_id},
+            retcols=("user_order", "is_public"),
             allow_none=True,
         )
 
-    def get_users_membership_info_in_group(self, group_id, user_id):
-        """Get a dict describing the membership of a user in a group.
-
-        Example if joined:
-
-            {
-                "membership": "join",
-                "is_public": True,
-                "is_privileged": False,
-            }
-
-        Returns an empty dict if the user is not join/invite/etc
-        """
+        if order is not None:
+            # Shuffle other users orders that come after the given order
+            sql = """
+                UPDATE group_summary_users SET user_order = user_order + 1
+                WHERE group_id = ? AND role_id = ? AND user_order >= ?
+            """
+            txn.execute(sql, (group_id, role_id, order))
+        elif not existing:
+            sql = """
+                SELECT COALESCE(MAX(user_order), 0) + 1 FROM group_summary_users
+                WHERE group_id = ? AND role_id = ?
+            """
+            txn.execute(sql, (group_id, role_id))
+            (order,) = txn.fetchone()
 
-        def _get_users_membership_in_group_txn(txn):
-            row = self.db.simple_select_one_txn(
+        if existing:
+            to_update = {}
+            if order is not None:
+                to_update["user_order"] = order
+            if is_public is not None:
+                to_update["is_public"] = is_public
+            self.db.simple_update_txn(
                 txn,
-                table="group_users",
-                keyvalues={"group_id": group_id, "user_id": user_id},
-                retcols=("is_admin", "is_public"),
-                allow_none=True,
+                table="group_summary_users",
+                keyvalues={
+                    "group_id": group_id,
+                    "role_id": role_id,
+                    "user_id": user_id,
+                },
+                values=to_update,
             )
+        else:
+            if is_public is None:
+                is_public = True
 
-            if row:
-                return {
-                    "membership": "join",
-                    "is_public": row["is_public"],
-                    "is_privileged": row["is_admin"],
-                }
-
-            row = self.db.simple_select_one_onecol_txn(
+            self.db.simple_insert_txn(
                 txn,
-                table="group_invites",
-                keyvalues={"group_id": group_id, "user_id": user_id},
-                retcol="user_id",
-                allow_none=True,
+                table="group_summary_users",
+                values={
+                    "group_id": group_id,
+                    "role_id": role_id,
+                    "user_id": user_id,
+                    "user_order": order,
+                    "is_public": is_public,
+                },
             )
 
-            if row:
-                return {"membership": "invite"}
+    def remove_user_from_summary(self, group_id, user_id, role_id):
+        if role_id is None:
+            role_id = _DEFAULT_ROLE_ID
 
-            return {}
+        return self.db.simple_delete(
+            table="group_summary_users",
+            keyvalues={"group_id": group_id, "role_id": role_id, "user_id": user_id},
+            desc="remove_user_from_summary",
+        )
 
-        return self.db.runInteraction(
-            "get_users_membership_info_in_group", _get_users_membership_in_group_txn
+    def add_group_invite(self, group_id, user_id):
+        """Record that the group server has invited a user
+        """
+        return self.db.simple_insert(
+            table="group_invites",
+            values={"group_id": group_id, "user_id": user_id},
+            desc="add_group_invite",
         )
 
     def add_user_to_group(
@@ -846,16 +980,6 @@ class GroupServerStore(SQLBaseStore):
             "remove_room_from_group", _remove_room_from_group_txn
         )
 
-    def get_publicised_groups_for_user(self, user_id):
-        """Get all groups a user is publicising
-        """
-        return self.db.simple_select_onecol(
-            table="local_group_membership",
-            keyvalues={"user_id": user_id, "membership": "join", "is_publicised": True},
-            retcol="group_id",
-            desc="get_publicised_groups_for_user",
-        )
-
     def update_group_publicity(self, group_id, user_id, publicise):
         """Update whether the user is publicising their membership of the group
         """
@@ -1000,22 +1124,6 @@ class GroupServerStore(SQLBaseStore):
             desc="update_group_profile",
         )
 
-    def get_attestations_need_renewals(self, valid_until_ms):
-        """Get all attestations that need to be renewed until givent time
-        """
-
-        def _get_attestations_need_renewals_txn(txn):
-            sql = """
-                SELECT group_id, user_id FROM group_attestations_renewals
-                WHERE valid_until_ms <= ?
-            """
-            txn.execute(sql, (valid_until_ms,))
-            return self.db.cursor_to_dict(txn)
-
-        return self.db.runInteraction(
-            "get_attestations_need_renewals", _get_attestations_need_renewals_txn
-        )
-
     def update_attestation_renewal(self, group_id, user_id, attestation):
         """Update an attestation that we have renewed
         """
@@ -1054,112 +1162,6 @@ class GroupServerStore(SQLBaseStore):
             desc="remove_attestation_renewal",
         )
 
-    @defer.inlineCallbacks
-    def get_remote_attestation(self, group_id, user_id):
-        """Get the attestation that proves the remote agrees that the user is
-        in the group.
-        """
-        row = yield self.db.simple_select_one(
-            table="group_attestations_remote",
-            keyvalues={"group_id": group_id, "user_id": user_id},
-            retcols=("valid_until_ms", "attestation_json"),
-            desc="get_remote_attestation",
-            allow_none=True,
-        )
-
-        now = int(self._clock.time_msec())
-        if row and now < row["valid_until_ms"]:
-            return json.loads(row["attestation_json"])
-
-        return None
-
-    def get_joined_groups(self, user_id):
-        return self.db.simple_select_onecol(
-            table="local_group_membership",
-            keyvalues={"user_id": user_id, "membership": "join"},
-            retcol="group_id",
-            desc="get_joined_groups",
-        )
-
-    def get_all_groups_for_user(self, user_id, now_token):
-        def _get_all_groups_for_user_txn(txn):
-            sql = """
-                SELECT group_id, type, membership, u.content
-                FROM local_group_updates AS u
-                INNER JOIN local_group_membership USING (group_id, user_id)
-                WHERE user_id = ? AND membership != 'leave'
-                    AND stream_id <= ?
-            """
-            txn.execute(sql, (user_id, now_token))
-            return [
-                {
-                    "group_id": row[0],
-                    "type": row[1],
-                    "membership": row[2],
-                    "content": json.loads(row[3]),
-                }
-                for row in txn
-            ]
-
-        return self.db.runInteraction(
-            "get_all_groups_for_user", _get_all_groups_for_user_txn
-        )
-
-    def get_groups_changes_for_user(self, user_id, from_token, to_token):
-        from_token = int(from_token)
-        has_changed = self._group_updates_stream_cache.has_entity_changed(
-            user_id, from_token
-        )
-        if not has_changed:
-            return defer.succeed([])
-
-        def _get_groups_changes_for_user_txn(txn):
-            sql = """
-                SELECT group_id, membership, type, u.content
-                FROM local_group_updates AS u
-                INNER JOIN local_group_membership USING (group_id, user_id)
-                WHERE user_id = ? AND ? < stream_id AND stream_id <= ?
-            """
-            txn.execute(sql, (user_id, from_token, to_token))
-            return [
-                {
-                    "group_id": group_id,
-                    "membership": membership,
-                    "type": gtype,
-                    "content": json.loads(content_json),
-                }
-                for group_id, membership, gtype, content_json in txn
-            ]
-
-        return self.db.runInteraction(
-            "get_groups_changes_for_user", _get_groups_changes_for_user_txn
-        )
-
-    def get_all_groups_changes(self, from_token, to_token, limit):
-        from_token = int(from_token)
-        has_changed = self._group_updates_stream_cache.has_any_entity_changed(
-            from_token
-        )
-        if not has_changed:
-            return defer.succeed([])
-
-        def _get_all_groups_changes_txn(txn):
-            sql = """
-                SELECT stream_id, group_id, user_id, type, content
-                FROM local_group_updates
-                WHERE ? < stream_id AND stream_id <= ?
-                LIMIT ?
-            """
-            txn.execute(sql, (from_token, to_token, limit))
-            return [
-                (stream_id, group_id, user_id, gtype, json.loads(content_json))
-                for stream_id, group_id, user_id, gtype, content_json in txn
-            ]
-
-        return self.db.runInteraction(
-            "get_all_groups_changes", _get_all_groups_changes_txn
-        )
-
     def get_group_stream_token(self):
         return self._group_updates_id_gen.get_current_token()
 
-- 
cgit 1.4.1
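
The patch above moves the read-only group queries into a `GroupServerWorkerStore`
base class that `GroupServerStore` then extends, so worker processes can serve
those reads without write access. A minimal sketch of that split; class names
and method bodies are illustrative, not the actual Synapse storage API:

    # Sketch of the worker/master store split; illustrative only.
    class GroupServerWorkerStoreSketch:
        """Read-only queries: safe to serve from any worker process."""

        def get_joined_groups(self, user_id):
            # SELECT-only; allocates no stream IDs, so replicas can answer it.
            raise NotImplementedError

    class GroupServerStoreSketch(GroupServerWorkerStoreSketch):
        """Mutations stay on the master, which owns the ID generators."""

        def add_user_to_group(self, group_id, user_id):
            # INSERTs plus stream-ID allocation must run on a single writer.
            raise NotImplementedError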


From b08b0a22d505b1555f511e3f38935a62930ea25d Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 7 Feb 2020 13:56:38 +0000
Subject: Add typing to synapse.federation.sender (#6871)

---
 changelog.d/6871.misc                              |  1 +
 synapse/federation/federation_server.py            |  7 +-
 synapse/federation/sender/__init__.py              | 99 +++++++++++-----------
 synapse/federation/sender/per_destination_queue.py | 88 +++++++++----------
 synapse/federation/sender/transaction_manager.py   | 16 ++--
 synapse/federation/units.py                        | 23 ++++-
 synapse/server.pyi                                 |  2 +
 tests/handlers/test_typing.py                      |  8 +-
 tox.ini                                            |  1 +
 9 files changed, 138 insertions(+), 107 deletions(-)
 create mode 100644 changelog.d/6871.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6871.misc b/changelog.d/6871.misc
new file mode 100644
index 0000000000..5161af9983
--- /dev/null
+++ b/changelog.d/6871.misc
@@ -0,0 +1 @@
+Add typing to `synapse.federation.sender` and port to async/await.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 2489832a11..a6c966a393 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -294,7 +294,12 @@ class FederationServer(FederationBase):
         async def _process_edu(edu_dict):
             received_edus_counter.inc()
 
-            edu = Edu(**edu_dict)
+            edu = Edu(
+                origin=origin,
+                destination=self.server_name,
+                edu_type=edu_dict["edu_type"],
+                content=edu_dict["content"],
+            )
             await self.registry.on_edu(edu.edu_type, origin, edu.content)
 
         await concurrently_execute(
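
The `federation_server.py` hunk stops splatting the raw EDU dict into the
constructor: once `Edu` becomes an attrs class (see the `units.py` hunk below),
all four fields are required, and taking `origin`/`destination` from the wire
would let a sender forge them. A small sketch of the distinction, with
placeholder server names:

    from synapse.federation.units import Edu  # the attrs class added below

    untrusted = {"edu_type": "m.typing", "content": {"typing": True}}

    # Edu(**untrusted) would raise TypeError (origin/destination missing),
    # and including them in the dict would let the sender choose them.
    edu = Edu(
        origin="remote.example",      # from the authenticated request, not the body
        destination="local.example",  # always our own server name
        edu_type=untrusted["edu_type"],
        content=untrusted["content"],
    )
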
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 36c83c3027..233cb33daf 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Dict, Hashable, Iterable, List, Optional, Set
 
 from six import itervalues
 
@@ -23,6 +24,7 @@ from twisted.internet import defer
 
 import synapse
 import synapse.metrics
+from synapse.events import EventBase
 from synapse.federation.sender.per_destination_queue import PerDestinationQueue
 from synapse.federation.sender.transaction_manager import TransactionManager
 from synapse.federation.units import Edu
@@ -39,6 +41,8 @@ from synapse.metrics import (
     events_processed_counter,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.presence import UserPresenceState
+from synapse.types import ReadReceipt
 from synapse.util.metrics import Measure, measure_func
 
 logger = logging.getLogger(__name__)
@@ -68,7 +72,7 @@ class FederationSender(object):
         self._transaction_manager = TransactionManager(hs)
 
         # map from destination to PerDestinationQueue
-        self._per_destination_queues = {}  # type: dict[str, PerDestinationQueue]
+        self._per_destination_queues = {}  # type: Dict[str, PerDestinationQueue]
 
         LaterGauge(
             "synapse_federation_transaction_queue_pending_destinations",
@@ -84,7 +88,7 @@ class FederationSender(object):
         # Map of user_id -> UserPresenceState for all the pending presence
         # to be sent out by user_id. Entries here get processed and put in
         # pending_presence_by_dest
-        self.pending_presence = {}
+        self.pending_presence = {}  # type: Dict[str, UserPresenceState]
 
         LaterGauge(
             "synapse_federation_transaction_queue_pending_pdus",
@@ -116,20 +120,17 @@ class FederationSender(object):
         # and that there is a pending call to _flush_rrs_for_room in the system.
         self._queues_awaiting_rr_flush_by_room = (
             {}
-        )  # type: dict[str, set[PerDestinationQueue]]
+        )  # type: Dict[str, Set[PerDestinationQueue]]
 
         self._rr_txn_interval_per_room_ms = (
-            1000.0 / hs.get_config().federation_rr_transactions_per_room_per_second
+            1000.0 / hs.config.federation_rr_transactions_per_room_per_second
         )
 
-    def _get_per_destination_queue(self, destination):
+    def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
         """Get or create a PerDestinationQueue for the given destination
 
         Args:
-            destination (str): server_name of remote server
-
-        Returns:
-            PerDestinationQueue
+            destination: server_name of remote server
         """
         queue = self._per_destination_queues.get(destination)
         if not queue:
@@ -137,7 +138,7 @@ class FederationSender(object):
             self._per_destination_queues[destination] = queue
         return queue
 
-    def notify_new_events(self, current_id):
+    def notify_new_events(self, current_id: int) -> None:
         """This gets called when we have some new events we might want to
         send out to other servers.
         """
@@ -151,13 +152,12 @@ class FederationSender(object):
             "process_event_queue_for_federation", self._process_event_queue_loop
         )
 
-    @defer.inlineCallbacks
-    def _process_event_queue_loop(self):
+    async def _process_event_queue_loop(self) -> None:
         try:
             self._is_processing = True
             while True:
-                last_token = yield self.store.get_federation_out_pos("events")
-                next_token, events = yield self.store.get_all_new_events_stream(
+                last_token = await self.store.get_federation_out_pos("events")
+                next_token, events = await self.store.get_all_new_events_stream(
                     last_token, self._last_poked_id, limit=100
                 )
 
@@ -166,8 +166,7 @@ class FederationSender(object):
                 if not events and next_token >= self._last_poked_id:
                     break
 
-                @defer.inlineCallbacks
-                def handle_event(event):
+                async def handle_event(event: EventBase) -> None:
                     # Only send events for this server.
                     send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                     is_mine = self.is_mine_id(event.sender)
@@ -184,7 +183,7 @@ class FederationSender(object):
                         # Otherwise if the last member on a server in a room is
                         # banned then it won't receive the event because it won't
                         # be in the room after the ban.
-                        destinations = yield self.state.get_hosts_in_room_at_events(
+                        destinations = await self.state.get_hosts_in_room_at_events(
                             event.room_id, event_ids=event.prev_event_ids()
                         )
                     except Exception:
@@ -206,17 +205,16 @@ class FederationSender(object):
 
                     self._send_pdu(event, destinations)
 
-                @defer.inlineCallbacks
-                def handle_room_events(events):
+                async def handle_room_events(events: Iterable[EventBase]) -> None:
                     with Measure(self.clock, "handle_room_events"):
                         for event in events:
-                            yield handle_event(event)
+                            await handle_event(event)
 
-                events_by_room = {}
+                events_by_room = {}  # type: Dict[str, List[EventBase]]
                 for event in events:
                     events_by_room.setdefault(event.room_id, []).append(event)
 
-                yield make_deferred_yieldable(
+                await make_deferred_yieldable(
                     defer.gatherResults(
                         [
                             run_in_background(handle_room_events, evs)
@@ -226,11 +224,11 @@ class FederationSender(object):
                     )
                 )
 
-                yield self.store.update_federation_out_pos("events", next_token)
+                await self.store.update_federation_out_pos("events", next_token)
 
                 if events:
                     now = self.clock.time_msec()
-                    ts = yield self.store.get_received_ts(events[-1].event_id)
+                    ts = await self.store.get_received_ts(events[-1].event_id)
 
                     synapse.metrics.event_processing_lag.labels(
                         "federation_sender"
@@ -254,7 +252,7 @@ class FederationSender(object):
         finally:
             self._is_processing = False
 
-    def _send_pdu(self, pdu, destinations):
+    def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None:
         # We loop through all destinations to see whether we already have
         # a transaction in progress. If we do, stick it in the pending_pdus
         # table and we'll get back to it later.
@@ -276,11 +274,11 @@ class FederationSender(object):
             self._get_per_destination_queue(destination).send_pdu(pdu, order)
 
     @defer.inlineCallbacks
-    def send_read_receipt(self, receipt):
+    def send_read_receipt(self, receipt: ReadReceipt):
         """Send a RR to any other servers in the room
 
         Args:
-            receipt (synapse.types.ReadReceipt): receipt to be sent
+            receipt: receipt to be sent
         """
 
         # Some background on the rate-limiting going on here.
@@ -343,7 +341,7 @@ class FederationSender(object):
             else:
                 queue.flush_read_receipts_for_room(room_id)
 
-    def _schedule_rr_flush_for_room(self, room_id, n_domains):
+    def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None:
         # that is going to cause approximately len(domains) transactions, so now back
         # off for that multiplied by RR_TXN_INTERVAL_PER_ROOM
         backoff_ms = self._rr_txn_interval_per_room_ms * n_domains
@@ -352,7 +350,7 @@ class FederationSender(object):
         self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id)
         self._queues_awaiting_rr_flush_by_room[room_id] = set()
 
-    def _flush_rrs_for_room(self, room_id):
+    def _flush_rrs_for_room(self, room_id: str) -> None:
         queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
         logger.debug("Flushing RRs in %s to %s", room_id, queues)
 
@@ -368,14 +366,11 @@ class FederationSender(object):
 
     @preserve_fn  # the caller should not yield on this
     @defer.inlineCallbacks
-    def send_presence(self, states):
+    def send_presence(self, states: List[UserPresenceState]):
         """Send the new presence states to the appropriate destinations.
 
         This actually queues up the presence states ready for sending and
         triggers a background task to process them and send out the transactions.
-
-        Args:
-            states (list(UserPresenceState))
         """
         if not self.hs.config.use_presence:
             # No-op if presence is disabled.
@@ -412,11 +407,10 @@ class FederationSender(object):
         finally:
             self._processing_pending_presence = False
 
-    def send_presence_to_destinations(self, states, destinations):
+    def send_presence_to_destinations(
+        self, states: List[UserPresenceState], destinations: List[str]
+    ) -> None:
         """Send the given presence states to the given destinations.
-
-        Args:
-            states (list[UserPresenceState])
-            destinations (list[str])
         """
 
@@ -431,12 +425,9 @@ class FederationSender(object):
 
     @measure_func("txnqueue._process_presence")
     @defer.inlineCallbacks
-    def _process_presence_inner(self, states):
+    def _process_presence_inner(self, states: List[UserPresenceState]):
         """Given a list of states populate self.pending_presence_by_dest and
         poke to send a new transaction to each destination
-
-        Args:
-            states (list(UserPresenceState))
         """
         hosts_and_states = yield get_interested_remotes(self.store, states, self.state)
 
@@ -446,14 +437,20 @@ class FederationSender(object):
                     continue
                 self._get_per_destination_queue(destination).send_presence(states)
 
-    def build_and_send_edu(self, destination, edu_type, content, key=None):
+    def build_and_send_edu(
+        self,
+        destination: str,
+        edu_type: str,
+        content: dict,
+        key: Optional[Hashable] = None,
+    ):
         """Construct an Edu object, and queue it for sending
 
         Args:
-            destination (str): name of server to send to
-            edu_type (str): type of EDU to send
-            content (dict): content of EDU
-            key (Any|None): clobbering key for this edu
+            destination: name of server to send to
+            edu_type: type of EDU to send
+            content: content of EDU
+            key: clobbering key for this edu
         """
         if destination == self.server_name:
             logger.info("Not sending EDU to ourselves")
@@ -468,12 +465,12 @@ class FederationSender(object):
 
         self.send_edu(edu, key)
 
-    def send_edu(self, edu, key):
+    def send_edu(self, edu: Edu, key: Optional[Hashable]):
         """Queue an EDU for sending
 
         Args:
-            edu (Edu): edu to send
-            key (Any|None): clobbering key for this edu
+            edu: edu to send
+            key: clobbering key for this edu
         """
         queue = self._get_per_destination_queue(edu.destination)
         if key:
@@ -481,7 +478,7 @@ class FederationSender(object):
         else:
             queue.send_edu(edu)
 
-    def send_device_messages(self, destination):
+    def send_device_messages(self, destination: str):
         if destination == self.server_name:
             logger.warning("Not sending device update to ourselves")
             return
@@ -501,5 +498,5 @@ class FederationSender(object):
 
         self._get_per_destination_queue(destination).attempt_new_transaction()
 
-    def get_current_token(self):
+    def get_current_token(self) -> int:
         return 0
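
Note the annotation style used throughout the hunk above: attributes
initialised to empty containers get `# type:` comments rather than inline
annotations. mypy accepts both forms; the comment style presumably kept the
code importable on the older Python versions the project supported at the
time. A self-contained sketch:

    from typing import Dict, Set

    class QueueTracker:
        def __init__(self) -> None:
            # An empty literal gives mypy nothing to infer from; the type
            # comment pins what the container will eventually hold.
            self._per_destination = {}  # type: Dict[str, int]
            self._awaiting_flush = {}  # type: Dict[str, Set[str]]
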
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index 5012aaea35..e13cd20ffa 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -15,11 +15,11 @@
 # limitations under the License.
 import datetime
 import logging
+from typing import Dict, Hashable, Iterable, List, Tuple
 
 from prometheus_client import Counter
 
-from twisted.internet import defer
-
+import synapse.server
 from synapse.api.errors import (
     FederationDeniedError,
     HttpResponseException,
@@ -31,7 +31,7 @@ from synapse.handlers.presence import format_user_presence_state
 from synapse.metrics import sent_transactions_counter
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.presence import UserPresenceState
-from synapse.types import StateMap
+from synapse.types import ReadReceipt
 from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
 
 # This is defined in the Matrix spec and enforced by the receiver.
@@ -56,13 +56,18 @@ class PerDestinationQueue(object):
     Manages the per-destination transmission queues.
 
     Args:
-        hs (synapse.HomeServer):
-        transaction_sender (TransactionManager):
-        destination (str): the server_name of the destination that we are managing
+        hs
+        transaction_manager
+        destination: the server_name of the destination that we are managing
             transmission for.
     """
 
-    def __init__(self, hs, transaction_manager, destination):
+    def __init__(
+        self,
+        hs: "synapse.server.HomeServer",
+        transaction_manager: "synapse.federation.sender.TransactionManager",
+        destination: str,
+    ):
         self._server_name = hs.hostname
         self._clock = hs.get_clock()
         self._store = hs.get_datastore()
@@ -72,20 +77,20 @@ class PerDestinationQueue(object):
         self.transmission_loop_running = False
 
         # a list of tuples of (pending pdu, order)
-        self._pending_pdus = []  # type: list[tuple[EventBase, int]]
-        self._pending_edus = []  # type: list[Edu]
+        self._pending_pdus = []  # type: List[Tuple[EventBase, int]]
+        self._pending_edus = []  # type: List[Edu]
 
         # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
         # based on their key (e.g. typing events by room_id)
         # Map of (edu_type, key) -> Edu
-        self._pending_edus_keyed = {}  # type: StateMap[Edu]
+        self._pending_edus_keyed = {}  # type: Dict[Tuple[str, Hashable], Edu]
 
         # Map of user_id -> UserPresenceState of pending presence to be sent to this
         # destination
-        self._pending_presence = {}  # type: dict[str, UserPresenceState]
+        self._pending_presence = {}  # type: Dict[str, UserPresenceState]
 
         # room_id -> receipt_type -> user_id -> receipt_dict
-        self._pending_rrs = {}
+        self._pending_rrs = {}  # type: Dict[str, Dict[str, Dict[str, dict]]]
         self._rrs_pending_flush = False
 
         # stream_id of last successfully sent to-device message.
@@ -95,50 +100,50 @@ class PerDestinationQueue(object):
         # stream_id of last successfully sent device list update.
         self._last_device_list_stream_id = 0
 
-    def __str__(self):
+    def __str__(self) -> str:
         return "PerDestinationQueue[%s]" % self._destination
 
-    def pending_pdu_count(self):
+    def pending_pdu_count(self) -> int:
         return len(self._pending_pdus)
 
-    def pending_edu_count(self):
+    def pending_edu_count(self) -> int:
         return (
             len(self._pending_edus)
             + len(self._pending_presence)
             + len(self._pending_edus_keyed)
         )
 
-    def send_pdu(self, pdu, order):
+    def send_pdu(self, pdu: EventBase, order: int) -> None:
         """Add a PDU to the queue, and start the transmission loop if neccessary
 
         Args:
-            pdu (EventBase): pdu to send
-            order (int):
+            pdu: pdu to send
+            order
         """
         self._pending_pdus.append((pdu, order))
         self.attempt_new_transaction()
 
-    def send_presence(self, states):
+    def send_presence(self, states: Iterable[UserPresenceState]) -> None:
         """Add presence updates to the queue. Start the transmission loop if neccessary.
 
         Args:
-            states (iterable[UserPresenceState]): presence to send
+            states: presence to send
         """
         self._pending_presence.update({state.user_id: state for state in states})
         self.attempt_new_transaction()
 
-    def queue_read_receipt(self, receipt):
+    def queue_read_receipt(self, receipt: ReadReceipt) -> None:
         """Add a RR to the list to be sent. Doesn't start the transmission loop yet
         (see flush_read_receipts_for_room)
 
         Args:
-            receipt (synapse.api.receipt_info.ReceiptInfo): receipt to be queued
+            receipt: receipt to be queued
         """
         self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
             receipt.receipt_type, {}
         )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}
 
-    def flush_read_receipts_for_room(self, room_id):
+    def flush_read_receipts_for_room(self, room_id: str) -> None:
         # if we don't have any read-receipts for this room, it may be that we've already
         # sent them out, so we don't need to flush.
         if room_id not in self._pending_rrs:
@@ -146,15 +151,15 @@ class PerDestinationQueue(object):
         self._rrs_pending_flush = True
         self.attempt_new_transaction()
 
-    def send_keyed_edu(self, edu, key):
+    def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
         self._pending_edus_keyed[(edu.edu_type, key)] = edu
         self.attempt_new_transaction()
 
-    def send_edu(self, edu):
+    def send_edu(self, edu: Edu) -> None:
         self._pending_edus.append(edu)
         self.attempt_new_transaction()
 
-    def attempt_new_transaction(self):
+    def attempt_new_transaction(self) -> None:
         """Try to start a new transaction to this destination
 
         If there is already a transaction in progress to this destination,
@@ -177,23 +182,22 @@ class PerDestinationQueue(object):
             self._transaction_transmission_loop,
         )
 
-    @defer.inlineCallbacks
-    def _transaction_transmission_loop(self):
-        pending_pdus = []
+    async def _transaction_transmission_loop(self) -> None:
+        pending_pdus = []  # type: List[Tuple[EventBase, int]]
         try:
             self.transmission_loop_running = True
 
             # This will throw if we wouldn't retry. We do this here so we fail
             # quickly, but we will later check this again in the http client,
             # hence why we throw the result away.
-            yield get_retry_limiter(self._destination, self._clock, self._store)
+            await get_retry_limiter(self._destination, self._clock, self._store)
 
             pending_pdus = []
             while True:
                 # We have to keep 2 free slots for presence and rr_edus
                 limit = MAX_EDUS_PER_TRANSACTION - 2
 
-                device_update_edus, dev_list_id = yield self._get_device_update_edus(
+                device_update_edus, dev_list_id = await self._get_device_update_edus(
                     limit
                 )
 
@@ -202,7 +206,7 @@ class PerDestinationQueue(object):
                 (
                     to_device_edus,
                     device_stream_id,
-                ) = yield self._get_to_device_message_edus(limit)
+                ) = await self._get_to_device_message_edus(limit)
 
                 pending_edus = device_update_edus + to_device_edus
 
@@ -269,7 +273,7 @@ class PerDestinationQueue(object):
 
                 # END CRITICAL SECTION
 
-                success = yield self._transaction_manager.send_new_transaction(
+                success = await self._transaction_manager.send_new_transaction(
                     self._destination, pending_pdus, pending_edus
                 )
                 if success:
@@ -280,7 +284,7 @@ class PerDestinationQueue(object):
                     # Remove the acknowledged device messages from the database
                     # Only bother if we actually sent some device messages
                     if to_device_edus:
-                        yield self._store.delete_device_msgs_for_remote(
+                        await self._store.delete_device_msgs_for_remote(
                             self._destination, device_stream_id
                         )
 
@@ -289,7 +293,7 @@ class PerDestinationQueue(object):
                         logger.info(
                             "Marking as sent %r %r", self._destination, dev_list_id
                         )
-                        yield self._store.mark_as_sent_devices_by_remote(
+                        await self._store.mark_as_sent_devices_by_remote(
                             self._destination, dev_list_id
                         )
 
@@ -334,7 +338,7 @@ class PerDestinationQueue(object):
             # We want to be *very* sure we clear this after we stop processing
             self.transmission_loop_running = False
 
-    def _get_rr_edus(self, force_flush):
+    def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]:
         if not self._pending_rrs:
             return
         if not force_flush and not self._rrs_pending_flush:
@@ -351,17 +355,16 @@ class PerDestinationQueue(object):
         self._rrs_pending_flush = False
         yield edu
 
-    def _pop_pending_edus(self, limit):
+    def _pop_pending_edus(self, limit: int) -> List[Edu]:
         pending_edus = self._pending_edus
         pending_edus, self._pending_edus = pending_edus[:limit], pending_edus[limit:]
         return pending_edus
 
-    @defer.inlineCallbacks
-    def _get_device_update_edus(self, limit):
+    async def _get_device_update_edus(self, limit: int) -> Tuple[List[Edu], int]:
         last_device_list = self._last_device_list_stream_id
 
         # Retrieve list of new device updates to send to the destination
-        now_stream_id, results = yield self._store.get_device_updates_by_remote(
+        now_stream_id, results = await self._store.get_device_updates_by_remote(
             self._destination, last_device_list, limit=limit
         )
         edus = [
@@ -378,11 +381,10 @@ class PerDestinationQueue(object):
 
         return (edus, now_stream_id)
 
-    @defer.inlineCallbacks
-    def _get_to_device_message_edus(self, limit):
+    async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]:
         last_device_stream_id = self._last_device_stream_id
         to_device_stream_id = self._store.get_to_device_stream_token()
-        contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
+        contents, stream_id = await self._store.get_new_device_msgs_for_remote(
             self._destination, last_device_stream_id, to_device_stream_id, limit
         )
         edus = [
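
The `Dict[Tuple[str, Hashable], Edu]` annotation on `_pending_edus_keyed`
documents the clobbering behaviour: a newer EDU with the same
`(edu_type, key)` silently replaces the pending one, so per-room typing
notifications, for example, collapse to the latest state before transmission.
A runnable sketch of the idea (plain dicts stand in for `Edu`):

    from typing import Dict, Hashable, Tuple

    pending_keyed = {}  # type: Dict[Tuple[str, Hashable], dict]

    def send_keyed_edu_sketch(edu_type: str, key: Hashable, edu: dict) -> None:
        # Later EDUs for the same (type, key) overwrite earlier ones.
        pending_keyed[(edu_type, key)] = edu

    send_keyed_edu_sketch("m.typing", "!room:example", {"typing": True})
    send_keyed_edu_sketch("m.typing", "!room:example", {"typing": False})
    assert len(pending_keyed) == 1  # only the latest state remains queued
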
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index 5fed626d5b..3c2a02a3b3 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -13,14 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import List
 
 from canonicaljson import json
 
-from twisted.internet import defer
-
+import synapse.server
 from synapse.api.errors import HttpResponseException
+from synapse.events import EventBase
 from synapse.federation.persistence import TransactionActions
-from synapse.federation.units import Transaction
+from synapse.federation.units import Edu, Transaction
 from synapse.logging.opentracing import (
     extract_text_map,
     set_tag,
@@ -39,7 +40,7 @@ class TransactionManager(object):
     shared between PerDestinationQueue objects
     """
 
-    def __init__(self, hs):
+    def __init__(self, hs: "synapse.server.HomeServer"):
         self._server_name = hs.hostname
         self.clock = hs.get_clock()  # nb must be called this for @measure_func
         self._store = hs.get_datastore()
@@ -50,8 +51,9 @@ class TransactionManager(object):
         self._next_txn_id = int(self.clock.time_msec())
 
     @measure_func("_send_new_transaction")
-    @defer.inlineCallbacks
-    def send_new_transaction(self, destination, pending_pdus, pending_edus):
+    async def send_new_transaction(
+        self, destination: str, pending_pdus: List[EventBase], pending_edus: List[Edu]
+    ):
 
         # Make a transaction-sending opentracing span. This span follows on from
         # all the edus in that transaction. This needs to be done since there is
@@ -127,7 +129,7 @@ class TransactionManager(object):
                 return data
 
             try:
-                response = yield self._transport_layer.send_transaction(
+                response = await self._transport_layer.send_transaction(
                     transaction, json_data_cb
                 )
                 code = 200
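
`send_new_transaction` is one of several methods this patch ports from
`@defer.inlineCallbacks` to native coroutines. The conversion is mechanical,
sketched here with placeholder names; Twisted `Deferred`s implement
`__await__`, so each `yield` becomes an `await`:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def send_old(transport, transaction):
        # Old style: generator-based coroutine driven by Twisted.
        response = yield transport.send_transaction(transaction)
        return response

    async def send_new(transport, transaction):
        # New style: native coroutine awaiting the same Deferred.
        response = await transport.send_transaction(transaction)
        return response
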
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index b4d743cde7..6b32e0dcbf 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -19,11 +19,15 @@ server protocol.
 
 import logging
 
+import attr
+
+from synapse.types import JsonDict
 from synapse.util.jsonobject import JsonEncodedObject
 
 logger = logging.getLogger(__name__)
 
 
+@attr.s(slots=True)
 class Edu(JsonEncodedObject):
     """ An Edu represents a piece of data sent from one homeserver to another.
 
@@ -32,11 +36,24 @@ class Edu(JsonEncodedObject):
     internal ID or previous references graph.
     """
 
-    valid_keys = ["origin", "destination", "edu_type", "content"]
+    edu_type = attr.ib(type=str)
+    content = attr.ib(type=dict)
+    origin = attr.ib(type=str)
+    destination = attr.ib(type=str)
 
-    required_keys = ["edu_type"]
+    def get_dict(self) -> JsonDict:
+        return {
+            "edu_type": self.edu_type,
+            "content": self.content,
+        }
 
-    internal_keys = ["origin", "destination"]
+    def get_internal_dict(self) -> JsonDict:
+        return {
+            "edu_type": self.edu_type,
+            "content": self.content,
+            "origin": self.origin,
+            "destination": self.destination,
+        }
 
     def get_context(self):
         return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}")
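
With `Edu` now an attrs class, the serialisation split is explicit: `get_dict`
yields only the fields that go over the wire inside a transaction, while
`get_internal_dict` adds the routing metadata. A quick sketch of the behaviour
the hunk above describes:

    from synapse.federation.units import Edu

    edu = Edu(
        edu_type="m.presence",
        content={"push": []},
        origin="hs1.example",
        destination="hs2.example",
    )
    assert edu.get_dict() == {"edu_type": "m.presence", "content": {"push": []}}
    # get_internal_dict() additionally carries origin/destination for
    # local bookkeeping; they are stripped from the on-the-wire form.
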
diff --git a/synapse/server.pyi b/synapse/server.pyi
index 90347ac23e..40eabfe5d9 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -107,3 +107,5 @@ class HomeServer(object):
         self,
     ) -> synapse.replication.tcp.client.ReplicationClientHandler:
         pass
+    def is_mine_id(self, domain_id: str) -> bool:
+        pass
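
The `server.pyi` addition is needed because the newly typed sender module
calls `hs.is_mine_id(...)`, and mypy resolves `HomeServer` through the stub:
only members declared in the `.pyi` are visible to the checker, regardless of
what the runtime class implements. In miniature:

    # Stub-style sketch: mypy sees exactly what the .pyi declares, so a
    # typed caller of hs.is_mine_id(...) needs this declaration to exist.
    class HomeServer:
        def is_mine_id(self, domain_id: str) -> bool: ...
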
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 68b9847bd2..2767b0497a 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -111,7 +111,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             retry_timings_res
         )
 
-        self.datastore.get_device_updates_by_remote.return_value = (0, [])
+        self.datastore.get_device_updates_by_remote.return_value = defer.succeed(
+            (0, [])
+        )
 
         def get_received_txn_response(*args):
             return defer.succeed(None)
@@ -144,7 +146,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         self.datastore.get_current_state_deltas.return_value = (0, None)
 
         self.datastore.get_to_device_stream_token = lambda: 0
-        self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0)
+        self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: defer.succeed(
+            ([], 0)
+        )
         self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None
         self.datastore.set_received_txn_response = lambda *args, **kwargs: defer.succeed(
             None
diff --git a/tox.ini b/tox.ini
index ef22368cf1..f8229eba88 100644
--- a/tox.ini
+++ b/tox.ini
@@ -179,6 +179,7 @@ extras = all
 commands = mypy \
             synapse/api \
             synapse/config/ \
+            synapse/federation/sender \
             synapse/federation/transport \
             synapse/handlers/sync.py \
             synapse/handlers/ui_auth \
-- 
cgit 1.4.1


From 799001f2c0b31d72b95a252a3808da25987e1ed3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 7 Feb 2020 15:30:04 +0000
Subject: Add a `make_event_from_dict` method (#6858)

... and use it in places where it's trivial to do so.

This will make it easier to pass room versions into the FrozenEvent
constructors.
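
For illustration, a minimal sketch of the new helper in use; the event
fields are placeholder values, in the style of the updated tests:

```python
from synapse.api.room_versions import RoomVersions
from synapse.events import make_event_from_dict

# Build a frozen event of the right type for a V1 room from a plain dict;
# room_version defaults to RoomVersions.V1 when omitted.
event = make_event_from_dict(
    {
        "room_id": "!a:b",
        "event_id": "$a:b",
        "type": "m.room.message",
        "sender": "@user:b",
        "content": {"body": "hello"},
    },
    RoomVersions.V1,
)
```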
---
 changelog.d/6858.misc                          |  1 +
 synapse/events/__init__.py                     | 16 ++++++++++++++--
 synapse/events/builder.py                      | 10 +++-------
 synapse/federation/federation_base.py          |  5 ++---
 tests/api/test_filtering.py                    |  4 ++--
 tests/crypto/test_event_signing.py             |  6 +++---
 tests/events/test_utils.py                     |  9 +++++----
 tests/federation/test_federation_server.py     |  4 ++--
 tests/replication/slave/storage/test_events.py | 12 ++++++++----
 tests/state/test_v2.py                         |  4 ++--
 tests/test_event_auth.py                       | 10 +++++-----
 tests/test_federation.py                       |  6 +++---
 tests/test_state.py                            |  4 ++--
 13 files changed, 52 insertions(+), 39 deletions(-)
 create mode 100644 changelog.d/6858.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6858.misc b/changelog.d/6858.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6858.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 89d41d82b6..a842661a90 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -16,12 +16,13 @@
 
 import os
 from distutils.util import strtobool
+from typing import Optional, Type
 
 import six
 
 from unpaddedbase64 import encode_base64
 
-from synapse.api.room_versions import EventFormatVersions
+from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
 from synapse.types import JsonDict
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze
@@ -407,7 +408,7 @@ class FrozenEventV3(FrozenEventV2):
         return self._event_id
 
 
-def event_type_from_format_version(format_version):
+def event_type_from_format_version(format_version: int) -> Type[EventBase]:
     """Returns the python type to use to construct an Event object for the
     given event format version.
 
@@ -427,3 +428,14 @@ def event_type_from_format_version(format_version):
         return FrozenEventV3
     else:
         raise Exception("No event format %r" % (format_version,))
+
+
+def make_event_from_dict(
+    event_dict: JsonDict,
+    room_version: RoomVersion = RoomVersions.V1,
+    internal_metadata_dict: JsonDict = {},
+    rejected_reason: Optional[str] = None,
+) -> EventBase:
+    """Construct an EventBase from the given event dict"""
+    event_type = event_type_from_format_version(room_version.event_format)
+    return event_type(event_dict, internal_metadata_dict, rejected_reason)
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index 8d63ad6dc3..a0c4a40c27 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -28,11 +28,7 @@ from synapse.api.room_versions import (
     RoomVersion,
 )
 from synapse.crypto.event_signing import add_hashes_and_signatures
-from synapse.events import (
-    EventBase,
-    _EventInternalMetadata,
-    event_type_from_format_version,
-)
+from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
 from synapse.types import EventID, JsonDict
 from synapse.util import Clock
 from synapse.util.stringutils import random_string
@@ -256,8 +252,8 @@ def create_local_event_from_event_dict(
     event_dict.setdefault("signatures", {})
 
     add_hashes_and_signatures(room_version, event_dict, hostname, signing_key)
-    return event_type_from_format_version(format_version)(
-        event_dict, internal_metadata_dict=internal_metadata_dict
+    return make_event_from_dict(
+        event_dict, room_version, internal_metadata_dict=internal_metadata_dict
     )
 
 
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index ebe8b8e9fe..eea64c1c9f 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -29,7 +29,7 @@ from synapse.api.room_versions import (
     RoomVersion,
 )
 from synapse.crypto.event_signing import check_event_content_hash
-from synapse.events import EventBase, event_type_from_format_version
+from synapse.events import EventBase, make_event_from_dict
 from synapse.events.utils import prune_event
 from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import (
@@ -374,8 +374,7 @@ def event_from_pdu_json(
     elif depth > MAX_DEPTH:
         raise SynapseError(400, "Depth too large", Codes.BAD_JSON)
 
-    event = event_type_from_format_version(room_version.event_format)(pdu_json)
-
+    event = make_event_from_dict(pdu_json, room_version)
     event.internal_metadata.outlier = outlier
 
     return event
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index 63d8633582..4e67503cf0 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -25,7 +25,7 @@ from twisted.internet import defer
 from synapse.api.constants import EventContentFields
 from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
 
 from tests import unittest
 from tests.utils import DeferredMockCallable, MockHttpResource, setup_test_homeserver
@@ -38,7 +38,7 @@ def MockEvent(**kwargs):
         kwargs["event_id"] = "fake_event_id"
     if "type" not in kwargs:
         kwargs["type"] = "fake_type"
-    return FrozenEvent(kwargs)
+    return make_event_from_dict(kwargs)
 
 
 class FilteringTestCase(unittest.TestCase):
diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py
index 6143a50ab2..62f639a18d 100644
--- a/tests/crypto/test_event_signing.py
+++ b/tests/crypto/test_event_signing.py
@@ -19,7 +19,7 @@ from unpaddedbase64 import decode_base64
 
 from synapse.api.room_versions import RoomVersions
 from synapse.crypto.event_signing import add_hashes_and_signatures
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
 
 from tests import unittest
 
@@ -54,7 +54,7 @@ class EventSigningTestCase(unittest.TestCase):
             RoomVersions.V1, event_dict, HOSTNAME, self.signing_key
         )
 
-        event = FrozenEvent(event_dict)
+        event = make_event_from_dict(event_dict)
 
         self.assertTrue(hasattr(event, "hashes"))
         self.assertIn("sha256", event.hashes)
@@ -88,7 +88,7 @@ class EventSigningTestCase(unittest.TestCase):
             RoomVersions.V1, event_dict, HOSTNAME, self.signing_key
         )
 
-        event = FrozenEvent(event_dict)
+        event = make_event_from_dict(event_dict)
 
         self.assertTrue(hasattr(event, "hashes"))
         self.assertIn("sha256", event.hashes)
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 2b13980dfd..45d55b9e94 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -13,8 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
 from synapse.events.utils import (
     copy_power_levels_contents,
     prune_event,
@@ -30,7 +29,7 @@ def MockEvent(**kwargs):
         kwargs["event_id"] = "fake_event_id"
     if "type" not in kwargs:
         kwargs["type"] = "fake_type"
-    return FrozenEvent(kwargs)
+    return make_event_from_dict(kwargs)
 
 
 class PruneEventTestCase(unittest.TestCase):
@@ -38,7 +37,9 @@ class PruneEventTestCase(unittest.TestCase):
     `matchdict` when it is redacted. """
 
     def run_test(self, evdict, matchdict):
-        self.assertEquals(prune_event(FrozenEvent(evdict)).get_dict(), matchdict)
+        self.assertEquals(
+            prune_event(make_event_from_dict(evdict)).get_dict(), matchdict
+        )
 
     def test_minimal(self):
         self.run_test(
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index 1ec8c40901..e7d8699040 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 import logging
 
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
 from synapse.federation.federation_server import server_matches_acl_event
 from synapse.rest import admin
 from synapse.rest.client.v1 import login, room
@@ -105,7 +105,7 @@ class StateQueryTests(unittest.FederatingHomeserverTestCase):
 
 
 def _create_acl_event(content):
-    return FrozenEvent(
+    return make_event_from_dict(
         {
             "room_id": "!a:b",
             "event_id": "$a:b",
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index b1b037006d..d31210fbe4 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -15,7 +15,7 @@ import logging
 
 from canonicaljson import encode_canonical_json
 
-from synapse.events import FrozenEvent, _EventInternalMetadata
+from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_dict
 from synapse.events.snapshot import EventContext
 from synapse.handlers.room import RoomEventSource
 from synapse.replication.slave.storage.events import SlavedEventStore
@@ -90,7 +90,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
         msg_dict["content"] = {}
         msg_dict["unsigned"]["redacted_by"] = redaction.event_id
         msg_dict["unsigned"]["redacted_because"] = redaction
-        redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict())
+        redacted = make_event_from_dict(
+            msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict()
+        )
         self.check("get_event", [msg.event_id], redacted)
 
     def test_backfilled_redactions(self):
@@ -110,7 +112,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
         msg_dict["content"] = {}
         msg_dict["unsigned"]["redacted_by"] = redaction.event_id
         msg_dict["unsigned"]["redacted_because"] = redaction
-        redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict())
+        redacted = make_event_from_dict(
+            msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict()
+        )
         self.check("get_event", [msg.event_id], redacted)
 
     def test_invites(self):
@@ -345,7 +349,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
         if redacts is not None:
             event_dict["redacts"] = redacts
 
-        event = FrozenEvent(event_dict, internal_metadata_dict=internal)
+        event = make_event_from_dict(event_dict, internal_metadata_dict=internal)
 
         self.event_id += 1
 
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 0f341d3ac3..5bafad9f19 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -22,7 +22,7 @@ import attr
 from synapse.api.constants import EventTypes, JoinRules, Membership
 from synapse.api.room_versions import RoomVersions
 from synapse.event_auth import auth_types_for_event
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
 from synapse.state.v2 import lexicographical_topological_sort, resolve_events_with_store
 from synapse.types import EventID
 
@@ -89,7 +89,7 @@ class FakeEvent(object):
         if self.state_key is not None:
             event_dict["state_key"] = self.state_key
 
-        return FrozenEvent(event_dict)
+        return make_event_from_dict(event_dict)
 
 
 # All graphs start with this set of events
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index ca20b085a2..bfa5d6f510 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -18,7 +18,7 @@ import unittest
 from synapse import event_auth
 from synapse.api.errors import AuthError
 from synapse.api.room_versions import RoomVersions
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
 
 
 class EventAuthTestCase(unittest.TestCase):
@@ -94,7 +94,7 @@ TEST_ROOM_ID = "!test:room"
 
 
 def _create_event(user_id):
-    return FrozenEvent(
+    return make_event_from_dict(
         {
             "room_id": TEST_ROOM_ID,
             "event_id": _get_event_id(),
@@ -106,7 +106,7 @@ def _create_event(user_id):
 
 
 def _join_event(user_id):
-    return FrozenEvent(
+    return make_event_from_dict(
         {
             "room_id": TEST_ROOM_ID,
             "event_id": _get_event_id(),
@@ -119,7 +119,7 @@ def _join_event(user_id):
 
 
 def _power_levels_event(sender, content):
-    return FrozenEvent(
+    return make_event_from_dict(
         {
             "room_id": TEST_ROOM_ID,
             "event_id": _get_event_id(),
@@ -132,7 +132,7 @@ def _power_levels_event(sender, content):
 
 
 def _random_state_event(sender):
-    return FrozenEvent(
+    return make_event_from_dict(
         {
             "room_id": TEST_ROOM_ID,
             "event_id": _get_event_id(),
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 68684460c6..9b5cf562f3 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -2,7 +2,7 @@ from mock import Mock
 
 from twisted.internet.defer import ensureDeferred, maybeDeferred, succeed
 
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
 from synapse.logging.context import LoggingContext
 from synapse.types import Requester, UserID
 from synapse.util import Clock
@@ -43,7 +43,7 @@ class MessageAcceptTests(unittest.TestCase):
             )
         )[0]
 
-        join_event = FrozenEvent(
+        join_event = make_event_from_dict(
             {
                 "room_id": self.room_id,
                 "sender": "@baduser:test.serv",
@@ -105,7 +105,7 @@ class MessageAcceptTests(unittest.TestCase):
         )[0]
 
         # Now lie about an event
-        lying_event = FrozenEvent(
+        lying_event = make_event_from_dict(
             {
                 "room_id": self.room_id,
                 "sender": "@baduser:test.serv",
diff --git a/tests/test_state.py b/tests/test_state.py
index 1e4449fa1c..d1578fe581 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -20,7 +20,7 @@ from twisted.internet import defer
 from synapse.api.auth import Auth
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.room_versions import RoomVersions
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
 from synapse.events.snapshot import EventContext
 from synapse.state import StateHandler, StateResolutionHandler
 
@@ -66,7 +66,7 @@ def create_event(
 
     d.update(kwargs)
 
-    event = FrozenEvent(d)
+    event = make_event_from_dict(d)
 
     return event
 
-- 
cgit 1.4.1


From e1d858984d71b6edf56e1024f1475224bfa49054 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 7 Feb 2020 15:30:26 +0000
Subject: Remove unused `get_room_stats_state` method. (#6869)

---
 changelog.d/6869.misc                     |  1 +
 synapse/storage/data_stores/main/stats.py | 25 -------------------------
 2 files changed, 1 insertion(+), 25 deletions(-)
 create mode 100644 changelog.d/6869.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6869.misc b/changelog.d/6869.misc
new file mode 100644
index 0000000000..14f88f9bb7
--- /dev/null
+++ b/changelog.d/6869.misc
@@ -0,0 +1 @@
+Remove unused `get_room_stats_state` method.
diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py
index 7af1495e47..380c1ec7da 100644
--- a/synapse/storage/data_stores/main/stats.py
+++ b/synapse/storage/data_stores/main/stats.py
@@ -271,31 +271,6 @@ class StatsStore(StateDeltasStore):
 
         return slice_list
 
-    def get_room_stats_state(self, room_id):
-        """
-        Returns the current room_stats_state for a room.
-
-        Args:
-            room_id (str): The ID of the room to return state for.
-
-        Returns (dict):
-            Dictionary containing these keys:
-                "name", "topic", "canonical_alias", "avatar", "join_rules",
-                "history_visibility"
-        """
-        return self.db.simple_select_one(
-            "room_stats_state",
-            {"room_id": room_id},
-            retcols=(
-                "name",
-                "topic",
-                "canonical_alias",
-                "avatar",
-                "join_rules",
-                "history_visibility",
-            ),
-        )
-
     @cached()
     def get_earliest_token_for_stats(self, stats_type, id):
         """
-- 
cgit 1.4.1


From 21db35f77e4718cfe6d6b292baada9dd02ef8280 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 7 Feb 2020 15:45:39 +0000
Subject: Add support for putting fed user query API on workers (#6873)

---
 changelog.d/6873.feature                |  1 +
 docs/workers.md                         |  1 +
 synapse/app/federation_reader.py        |  2 ++
 synapse/federation/federation_server.py |  7 +++++--
 synapse/handlers/device.py              | 35 +++++++++++++++------------------
 5 files changed, 25 insertions(+), 21 deletions(-)
 create mode 100644 changelog.d/6873.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6873.feature b/changelog.d/6873.feature
new file mode 100644
index 0000000000..bbedf8f7f0
--- /dev/null
+++ b/changelog.d/6873.feature
@@ -0,0 +1 @@
+Add ability to route federation user device queries to workers.
diff --git a/docs/workers.md b/docs/workers.md
index 82442d6a0a..6f7ec58780 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -176,6 +176,7 @@ endpoints matching the following regular expressions:
     ^/_matrix/federation/v1/query_auth/
     ^/_matrix/federation/v1/event_auth/
     ^/_matrix/federation/v1/exchange_third_party_invite/
+    ^/_matrix/federation/v1/user/devices/
     ^/_matrix/federation/v1/send/
     ^/_matrix/federation/v1/get_groups_publicised$
     ^/_matrix/key/v2/query
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 5e17ef1396..d055d11b23 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -33,6 +33,7 @@ from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.groups import SlavedGroupServerStore
@@ -68,6 +69,7 @@ class FederationReaderSlavedStore(
     SlavedKeyStore,
     SlavedRegistrationStore,
     SlavedGroupServerStore,
+    SlavedDeviceStore,
     RoomStore,
     DirectoryStore,
     SlavedTransactionStore,
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index a6c966a393..7f9da49326 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -81,6 +81,8 @@ class FederationServer(FederationBase):
         self.handler = hs.get_handlers().federation_handler
         self.state = hs.get_state_handler()
 
+        self.device_handler = hs.get_device_handler()
+
         self._server_linearizer = Linearizer("fed_server")
         self._transaction_linearizer = Linearizer("fed_txn_handler")
 
@@ -523,8 +525,9 @@ class FederationServer(FederationBase):
     def on_query_client_keys(self, origin, content):
         return self.on_query_request("client_keys", content)
 
-    def on_query_user_devices(self, origin, user_id):
-        return self.on_query_request("user_devices", user_id)
+    async def on_query_user_devices(self, origin: str, user_id: str):
+        keys = await self.device_handler.on_federation_query_user_devices(user_id)
+        return 200, keys
 
     @trace
     async def on_claim_client_keys(self, origin, content):
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index a9bd431486..6d8e48ed39 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -225,6 +225,22 @@ class DeviceWorkerHandler(BaseHandler):
 
         return result
 
+    @defer.inlineCallbacks
+    def on_federation_query_user_devices(self, user_id):
+        stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
+        master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master")
+        self_signing_key = yield self.store.get_e2e_cross_signing_key(
+            user_id, "self_signing"
+        )
+
+        return {
+            "user_id": user_id,
+            "stream_id": stream_id,
+            "devices": devices,
+            "master_key": master_key,
+            "self_signing_key": self_signing_key,
+        }
+
 
 class DeviceHandler(DeviceWorkerHandler):
     def __init__(self, hs):
@@ -239,9 +255,6 @@ class DeviceHandler(DeviceWorkerHandler):
         federation_registry.register_edu_handler(
             "m.device_list_update", self.device_list_updater.incoming_device_list_update
         )
-        federation_registry.register_query_handler(
-            "user_devices", self.on_federation_query_user_devices
-        )
 
         hs.get_distributor().observe("user_left_room", self.user_left_room)
 
@@ -456,22 +469,6 @@ class DeviceHandler(DeviceWorkerHandler):
 
         self.notifier.on_new_event("device_list_key", position, users=[from_user_id])
 
-    @defer.inlineCallbacks
-    def on_federation_query_user_devices(self, user_id):
-        stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
-        master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master")
-        self_signing_key = yield self.store.get_e2e_cross_signing_key(
-            user_id, "self_signing"
-        )
-
-        return {
-            "user_id": user_id,
-            "stream_id": stream_id,
-            "devices": devices,
-            "master_key": master_key,
-            "self_signing_key": self_signing_key,
-        }
-
     @defer.inlineCallbacks
     def user_left_room(self, user, room_id):
         user_id = user.to_string()
-- 
cgit 1.4.1


From fe73f0d533dcdcf11f069d89ebbff2ce88d16bb3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 10 Feb 2020 00:41:20 +0000
Subject: Update setuptools for python 3.5 tests (#6880)

Workaround for jaraco/zipp#40
---
 .buildkite/scripts/test_old_deps.sh | 18 ++++++++++++++++++
 changelog.d/6880.misc               |  1 +
 2 files changed, 19 insertions(+)
 create mode 100755 .buildkite/scripts/test_old_deps.sh
 create mode 100644 changelog.d/6880.misc

(limited to 'changelog.d')

diff --git a/.buildkite/scripts/test_old_deps.sh b/.buildkite/scripts/test_old_deps.sh
new file mode 100755
index 0000000000..dfd71b2511
--- /dev/null
+++ b/.buildkite/scripts/test_old_deps.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# this script is run by buildkite in a plain `xenial` container; it installs the
+# minimal requirements for tox and hands over to the py35-old tox environment.
+
+set -ex
+
+apt-get update
+apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev
+
+# workaround for https://github.com/jaraco/zipp/issues/40
+python3.5 -m pip install 'setuptools>=34.4.0'
+
+python3.5 -m pip install tox
+
+export LANG="C.UTF-8"
+
+exec tox -e py35-old,combine
diff --git a/changelog.d/6880.misc b/changelog.d/6880.misc
new file mode 100644
index 0000000000..8344a6ed1e
--- /dev/null
+++ b/changelog.d/6880.misc
@@ -0,0 +1 @@
+Fix continuous integration failures with old versions of `pip`, which were introduced by a release of the `zipp` library.
-- 
cgit 1.4.1


From 8e64c5a24c26a733c0cfd3e997ea4079ae457096 Mon Sep 17 00:00:00 2001
From: Matthew Hodgson 
Date: Mon, 10 Feb 2020 09:36:23 +0000
Subject: filter out m.room.aliases from the CS API until a better solution is
 specced (#6878)

We're in the middle of properly mitigating spam caused by malicious aliases being added to a room. However, until this work fully lands, we temporarily filter out all m.room.aliases events from /sync and /messages on the CS API, to remove abusive aliases. This is considered acceptable as m.room.aliases events were never a reliable record of the given alias->id mapping and were purely informational, and in their current state do more harm than good.
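
For reference, the shape of an event this affects (a sketch; the field
values are placeholders):

```python
# An m.room.aliases state event of this shape is now dropped from /sync
# and /messages responses to clients; all values are placeholders.
aliases_event = {
    "type": "m.room.aliases",
    "state_key": "example.org",
    "room_id": "!somewhere:example.org",
    "sender": "@alice:example.org",
    "content": {"aliases": ["#somewhere:example.org"]},
}
```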
---
 changelog.d/6878.feature | 1 +
 synapse/visibility.py    | 7 +++++++
 2 files changed, 8 insertions(+)
 create mode 100644 changelog.d/6878.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6878.feature b/changelog.d/6878.feature
new file mode 100644
index 0000000000..af3e958a43
--- /dev/null
+++ b/changelog.d/6878.feature
@@ -0,0 +1 @@
+Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced.
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 100dc47a8a..d0abd8f04f 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -122,6 +122,13 @@ def filter_events_for_client(
         if not event.is_state() and event.sender in ignore_list:
             return None
 
+        # Until MSC2261 has landed we can't redact malicious alias events, so for
+        # now we temporarily filter out m.room.aliases entirely to mitigate
+        # abuse, while we spec a better solution to advertising aliases
+        # on rooms.
+        if event.type == EventTypes.Aliases:
+            return None
+
         # Don't try to apply the room's retention policy if the event is a state event, as
         # MSC1763 states that retention is only considered for non-state events.
         if apply_retention_policies and not event.is_state():
-- 
cgit 1.4.1


From 3de57e706209d98a331265e6d5a51bfd24939a3b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Mon, 10 Feb 2020 09:56:42 +0000
Subject: 1.10.0rc3

---
 CHANGES.md               | 15 +++++++++++++++
 changelog.d/6878.feature |  1 -
 changelog.d/6880.misc    |  1 -
 synapse/__init__.py      |  2 +-
 4 files changed, 16 insertions(+), 3 deletions(-)
 delete mode 100644 changelog.d/6878.feature
 delete mode 100644 changelog.d/6880.misc

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index c2aa735908..4a81a04627 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,18 @@
+Synapse 1.10.0rc3 (2020-02-10)
+==============================
+
+Features
+--------
+
+- Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced. ([\#6878](https://github.com/matrix-org/synapse/issues/6878))
+
+
+Internal Changes
+----------------
+
+- Fix continuous integration failures with old versions of `pip`, which were introduced by a release of the `zipp` library. ([\#6880](https://github.com/matrix-org/synapse/issues/6880))
+
+
 Synapse 1.10.0rc2 (2020-02-06)
 ==============================
 
diff --git a/changelog.d/6878.feature b/changelog.d/6878.feature
deleted file mode 100644
index af3e958a43..0000000000
--- a/changelog.d/6878.feature
+++ /dev/null
@@ -1 +0,0 @@
-Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced.
diff --git a/changelog.d/6880.misc b/changelog.d/6880.misc
deleted file mode 100644
index 8344a6ed1e..0000000000
--- a/changelog.d/6880.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix continuous integration failures with old versions of `pip`, which were introduced by a release of the `zipp` library.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 4f1859bd57..36c0cf557a 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.10.0rc2"
+__version__ = "1.10.0rc3"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 3a3118f4ecb631dec3cc44a928a3666b734f5dcb Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Mon, 10 Feb 2020 11:47:18 -0500
Subject: Add an additional test to the SyTest blacklist for worker mode.
 (#6883)

---
 .buildkite/worker-blacklist | 2 ++
 changelog.d/6883.misc       | 1 +
 2 files changed, 3 insertions(+)
 create mode 100644 changelog.d/6883.misc

(limited to 'changelog.d')

diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist
index 158ab79154..094b6c94da 100644
--- a/.buildkite/worker-blacklist
+++ b/.buildkite/worker-blacklist
@@ -39,3 +39,5 @@ Server correctly handles incoming m.device_list_update
 
 # this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
 Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state
+
+Can get rooms/{roomId}/members at a given point
diff --git a/changelog.d/6883.misc b/changelog.d/6883.misc
new file mode 100644
index 0000000000..e0837d7987
--- /dev/null
+++ b/changelog.d/6883.misc
@@ -0,0 +1 @@
+Add an additional entry to the SyTest blacklist for worker mode.
-- 
cgit 1.4.1


From a92e703ab9d78aecc062e797f941bb7e206650a5 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Mon, 10 Feb 2020 16:35:26 -0500
Subject: Reject device display names that are too long (#6882)

* Reject device display names that are too long.

Too long is currently defined as more than 100 characters (see the
sketch below).

* Add a regression test for rejecting a too long device display name.
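
A minimal sketch of the new validation, mirroring the check added to
`DeviceHandler.update_device` below; it is standalone, so it raises a
plain `ValueError` where Synapse raises a 400 `SynapseError`:

```python
MAX_DEVICE_DISPLAY_NAME_LEN = 100

def validate_device_display_name(new_display_name):
    # Empty or missing names are allowed; anything over the limit is
    # rejected (Synapse responds with a 400 error).
    if new_display_name and len(new_display_name) > MAX_DEVICE_DISPLAY_NAME_LEN:
        raise ValueError(
            "Device display name is too long (max %i)"
            % (MAX_DEVICE_DISPLAY_NAME_LEN,)
        )

validate_device_display_name("a" * 100)  # accepted
# validate_device_display_name("a" * 101)  # raises ValueError
```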
---
 changelog.d/6882.misc         |  1 +
 synapse/handlers/device.py    | 14 +++++++++++++-
 tests/handlers/test_device.py | 18 ++++++++++++++++++
 3 files changed, 32 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6882.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6882.misc b/changelog.d/6882.misc
new file mode 100644
index 0000000000..e8382e36ae
--- /dev/null
+++ b/changelog.d/6882.misc
@@ -0,0 +1 @@
+Reject device display names over 100 characters in length.
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 6d8e48ed39..50cea3f378 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -26,6 +26,7 @@ from synapse.api.errors import (
     FederationDeniedError,
     HttpResponseException,
     RequestSendFailed,
+    SynapseError,
 )
 from synapse.logging.opentracing import log_kv, set_tag, trace
 from synapse.types import RoomStreamToken, get_domain_from_id
@@ -39,6 +40,8 @@ from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
+MAX_DEVICE_DISPLAY_NAME_LEN = 100
+
 
 class DeviceWorkerHandler(BaseHandler):
     def __init__(self, hs):
@@ -404,9 +407,18 @@ class DeviceHandler(DeviceWorkerHandler):
             defer.Deferred:
         """
 
+        # Reject a new display name that is too long.
+        new_display_name = content.get("display_name")
+        if new_display_name and len(new_display_name) > MAX_DEVICE_DISPLAY_NAME_LEN:
+            raise SynapseError(
+                400,
+                "Device display name is too long (max %i)"
+                % (MAX_DEVICE_DISPLAY_NAME_LEN,),
+            )
+
         try:
             yield self.store.update_device(
-                user_id, device_id, new_display_name=content.get("display_name")
+                user_id, device_id, new_display_name=new_display_name
             )
             yield self.notify_device_update(user_id, [device_id])
         except errors.StoreError as e:
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index a3aa0a1cf2..62b47f6574 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -160,6 +160,24 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         res = self.get_success(self.handler.get_device(user1, "abc"))
         self.assertEqual(res["display_name"], "new display")
 
+    def test_update_device_too_long_display_name(self):
+        """Update a device with a display name that is invalid (too long)."""
+        self._record_users()
+
+        # Request to update a device display name with a new value that is longer than allowed.
+        update = {
+            "display_name": "a"
+            * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1)
+        }
+        self.get_failure(
+            self.handler.update_device(user1, "abc", update),
+            synapse.api.errors.SynapseError,
+        )
+
+        # Ensure the display name was not updated.
+        res = self.get_success(self.handler.get_device(user1, "abc"))
+        self.assertEqual(res["display_name"], "display 2")
+
     def test_update_unknown_device(self):
         update = {"display_name": "new_display"}
         res = self.handler.update_device("user_id", "unknown_device_id", update)
-- 
cgit 1.4.1


From 705c978366146e85280af0dcf216e78d521352a3 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 11 Feb 2020 17:38:27 +0000
Subject: Changelog

---
 changelog.d/6891.doc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6891.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6891.doc b/changelog.d/6891.doc
new file mode 100644
index 0000000000..f3afbbccda
--- /dev/null
+++ b/changelog.d/6891.doc
@@ -0,0 +1 @@
+Spell out that the last event sent to a room won't be deleted by the purge jobs for the message retention policies support.
-- 
cgit 1.4.1


From a0c4769f1ae6d6aaf1a548b869f857836efbfb18 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 11 Feb 2020 17:56:42 +0000
Subject: Update the changelog file

---
 changelog.d/6891.doc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6891.doc b/changelog.d/6891.doc
index f3afbbccda..2f46c385b7 100644
--- a/changelog.d/6891.doc
+++ b/changelog.d/6891.doc
@@ -1 +1 @@
-Spell out that the last event sent to a room won't be deleted by the purge jobs for the message retention policies support.
+Spell out that the last event sent to a room won't be deleted by a purge. 
-- 
cgit 1.4.1


From ba547ec3a94b17cfb634758deb4cdbc98fc840a9 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Wed, 12 Feb 2020 07:02:19 -0500
Subject: Use BSD-compatible in-place editing for sed. (#6887)

---
 changelog.d/6887.misc      | 1 +
 scripts-dev/config-lint.sh | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6887.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6887.misc b/changelog.d/6887.misc
new file mode 100644
index 0000000000..b351d47c7b
--- /dev/null
+++ b/changelog.d/6887.misc
@@ -0,0 +1 @@
+Fix the use of sed in the linting scripts when using BSD sed.
diff --git a/scripts-dev/config-lint.sh b/scripts-dev/config-lint.sh
index 677a854c85..189ca66535 100755
--- a/scripts-dev/config-lint.sh
+++ b/scripts-dev/config-lint.sh
@@ -3,7 +3,8 @@
 # Exits with 0 if there are no problems, or another code otherwise.
 
 # Fix non-lowercase true/false values
-sed -i -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
+sed -i.bak -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
+rm docs/sample_config.yaml.bak
 
 # Check if anything changed
 git diff --exit-code docs/sample_config.yaml
-- 
cgit 1.4.1


From d8994942f28f5028e560f6aba52512fae3ca1a6a Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 12 Feb 2020 18:14:10 +0000
Subject: Return a 404 for admin api user lookup if user not found (#6901)
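
For illustration, a hedged sketch of the change from a client's point of
view; it assumes the `requests` library, a homeserver on localhost:8008,
and an admin access token (all placeholders), with the endpoint taken
from the regression test below:

```python
import requests

# Looking up a user that does not exist now returns 404 / M_NOT_FOUND
# rather than an empty 200 response.
resp = requests.get(
    "http://localhost:8008/_synapse/admin/v2/users/@unknown_person:test",
    headers={"Authorization": "Bearer <admin_access_token>"},
)
assert resp.status_code == 404
assert resp.json()["errcode"] == "M_NOT_FOUND"
```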

---
 changelog.d/6901.misc         |  1 +
 synapse/rest/admin/users.py   |  5 ++++-
 tests/rest/admin/test_user.py | 16 ++++++++++++++++
 3 files changed, 21 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6901.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6901.misc b/changelog.d/6901.misc
new file mode 100644
index 0000000000..b2f12bbe86
--- /dev/null
+++ b/changelog.d/6901.misc
@@ -0,0 +1 @@
+Return a 404 instead of 200 for querying information of a non-existent user through the admin API.
\ No newline at end of file
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index e75c5f1370..2107b5dc56 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -21,7 +21,7 @@ from six import text_type
 from six.moves import http_client
 
 from synapse.api.constants import UserTypes
-from synapse.api.errors import Codes, SynapseError
+from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.servlet import (
     RestServlet,
     assert_params_in_dict,
@@ -152,6 +152,9 @@ class UserRestServletV2(RestServlet):
 
         ret = await self.admin_handler.get_user(target_user)
 
+        if not ret:
+            raise NotFoundError("User not found")
+
         return 200, ret
 
     async def on_PUT(self, request, user_id):
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 3b5169b38d..490ce8f55d 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -401,6 +401,22 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual("You are not a server admin", channel.json_body["error"])
 
+    def test_user_does_not_exist(self):
+        """
+        Tests that a lookup for a user that does not exist returns a 404
+        """
+        self.hs.config.registration_shared_secret = None
+
+        request, channel = self.make_request(
+            "GET",
+            "/_synapse/admin/v2/users/@unknown_person:test",
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(404, channel.code, msg=channel.json_body)
+        self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"])
+
     def test_requester_is_admin(self):
         """
         If the user is a server admin, a new user is created.
-- 
cgit 1.4.1


From e88a5dd108f607c9ec99356a2601147e41a20533 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 12 Feb 2020 20:15:41 +0000
Subject: Changelog

---
 changelog.d/6905.doc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6905.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6905.doc b/changelog.d/6905.doc
new file mode 100644
index 0000000000..ca10b38301
--- /dev/null
+++ b/changelog.d/6905.doc
@@ -0,0 +1 @@
+Mention in `ACME.md` that ACMEv1 is deprecated and explain what it means for Synapse admins.
-- 
cgit 1.4.1


From dc3f9987061e24bec77ad9e26a7679a1907cee4f Mon Sep 17 00:00:00 2001
From: Aaron Raimist 
Date: Thu, 13 Feb 2020 06:02:32 -0600
Subject: Remove m.lazy_load_members from unstable features since it is in CS
 r0.5.0 (#6877)

Fixes #5528
---
 changelog.d/6877.removal        | 1 +
 synapse/rest/client/versions.py | 1 -
 2 files changed, 1 insertion(+), 1 deletion(-)
 create mode 100644 changelog.d/6877.removal

(limited to 'changelog.d')

diff --git a/changelog.d/6877.removal b/changelog.d/6877.removal
new file mode 100644
index 0000000000..9545e31fbe
--- /dev/null
+++ b/changelog.d/6877.removal
@@ -0,0 +1 @@
+Remove `m.lazy_load_members` from `unstable_features` since lazy loading is in the stable Client-Server API version r0.5.0.
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 3d0fefb4df..3eeb3607f4 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -52,7 +52,6 @@ class VersionsRestServlet(RestServlet):
                 ],
                 # as per MSC1497:
                 "unstable_features": {
-                    "m.lazy_load_members": True,
                     # as per MSC2190, as amended by MSC2264
                     # to be removed in r0.6.0
                     "m.id_access_token": True,
-- 
cgit 1.4.1


From 361de49c90fd1f35adc4a6bca8206e50e7f15454 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Thu, 13 Feb 2020 07:40:57 -0500
Subject: Add documentation for the spam checker module (#6906)

Add documentation for the spam checker.
---
 changelog.d/6906.doc |  1 +
 docs/spam_checker.md | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 86 insertions(+)
 create mode 100644 changelog.d/6906.doc
 create mode 100644 docs/spam_checker.md

(limited to 'changelog.d')

diff --git a/changelog.d/6906.doc b/changelog.d/6906.doc
new file mode 100644
index 0000000000..053b2436ae
--- /dev/null
+++ b/changelog.d/6906.doc
@@ -0,0 +1 @@
+Add documentation for the spam checker.
diff --git a/docs/spam_checker.md b/docs/spam_checker.md
new file mode 100644
index 0000000000..97ff17f952
--- /dev/null
+++ b/docs/spam_checker.md
@@ -0,0 +1,85 @@
+# Handling spam in Synapse
+
+Synapse has support for customizing spam checking behavior. It can plug into a
+variety of events and affect how they are presented to users on your homeserver.
+
+The spam checking behavior is implemented as a Python class, which must be
+importable by the running Synapse.
+
+## Python spam checker class
+
+The Python class is instantiated with two objects:
+
+* Any configuration (see below).
+* An instance of `synapse.spam_checker_api.SpamCheckerApi`.
+
+It then implements methods which return a boolean to alter behavior in Synapse.
+
+There's a generic method for checking every event (`check_event_for_spam`), as
+well as some specific methods:
+
+* `user_may_invite`
+* `user_may_create_room`
+* `user_may_create_room_alias`
+* `user_may_publish_room`
+
+The details of each of these methods (as well as their inputs and outputs)
+are documented in the `synapse.events.spamcheck.SpamChecker` class.
+
+The `SpamCheckerApi` class provides a way for the custom spam checker class to
+call back into the homeserver internals. It currently implements the following
+methods:
+
+* `get_state_events_in_room`
+
+### Example
+
+```python
+class ExampleSpamChecker:
+    def __init__(self, config, api):
+        self.config = config
+        self.api = api
+
+    def check_event_for_spam(self, foo):
+        return False  # allow all events
+
+    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
+        return True  # allow all invites
+
+    def user_may_create_room(self, userid):
+        return True  # allow all room creations
+
+    def user_may_create_room_alias(self, userid, room_alias):
+        return True  # allow all room aliases
+
+    def user_may_publish_room(self, userid, room_id):
+        return True  # allow publishing of all rooms
+```
+
+## Configuration
+
+Modify the `spam_checker` section of your `homeserver.yaml` in the following
+manner:
+
+`module` should point to the fully qualified Python class that implements your
+custom logic, e.g. `my_module.ExampleSpamChecker`.
+
+`config` is a dictionary that gets passed to the spam checker class.
+
+### Example
+
+This section might look like:
+
+```yaml
+spam_checker:
+  module: my_module.ExampleSpamChecker
+  config:
+    # Enable or disable a specific option in ExampleSpamChecker.
+    my_custom_option: true
+```
+
+## Examples
+
+The [Mjolnir](https://github.com/matrix-org/mjolnir) project is a full-fledged
+example using the Synapse spam checking API, including a bot for dynamic
+configuration.
-- 
cgit 1.4.1


From f3f142259e5c882598b7426f36c26c4aca03c5d6 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Thu, 13 Feb 2020 16:10:16 +0000
Subject: Changelog

---
 changelog.d/6907.doc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6907.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6907.doc b/changelog.d/6907.doc
new file mode 100644
index 0000000000..be0e698af8
--- /dev/null
+++ b/changelog.d/6907.doc
@@ -0,0 +1 @@
+Update Synapse's documentation to warn about the deprecation of ACME v1.
-- 
cgit 1.4.1


From df1c98c22a9ada46a2a103184aab3b5e08539b19 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Thu, 13 Feb 2020 16:12:20 +0000
Subject: Update changelog for #6905 to group it with upcoming PRs

---
 changelog.d/6905.doc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6905.doc b/changelog.d/6905.doc
index ca10b38301..be0e698af8 100644
--- a/changelog.d/6905.doc
+++ b/changelog.d/6905.doc
@@ -1 +1 @@
-Mention in `ACME.md` that ACMEv1 is deprecated and explain what it means for Synapse admins.
+Update Synapse's documentation to warn about the deprecation of ACME v1.
-- 
cgit 1.4.1


From 79460ce9c987195afeb9453a33386240ffc0af3f Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Thu, 13 Feb 2020 17:24:14 +0000
Subject: Changelog

---
 changelog.d/6909.doc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6909.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6909.doc b/changelog.d/6909.doc
new file mode 100644
index 0000000000..be0e698af8
--- /dev/null
+++ b/changelog.d/6909.doc
@@ -0,0 +1 @@
+Update Synapse's documentation to warn about the deprecation of ACME v1.
-- 
cgit 1.4.1


From 49f877d32efc79cb40b2766cb052cf35bad31de5 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Fri, 14 Feb 2020 07:17:54 -0500
Subject: Filter the results of user directory searching via the spam checker
 (#6888)

Add a method to the spam checker to filter the user directory results.
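
As a hedged sketch, a spam checker module implementing the new hook; the
class name and the blocked substring are invented for the example:

```python
class ExampleUserDirectorySpamChecker:
    def __init__(self, config, api):
        self.config = config
        self.api = api

    def check_username_for_spam(self, user_profile):
        # user_profile carries "user_id", "display_name" and "avatar_url";
        # returning True hides the user from directory search results.
        display_name = user_profile.get("display_name") or ""
        return "spam" in display_name.lower()
```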
---
 changelog.d/6888.feature              |  1 +
 docs/spam_checker.md                  |  3 ++
 synapse/events/spamcheck.py           | 27 ++++++++++
 synapse/handlers/user_directory.py    | 14 +++++-
 tests/handlers/test_user_directory.py | 92 +++++++++++++++++++++++++++++++++++
 5 files changed, 135 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6888.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6888.feature b/changelog.d/6888.feature
new file mode 100644
index 0000000000..1b7ac0c823
--- /dev/null
+++ b/changelog.d/6888.feature
@@ -0,0 +1 @@
+The result of a user directory search can now be filtered via the spam checker.
diff --git a/docs/spam_checker.md b/docs/spam_checker.md
index 97ff17f952..5b5f5000b7 100644
--- a/docs/spam_checker.md
+++ b/docs/spam_checker.md
@@ -54,6 +54,9 @@ class ExampleSpamChecker:
 
     def user_may_publish_room(self, userid, room_id):
         return True  # allow publishing of all rooms
+
+    def check_username_for_spam(self, user_profile):
+        return False  # allow all usernames
 ```
 
 ## Configuration
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 5a907718d6..0a13fca9a4 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 
 import inspect
+from typing import Dict
 
 from synapse.spam_checker_api import SpamCheckerApi
 
@@ -125,3 +126,29 @@ class SpamChecker(object):
             return True
 
         return self.spam_checker.user_may_publish_room(userid, room_id)
+
+    def check_username_for_spam(self, user_profile: Dict[str, str]) -> bool:
+        """Checks if a user ID or display name is considered "spammy" by this server.
+
+        If the server considers a username spammy, then it will not be included in
+        user directory results.
+
+        Args:
+            user_profile: The user information to check, it contains the keys:
+                * user_id
+                * display_name
+                * avatar_url
+
+        Returns:
+            True if the user is spammy.
+        """
+        if self.spam_checker is None:
+            return False
+
+        # For backwards compatibility, if the method does not exist on the spam checker, fall back to not interfering.
+        checker = getattr(self.spam_checker, "check_username_for_spam", None)
+        if not checker:
+            return False
+        # Make a copy of the user profile object to ensure the spam checker
+        # cannot modify it.
+        return checker(user_profile.copy())
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 81aa58dc8c..722760c59d 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -52,6 +52,7 @@ class UserDirectoryHandler(StateDeltasHandler):
         self.is_mine_id = hs.is_mine_id
         self.update_user_directory = hs.config.update_user_directory
         self.search_all_users = hs.config.user_directory_search_all_users
+        self.spam_checker = hs.get_spam_checker()
         # The current position in the current_state_delta stream
         self.pos = None
 
@@ -65,7 +66,7 @@ class UserDirectoryHandler(StateDeltasHandler):
             # we start populating the user directory
             self.clock.call_later(0, self.notify_new_event)
 
-    def search_users(self, user_id, search_term, limit):
+    async def search_users(self, user_id, search_term, limit):
         """Searches for users in directory
 
         Returns:
@@ -82,7 +83,16 @@ class UserDirectoryHandler(StateDeltasHandler):
                     ]
                 }
         """
-        return self.store.search_user_dir(user_id, search_term, limit)
+        results = await self.store.search_user_dir(user_id, search_term, limit)
+
+        # Remove any spammy users from the results.
+        results["results"] = [
+            user
+            for user in results["results"]
+            if not self.spam_checker.check_username_for_spam(user)
+        ]
+
+        return results
 
     def notify_new_event(self):
         """Called when there may be more deltas to process
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 26071059d2..0a4765fff4 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -147,6 +147,98 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         s = self.get_success(self.handler.search_users(u1, "user3", 10))
         self.assertEqual(len(s["results"]), 0)
 
+    def test_spam_checker(self):
+        """
+        A user who fails the spam checks will not appear in search results.
+        """
+        u1 = self.register_user("user1", "pass")
+        u1_token = self.login(u1, "pass")
+        u2 = self.register_user("user2", "pass")
+        u2_token = self.login(u2, "pass")
+
+        # We do not add users to the directory until they join a room.
+        s = self.get_success(self.handler.search_users(u1, "user2", 10))
+        self.assertEqual(len(s["results"]), 0)
+
+        room = self.helper.create_room_as(u1, is_public=False, tok=u1_token)
+        self.helper.invite(room, src=u1, targ=u2, tok=u1_token)
+        self.helper.join(room, user=u2, tok=u2_token)
+
+        # Check we have populated the database correctly.
+        shares_private = self.get_users_who_share_private_rooms()
+        public_users = self.get_users_in_public_rooms()
+
+        self.assertEqual(
+            self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)])
+        )
+        self.assertEqual(public_users, [])
+
+        # We get one search result when searching for user2 by user1.
+        s = self.get_success(self.handler.search_users(u1, "user2", 10))
+        self.assertEqual(len(s["results"]), 1)
+
+        # Configure a spam checker that does not filter any users.
+        spam_checker = self.hs.get_spam_checker()
+
+        class AllowAll(object):
+            def check_username_for_spam(self, user_profile):
+                # Allow all users.
+                return False
+
+        spam_checker.spam_checker = AllowAll()
+
+        # The results do not change:
+        # We get one search result when searching for user2 by user1.
+        s = self.get_success(self.handler.search_users(u1, "user2", 10))
+        self.assertEqual(len(s["results"]), 1)
+
+        # Configure a spam checker that filters all users.
+        class BlockAll(object):
+            def check_username_for_spam(self, user_profile):
+                # All users are spammy.
+                return True
+
+        spam_checker.spam_checker = BlockAll()
+
+        # User1 now gets no search results for any of the other users.
+        s = self.get_success(self.handler.search_users(u1, "user2", 10))
+        self.assertEqual(len(s["results"]), 0)
+
+    def test_legacy_spam_checker(self):
+        """
+        A spam checker without the expected method should be ignored.
+        """
+        u1 = self.register_user("user1", "pass")
+        u1_token = self.login(u1, "pass")
+        u2 = self.register_user("user2", "pass")
+        u2_token = self.login(u2, "pass")
+
+        # We do not add users to the directory until they join a room.
+        s = self.get_success(self.handler.search_users(u1, "user2", 10))
+        self.assertEqual(len(s["results"]), 0)
+
+        room = self.helper.create_room_as(u1, is_public=False, tok=u1_token)
+        self.helper.invite(room, src=u1, targ=u2, tok=u1_token)
+        self.helper.join(room, user=u2, tok=u2_token)
+
+        # Check we have populated the database correctly.
+        shares_private = self.get_users_who_share_private_rooms()
+        public_users = self.get_users_in_public_rooms()
+
+        self.assertEqual(
+            self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)])
+        )
+        self.assertEqual(public_users, [])
+
+        # Configure a spam checker.
+        spam_checker = self.hs.get_spam_checker()
+        # The spam checker doesn't need any methods, so create a bare object.
+        spam_checker.spam_checker = object()
+
+        # We get one search result when searching for user2 by user1.
+        s = self.get_success(self.handler.search_users(u1, "user2", 10))
+        self.assertEqual(len(s["results"]), 1)
+
     def _compress_shared(self, shared):
         """
         Compress a list of users who share rooms dicts to a list of tuples.
-- 
cgit 1.4.1


From 02e89021f58f931068ab0337de039181cc7f6569 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Fri, 14 Feb 2020 09:05:43 -0500
Subject: Convert the directory handler tests to use HomeserverTestCase (#6919)

Convert directory handler tests to use HomeserverTestCase.
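
The general shape of the conversion, as a sketch reusing the fixtures set
up in the diff below:

```python
from tests import unittest

class DirectoryConversionSketch(unittest.HomeserverTestCase):
    # Sketch only: get_success pumps the reactor until the deferred
    # resolves, replacing @defer.inlineCallbacks / yield in test bodies.
    def test_get_local_association(self):
        result = self.get_success(self.handler.get_association(self.my_room))
        self.assertEquals(
            {"room_id": "!8765qwer:test", "servers": ["test"]}, result
        )
```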
---
 changelog.d/6919.misc            |  1 +
 tests/handlers/test_directory.py | 41 +++++++++++++++++-----------------------
 2 files changed, 18 insertions(+), 24 deletions(-)
 create mode 100644 changelog.d/6919.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6919.misc b/changelog.d/6919.misc
new file mode 100644
index 0000000000..aa2cd89998
--- /dev/null
+++ b/changelog.d/6919.misc
@@ -0,0 +1 @@
+Convert the directory handler tests to use HomeserverTestCase.
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 91c7a17070..ee88cf5a4b 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -19,24 +19,16 @@ from mock import Mock
 from twisted.internet import defer
 
 from synapse.config.room_directory import RoomDirectoryConfig
-from synapse.handlers.directory import DirectoryHandler
 from synapse.rest.client.v1 import directory, room
 from synapse.types import RoomAlias
 
 from tests import unittest
-from tests.utils import setup_test_homeserver
 
 
-class DirectoryHandlers(object):
-    def __init__(self, hs):
-        self.directory_handler = DirectoryHandler(hs)
-
-
-class DirectoryTestCase(unittest.TestCase):
+class DirectoryTestCase(unittest.HomeserverTestCase):
     """ Tests the directory service. """
 
-    @defer.inlineCallbacks
-    def setUp(self):
+    def make_homeserver(self, reactor, clock):
         self.mock_federation = Mock()
         self.mock_registry = Mock()
 
@@ -47,14 +39,12 @@ class DirectoryTestCase(unittest.TestCase):
 
         self.mock_registry.register_query_handler = register_query_handler
 
-        hs = yield setup_test_homeserver(
-            self.addCleanup,
+        hs = self.setup_test_homeserver(
             http_client=None,
             resource_for_federation=Mock(),
             federation_client=self.mock_federation,
             federation_registry=self.mock_registry,
         )
-        hs.handlers = DirectoryHandlers(hs)
 
         self.handler = hs.get_handlers().directory_handler
 
@@ -64,23 +54,25 @@ class DirectoryTestCase(unittest.TestCase):
         self.your_room = RoomAlias.from_string("#your-room:test")
         self.remote_room = RoomAlias.from_string("#another:remote")
 
-    @defer.inlineCallbacks
+        return hs
+
     def test_get_local_association(self):
-        yield self.store.create_room_alias_association(
-            self.my_room, "!8765qwer:test", ["test"]
+        self.get_success(
+            self.store.create_room_alias_association(
+                self.my_room, "!8765qwer:test", ["test"]
+            )
         )
 
-        result = yield self.handler.get_association(self.my_room)
+        result = self.get_success(self.handler.get_association(self.my_room))
 
         self.assertEquals({"room_id": "!8765qwer:test", "servers": ["test"]}, result)
 
-    @defer.inlineCallbacks
     def test_get_remote_association(self):
         self.mock_federation.make_query.return_value = defer.succeed(
             {"room_id": "!8765qwer:test", "servers": ["test", "remote"]}
         )
 
-        result = yield self.handler.get_association(self.remote_room)
+        result = self.get_success(self.handler.get_association(self.remote_room))
 
         self.assertEquals(
             {"room_id": "!8765qwer:test", "servers": ["test", "remote"]}, result
@@ -93,14 +85,15 @@ class DirectoryTestCase(unittest.TestCase):
             ignore_backoff=True,
         )
 
-    @defer.inlineCallbacks
     def test_incoming_fed_query(self):
-        yield self.store.create_room_alias_association(
-            self.your_room, "!8765asdf:test", ["test"]
+        self.get_success(
+            self.store.create_room_alias_association(
+                self.your_room, "!8765asdf:test", ["test"]
+            )
         )
 
-        response = yield self.query_handlers["directory"](
-            {"room_alias": "#your-room:test"}
+        response = self.get_success(
+            self.handler.on_directory_query({"room_alias": "#your-room:test"})
         )
 
         self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response)
-- 
cgit 1.4.1
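
The conversion boils down to two substitutions: make_homeserver() replaces
the inlineCallbacks setUp, and self.get_success() replaces
`result = yield deferred`. A minimal sketch of the idiom under those
assumptions (the test body itself is hypothetical):

    from tests import unittest

    class ExampleTestCase(unittest.HomeserverTestCase):
        def make_homeserver(self, reactor, clock):
            # Built synchronously; no yield/addCleanup dance needed.
            return self.setup_test_homeserver()

        def test_example(self):
            # get_success() pumps the fake reactor until the Deferred
            # resolves and returns its result.
            store = self.hs.get_datastore()
            result = self.get_success(store.get_user_by_id("@nobody:test"))
            self.assertIsNone(result)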


From 97a42bbc3a4789620c48746f8e87291446f6f5ac Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 14 Feb 2020 16:22:30 +0000
Subject: Add a warning about indentation to generated config (#6920)

Fixes #6916.
---
 changelog.d/6920.misc           |  1 +
 docs/.sample_config_header.yaml |  4 +++-
 docs/sample_config.yaml         | 12 +++++++++++-
 synapse/config/_base.py         | 16 ++++++++++++++--
 4 files changed, 29 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6920.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6920.misc b/changelog.d/6920.misc
new file mode 100644
index 0000000000..d333add990
--- /dev/null
+++ b/changelog.d/6920.misc
@@ -0,0 +1 @@
+Add a warning about indentation to generated configuration files.
diff --git a/docs/.sample_config_header.yaml b/docs/.sample_config_header.yaml
index e001ef5983..35a591d042 100644
--- a/docs/.sample_config_header.yaml
+++ b/docs/.sample_config_header.yaml
@@ -1,4 +1,4 @@
-# The config is maintained as an up-to-date snapshot of the default
+# This file is maintained as an up-to-date snapshot of the default
 # homeserver.yaml configuration generated by Synapse.
 #
 # It is intended to act as a reference for the default configuration,
@@ -10,3 +10,5 @@
 # homeserver.yaml. Instead, if you are starting from scratch, please generate
 # a fresh config using Synapse by following the instructions in INSTALL.md.
 
+################################################################################
+
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 8e8cf513b0..93236daddc 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1,4 +1,4 @@
-# The config is maintained as an up-to-date snapshot of the default
+# This file is maintained as an up-to-date snapshot of the default
 # homeserver.yaml configuration generated by Synapse.
 #
 # It is intended to act as a reference for the default configuration,
@@ -10,6 +10,16 @@
 # homeserver.yaml. Instead, if you are starting from scratch, please generate
 # a fresh config using Synapse by following the instructions in INSTALL.md.
 
+################################################################################
+
+# Configuration file for Synapse.
+#
+# This is a YAML file: see [1] for a quick introduction. Note in particular
+# that *indentation is important*: all the elements of a list or dictionary
+# should have the same indentation.
+#
+# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html
+
 ## Server ##
 
 # The domain name of the server, with optional explicit port.
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 08619404bb..ba846042c4 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -53,6 +53,18 @@ Missing mandatory `server_name` config option.
 """
 
 
+CONFIG_FILE_HEADER = """\
+# Configuration file for Synapse.
+#
+# This is a YAML file: see [1] for a quick introduction. Note in particular
+# that *indentation is important*: all the elements of a list or dictionary
+# should have the same indentation.
+#
+# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html
+
+"""
+
+
 def path_exists(file_path):
     """Check if a file exists
 
@@ -344,7 +356,7 @@ class RootConfig(object):
             str: the yaml config file
         """
 
-        return "\n\n".join(
+        return CONFIG_FILE_HEADER + "\n\n".join(
             dedent(conf)
             for conf in self.invoke_all(
                 "generate_config_section",
@@ -574,8 +586,8 @@ class RootConfig(object):
                 if not path_exists(config_dir_path):
                     os.makedirs(config_dir_path)
                 with open(config_path, "w") as config_file:
-                    config_file.write("# vim:ft=yaml\n\n")
                     config_file.write(config_str)
+                    config_file.write("\n\n# vim:ft=yaml")
 
                 config_dict = yaml.safe_load(config_str)
                 obj.generate_missing_files(config_dict, config_dir_path)
-- 
cgit 1.4.1
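
The indentation warning is there because YAML either mis-parses or rejects
inconsistently indented collections. A quick illustration with PyYAML
(hypothetical snippet, not part of the patch; `listeners` is just an
example of a list-valued option):

    import yaml

    good = "listeners:\n  - port: 8008\n  - port: 8448\n"
    print(yaml.safe_load(good))
    # {'listeners': [{'port': 8008}, {'port': 8448}]}

    bad = "listeners:\n  - port: 8008\n   - port: 8448\n"
    try:
        yaml.safe_load(bad)
    except yaml.YAMLError as e:
        # The second item is indented one space deeper than the first, so it
        # cannot belong to the same list; PyYAML raises a parse error.
        print("rejected:", e)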


From 32873efa87055518b08d9b3b001b3bf9b60437f9 Mon Sep 17 00:00:00 2001
From: Fridtjof Mund <2780577+fridtjof@users.noreply.github.com>
Date: Fri, 14 Feb 2020 17:27:29 +0100
Subject: contrib/docker: Ensure correct encoding and locale settings on DB
 creation (#6921)

Signed-off-by: Fridtjof Mund 
---
 changelog.d/6921.docker           | 1 +
 contrib/docker/docker-compose.yml | 3 +++
 2 files changed, 4 insertions(+)
 create mode 100644 changelog.d/6921.docker

(limited to 'changelog.d')

diff --git a/changelog.d/6921.docker b/changelog.d/6921.docker
new file mode 100644
index 0000000000..152e723339
--- /dev/null
+++ b/changelog.d/6921.docker
@@ -0,0 +1 @@
+Databases created using the compose file in contrib/docker will now always have correct encoding and locale settings. Contributed by Fridtjof Mund.
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index 2b044baf78..5df29379c8 100644
--- a/contrib/docker/docker-compose.yml
+++ b/contrib/docker/docker-compose.yml
@@ -56,6 +56,9 @@ services:
     environment:
       - POSTGRES_USER=synapse
       - POSTGRES_PASSWORD=changeme
+      # ensure the database gets created correctly
+      # https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
+      - POSTGRES_INITDB_ARGS="--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
     volumes:
       # You may store the database tables in a local folder..
       - ./schemas:/var/lib/postgresql/data
-- 
cgit 1.4.1
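
The linked postgres.md expects a UTF8 database with C collation, which is
what the initdb arguments above guarantee. A hedged way to verify an
existing database (assumes psycopg2 and the credentials from the compose
file; adjust host/port for your deployment):

    import psycopg2

    conn = psycopg2.connect(
        host="localhost", user="synapse", password="changeme", dbname="synapse"
    )
    with conn.cursor() as cur:
        cur.execute(
            "SELECT pg_encoding_to_char(encoding), datcollate, datctype"
            " FROM pg_database WHERE datname = current_database()"
        )
        # Expect ('UTF8', 'C', 'C') for a correctly initialised database.
        print(cur.fetchone())
    conn.close()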


From 5a78f47f6e21505f84490c3b8da49e96ac8e3483 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Fri, 7 Feb 2020 13:27:58 +0000
Subject: changelog

---
 changelog.d/6872.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6872.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6872.misc b/changelog.d/6872.misc
new file mode 100644
index 0000000000..215a0c82c3
--- /dev/null
+++ b/changelog.d/6872.misc
@@ -0,0 +1 @@
+Refactor _EventInternalMetadata object to improve type safety.
-- 
cgit 1.4.1


From 10027c80b031f1e62b47cd61c534420673f49a71 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Fri, 14 Feb 2020 12:49:40 -0500
Subject: Add type hints to the spam check module (#6915)

Add typing information to the spam checker modules.
---
 changelog.d/6915.misc                |  1 +
 synapse/events/spamcheck.py          | 44 +++++++++++++++++++++---------------
 synapse/spam_checker_api/__init__.py | 12 ++++++----
 tox.ini                              |  1 +
 4 files changed, 36 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/6915.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6915.misc b/changelog.d/6915.misc
new file mode 100644
index 0000000000..3a181ef243
--- /dev/null
+++ b/changelog.d/6915.misc
@@ -0,0 +1 @@
+Add type hints to the spam checker module.
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 0a13fca9a4..a23b6b7b61 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -19,9 +19,13 @@ from typing import Dict
 
 from synapse.spam_checker_api import SpamCheckerApi
 
+MYPY = False
+if MYPY:
+    import synapse.server
+
 
 class SpamChecker(object):
-    def __init__(self, hs):
+    def __init__(self, hs: "synapse.server.HomeServer"):
         self.spam_checker = None
 
         module = None
@@ -41,7 +45,7 @@ class SpamChecker(object):
             else:
                 self.spam_checker = module(config=config)
 
-    def check_event_for_spam(self, event):
+    def check_event_for_spam(self, event: "synapse.events.EventBase") -> bool:
         """Checks if a given event is considered "spammy" by this server.
 
         If the server considers an event spammy, then it will be rejected if
@@ -49,26 +53,30 @@ class SpamChecker(object):
         users receive a blank event.
 
         Args:
-            event (synapse.events.EventBase): the event to be checked
+            event: the event to be checked
 
         Returns:
-            bool: True if the event is spammy.
+            True if the event is spammy.
         """
         if self.spam_checker is None:
             return False
 
         return self.spam_checker.check_event_for_spam(event)
 
-    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
+    def user_may_invite(
+        self, inviter_userid: str, invitee_userid: str, room_id: str
+    ) -> bool:
         """Checks if a given user may send an invite
 
         If this method returns false, the invite will be rejected.
 
         Args:
-            userid (string): The sender's user ID
+            inviter_userid: The user ID of the sender of the invitation
+            invitee_userid: The user ID targeted in the invitation
+            room_id: The room ID
 
         Returns:
-            bool: True if the user may send an invite, otherwise False
+            True if the user may send an invite, otherwise False
         """
         if self.spam_checker is None:
             return True
@@ -77,50 +85,50 @@ class SpamChecker(object):
             inviter_userid, invitee_userid, room_id
         )
 
-    def user_may_create_room(self, userid):
+    def user_may_create_room(self, userid: str) -> bool:
         """Checks if a given user may create a room
 
         If this method returns false, the creation request will be rejected.
 
         Args:
-            userid (string): The sender's user ID
+            userid: The ID of the user attempting to create a room
 
         Returns:
-            bool: True if the user may create a room, otherwise False
+            True if the user may create a room, otherwise False
         """
         if self.spam_checker is None:
             return True
 
         return self.spam_checker.user_may_create_room(userid)
 
-    def user_may_create_room_alias(self, userid, room_alias):
+    def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool:
         """Checks if a given user may create a room alias
 
         If this method returns false, the association request will be rejected.
 
         Args:
-            userid (string): The sender's user ID
-            room_alias (string): The alias to be created
+            userid: The ID of the user attempting to create a room alias
+            room_alias: The alias to be created
 
         Returns:
-            bool: True if the user may create a room alias, otherwise False
+            True if the user may create a room alias, otherwise False
         """
         if self.spam_checker is None:
             return True
 
         return self.spam_checker.user_may_create_room_alias(userid, room_alias)
 
-    def user_may_publish_room(self, userid, room_id):
+    def user_may_publish_room(self, userid: str, room_id: str) -> bool:
         """Checks if a given user may publish a room to the directory
 
         If this method returns false, the publish request will be rejected.
 
         Args:
-            userid (string): The sender's user ID
-            room_id (string): The ID of the room that would be published
+            userid: The user ID attempting to publish the room
+            room_id: The ID of the room that would be published
 
         Returns:
-            bool: True if the user may publish the room, otherwise False
+            True if the user may publish the room, otherwise False
         """
         if self.spam_checker is None:
             return True
diff --git a/synapse/spam_checker_api/__init__.py b/synapse/spam_checker_api/__init__.py
index efcc10f808..9b78924d96 100644
--- a/synapse/spam_checker_api/__init__.py
+++ b/synapse/spam_checker_api/__init__.py
@@ -18,6 +18,10 @@ from twisted.internet import defer
 
 from synapse.storage.state import StateFilter
 
+MYPY = False
+if MYPY:
+    import synapse.server
+
 logger = logging.getLogger(__name__)
 
 
@@ -26,18 +30,18 @@ class SpamCheckerApi(object):
     access to rooms and other relevant information.
     """
 
-    def __init__(self, hs):
+    def __init__(self, hs: "synapse.server.HomeServer"):
         self.hs = hs
 
         self._store = hs.get_datastore()
 
     @defer.inlineCallbacks
-    def get_state_events_in_room(self, room_id, types):
+    def get_state_events_in_room(self, room_id: str, types: tuple) -> defer.Deferred:
         """Gets state events for the given room.
 
         Args:
-            room_id (string): The room ID to get state events in.
-            types (tuple): The event type and state key (using None
+            room_id: The room ID to get state events in.
+            types: The event type and state key (using None
                 to represent 'any') of the room state to acquire.
 
         Returns:
diff --git a/tox.ini b/tox.ini
index f8229eba88..b9132a3177 100644
--- a/tox.ini
+++ b/tox.ini
@@ -179,6 +179,7 @@ extras = all
 commands = mypy \
             synapse/api \
             synapse/config/ \
+            synapse/events/spamcheck.py \
             synapse/federation/sender \
             synapse/federation/transport \
             synapse/handlers/sync.py \
-- 
cgit 1.4.1
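
The MYPY guard used in both files is the pre-typing.TYPE_CHECKING idiom:
the import only happens while type checking, so there is no circular import
at runtime, and the annotation is written as a string (a forward reference)
that mypy resolves against the guarded import. A minimal sketch of the
pattern (class name hypothetical):

    MYPY = False
    if MYPY:
        import synapse.server

    class Example(object):
        def __init__(self, hs: "synapse.server.HomeServer"):
            # The string annotation is never evaluated by Python itself,
            # so synapse.server need not be importable at runtime here.
            self._hs = hs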


From 46fa66bbfd367b2c1fbdf585107cec75fa1bb193 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 17 Feb 2020 11:30:50 +0000
Subject: wait for current_state_events_membership before
 delete_old_current_state_events (#6924)

---
 changelog.d/6924.bugfix                                            | 1 +
 .../main/schema/delta/57/delete_old_current_state_events.sql       | 7 +++++--
 2 files changed, 6 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6924.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6924.bugfix b/changelog.d/6924.bugfix
new file mode 100644
index 0000000000..33e6611929
--- /dev/null
+++ b/changelog.d/6924.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.10.0 which would cause room state to be cleared in the database if Synapse was upgraded direct from 1.2.1 or earlier to 1.10.0.
diff --git a/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql b/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql
index a133d87a19..aec06c8261 100644
--- a/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql
+++ b/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql
@@ -15,5 +15,8 @@
 
 -- Add background update to go and delete current state events for rooms the
 -- server is no longer in.
-INSERT into background_updates (update_name, progress_json)
-    VALUES ('delete_old_current_state_events', '{}');
+--
+-- this relies on the 'membership' column of current_state_events, so make sure
+-- that's populated first!
+INSERT into background_updates (update_name, progress_json, depends_on)
+    VALUES ('delete_old_current_state_events', '{}', 'current_state_events_membership');
-- 
cgit 1.4.1


From 3404ad289b1d2e5bc5c7f277f519b9698dbdaa15 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 17 Feb 2020 13:23:37 +0000
Subject: Raise the default power levels for invites, tombstones and server
 acls (#6834)

---
 changelog.d/6834.misc              |  1 +
 synapse/handlers/room.py           | 10 +++++++++-
 tests/rest/client/v1/test_rooms.py |  4 +++-
 3 files changed, 13 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6834.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6834.misc b/changelog.d/6834.misc
new file mode 100644
index 0000000000..79acebe516
--- /dev/null
+++ b/changelog.d/6834.misc
@@ -0,0 +1 @@
+Change the default power levels of invites, tombstones and server ACLs for new rooms.
\ No newline at end of file
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index ab07edd2fc..033083acac 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -64,18 +64,21 @@ class RoomCreationHandler(BaseHandler):
             "history_visibility": "shared",
             "original_invitees_have_ops": False,
             "guest_can_join": True,
+            "power_level_content_override": {"invite": 0},
         },
         RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
             "join_rules": JoinRules.INVITE,
             "history_visibility": "shared",
             "original_invitees_have_ops": True,
             "guest_can_join": True,
+            "power_level_content_override": {"invite": 0},
         },
         RoomCreationPreset.PUBLIC_CHAT: {
             "join_rules": JoinRules.PUBLIC,
             "history_visibility": "shared",
             "original_invitees_have_ops": False,
             "guest_can_join": False,
+            "power_level_content_override": {},
         },
     }
 
@@ -829,19 +832,24 @@ class RoomCreationHandler(BaseHandler):
                     # This will be redundant on pre-MSC2260 rooms, since the
                     # aliases event is special-cased.
                     EventTypes.Aliases: 0,
+                    EventTypes.Tombstone: 100,
+                    EventTypes.ServerACL: 100,
                 },
                 "events_default": 0,
                 "state_default": 50,
                 "ban": 50,
                 "kick": 50,
                 "redact": 50,
-                "invite": 0,
+                "invite": 50,
             }
 
             if config["original_invitees_have_ops"]:
                 for invitee in invite_list:
                     power_level_content["users"][invitee] = 100
 
+            # Power level overrides are defined per chat preset
+            power_level_content.update(config["power_level_content_override"])
+
             if power_level_content_override:
                 power_level_content.update(power_level_content_override)
 
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index e3af280ba6..fb681a1db9 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -1612,7 +1612,9 @@ class ContextTestCase(unittest.HomeserverTestCase):
     def prepare(self, reactor, clock, homeserver):
         self.user_id = self.register_user("user", "password")
         self.tok = self.login("user", "password")
-        self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
+        self.room_id = self.helper.create_room_as(
+            self.user_id, tok=self.tok, is_public=False
+        )
 
         self.other_user_id = self.register_user("user2", "password")
         self.other_tok = self.login("user2", "password")
-- 
cgit 1.4.1
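
The ordering of the two update() calls is the point of this patch: the
preset's power_level_content_override is applied first, and any override
supplied by the room creator is applied afterwards, so the creator always
wins. A toy sketch of the merge using the values from the diff (top-level
keys only; the real dict also carries users/events mappings):

    defaults = {"invite": 50, "ban": 50, "kick": 50, "redact": 50}
    preset_override = {"invite": 0}   # PRIVATE_CHAT / TRUSTED_PRIVATE_CHAT
    creator_override = {}             # from the /createRoom request, if any

    power_levels = dict(defaults)
    power_levels.update(preset_override)   # private chats keep invite at 0
    power_levels.update(creator_override)  # the creator's wishes win last
    print(power_levels["invite"])          # 0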


From bc831d1d9a380efd4fc063565d5f1eda341e9644 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Mon, 17 Feb 2020 16:34:13 +0000
Subject: #6924 has been released in 1.10.1

---
 changelog.d/6924.bugfix | 1 -
 1 file changed, 1 deletion(-)
 delete mode 100644 changelog.d/6924.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6924.bugfix b/changelog.d/6924.bugfix
deleted file mode 100644
index 33e6611929..0000000000
--- a/changelog.d/6924.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 1.10.0 which would cause room state to be cleared in the database if Synapse was upgraded direct from 1.2.1 or earlier to 1.10.0.
-- 
cgit 1.4.1


From 3be2abd0a9a089a147b23c6d58fc26dde63faa27 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 18 Feb 2020 11:41:53 +0000
Subject: Kill off deprecated "config-on-the-fly" docker mode (#6918)

Lots of people seem to get confused by this mode, and it's been deprecated
since Synapse 1.1.0. It's time for it to go.
---
 changelog.d/6918.docker |  1 +
 docker/README.md        | 23 ++++++++++++++---------
 docker/start.py         | 49 ++++++++++++++++++-------------------------------
 3 files changed, 33 insertions(+), 40 deletions(-)
 create mode 100644 changelog.d/6918.docker

(limited to 'changelog.d')

diff --git a/changelog.d/6918.docker b/changelog.d/6918.docker
new file mode 100644
index 0000000000..cc2db5e071
--- /dev/null
+++ b/changelog.d/6918.docker
@@ -0,0 +1 @@
+The deprecated "generate-config-on-the-fly" mode is no longer supported.
diff --git a/docker/README.md b/docker/README.md
index 9f112a01d0..8c337149ca 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -110,12 +110,12 @@ argument to `docker run`.
 
 ## Legacy dynamic configuration file support
 
-For backwards-compatibility only, the docker image supports creating a dynamic
-configuration file based on environment variables. This is now deprecated, but
-is enabled when the `SYNAPSE_SERVER_NAME` variable is set (and `generate` is
-not given).
+The docker image used to support creating a dynamic configuration file based
+on environment variables. This is no longer supported, and an error will be
+raised if you try to run synapse without a config file.
 
-To migrate from a dynamic configuration file to a static one, run the docker
+It is, however, possible to generate a static configuration file based on
+the environment variables that were previously used. To do this, run the docker
+container once with the environment variables set and with the `migrate_config`
+command line option. For example:
 
@@ -127,15 +127,20 @@ docker run -it --rm \
     matrixdotorg/synapse:latest migrate_config
 ```
 
-This will generate the same configuration file as the legacy mode used, but
-will store it in `/data/homeserver.yaml` instead of a temporary location. You
-can then use it as shown above at [Running synapse](#running-synapse).
+This will generate the same configuration file as the legacy mode used, and
+will store it in `/data/homeserver.yaml`. You can then use it as shown above at
+[Running synapse](#running-synapse).
+
+Note that the defaults used in this configuration file may be different to
+those when generating a new config file with `generate`: for example, TLS is
+enabled by default in this mode. You are encouraged to inspect the generated
+configuration file and edit it to ensure it meets your needs.
 
 ## Building the image
 
 If you need to build the image from a Synapse checkout, use the following `docker
  build` command from the repo's root:
- 
+
 ```
 docker build -t matrixdotorg/synapse -f docker/Dockerfile .
 ```
diff --git a/docker/start.py b/docker/start.py
index 97fd247f8f..2a25c9380e 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -188,11 +188,6 @@ def main(args, environ):
     else:
         ownership = "{}:{}".format(desired_uid, desired_gid)
 
-    log(
-        "Container running as UserID %s:%s, ENV (or defaults) requests %s:%s"
-        % (os.getuid(), os.getgid(), desired_uid, desired_gid)
-    )
-
     if ownership is None:
         log("Will not perform chmod/su-exec as UserID already matches request")
 
@@ -213,38 +208,30 @@ def main(args, environ):
     if mode is not None:
         error("Unknown execution mode '%s'" % (mode,))
 
-    if "SYNAPSE_SERVER_NAME" in environ:
-        # backwards-compatibility generate-a-config-on-the-fly mode
-        if "SYNAPSE_CONFIG_PATH" in environ:
+    config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
+    config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
+
+    if not os.path.exists(config_path):
+        if "SYNAPSE_SERVER_NAME" in environ:
             error(
-                "SYNAPSE_SERVER_NAME can only be combined with SYNAPSE_CONFIG_PATH "
-                "in `generate` or `migrate_config` mode. To start synapse using a "
-                "config file, unset the SYNAPSE_SERVER_NAME environment variable."
+                """\
+Config file '%s' does not exist.
+
+The synapse docker image no longer supports generating a config file on-the-fly
+based on environment variables. You can migrate to a static config file by
+running with 'migrate_config'. See the README for more details.
+"""
+                % (config_path,)
             )
 
-        config_path = "/compiled/homeserver.yaml"
-        log(
-            "Generating config file '%s' on-the-fly from environment variables.\n"
-            "Note that this mode is deprecated. You can migrate to a static config\n"
-            "file by running with 'migrate_config'. See the README for more details."
+        error(
+            "Config file '%s' does not exist. You should either create a new "
+            "config file by running with the `generate` argument (and then edit "
+            "the resulting file before restarting) or specify the path to an "
+            "existing config file with the SYNAPSE_CONFIG_PATH variable."
             % (config_path,)
         )
 
-        generate_config_from_template("/compiled", config_path, environ, ownership)
-    else:
-        config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
-        config_path = environ.get(
-            "SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
-        )
-        if not os.path.exists(config_path):
-            error(
-                "Config file '%s' does not exist. You should either create a new "
-                "config file by running with the `generate` argument (and then edit "
-                "the resulting file before restarting) or specify the path to an "
-                "existing config file with the SYNAPSE_CONFIG_PATH variable."
-                % (config_path,)
-            )
-
     log("Starting synapse with config file " + config_path)
 
     args = ["python", "-m", synapse_worker, "--config-path", config_path]
-- 
cgit 1.4.1
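
With the legacy mode gone, the entrypoint's config handling reduces to a
path lookup plus a hard failure. A simplified sketch of the effective
behaviour (condensed from docker/start.py above; error text abbreviated):

    import os
    import sys

    def resolve_config_path(environ):
        config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
        config_path = environ.get(
            "SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
        )
        if not os.path.exists(config_path):
            # No more on-the-fly generation: a missing file is now fatal.
            sys.exit("Config file '%s' does not exist." % (config_path,))
        return config_path

    print(resolve_config_path(dict(os.environ)))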


From fe3941f6e33a17fa7cdf209a4370f4e805341db4 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Tue, 18 Feb 2020 07:29:44 -0500
Subject: Stop sending events when creating or deleting aliases (#6904)

Stop sending events when creating or deleting associations (room aliases). Send an updated canonical alias event if one of the alt_aliases is deleted.
---
 changelog.d/6904.removal         |   1 +
 synapse/handlers/directory.py    |  75 ++++++++++---------
 synapse/handlers/room.py         |   6 +-
 tests/handlers/test_directory.py | 154 ++++++++++++++++++++++++++++++++++++++-
 4 files changed, 194 insertions(+), 42 deletions(-)
 create mode 100644 changelog.d/6904.removal

(limited to 'changelog.d')

diff --git a/changelog.d/6904.removal b/changelog.d/6904.removal
new file mode 100644
index 0000000000..a5cc0c3605
--- /dev/null
+++ b/changelog.d/6904.removal
@@ -0,0 +1 @@
+Stop sending alias events when adding or removing aliases. Check alt_aliases in the latest canonical alias event when deleting an alias.
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 8c5980cb0c..f718388884 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -81,13 +81,7 @@ class DirectoryHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def create_association(
-        self,
-        requester,
-        room_alias,
-        room_id,
-        servers=None,
-        send_event=True,
-        check_membership=True,
+        self, requester, room_alias, room_id, servers=None, check_membership=True,
     ):
         """Attempt to create a new alias
 
@@ -97,7 +91,6 @@ class DirectoryHandler(BaseHandler):
             room_id (str)
             servers (list[str]|None): List of servers that other servers
                 should try and join via
-            send_event (bool): Whether to send an updated m.room.aliases event
             check_membership (bool): Whether to check if the user is in the room
                 before the alias can be set (if the server's config requires it).
 
@@ -150,16 +143,9 @@ class DirectoryHandler(BaseHandler):
                 )
 
         yield self._create_association(room_alias, room_id, servers, creator=user_id)
-        if send_event:
-            try:
-                yield self.send_room_alias_update_event(requester, room_id)
-            except AuthError as e:
-                # sending the aliases event may fail due to the user not having
-                # permission in the room; this is permitted.
-                logger.info("Skipping updating aliases event due to auth error %s", e)
 
     @defer.inlineCallbacks
-    def delete_association(self, requester, room_alias, send_event=True):
+    def delete_association(self, requester, room_alias):
         """Remove an alias from the directory
 
         (this is only meant for human users; AS users should call
@@ -168,9 +154,6 @@ class DirectoryHandler(BaseHandler):
         Args:
             requester (Requester):
             room_alias (RoomAlias):
-            send_event (bool): Whether to send an updated m.room.aliases event.
-                Note that, if we delete the canonical alias, we will always attempt
-                to send an m.room.canonical_alias event
 
         Returns:
             Deferred[unicode]: room id that the alias used to point to
@@ -206,9 +189,6 @@ class DirectoryHandler(BaseHandler):
         room_id = yield self._delete_association(room_alias)
 
         try:
-            if send_event:
-                yield self.send_room_alias_update_event(requester, room_id)
-
             yield self._update_canonical_alias(
                 requester, requester.user.to_string(), room_id, room_alias
             )
@@ -319,25 +299,50 @@ class DirectoryHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def _update_canonical_alias(self, requester, user_id, room_id, room_alias):
+        """
+        Send an updated canonical alias event if the removed alias was set as
+        the canonical alias or listed in the alt_aliases field.
+        """
         alias_event = yield self.state.get_current_state(
             room_id, EventTypes.CanonicalAlias, ""
         )
 
-        alias_str = room_alias.to_string()
-        if not alias_event or alias_event.content.get("alias", "") != alias_str:
+        # There is no canonical alias, nothing to do.
+        if not alias_event:
             return
 
-        yield self.event_creation_handler.create_and_send_nonmember_event(
-            requester,
-            {
-                "type": EventTypes.CanonicalAlias,
-                "state_key": "",
-                "room_id": room_id,
-                "sender": user_id,
-                "content": {},
-            },
-            ratelimit=False,
-        )
+        # Obtain a mutable version of the event content.
+        content = dict(alias_event.content)
+        send_update = False
+
+        # Remove the alias property if it matches the removed alias.
+        alias_str = room_alias.to_string()
+        if alias_event.content.get("alias", "") == alias_str:
+            send_update = True
+            content.pop("alias", "")
+
+        # Filter alt_aliases for the removed alias.
+        alt_aliases = content.pop("alt_aliases", None)
+        # If the aliases are not a list (or not found) do not attempt to modify
+        # the list.
+        if isinstance(alt_aliases, list):
+            send_update = True
+            alt_aliases = [alias for alias in alt_aliases if alias != alias_str]
+            if alt_aliases:
+                content["alt_aliases"] = alt_aliases
+
+        if send_update:
+            yield self.event_creation_handler.create_and_send_nonmember_event(
+                requester,
+                {
+                    "type": EventTypes.CanonicalAlias,
+                    "state_key": "",
+                    "room_id": room_id,
+                    "sender": user_id,
+                    "content": content,
+                },
+                ratelimit=False,
+            )
 
     @defer.inlineCallbacks
     def get_association_from_room_alias(self, room_alias):
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 033083acac..49ec2f48bc 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -478,9 +478,7 @@ class RoomCreationHandler(BaseHandler):
         for alias_str in aliases:
             alias = RoomAlias.from_string(alias_str)
             try:
-                yield directory_handler.delete_association(
-                    requester, alias, send_event=False
-                )
+                yield directory_handler.delete_association(requester, alias)
                 removed_aliases.append(alias_str)
             except SynapseError as e:
                 logger.warning("Unable to remove alias %s from old room: %s", alias, e)
@@ -511,7 +509,6 @@ class RoomCreationHandler(BaseHandler):
                     RoomAlias.from_string(alias),
                     new_room_id,
                     servers=(self.hs.hostname,),
-                    send_event=False,
                     check_membership=False,
                 )
                 logger.info("Moved alias %s to new room", alias)
@@ -664,7 +661,6 @@ class RoomCreationHandler(BaseHandler):
                 room_id=room_id,
                 room_alias=room_alias,
                 servers=[self.hs.hostname],
-                send_event=False,
                 check_membership=False,
             )
 
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index ee88cf5a4b..27b916aed4 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -18,9 +18,11 @@ from mock import Mock
 
 from twisted.internet import defer
 
+import synapse.api.errors
+from synapse.api.constants import EventTypes
 from synapse.config.room_directory import RoomDirectoryConfig
-from synapse.rest.client.v1 import directory, room
-from synapse.types import RoomAlias
+from synapse.rest.client.v1 import directory, login, room
+from synapse.types import RoomAlias, create_requester
 
 from tests import unittest
 
@@ -85,6 +87,38 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
             ignore_backoff=True,
         )
 
+    def test_delete_alias_not_allowed(self):
+        room_id = "!8765qwer:test"
+        self.get_success(
+            self.store.create_room_alias_association(self.my_room, room_id, ["test"])
+        )
+
+        self.get_failure(
+            self.handler.delete_association(
+                create_requester("@user:test"), self.my_room
+            ),
+            synapse.api.errors.AuthError,
+        )
+
+    def test_delete_alias(self):
+        room_id = "!8765qwer:test"
+        user_id = "@user:test"
+        self.get_success(
+            self.store.create_room_alias_association(
+                self.my_room, room_id, ["test"], user_id
+            )
+        )
+
+        result = self.get_success(
+            self.handler.delete_association(create_requester(user_id), self.my_room)
+        )
+        self.assertEquals(room_id, result)
+
+        # The alias should not be found.
+        self.get_failure(
+            self.handler.get_association(self.my_room), synapse.api.errors.SynapseError
+        )
+
     def test_incoming_fed_query(self):
         self.get_success(
             self.store.create_room_alias_association(
@@ -99,6 +133,122 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response)
 
 
+class CanonicalAliasTestCase(unittest.HomeserverTestCase):
+    """Test modifications of the canonical alias when delete aliases.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        directory.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+        self.handler = hs.get_handlers().directory_handler
+        self.state_handler = hs.get_state_handler()
+
+        # Create user
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        # Create a test room
+        self.room_id = self.helper.create_room_as(
+            self.admin_user, tok=self.admin_user_tok
+        )
+
+        self.test_alias = "#test:test"
+        self.room_alias = RoomAlias.from_string(self.test_alias)
+
+        # Create a new alias to this room.
+        self.get_success(
+            self.store.create_room_alias_association(
+                self.room_alias, self.room_id, ["test"], self.admin_user
+            )
+        )
+
+    def test_remove_alias(self):
+        """Removing an alias that is the canonical alias should remove it there too."""
+        # Set this new alias as the canonical alias for this room
+        self.helper.send_state(
+            self.room_id,
+            "m.room.canonical_alias",
+            {"alias": self.test_alias, "alt_aliases": [self.test_alias]},
+            tok=self.admin_user_tok,
+        )
+
+        data = self.get_success(
+            self.state_handler.get_current_state(
+                self.room_id, EventTypes.CanonicalAlias, ""
+            )
+        )
+        self.assertEqual(data["content"]["alias"], self.test_alias)
+        self.assertEqual(data["content"]["alt_aliases"], [self.test_alias])
+
+        # Finally, delete the alias.
+        self.get_success(
+            self.handler.delete_association(
+                create_requester(self.admin_user), self.room_alias
+            )
+        )
+
+        data = self.get_success(
+            self.state_handler.get_current_state(
+                self.room_id, EventTypes.CanonicalAlias, ""
+            )
+        )
+        self.assertNotIn("alias", data["content"])
+        self.assertNotIn("alt_aliases", data["content"])
+
+    def test_remove_other_alias(self):
+        """Removing an alias listed as in alt_aliases should remove it there too."""
+        # Create a second alias.
+        other_test_alias = "#test2:test"
+        other_room_alias = RoomAlias.from_string(other_test_alias)
+        self.get_success(
+            self.store.create_room_alias_association(
+                other_room_alias, self.room_id, ["test"], self.admin_user
+            )
+        )
+
+        # Set the alias as the canonical alias for this room.
+        self.helper.send_state(
+            self.room_id,
+            "m.room.canonical_alias",
+            {
+                "alias": self.test_alias,
+                "alt_aliases": [self.test_alias, other_test_alias],
+            },
+            tok=self.admin_user_tok,
+        )
+
+        data = self.get_success(
+            self.state_handler.get_current_state(
+                self.room_id, EventTypes.CanonicalAlias, ""
+            )
+        )
+        self.assertEqual(data["content"]["alias"], self.test_alias)
+        self.assertEqual(
+            data["content"]["alt_aliases"], [self.test_alias, other_test_alias]
+        )
+
+        # Delete the second alias.
+        self.get_success(
+            self.handler.delete_association(
+                create_requester(self.admin_user), other_room_alias
+            )
+        )
+
+        data = self.get_success(
+            self.state_handler.get_current_state(
+                self.room_id, EventTypes.CanonicalAlias, ""
+            )
+        )
+        self.assertEqual(data["content"]["alias"], self.test_alias)
+        self.assertEqual(data["content"]["alt_aliases"], [self.test_alias])
+
+
 class TestCreateAliasACL(unittest.HomeserverTestCase):
     user_id = "@test:test"
 
-- 
cgit 1.4.1
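
The content transformation in _update_canonical_alias is self-contained
enough to sketch in isolation (function name hypothetical; the filtering
logic mirrors the diff above):

    def strip_alias(content, alias_str):
        """Return (new_content, send_update) after removing alias_str."""
        content = dict(content)  # work on a mutable copy
        send_update = False

        # Drop the canonical alias itself if it matches.
        if content.get("alias", "") == alias_str:
            send_update = True
            content.pop("alias", "")

        # Filter alt_aliases, leaving non-list values untouched.
        alt_aliases = content.pop("alt_aliases", None)
        if isinstance(alt_aliases, list):
            send_update = True
            alt_aliases = [a for a in alt_aliases if a != alias_str]
            if alt_aliases:
                content["alt_aliases"] = alt_aliases

        return content, send_update

    example = {"alias": "#a:test", "alt_aliases": ["#a:test", "#b:test"]}
    print(strip_alias(example, "#a:test"))
    # -> ({'alt_aliases': ['#b:test']}, True)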


From b1255077f584260a2296d4f3b7b78b54596a76b5 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 18 Feb 2020 14:27:57 +0000
Subject: Changelog

---
 changelog.d/6940.doc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6940.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6940.doc b/changelog.d/6940.doc
new file mode 100644
index 0000000000..8c75f48d3d
--- /dev/null
+++ b/changelog.d/6940.doc
@@ -0,0 +1 @@
+Clean up and update docs on setting up federation.
-- 
cgit 1.4.1


From 818def82486627513dc95e64c46c0bb452651e7e Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 18 Feb 2020 15:27:45 +0000
Subject: Fix worker docs to point to the `/publicised_groups` API correctly. (#6938)

---
 changelog.d/6938.doc | 1 +
 docs/workers.md      | 7 ++++---
 2 files changed, 5 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6938.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6938.doc b/changelog.d/6938.doc
new file mode 100644
index 0000000000..117f76f48a
--- /dev/null
+++ b/changelog.d/6938.doc
@@ -0,0 +1 @@
+Fix worker docs to point to the `/publicised_groups` API correctly.
diff --git a/docs/workers.md b/docs/workers.md
index 6f7ec58780..0d84a58958 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -261,7 +261,8 @@ following regular expressions:
     ^/_matrix/client/versions$
     ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
     ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
-    ^/_matrix/client/(api/v1|r0|unstable)/get_groups_publicised$
+    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
+    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/
 
 Additionally, the following REST endpoints can be handled for GET requests:
 
@@ -287,8 +288,8 @@ the following regular expressions:
 
     ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$
 
-When using this worker you must also set `update_user_directory: False` in the 
-shared configuration file to stop the main synapse running background 
+When using this worker you must also set `update_user_directory: False` in the
+shared configuration file to stop the main synapse running background
 jobs related to updating the user directory.
 
 ### `synapse.app.frontend_proxy`
-- 
cgit 1.4.1


From 8a380d0fe24edd746256d652836ec27003a05e7e Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 18 Feb 2020 15:39:09 +0000
Subject: Increase perf of `get_auth_chain_ids` used in state res v2. (#6937)

We do this by moving the recursive query to be fully in the DB.
---
 changelog.d/6937.misc                              |  1 +
 .../storage/data_stores/main/event_federation.py   | 23 ++++++++++++++++++++++
 2 files changed, 24 insertions(+)
 create mode 100644 changelog.d/6937.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6937.misc b/changelog.d/6937.misc
new file mode 100644
index 0000000000..6d00e58654
--- /dev/null
+++ b/changelog.d/6937.misc
@@ -0,0 +1 @@
+Increase perf of `get_auth_chain_ids` used in state res v2.
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index 60c67457b4..1746f40adf 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -26,6 +26,7 @@ from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.data_stores.main.signatures import SignatureWorkerStore
 from synapse.storage.database import Database
+from synapse.storage.engines import PostgresEngine
 from synapse.util.caches.descriptors import cached
 
 logger = logging.getLogger(__name__)
@@ -61,6 +62,28 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         )
 
     def _get_auth_chain_ids_txn(self, txn, event_ids, include_given):
+        if isinstance(self.database_engine, PostgresEngine):
+            # For efficiency we make the database do this if we can.
+            sql = """
+                WITH RECURSIVE auth_chain(event_id) AS (
+                    SELECT auth_id FROM event_auth WHERE event_id = ANY(?)
+                    UNION
+                    SELECT auth_id FROM event_auth
+                    INNER JOIN auth_chain USING (event_id)
+                )
+                SELECT event_id FROM auth_chain
+            """
+            txn.execute(sql, (list(event_ids),))
+
+            results = set(event_id for event_id, in txn)
+
+            if include_given:
+                results.update(event_ids)
+
+            return list(results)
+
+        # Database doesn't necessarily support recursive CTE, so we fall
+        # back to doing it manually.
         if include_given:
             results = set(event_ids)
         else:
-- 
cgit 1.4.1
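
On SQLite, which lacked guaranteed recursive-CTE support here, the fallback
below the hunk walks event_auth level by level. A sketch of that
breadth-first walk over an in-memory edge map (illustrative only; the real
code issues batched SQL queries per level):

    def auth_chain_ids(event_auth, event_ids, include_given=False):
        """event_auth maps event_id -> set of auth event ids."""
        results = set(event_ids) if include_given else set()
        frontier = set(event_ids)
        while frontier:
            new_ids = set()
            for event_id in frontier:
                new_ids |= event_auth.get(event_id, set()) - results
            results |= new_ids
            frontier = new_ids
        return list(results)

    edges = {"C": {"B"}, "B": {"A"}, "A": set()}
    print(sorted(auth_chain_ids(edges, ["C"])))  # ['A', 'B']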


From adfaea8c698a38ffe14ac682a946abc9f8152635 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 18 Feb 2020 16:23:25 +0000
Subject: Implement GET /_matrix/client/r0/rooms/{roomId}/aliases (#6939)

per matrix-org/matrix-doc#2432
---
 changelog.d/6939.feature           |  1 +
 synapse/handlers/directory.py      | 17 ++++++++-
 synapse/rest/client/v1/room.py     | 23 +++++++++++++
 tests/rest/client/v1/test_rooms.py | 70 +++++++++++++++++++++++++++++++++++++-
 tests/unittest.py                  | 28 ++++++++++-----
 5 files changed, 128 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/6939.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6939.feature b/changelog.d/6939.feature
new file mode 100644
index 0000000000..40fe7fc9a9
--- /dev/null
+++ b/changelog.d/6939.feature
@@ -0,0 +1 @@
+Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432).
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index f718388884..3f8c792149 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -16,6 +16,7 @@
 
 import logging
 import string
+from typing import List
 
 from twisted.internet import defer
 
@@ -28,7 +29,7 @@ from synapse.api.errors import (
     StoreError,
     SynapseError,
 )
-from synapse.types import RoomAlias, UserID, get_domain_from_id
+from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id
 
 from ._base import BaseHandler
 
@@ -452,3 +453,17 @@ class DirectoryHandler(BaseHandler):
         yield self.store.set_room_is_public_appservice(
             room_id, appservice_id, network_id, visibility == "public"
         )
+
+    async def get_aliases_for_room(
+        self, requester: Requester, room_id: str
+    ) -> List[str]:
+        """
+        Get a list of the aliases that currently point to this room on this server
+        """
+        # allow access to server admins and current members of the room
+        is_admin = await self.auth.is_server_admin(requester.user)
+        if not is_admin:
+            await self.auth.check_joined_room(room_id, requester.user.to_string())
+
+        aliases = await self.store.get_aliases_for_room(room_id)
+        return aliases
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 6f31584c51..143dc738c6 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -45,6 +45,10 @@ from synapse.storage.state import StateFilter
 from synapse.streams.config import PaginationConfig
 from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID
 
+MYPY = False
+if MYPY:
+    import synapse.server
+
 logger = logging.getLogger(__name__)
 
 
@@ -843,6 +847,24 @@ class RoomTypingRestServlet(RestServlet):
         return 200, {}
 
 
+class RoomAliasListServlet(RestServlet):
+    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/aliases", unstable=False)
+
+    def __init__(self, hs: "synapse.server.HomeServer"):
+        super().__init__()
+        self.auth = hs.get_auth()
+        self.directory_handler = hs.get_handlers().directory_handler
+
+    async def on_GET(self, request, room_id):
+        requester = await self.auth.get_user_by_req(request)
+
+        alias_list = await self.directory_handler.get_aliases_for_room(
+            requester, room_id
+        )
+
+        return 200, {"aliases": alias_list}
+
+
 class SearchRestServlet(RestServlet):
     PATTERNS = client_patterns("/search$", v1=True)
 
@@ -931,6 +953,7 @@ def register_servlets(hs, http_server):
     JoinedRoomsRestServlet(hs).register(http_server)
     RoomEventServlet(hs).register(http_server)
     RoomEventContextServlet(hs).register(http_server)
+    RoomAliasListServlet(hs).register(http_server)
 
 
 def register_deprecated_servlets(hs, http_server):
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index fb681a1db9..fb08a45d27 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -28,8 +28,9 @@ from twisted.internet import defer
 import synapse.rest.admin
 from synapse.api.constants import EventContentFields, EventTypes, Membership
 from synapse.handlers.pagination import PurgeStatus
-from synapse.rest.client.v1 import login, profile, room
+from synapse.rest.client.v1 import directory, login, profile, room
 from synapse.rest.client.v2_alpha import account
+from synapse.types import JsonDict, RoomAlias
 from synapse.util.stringutils import random_string
 
 from tests import unittest
@@ -1726,3 +1727,70 @@ class ContextTestCase(unittest.HomeserverTestCase):
         self.assertEqual(len(events_after), 2, events_after)
         self.assertDictEqual(events_after[0].get("content"), {}, events_after[0])
         self.assertEqual(events_after[1].get("content"), {}, events_after[1])
+
+
+class DirectoryTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        directory.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, homeserver):
+        self.room_owner = self.register_user("room_owner", "test")
+        self.room_owner_tok = self.login("room_owner", "test")
+
+        self.room_id = self.helper.create_room_as(
+            self.room_owner, tok=self.room_owner_tok
+        )
+
+    def test_no_aliases(self):
+        res = self._get_aliases(self.room_owner_tok)
+        self.assertEqual(res["aliases"], [])
+
+    def test_not_in_room(self):
+        self.register_user("user", "test")
+        user_tok = self.login("user", "test")
+        res = self._get_aliases(user_tok, expected_code=403)
+        self.assertEqual(res["errcode"], "M_FORBIDDEN")
+
+    def test_with_aliases(self):
+        alias1 = self._random_alias()
+        alias2 = self._random_alias()
+
+        self._set_alias_via_directory(alias1)
+        self._set_alias_via_directory(alias2)
+
+        res = self._get_aliases(self.room_owner_tok)
+        self.assertEqual(set(res["aliases"]), {alias1, alias2})
+
+    def _get_aliases(self, access_token: str, expected_code: int = 200) -> JsonDict:
+        """Calls the endpoint under test. returns the json response object."""
+        request, channel = self.make_request(
+            "GET",
+            "/_matrix/client/r0/rooms/%s/aliases" % (self.room_id,),
+            access_token=access_token,
+        )
+        self.render(request)
+        self.assertEqual(channel.code, expected_code, channel.result)
+        res = channel.json_body
+        self.assertIsInstance(res, dict)
+        if expected_code == 200:
+            self.assertIsInstance(res["aliases"], list)
+        return res
+
+    def _random_alias(self) -> str:
+        return RoomAlias(random_string(5), self.hs.hostname).to_string()
+
+    def _set_alias_via_directory(self, alias: str, expected_code: int = 200):
+        url = "/_matrix/client/r0/directory/room/" + alias
+        data = {"room_id": self.room_id}
+        request_data = json.dumps(data)
+
+        request, channel = self.make_request(
+            "PUT", url, request_data, access_token=self.room_owner_tok
+        )
+        self.render(request)
+        self.assertEqual(channel.code, expected_code, channel.result)
diff --git a/tests/unittest.py b/tests/unittest.py
index 98bf27d39c..8816a4d152 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -21,6 +21,7 @@ import hmac
 import inspect
 import logging
 import time
+from typing import Optional, Tuple, Type, TypeVar, Union
 
 from mock import Mock
 
@@ -42,7 +43,13 @@ from synapse.server import HomeServer
 from synapse.types import Requester, UserID, create_requester
 from synapse.util.ratelimitutils import FederationRateLimiter
 
-from tests.server import get_clock, make_request, render, setup_test_homeserver
+from tests.server import (
+    FakeChannel,
+    get_clock,
+    make_request,
+    render,
+    setup_test_homeserver,
+)
 from tests.test_utils.logging_setup import setup_logging
 from tests.utils import default_config, setupdb
 
@@ -71,6 +78,9 @@ def around(target):
     return _around
 
 
+T = TypeVar("T")
+
+
 class TestCase(unittest.TestCase):
     """A subclass of twisted.trial's TestCase which looks for 'loglevel'
     attributes on both itself and its individual test methods, to override the
@@ -334,14 +344,14 @@ class HomeserverTestCase(TestCase):
 
     def make_request(
         self,
-        method,
-        path,
-        content=b"",
-        access_token=None,
-        request=SynapseRequest,
-        shorthand=True,
-        federation_auth_origin=None,
-    ):
+        method: Union[bytes, str],
+        path: Union[bytes, str],
+        content: Union[bytes, dict] = b"",
+        access_token: Optional[str] = None,
+        request: Type[T] = SynapseRequest,
+        shorthand: bool = True,
+        federation_auth_origin: str = None,
+    ) -> Tuple[T, FakeChannel]:
         """
         Create a SynapseRequest at the path using the method and containing the
         given content.
-- 
cgit 1.4.1
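
Assuming a homeserver at a placeholder URL, the new endpoint can be
exercised directly; the token and room id below are hypothetical, and the
room id must be URL-encoded:

    import requests

    resp = requests.get(
        "https://matrix.example.com/_matrix/client/r0/rooms"
        "/%21abcdef%3Amatrix.example.com/aliases",
        headers={"Authorization": "Bearer ACCESS_TOKEN"},
    )
    resp.raise_for_status()
    print(resp.json())  # e.g. {"aliases": ["#my-room:matrix.example.com"]}

Per get_aliases_for_room above, the request succeeds for server admins and
current members of the room; anyone else receives a 403.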


From 771d70e89cde1645650bce0b3ec5d1ac4b8bd8f5 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 18 Feb 2020 17:31:02 +0000
Subject: Changelog

---
 changelog.d/6945.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6945.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6945.bugfix b/changelog.d/6945.bugfix
new file mode 100644
index 0000000000..78470a0ef6
--- /dev/null
+++ b/changelog.d/6945.bugfix
@@ -0,0 +1 @@
+Fix a bogus log line in the purge jobs related to the message retention policies support.
-- 
cgit 1.4.1


From 603618c002eaf0b763f376e27477792b38ea00ef Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Tue, 18 Feb 2020 23:20:16 +0000
Subject: changelog

---
 changelog.d/6949.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6949.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6949.feature b/changelog.d/6949.feature
new file mode 100644
index 0000000000..40fe7fc9a9
--- /dev/null
+++ b/changelog.d/6949.feature
@@ -0,0 +1 @@
+Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432).
-- 
cgit 1.4.1


From 5a5abd55e8b47a7c1620c298a72817ccf73f90b0 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 19 Feb 2020 09:39:27 +0000
Subject: Limit size of get_auth_chain_ids query (#6947)

---
 changelog.d/6947.misc                              |  1 +
 .../storage/data_stores/main/event_federation.py   | 41 ++++++++++++----------
 2 files changed, 24 insertions(+), 18 deletions(-)
 create mode 100644 changelog.d/6947.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6947.misc b/changelog.d/6947.misc
new file mode 100644
index 0000000000..6d00e58654
--- /dev/null
+++ b/changelog.d/6947.misc
@@ -0,0 +1 @@
+Increase perf of `get_auth_chain_ids` used in state res v2.
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index 1746f40adf..dcc375b840 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -62,32 +62,37 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         )
 
     def _get_auth_chain_ids_txn(self, txn, event_ids, include_given):
+        if include_given:
+            results = set(event_ids)
+        else:
+            results = set()
+
         if isinstance(self.database_engine, PostgresEngine):
             # For efficiency we make the database do this if we can.
-            sql = """
-                WITH RECURSIVE auth_chain(event_id) AS (
-                    SELECT auth_id FROM event_auth WHERE event_id = ANY(?)
-                    UNION
-                    SELECT auth_id FROM event_auth
-                    INNER JOIN auth_chain USING (event_id)
-                )
-                SELECT event_id FROM auth_chain
-            """
-            txn.execute(sql, (list(event_ids),))
-
-            results = set(event_id for event_id, in txn)
 
-            if include_given:
-                results.update(event_ids)
+            # We need to be a little careful with querying large amounts at
+            # once, for some reason postgres really doesn't like it. We do this
+            # by only asking for auth chain of 500 events at a time.
+            event_ids = list(event_ids)
+            chunks = [event_ids[x : x + 500] for x in range(0, len(event_ids), 500)]
+            for chunk in chunks:
+                sql = """
+                    WITH RECURSIVE auth_chain(event_id) AS (
+                        SELECT auth_id FROM event_auth WHERE event_id = ANY(?)
+                        UNION
+                        SELECT auth_id FROM event_auth
+                        INNER JOIN auth_chain USING (event_id)
+                    )
+                    SELECT event_id FROM auth_chain
+                """
+                txn.execute(sql, (chunk,))
+
+                results.update(event_id for event_id, in txn)
 
             return list(results)
 
         # Database doesn't necessarily support recursive CTE, so we fall
         # back to do doing it manually.
-        if include_given:
-            results = set(event_ids)
-        else:
-            results = set()
 
         base_sql = "SELECT auth_id FROM event_auth WHERE "
 
-- 
cgit 1.4.1
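
The fix above avoids handing Postgres an unboundedly large `ANY(?)` array by
slicing the input into batches of 500 and running the recursive CTE once per
batch, merging the results. The slicing idiom on its own, as a small
database-free sketch (the batch size of 500 mirrors the patch; any cap that
keeps the parameter list small would do):

```python
from typing import Iterable, List, TypeVar

T = TypeVar("T")


def chunks(items: Iterable[T], size: int = 500) -> List[List[T]]:
    """Split `items` into consecutive lists of at most `size` elements."""
    as_list = list(items)
    return [as_list[i : i + size] for i in range(0, len(as_list), size)]


# Each chunk would be bound to the query's ANY(?) placeholder in turn,
# with results accumulated across iterations as the patch does.
for chunk in chunks(range(1200)):
    print(len(chunk))  # 500, 500, 200
```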


From fa64f836ec661a234cfb240afcb0a65bcae4cbf5 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 19 Feb 2020 09:54:13 +0000
Subject: Update changelog.d/6945.bugfix

Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/6945.bugfix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/6945.bugfix b/changelog.d/6945.bugfix
index 78470a0ef6..8561be16a4 100644
--- a/changelog.d/6945.bugfix
+++ b/changelog.d/6945.bugfix
@@ -1 +1 @@
-Fix bogus log in the purge jobs related to the message retention policies support.
+Fix errors from logging in the purge jobs related to the message retention policies support.
-- 
cgit 1.4.1


From 0d0bc35792aac0490e35cd3514b76d7aada7c8e0 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 19 Feb 2020 10:15:49 +0000
Subject: Increase DB/CPU perf of `_is_server_still_joined` check. (#6936)

* Increase DB/CPU perf of `_is_server_still_joined` check.

For rooms with a large amount of state, a single user leaving could cause
us to load a lot of membership events and then pull out membership state
in a large number of batches.

* Newsfile

* Update synapse/storage/persist_events.py

Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>

* Fix adding if too soon

* Update docstring

* Review comments

* Woops typo

Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/6936.misc                          |  1 +
 synapse/storage/data_stores/main/roommember.py | 31 +++++++++++++++++++
 synapse/storage/persist_events.py              | 43 +++++++++++++++++---------
 3 files changed, 60 insertions(+), 15 deletions(-)
 create mode 100644 changelog.d/6936.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6936.misc b/changelog.d/6936.misc
new file mode 100644
index 0000000000..9400725017
--- /dev/null
+++ b/changelog.d/6936.misc
@@ -0,0 +1 @@
+Increase DB/CPU perf of `_is_server_still_joined` check.
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index 042289f0e0..d5ced05701 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -868,6 +868,37 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             desc="get_membership_from_event_ids",
         )
 
+    async def is_local_host_in_room_ignoring_users(
+        self, room_id: str, ignore_users: Collection[str]
+    ) -> bool:
+        """Check if there are any local users, excluding those in the given
+        list, in the room.
+        """
+
+        clause, args = make_in_list_sql_clause(
+            self.database_engine, "user_id", ignore_users
+        )
+
+        sql = """
+            SELECT 1 FROM local_current_membership
+            WHERE
+                room_id = ? AND membership = ?
+                AND NOT (%s)
+                LIMIT 1
+        """ % (
+            clause,
+        )
+
+        def _is_local_host_in_room_ignoring_users_txn(txn):
+            txn.execute(sql, (room_id, Membership.JOIN, *args))
+
+            return bool(txn.fetchone())
+
+        return await self.db.runInteraction(
+            "is_local_host_in_room_ignoring_users",
+            _is_local_host_in_room_ignoring_users_txn,
+        )
+
 
 class RoomMemberBackgroundUpdateStore(SQLBaseStore):
     def __init__(self, database: Database, db_conn, hs):
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index a5370ed527..b950550f23 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -727,6 +727,7 @@ class EventsPersistenceStorage(object):
 
         # Check if any of the given events are a local join that appear in the
         # current state
+        events_to_check = []  # Event IDs that aren't an event we're persisting
         for (typ, state_key), event_id in delta.to_insert.items():
             if typ != EventTypes.Member or not self.is_mine_id(state_key):
                 continue
@@ -736,8 +737,33 @@ class EventsPersistenceStorage(object):
                     if event.membership == Membership.JOIN:
                         return True
 
-        # There's been a change of membership but we don't have a local join
-        # event in the new events, so we need to check the full state.
+            # The event is not in `ev_ctx_rm`, so we need to pull it out of
+            # the DB.
+            events_to_check.append(event_id)
+
+        # Check if any of the changes that we don't have events for are joins.
+        if events_to_check:
+            rows = await self.main_store.get_membership_from_event_ids(events_to_check)
+            is_still_joined = any(row["membership"] == Membership.JOIN for row in rows)
+            if is_still_joined:
+                return True
+
+        # None of the new state events are local joins, so we check the database
+        # to see if there are any other local users in the room. We ignore users
+        # whose state has changed, as we've already checked their new state above.
+        users_to_ignore = [
+            state_key
+            for _, state_key in itertools.chain(delta.to_insert, delta.to_delete)
+            if self.is_mine_id(state_key)
+        ]
+
+        if await self.main_store.is_local_host_in_room_ignoring_users(
+            room_id, users_to_ignore
+        ):
+            return True
+
+        # The server will leave the room, so we go and find out which remote
+        # users will still be joined when we leave.
         if current_state is None:
             current_state = await self.main_store.get_current_state_ids(room_id)
             current_state = dict(current_state)
@@ -746,19 +772,6 @@ class EventsPersistenceStorage(object):
 
             current_state.update(delta.to_insert)
 
-        event_ids = [
-            event_id
-            for (typ, state_key,), event_id in current_state.items()
-            if typ == EventTypes.Member and self.is_mine_id(state_key)
-        ]
-
-        rows = await self.main_store.get_membership_from_event_ids(event_ids)
-        is_still_joined = any(row["membership"] == Membership.JOIN for row in rows)
-        if is_still_joined:
-            return True
-
-        # The server will leave the room, so we go and find out which remote
-        # users will still be joined when we leave.
         remote_event_ids = [
             event_id
             for (typ, state_key,), event_id in current_state.items()
-- 
cgit 1.4.1
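
The patch replaces a scan over the room's full membership state with a cascade
of progressively cheaper checks: first the events being persisted, then a
targeted lookup of just the changed memberships, and finally a single
`LIMIT 1` query for any other local member. A simplified, hypothetical
condensation of that control flow (the `store` method names match the patch;
the event-type and membership constants are written as literals for brevity):

```python
async def is_server_still_joined(room_id, delta, store, is_mine_id) -> bool:
    # Collect the IDs of local membership changes we are about to persist.
    events_to_check = [
        event_id
        for (typ, state_key), event_id in delta.to_insert.items()
        if typ == "m.room.member" and is_mine_id(state_key)
    ]

    # Cheap check: is any changed local membership a join?
    rows = await store.get_membership_from_event_ids(events_to_check)
    if any(row["membership"] == "join" for row in rows):
        return True

    # Otherwise ask the DB whether any *other* local user is still joined,
    # ignoring users whose membership just changed (already handled above).
    users_to_ignore = [
        state_key
        for _, state_key in list(delta.to_insert) + list(delta.to_delete)
        if is_mine_id(state_key)
    ]
    return await store.is_local_host_in_room_ignoring_users(
        room_id, users_to_ignore
    )
```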


From abf1e5c52669bd41ad803d4645809b6efdfcd61d Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 19 Feb 2020 10:38:20 +0000
Subject: Tiny optimisation for _get_handler_for_request (#6950)

we have hundreds of path_regexes (see #5118), so let's not convert the same
bytes to str for each of them.
---
 changelog.d/6950.misc  | 1 +
 synapse/http/server.py | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6950.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6950.misc b/changelog.d/6950.misc
new file mode 100644
index 0000000000..1c88936b8b
--- /dev/null
+++ b/changelog.d/6950.misc
@@ -0,0 +1 @@
+Tiny optimisation for incoming HTTP request dispatch.
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 04bc2385a2..042a605198 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -353,10 +353,12 @@ class JsonResource(HttpServer, resource.Resource):
         if request.method == b"OPTIONS":
             return _options_handler, "options_request_handler", {}
 
+        request_path = request.path.decode("ascii")
+
         # Loop through all the registered callbacks to check if the method
         # and path regex match
         for path_entry in self.path_regexs.get(request.method, []):
-            m = path_entry.pattern.match(request.path.decode("ascii"))
+            m = path_entry.pattern.match(request_path)
             if m:
                 # We found a match!
                 return path_entry.callback, path_entry.servlet_classname, m.groupdict()
-- 
cgit 1.4.1
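
The change is a loop-invariant hoist: `request.path` is decoded from bytes
once, instead of once per registered pattern on every request. The same idea
in isolation, with a couple of made-up routes:

```python
import re

path_regexs = [
    re.compile(r"^/_matrix/client/r0/sync$"),
    re.compile(r"^/_matrix/client/r0/rooms/(?P<room_id>[^/]*)/aliases$"),
]

raw_path = b"/_matrix/client/r0/rooms/!abc:example.com/aliases"

# Decode once, outside the loop, rather than on every iteration.
request_path = raw_path.decode("ascii")

for pattern in path_regexs:
    m = pattern.match(request_path)
    if m:
        print("matched:", m.groupdict())  # {'room_id': '!abc:example.com'}
        break
```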


From 880aaac1d82695b1a89f22f1f86c7f295ca205e0 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 19 Feb 2020 10:40:27 +0000
Subject: Move MSC2432 stuff onto unstable prefix (#6948)

It's not in the spec yet, so it needs to live on an unstable prefix. Also add a feature flag for it, and a test for admin users.
---
 changelog.d/6948.feature           |  1 +
 synapse/rest/client/v1/room.py     |  8 +++++++-
 synapse/rest/client/versions.py    |  2 ++
 tests/rest/client/v1/test_rooms.py | 16 +++++++++++++---
 4 files changed, 23 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6948.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6948.feature b/changelog.d/6948.feature
new file mode 100644
index 0000000000..40fe7fc9a9
--- /dev/null
+++ b/changelog.d/6948.feature
@@ -0,0 +1 @@
+Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432).
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 143dc738c6..64f51406fb 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -16,6 +16,7 @@
 
 """ This module contains REST servlets to do with rooms: /rooms/ """
 import logging
+import re
 from typing import List, Optional
 
 from six.moves.urllib import parse as urlparse
@@ -848,7 +849,12 @@ class RoomTypingRestServlet(RestServlet):
 
 
 class RoomAliasListServlet(RestServlet):
-    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/aliases", unstable=False)
+    PATTERNS = [
+        re.compile(
+            r"^/_matrix/client/unstable/org\.matrix\.msc2432"
+            r"/rooms/(?P[^/]*)/aliases"
+        ),
+    ]
 
     def __init__(self, hs: "synapse.server.HomeServer"):
         super().__init__()
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 3eeb3607f4..d90a6a890b 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -72,6 +72,8 @@ class VersionsRestServlet(RestServlet):
                     "org.matrix.label_based_filtering": True,
                     # Implements support for cross signing as described in MSC1756
                     "org.matrix.e2e_cross_signing": True,
+                    # Implements additional endpoints as described in MSC2432
+                    "org.matrix.msc2432": True,
                 },
             },
         )
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index fb08a45d27..f82655677c 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -1729,8 +1729,7 @@ class ContextTestCase(unittest.HomeserverTestCase):
         self.assertEqual(events_after[1].get("content"), {}, events_after[1])
 
 
-class DirectoryTestCase(unittest.HomeserverTestCase):
-
+class RoomAliasListTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
         directory.register_servlets,
@@ -1756,6 +1755,16 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         res = self._get_aliases(user_tok, expected_code=403)
         self.assertEqual(res["errcode"], "M_FORBIDDEN")
 
+    def test_admin_user(self):
+        alias1 = self._random_alias()
+        self._set_alias_via_directory(alias1)
+
+        self.register_user("user", "test", admin=True)
+        user_tok = self.login("user", "test")
+
+        res = self._get_aliases(user_tok)
+        self.assertEqual(res["aliases"], [alias1])
+
     def test_with_aliases(self):
         alias1 = self._random_alias()
         alias2 = self._random_alias()
@@ -1770,7 +1779,8 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         """Calls the endpoint under test. returns the json response object."""
         request, channel = self.make_request(
             "GET",
-            "/_matrix/client/r0/rooms/%s/aliases" % (self.room_id,),
+            "/_matrix/client/unstable/org.matrix.msc2432/rooms/%s/aliases"
+            % (self.room_id,),
             access_token=access_token,
         )
         self.render(request)
-- 
cgit 1.4.1
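
Until an MSC is merged into the spec, the convention shown above is to serve
the endpoint only under an MSC-scoped unstable prefix and to advertise a
matching `unstable_features` flag, so clients can feature-detect support. A
minimal sketch of the two pieces (the `versions_response` dict is
illustrative, not the full `/versions` payload):

```python
import re

# Route only under the unstable, MSC-scoped prefix until MSC2432 lands.
ALIASES_PATTERN = re.compile(
    r"^/_matrix/client/unstable/org\.matrix\.msc2432"
    r"/rooms/(?P<room_id>[^/]*)/aliases$"
)

# Advertised via GET /_matrix/client/versions so clients can detect support.
versions_response = {
    "versions": ["r0.6.0"],
    "unstable_features": {"org.matrix.msc2432": True},
}

m = ALIASES_PATTERN.match(
    "/_matrix/client/unstable/org.matrix.msc2432/rooms/!r:example.com/aliases"
)
assert m is not None and m.group("room_id") == "!r:example.com"
```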


From 099c96b89b54b58ecb9b6b6ed781f66f97dea112 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 19 Feb 2020 11:37:35 +0000
Subject: Revert `get_auth_chain_ids` changes (#6951)

---
 changelog.d/6951.misc                              |  1 +
 .../storage/data_stores/main/event_federation.py   | 28 ----------------------
 2 files changed, 1 insertion(+), 28 deletions(-)
 create mode 100644 changelog.d/6951.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6951.misc b/changelog.d/6951.misc
new file mode 100644
index 0000000000..378f52f0a7
--- /dev/null
+++ b/changelog.d/6951.misc
@@ -0,0 +1 @@
+Revert #6937.
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index dcc375b840..60c67457b4 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -26,7 +26,6 @@ from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.data_stores.main.signatures import SignatureWorkerStore
 from synapse.storage.database import Database
-from synapse.storage.engines import PostgresEngine
 from synapse.util.caches.descriptors import cached
 
 logger = logging.getLogger(__name__)
@@ -67,33 +66,6 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         else:
             results = set()
 
-        if isinstance(self.database_engine, PostgresEngine):
-            # For efficiency we make the database do this if we can.
-
-            # We need to be a little careful with querying large amounts at
-            # once, for some reason postgres really doesn't like it. We do this
-            # by only asking for auth chain of 500 events at a time.
-            event_ids = list(event_ids)
-            chunks = [event_ids[x : x + 500] for x in range(0, len(event_ids), 500)]
-            for chunk in chunks:
-                sql = """
-                    WITH RECURSIVE auth_chain(event_id) AS (
-                        SELECT auth_id FROM event_auth WHERE event_id = ANY(?)
-                        UNION
-                        SELECT auth_id FROM event_auth
-                        INNER JOIN auth_chain USING (event_id)
-                    )
-                    SELECT event_id FROM auth_chain
-                """
-                txn.execute(sql, (chunk,))
-
-                results.update(event_id for event_id, in txn)
-
-            return list(results)
-
-        # Database doesn't necessarily support recursive CTE, so we fall
-        # back to do doing it manually.
-
         base_sql = "SELECT auth_id FROM event_auth WHERE "
 
         front = set(event_ids)
-- 
cgit 1.4.1


From 197b08de35a7cca7e45deec70312c36aa70a1662 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Wed, 19 Feb 2020 13:48:32 +0000
Subject: 1.11.0rc1

---
 CHANGES.md               | 73 ++++++++++++++++++++++++++++++++++++++++++++++++
 changelog.d/6769.feature |  1 -
 changelog.d/6781.bugfix  |  1 -
 changelog.d/6821.misc    |  1 -
 changelog.d/6823.misc    |  1 -
 changelog.d/6825.bugfix  |  1 -
 changelog.d/6827.misc    |  1 -
 changelog.d/6833.misc    |  1 -
 changelog.d/6834.misc    |  1 -
 changelog.d/6836.misc    |  1 -
 changelog.d/6837.misc    |  1 -
 changelog.d/6840.misc    |  1 -
 changelog.d/6844.bugfix  |  1 -
 changelog.d/6846.doc     |  1 -
 changelog.d/6847.misc    |  1 -
 changelog.d/6849.bugfix  |  1 -
 changelog.d/6854.misc    |  1 -
 changelog.d/6855.misc    |  1 -
 changelog.d/6856.misc    |  1 -
 changelog.d/6857.misc    |  1 -
 changelog.d/6858.misc    |  1 -
 changelog.d/6862.misc    |  1 -
 changelog.d/6864.misc    |  1 -
 changelog.d/6866.feature |  1 -
 changelog.d/6869.misc    |  1 -
 changelog.d/6871.misc    |  1 -
 changelog.d/6872.misc    |  1 -
 changelog.d/6873.feature |  1 -
 changelog.d/6877.removal |  1 -
 changelog.d/6882.misc    |  1 -
 changelog.d/6883.misc    |  1 -
 changelog.d/6887.misc    |  1 -
 changelog.d/6888.feature |  1 -
 changelog.d/6891.doc     |  1 -
 changelog.d/6901.misc    |  1 -
 changelog.d/6904.removal |  1 -
 changelog.d/6905.doc     |  1 -
 changelog.d/6906.doc     |  1 -
 changelog.d/6907.doc     |  1 -
 changelog.d/6909.doc     |  1 -
 changelog.d/6915.misc    |  1 -
 changelog.d/6918.docker  |  1 -
 changelog.d/6919.misc    |  1 -
 changelog.d/6920.misc    |  1 -
 changelog.d/6921.docker  |  1 -
 changelog.d/6936.misc    |  1 -
 changelog.d/6937.misc    |  1 -
 changelog.d/6938.doc     |  1 -
 changelog.d/6939.feature |  1 -
 changelog.d/6940.doc     |  1 -
 changelog.d/6945.bugfix  |  1 -
 changelog.d/6947.misc    |  1 -
 changelog.d/6948.feature |  1 -
 changelog.d/6949.feature |  1 -
 changelog.d/6950.misc    |  1 -
 changelog.d/6951.misc    |  1 -
 synapse/__init__.py      |  2 +-
 57 files changed, 74 insertions(+), 56 deletions(-)
 delete mode 100644 changelog.d/6769.feature
 delete mode 100644 changelog.d/6781.bugfix
 delete mode 100644 changelog.d/6821.misc
 delete mode 100644 changelog.d/6823.misc
 delete mode 100644 changelog.d/6825.bugfix
 delete mode 100644 changelog.d/6827.misc
 delete mode 100644 changelog.d/6833.misc
 delete mode 100644 changelog.d/6834.misc
 delete mode 100644 changelog.d/6836.misc
 delete mode 100644 changelog.d/6837.misc
 delete mode 100644 changelog.d/6840.misc
 delete mode 100644 changelog.d/6844.bugfix
 delete mode 100644 changelog.d/6846.doc
 delete mode 100644 changelog.d/6847.misc
 delete mode 100644 changelog.d/6849.bugfix
 delete mode 100644 changelog.d/6854.misc
 delete mode 100644 changelog.d/6855.misc
 delete mode 100644 changelog.d/6856.misc
 delete mode 100644 changelog.d/6857.misc
 delete mode 100644 changelog.d/6858.misc
 delete mode 100644 changelog.d/6862.misc
 delete mode 100644 changelog.d/6864.misc
 delete mode 100644 changelog.d/6866.feature
 delete mode 100644 changelog.d/6869.misc
 delete mode 100644 changelog.d/6871.misc
 delete mode 100644 changelog.d/6872.misc
 delete mode 100644 changelog.d/6873.feature
 delete mode 100644 changelog.d/6877.removal
 delete mode 100644 changelog.d/6882.misc
 delete mode 100644 changelog.d/6883.misc
 delete mode 100644 changelog.d/6887.misc
 delete mode 100644 changelog.d/6888.feature
 delete mode 100644 changelog.d/6891.doc
 delete mode 100644 changelog.d/6901.misc
 delete mode 100644 changelog.d/6904.removal
 delete mode 100644 changelog.d/6905.doc
 delete mode 100644 changelog.d/6906.doc
 delete mode 100644 changelog.d/6907.doc
 delete mode 100644 changelog.d/6909.doc
 delete mode 100644 changelog.d/6915.misc
 delete mode 100644 changelog.d/6918.docker
 delete mode 100644 changelog.d/6919.misc
 delete mode 100644 changelog.d/6920.misc
 delete mode 100644 changelog.d/6921.docker
 delete mode 100644 changelog.d/6936.misc
 delete mode 100644 changelog.d/6937.misc
 delete mode 100644 changelog.d/6938.doc
 delete mode 100644 changelog.d/6939.feature
 delete mode 100644 changelog.d/6940.doc
 delete mode 100644 changelog.d/6945.bugfix
 delete mode 100644 changelog.d/6947.misc
 delete mode 100644 changelog.d/6948.feature
 delete mode 100644 changelog.d/6949.feature
 delete mode 100644 changelog.d/6950.misc
 delete mode 100644 changelog.d/6951.misc

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index 37b650a848..4032db792e 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,76 @@
+Synapse 1.11.0rc1 (2020-02-19)
+==============================
+
+Features
+--------
+
+- Admin API to add or modify threepids of user accounts. ([\#6769](https://github.com/matrix-org/synapse/issues/6769))
+- Limit the number of events that can be requested by the backfill federation API to 100. ([\#6864](https://github.com/matrix-org/synapse/issues/6864))
+- Add ability to run some group APIs on workers. ([\#6866](https://github.com/matrix-org/synapse/issues/6866))
+- Reject device display names over 100 characters in length. ([\#6882](https://github.com/matrix-org/synapse/issues/6882))
+- Add ability to route federation user device queries to workers. ([\#6873](https://github.com/matrix-org/synapse/issues/6873))
+- The result of a user directory search can now be filtered via the spam checker. ([\#6888](https://github.com/matrix-org/synapse/issues/6888))
+- Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#6939](https://github.com/matrix-org/synapse/issues/6939), [\#6948](https://github.com/matrix-org/synapse/issues/6948), [\#6949](https://github.com/matrix-org/synapse/issues/6949))
+- Stop sending `m.room.aliases` events when adding / removing aliases. Check `alt_aliases` in the latest `m.room.canonical_alias` event when deleting an alias. ([\#6904](https://github.com/matrix-org/synapse/issues/6904))
+
+
+Bugfixes
+--------
+
+- Fixed third party event rules function `on_create_room`'s return value being ignored. ([\#6781](https://github.com/matrix-org/synapse/issues/6781))
+- Allow URL-encoded User IDs on `/_synapse/admin/v2/users/[/admin]` endpoints. Thanks to @NHAS for reporting. ([\#6825](https://github.com/matrix-org/synapse/issues/6825))
+- Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank. ([\#6849](https://github.com/matrix-org/synapse/issues/6849))
+- Fix errors from logging in the purge jobs related to the message retention policies support. ([\#6945](https://github.com/matrix-org/synapse/issues/6945))
+- Return a 404 instead of 200 for querying information of a non-existent user through the admin API. ([\#6901](https://github.com/matrix-org/synapse/issues/6901))
+
+
+Updates to the Docker image
+---------------------------
+
+- The deprecated "generate-config-on-the-fly" mode is no longer supported. ([\#6918](https://github.com/matrix-org/synapse/issues/6918))
+
+
+Improved Documentation
+----------------------
+
+- Add details of PR merge strategy to contributing docs. ([\#6846](https://github.com/matrix-org/synapse/issues/6846))
+- Spell out that the last event sent to a room won't be deleted by a purge. ([\#6891](https://github.com/matrix-org/synapse/issues/6891))
+- Update Synapse's documentation to warn about the deprecation of ACME v1. ([\#6905](https://github.com/matrix-org/synapse/issues/6905), [\#6907](https://github.com/matrix-org/synapse/issues/6907), [\#6909](https://github.com/matrix-org/synapse/issues/6909))
+- Add documentation for the spam checker. ([\#6906](https://github.com/matrix-org/synapse/issues/6906))
+- Fix worker docs to point `/publicised_groups` API correctly. ([\#6938](https://github.com/matrix-org/synapse/issues/6938))
+- Clean up and update docs on setting up federation. ([\#6940](https://github.com/matrix-org/synapse/issues/6940))
+- Add a warning about indentation to generated configuration files. ([\#6920](https://github.com/matrix-org/synapse/issues/6920))
+- Databases created using the compose file in contrib/docker will now always have correct encoding and locale settings. Contributed by Fridtjof Mund. ([\#6921](https://github.com/matrix-org/synapse/issues/6921))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove `m.lazy_load_members` from `unstable_features` since lazy loading is in the stable Client-Server API version r0.5.0. ([\#6877](https://github.com/matrix-org/synapse/issues/6877))
+
+
+Internal Changes
+----------------
+
+- Add type hints to `SyncHandler`. ([\#6821](https://github.com/matrix-org/synapse/issues/6821))
+- Refactoring work in preparation for changing the event redaction algorithm. ([\#6823](https://github.com/matrix-org/synapse/issues/6823), [\#6827](https://github.com/matrix-org/synapse/issues/6827), [\#6854](https://github.com/matrix-org/synapse/issues/6854), [\#6856](https://github.com/matrix-org/synapse/issues/6856), [\#6857](https://github.com/matrix-org/synapse/issues/6857), [\#6858](https://github.com/matrix-org/synapse/issues/6858))
+- Change the default power levels of invites, tombstones and server ACLs for new rooms. ([\#6834](https://github.com/matrix-org/synapse/issues/6834))
+- Fix stacktraces when using `ObservableDeferred` and async/await. ([\#6836](https://github.com/matrix-org/synapse/issues/6836))
+- Port much of `synapse.handlers.federation` to async/await. ([\#6837](https://github.com/matrix-org/synapse/issues/6837), [\#6840](https://github.com/matrix-org/synapse/issues/6840))
+- Populate `rooms.room_version` database column at startup, rather than in a background update. ([\#6847](https://github.com/matrix-org/synapse/issues/6847))
+- Update pip install directions in readme to avoid error when using zsh. ([\#6855](https://github.com/matrix-org/synapse/issues/6855))
+- Reduce amount we log at `INFO` level. ([\#6833](https://github.com/matrix-org/synapse/issues/6833), [\#6862](https://github.com/matrix-org/synapse/issues/6862))
+- Remove unused `get_room_stats_state` method. ([\#6869](https://github.com/matrix-org/synapse/issues/6869))
+- Add typing to `synapse.federation.sender` and port to async/await. ([\#6871](https://github.com/matrix-org/synapse/issues/6871))
+- Refactor _EventInternalMetadata object to improve type safety. ([\#6872](https://github.com/matrix-org/synapse/issues/6872))
+- Add an additional entry to the SyTest blacklist for worker mode. ([\#6883](https://github.com/matrix-org/synapse/issues/6883))
+- Fix the use of sed in the linting scripts when using BSD sed. ([\#6887](https://github.com/matrix-org/synapse/issues/6887))
+- Add type hints to the spam checker module. ([\#6915](https://github.com/matrix-org/synapse/issues/6915))
+- Convert the directory handler tests to use HomeserverTestCase. ([\#6919](https://github.com/matrix-org/synapse/issues/6919))
+- Increase DB/CPU perf of `_is_server_still_joined` check. ([\#6936](https://github.com/matrix-org/synapse/issues/6936))
+- Tiny optimisation for incoming HTTP request dispatch. ([\#6950](https://github.com/matrix-org/synapse/issues/6950))
+
+
 Synapse 1.10.1 (2020-02-17)
 ===========================
 
diff --git a/changelog.d/6769.feature b/changelog.d/6769.feature
deleted file mode 100644
index 8a60e12907..0000000000
--- a/changelog.d/6769.feature
+++ /dev/null
@@ -1 +0,0 @@
-Admin API to add or modify threepids of user accounts.
\ No newline at end of file
diff --git a/changelog.d/6781.bugfix b/changelog.d/6781.bugfix
deleted file mode 100644
index 47cd671bff..0000000000
--- a/changelog.d/6781.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fixed third party event rules function `on_create_room`'s return value being ignored.
diff --git a/changelog.d/6821.misc b/changelog.d/6821.misc
deleted file mode 100644
index 1d5265d5e2..0000000000
--- a/changelog.d/6821.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type hints to `SyncHandler`.
diff --git a/changelog.d/6823.misc b/changelog.d/6823.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6823.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6825.bugfix b/changelog.d/6825.bugfix
deleted file mode 100644
index d3cacd6d9a..0000000000
--- a/changelog.d/6825.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Allow URL-encoded User IDs on `/_synapse/admin/v2/users/[/admin]` endpoints. Thanks to @NHAS for reporting.
\ No newline at end of file
diff --git a/changelog.d/6827.misc b/changelog.d/6827.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6827.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6833.misc b/changelog.d/6833.misc
deleted file mode 100644
index 8a0605f90b..0000000000
--- a/changelog.d/6833.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reducing log level to DEBUG for synapse.storage.TIME.
diff --git a/changelog.d/6834.misc b/changelog.d/6834.misc
deleted file mode 100644
index 79acebe516..0000000000
--- a/changelog.d/6834.misc
+++ /dev/null
@@ -1 +0,0 @@
-Change the default power levels of invites, tombstones and server ACLs for new rooms.
\ No newline at end of file
diff --git a/changelog.d/6836.misc b/changelog.d/6836.misc
deleted file mode 100644
index 232488e1e5..0000000000
--- a/changelog.d/6836.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix stacktraces when using `ObservableDeferred` and async/await.
diff --git a/changelog.d/6837.misc b/changelog.d/6837.misc
deleted file mode 100644
index 0496f12de8..0000000000
--- a/changelog.d/6837.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port much of `synapse.handlers.federation` to async/await.
diff --git a/changelog.d/6840.misc b/changelog.d/6840.misc
deleted file mode 100644
index 0496f12de8..0000000000
--- a/changelog.d/6840.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port much of `synapse.handlers.federation` to async/await.
diff --git a/changelog.d/6844.bugfix b/changelog.d/6844.bugfix
deleted file mode 100644
index e84aa1029f..0000000000
--- a/changelog.d/6844.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix an issue with cross-signing where device signatures were not sent to remote servers.
diff --git a/changelog.d/6846.doc b/changelog.d/6846.doc
deleted file mode 100644
index ad69d608c0..0000000000
--- a/changelog.d/6846.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add details of PR merge strategy to contributing docs.
\ No newline at end of file
diff --git a/changelog.d/6847.misc b/changelog.d/6847.misc
deleted file mode 100644
index 094e911adb..0000000000
--- a/changelog.d/6847.misc
+++ /dev/null
@@ -1 +0,0 @@
-Populate `rooms.room_version` database column at startup, rather than in a background update.
diff --git a/changelog.d/6849.bugfix b/changelog.d/6849.bugfix
deleted file mode 100644
index d928a26ec6..0000000000
--- a/changelog.d/6849.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank.
diff --git a/changelog.d/6854.misc b/changelog.d/6854.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6854.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6855.misc b/changelog.d/6855.misc
deleted file mode 100644
index 904361ddfb..0000000000
--- a/changelog.d/6855.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update pip install directiosn in readme to avoid error when using zsh.
diff --git a/changelog.d/6856.misc b/changelog.d/6856.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6856.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6857.misc b/changelog.d/6857.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6857.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6858.misc b/changelog.d/6858.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6858.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6862.misc b/changelog.d/6862.misc
deleted file mode 100644
index 83626d2939..0000000000
--- a/changelog.d/6862.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce amount we log at `INFO` level.
diff --git a/changelog.d/6864.misc b/changelog.d/6864.misc
deleted file mode 100644
index d24eb68460..0000000000
--- a/changelog.d/6864.misc
+++ /dev/null
@@ -1 +0,0 @@
-Limit the number of events that can be requested by the backfill federation API to 100.
diff --git a/changelog.d/6866.feature b/changelog.d/6866.feature
deleted file mode 100644
index 256feab6ff..0000000000
--- a/changelog.d/6866.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add ability to run some group APIs on workers.
diff --git a/changelog.d/6869.misc b/changelog.d/6869.misc
deleted file mode 100644
index 14f88f9bb7..0000000000
--- a/changelog.d/6869.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove unused `get_room_stats_state` method.
diff --git a/changelog.d/6871.misc b/changelog.d/6871.misc
deleted file mode 100644
index 5161af9983..0000000000
--- a/changelog.d/6871.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add typing to `synapse.federation.sender` and port to async/await.
diff --git a/changelog.d/6872.misc b/changelog.d/6872.misc
deleted file mode 100644
index 215a0c82c3..0000000000
--- a/changelog.d/6872.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor _EventInternalMetadata object to improve type safety.
diff --git a/changelog.d/6873.feature b/changelog.d/6873.feature
deleted file mode 100644
index bbedf8f7f0..0000000000
--- a/changelog.d/6873.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add ability to route federation user device queries to workers.
diff --git a/changelog.d/6877.removal b/changelog.d/6877.removal
deleted file mode 100644
index 9545e31fbe..0000000000
--- a/changelog.d/6877.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove `m.lazy_load_members` from `unstable_features` since lazy loading is in the stable Client-Server API version r0.5.0.
diff --git a/changelog.d/6882.misc b/changelog.d/6882.misc
deleted file mode 100644
index e8382e36ae..0000000000
--- a/changelog.d/6882.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reject device display names over 100 characters in length.
diff --git a/changelog.d/6883.misc b/changelog.d/6883.misc
deleted file mode 100644
index e0837d7987..0000000000
--- a/changelog.d/6883.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add an additional entry to the SyTest blacklist for worker mode.
diff --git a/changelog.d/6887.misc b/changelog.d/6887.misc
deleted file mode 100644
index b351d47c7b..0000000000
--- a/changelog.d/6887.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix the use of sed in the linting scripts when using BSD sed.
diff --git a/changelog.d/6888.feature b/changelog.d/6888.feature
deleted file mode 100644
index 1b7ac0c823..0000000000
--- a/changelog.d/6888.feature
+++ /dev/null
@@ -1 +0,0 @@
-The result of a user directory search can now be filtered via the spam checker.
diff --git a/changelog.d/6891.doc b/changelog.d/6891.doc
deleted file mode 100644
index 2f46c385b7..0000000000
--- a/changelog.d/6891.doc
+++ /dev/null
@@ -1 +0,0 @@
-Spell out that the last event sent to a room won't be deleted by a purge. 
diff --git a/changelog.d/6901.misc b/changelog.d/6901.misc
deleted file mode 100644
index b2f12bbe86..0000000000
--- a/changelog.d/6901.misc
+++ /dev/null
@@ -1 +0,0 @@
-Return a 404 instead of 200 for querying information of a non-existant user through the admin API.
\ No newline at end of file
diff --git a/changelog.d/6904.removal b/changelog.d/6904.removal
deleted file mode 100644
index a5cc0c3605..0000000000
--- a/changelog.d/6904.removal
+++ /dev/null
@@ -1 +0,0 @@
-Stop sending alias events during adding / removing aliases. Check alt_aliases in the latest canonical aliases event when deleting an alias.
diff --git a/changelog.d/6905.doc b/changelog.d/6905.doc
deleted file mode 100644
index be0e698af8..0000000000
--- a/changelog.d/6905.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update Synapse's documentation to warn about the deprecation of ACME v1.
diff --git a/changelog.d/6906.doc b/changelog.d/6906.doc
deleted file mode 100644
index 053b2436ae..0000000000
--- a/changelog.d/6906.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add documentation for the spam checker.
diff --git a/changelog.d/6907.doc b/changelog.d/6907.doc
deleted file mode 100644
index be0e698af8..0000000000
--- a/changelog.d/6907.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update Synapse's documentation to warn about the deprecation of ACME v1.
diff --git a/changelog.d/6909.doc b/changelog.d/6909.doc
deleted file mode 100644
index be0e698af8..0000000000
--- a/changelog.d/6909.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update Synapse's documentation to warn about the deprecation of ACME v1.
diff --git a/changelog.d/6915.misc b/changelog.d/6915.misc
deleted file mode 100644
index 3a181ef243..0000000000
--- a/changelog.d/6915.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type hints to the spam checker module.
diff --git a/changelog.d/6918.docker b/changelog.d/6918.docker
deleted file mode 100644
index cc2db5e071..0000000000
--- a/changelog.d/6918.docker
+++ /dev/null
@@ -1 +0,0 @@
-The deprecated "generate-config-on-the-fly" mode is no longer supported.
diff --git a/changelog.d/6919.misc b/changelog.d/6919.misc
deleted file mode 100644
index aa2cd89998..0000000000
--- a/changelog.d/6919.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert the directory handler tests to use HomeserverTestCase.
diff --git a/changelog.d/6920.misc b/changelog.d/6920.misc
deleted file mode 100644
index d333add990..0000000000
--- a/changelog.d/6920.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a warning about indentation to generated configuration files.
diff --git a/changelog.d/6921.docker b/changelog.d/6921.docker
deleted file mode 100644
index 152e723339..0000000000
--- a/changelog.d/6921.docker
+++ /dev/null
@@ -1 +0,0 @@
-Databases created using the compose file in contrib/docker will now always have correct encoding and locale settings. Contributed by Fridtjof Mund.
diff --git a/changelog.d/6936.misc b/changelog.d/6936.misc
deleted file mode 100644
index 9400725017..0000000000
--- a/changelog.d/6936.misc
+++ /dev/null
@@ -1 +0,0 @@
-Increase DB/CPU perf of `_is_server_still_joined` check.
diff --git a/changelog.d/6937.misc b/changelog.d/6937.misc
deleted file mode 100644
index 6d00e58654..0000000000
--- a/changelog.d/6937.misc
+++ /dev/null
@@ -1 +0,0 @@
-Increase perf of `get_auth_chain_ids` used in state res v2.
diff --git a/changelog.d/6938.doc b/changelog.d/6938.doc
deleted file mode 100644
index 117f76f48a..0000000000
--- a/changelog.d/6938.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix worker docs to point `/publicised_groups` API correctly.
diff --git a/changelog.d/6939.feature b/changelog.d/6939.feature
deleted file mode 100644
index 40fe7fc9a9..0000000000
--- a/changelog.d/6939.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432).
diff --git a/changelog.d/6940.doc b/changelog.d/6940.doc
deleted file mode 100644
index 8c75f48d3d..0000000000
--- a/changelog.d/6940.doc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up and update docs on setting up federation.
diff --git a/changelog.d/6945.bugfix b/changelog.d/6945.bugfix
deleted file mode 100644
index 8561be16a4..0000000000
--- a/changelog.d/6945.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix errors from logging in the purge jobs related to the message retention policies support.
diff --git a/changelog.d/6947.misc b/changelog.d/6947.misc
deleted file mode 100644
index 6d00e58654..0000000000
--- a/changelog.d/6947.misc
+++ /dev/null
@@ -1 +0,0 @@
-Increase perf of `get_auth_chain_ids` used in state res v2.
diff --git a/changelog.d/6948.feature b/changelog.d/6948.feature
deleted file mode 100644
index 40fe7fc9a9..0000000000
--- a/changelog.d/6948.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432).
diff --git a/changelog.d/6949.feature b/changelog.d/6949.feature
deleted file mode 100644
index 40fe7fc9a9..0000000000
--- a/changelog.d/6949.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432).
diff --git a/changelog.d/6950.misc b/changelog.d/6950.misc
deleted file mode 100644
index 1c88936b8b..0000000000
--- a/changelog.d/6950.misc
+++ /dev/null
@@ -1 +0,0 @@
-Tiny optimisation for incoming HTTP request dispatch.
diff --git a/changelog.d/6951.misc b/changelog.d/6951.misc
deleted file mode 100644
index 378f52f0a7..0000000000
--- a/changelog.d/6951.misc
+++ /dev/null
@@ -1 +0,0 @@
-Revert #6937.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 8313f177d2..076a297b87 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.10.1"
+__version__ = "1.11.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 2b37eabca1e9355e2e2ab8f65bbdda12431ecc28 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 19 Feb 2020 15:04:47 +0000
Subject: Reduce auth chains fetched during v2 state res. (#6952)

The state res v2 algorithm only cares about the difference between auth
chains, so we can pass in the known common state to the `get_auth_chain`
storage function so that it can ignore those events.
---
 changelog.d/6952.misc                              |  1 +
 synapse/state/__init__.py                          | 15 ++++++++----
 synapse/state/v2.py                                |  2 +-
 .../storage/data_stores/main/event_federation.py   | 28 ++++++++++++++++++----
 tests/state/test_v2.py                             |  6 +++--
 5 files changed, 39 insertions(+), 13 deletions(-)
 create mode 100644 changelog.d/6952.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6952.misc b/changelog.d/6952.misc
new file mode 100644
index 0000000000..e26dc5cab8
--- /dev/null
+++ b/changelog.d/6952.misc
@@ -0,0 +1 @@
+Improve perf of v2 state res for large rooms.
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index fdd6bef6b4..df7a4f6a89 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -16,7 +16,7 @@
 
 import logging
 from collections import namedtuple
-from typing import Dict, Iterable, List, Optional
+from typing import Dict, Iterable, List, Optional, Set
 
 from six import iteritems, itervalues
 
@@ -662,7 +662,7 @@ class StateResolutionStore(object):
             allow_rejected=allow_rejected,
         )
 
-    def get_auth_chain(self, event_ids):
+    def get_auth_chain(self, event_ids: List[str], ignore_events: Set[str]):
         """Gets the full auth chain for a set of events (including rejected
         events).
 
@@ -674,11 +674,16 @@ class StateResolutionStore(object):
                presence of rejected events
 
         Args:
-            event_ids (list): The event IDs of the events to fetch the auth
-                chain for. Must be state events.
+            event_ids: The event IDs of the events to fetch the auth chain for.
+                Must be state events.
+            ignore_events: Set of events to exclude from the returned auth
+                chain.
+
 
         Returns:
             Deferred[list[str]]: List of event IDs of the auth chain.
         """
 
-        return self.store.get_auth_chain_ids(event_ids, include_given=True)
+        return self.store.get_auth_chain_ids(
+            event_ids, include_given=True, ignore_events=ignore_events,
+        )
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 531018c6a5..75fe58305a 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -248,7 +248,7 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store):
             and eid not in common
         )
 
-        auth_chain = yield state_res_store.get_auth_chain(auth_ids)
+        auth_chain = yield state_res_store.get_auth_chain(auth_ids, common)
         auth_ids.update(auth_chain)
 
         auth_sets.append(auth_ids)
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index 60c67457b4..e16da2577d 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 import itertools
 import logging
+from typing import List, Optional, Set
 
 from six.moves import range
 from six.moves.queue import Empty, PriorityQueue
@@ -46,21 +47,37 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             event_ids, include_given=include_given
         ).addCallback(self.get_events_as_list)
 
-    def get_auth_chain_ids(self, event_ids, include_given=False):
+    def get_auth_chain_ids(
+        self,
+        event_ids: List[str],
+        include_given: bool = False,
+        ignore_events: Optional[Set[str]] = None,
+    ):
         """Get auth events for given event_ids. The events *must* be state events.
 
         Args:
-            event_ids (list): state events
-            include_given (bool): include the given events in result
+            event_ids: state events
+            include_given: include the given events in result
+            ignore_events: Set of events to exclude from the returned auth
+                chain. This is useful if the caller will just discard the
+                given events anyway, and saves us from figuring out their auth
+                chains if not required.
 
         Returns:
             list of event_ids
         """
         return self.db.runInteraction(
-            "get_auth_chain_ids", self._get_auth_chain_ids_txn, event_ids, include_given
+            "get_auth_chain_ids",
+            self._get_auth_chain_ids_txn,
+            event_ids,
+            include_given,
+            ignore_events,
         )
 
-    def _get_auth_chain_ids_txn(self, txn, event_ids, include_given):
+    def _get_auth_chain_ids_txn(self, txn, event_ids, include_given, ignore_events):
+        if ignore_events is None:
+            ignore_events = set()
+
         if include_given:
             results = set(event_ids)
         else:
@@ -80,6 +97,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
                 txn.execute(base_sql + clause, list(args))
                 new_front.update([r[0] for r in txn])
 
+            new_front -= ignore_events
             new_front -= results
 
             front = new_front
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 5bafad9f19..5059ade850 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -603,7 +603,7 @@ class TestStateResolutionStore(object):
 
         return {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map}
 
-    def get_auth_chain(self, event_ids):
+    def get_auth_chain(self, event_ids, ignore_events):
         """Gets the full auth chain for a set of events (including rejected
         events).
 
@@ -617,6 +617,8 @@ class TestStateResolutionStore(object):
         Args:
             event_ids (list): The event IDs of the events to fetch the auth
                 chain for. Must be state events.
+            ignore_events: Set of events to exclude from the returned auth
+                chain.
 
         Returns:
             Deferred[list[str]]: List of event IDs of the auth chain.
@@ -627,7 +629,7 @@ class TestStateResolutionStore(object):
         stack = list(event_ids)
         while stack:
             event_id = stack.pop()
-            if event_id in result:
+            if event_id in result or event_id in ignore_events:
                 continue
 
             result.add(event_id)
-- 
cgit 1.4.1
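
Passing the events common to all state sets as `ignore_events` lets the
breadth-first walk prune whole branches it would otherwise expand, since the
caller discards those events anyway. A small in-memory sketch of the same
pruning over a hypothetical `auth_graph` mapping each event to its auth
events:

```python
def auth_chain_ids(event_ids, auth_graph, ignore_events=frozenset()):
    """Walk auth ancestors breadth-first, skipping anything in ignore_events."""
    results = set()
    front = set(event_ids)
    while front:
        new_front = set()
        for event_id in front:
            new_front.update(auth_graph.get(event_id, ()))
        new_front -= ignore_events  # prune, as in the patch above
        new_front -= results
        front = new_front
        results |= new_front
    return results


auth_graph = {"D": ["B", "C"], "B": ["A"], "C": ["A"], "A": []}
print(auth_chain_ids(["D"], auth_graph))                       # {'A', 'B', 'C'}
print(auth_chain_ids(["D"], auth_graph, ignore_events={"B"}))  # {'A', 'C'}
```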


From fc87d2ffb39ca17065e19bd42ef25f1c84862d2c Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 19 Feb 2020 15:09:00 +0000
Subject: Freeze allocated objects on startup. (#6953)

This may make gc go a bit faster as the gc will know things like
caches/data stores etc. are frozen without having to check.
---
 changelog.d/6953.misc | 1 +
 synapse/app/_base.py  | 9 +++++++++
 2 files changed, 10 insertions(+)
 create mode 100644 changelog.d/6953.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6953.misc b/changelog.d/6953.misc
new file mode 100644
index 0000000000..0ab52041cf
--- /dev/null
+++ b/changelog.d/6953.misc
@@ -0,0 +1 @@
+Reduce time spent doing GC by freezing objects on startup.
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 0e8b467a3e..109b1e2fb5 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -279,6 +279,15 @@ def start(hs, listeners=None):
 
         setup_sentry(hs)
         setup_sdnotify(hs)
+
+        # We now freeze all allocated objects in the hopes that (almost)
+        # everything currently allocated are things that will be used for the
+        # rest of time. Doing so means less work each GC (hopefully).
+        #
+        # This only works on Python 3.7
+        if sys.version_info >= (3, 7):
+            gc.collect()
+            gc.freeze()
     except Exception:
         traceback.print_exc(file=sys.stderr)
         reactor = hs.get_reactor()
-- 
cgit 1.4.1
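
`gc.freeze()` (available from Python 3.7) moves every object the collector is
currently tracking into a permanent generation that is never scanned again,
which is why it pairs with a full `gc.collect()` once startup allocation has
settled. A minimal sketch:

```python
import gc
import sys


def freeze_startup_objects():
    # gc.freeze() only exists on Python 3.7 and later.
    if sys.version_info >= (3, 7):
        gc.collect()  # clear out any garbage produced during startup first
        gc.freeze()   # then exempt the survivors from future collections
        print("frozen objects:", gc.get_freeze_count())


freeze_startup_objects()
```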


From 7b7c3cedf2fdc0d0c05bbc651e0ff5b59921c3a2 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 19 Feb 2020 15:47:11 +0000
Subject: Minor perf fixes to `get_auth_chain_ids`.

---
 changelog.d/6954.misc                                |  1 +
 synapse/storage/data_stores/main/event_federation.py | 10 ++++------
 synapse/storage/database.py                          |  2 +-
 3 files changed, 6 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6954.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6954.misc b/changelog.d/6954.misc
new file mode 100644
index 0000000000..8b84ce2f19
--- /dev/null
+++ b/changelog.d/6954.misc
@@ -0,0 +1 @@
+Minor perf fixes to `get_auth_chain_ids`.
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index e16da2577d..750ec1b70d 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -16,7 +16,6 @@ import itertools
 import logging
 from typing import List, Optional, Set
 
-from six.moves import range
 from six.moves.queue import Empty, PriorityQueue
 
 from twisted.internet import defer
@@ -28,6 +27,7 @@ from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.data_stores.main.signatures import SignatureWorkerStore
 from synapse.storage.database import Database
 from synapse.util.caches.descriptors import cached
+from synapse.util.iterutils import batch_iter
 
 logger = logging.getLogger(__name__)
 
@@ -88,14 +88,12 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         front = set(event_ids)
         while front:
             new_front = set()
-            front_list = list(front)
-            chunks = [front_list[x : x + 100] for x in range(0, len(front), 100)]
-            for chunk in chunks:
+            for chunk in batch_iter(front, 100):
                 clause, args = make_in_list_sql_clause(
                     txn.database_engine, "event_id", chunk
                 )
-                txn.execute(base_sql + clause, list(args))
-                new_front.update([r[0] for r in txn])
+                txn.execute(base_sql + clause, args)
+                new_front.update(r[0] for r in txn)
 
             new_front -= ignore_events
             new_front -= results
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 3eeb2f7c04..6dcb5c04da 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -1504,7 +1504,7 @@ class Database(object):
 
 def make_in_list_sql_clause(
     database_engine, column: str, iterable: Iterable
-) -> Tuple[str, Iterable]:
+) -> Tuple[str, list]:
     """Returns an SQL clause that checks the given column is in the iterable.
 
     On SQLite this expands to `column IN (?, ?, ...)`, whereas on Postgres
-- 
cgit 1.4.1
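
`batch_iter` yields fixed-size batches lazily, which avoids materialising the
whole front as a list and slicing it as the old code did. A generator with the
same behaviour can be built from `itertools.islice`; this is a sketch of the
idea, not necessarily the exact Synapse implementation:

```python
from itertools import islice
from typing import Iterable, Iterator, Tuple, TypeVar

T = TypeVar("T")


def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
    """Yield tuples of up to `size` items until the iterable is exhausted."""
    sourceiter = iter(iterable)
    # iter(callable, sentinel) keeps calling the lambda until it returns the
    # sentinel -- here, the empty tuple produced once sourceiter runs dry.
    return iter(lambda: tuple(islice(sourceiter, size)), ())


print(list(batch_iter(range(7), 3)))  # [(0, 1, 2), (3, 4, 5), (6,)]
```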


From 4fb5f4d0ce0444d1d3c2f0b9576b5a91b6307372 Mon Sep 17 00:00:00 2001
From: Ruben Barkow-Kuder 
Date: Thu, 20 Feb 2020 11:37:57 +0100
Subject: Add some clarifications to README.md in the database schema
 directory. (#6615)

Signed-off-by: Ruben Barkow-Kuder 
---
 changelog.d/6615.misc                              |  1 +
 .../data_stores/main/schema/full_schemas/README.md | 24 ++++++++++++++--------
 2 files changed, 17 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/6615.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6615.misc b/changelog.d/6615.misc
new file mode 100644
index 0000000000..9f93152565
--- /dev/null
+++ b/changelog.d/6615.misc
@@ -0,0 +1 @@
+Add some clarifications to `README.md` in the database schema directory.
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/README.md b/synapse/storage/data_stores/main/schema/full_schemas/README.md
index bbd3f18604..c00f287190 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/README.md
+++ b/synapse/storage/data_stores/main/schema/full_schemas/README.md
@@ -1,13 +1,21 @@
-# Building full schema dumps
+# Synapse Database Schemas
 
-These schemas need to be made from a database that has had all background updates run.
+These schemas are used as a basis to create brand new Synapse databases, on both
+SQLite3 and Postgres.
 
-To do so, use `scripts-dev/make_full_schema.sh`. This will produce
-`full.sql.postgres ` and `full.sql.sqlite` files.
+## Building full schema dumps
+
+If you want to recreate these schemas, they need to be made from a database that
+has had all background updates run.
+
+To do so, use `scripts-dev/make_full_schema.sh`. This will produce new
+`full.sql.postgres` and `full.sql.sqlite` files.
 
 Ensure postgres is installed and your user has the ability to run bash commands
-such as `createdb`.
+such as `createdb`, then call
+
+    ./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/
 
-```
-./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/
-```
+There are currently two folders with full-schema snapshots. `16` is a snapshot
+from 2015, for historical reference. The other contains the most recent full
+schema snapshot.
-- 
cgit 1.4.1


From a90d0dc5c2650eea298f8d554ca74c2cf4c097eb Mon Sep 17 00:00:00 2001
From: Hubert Chathi 
Date: Thu, 20 Feb 2020 09:59:00 -0500
Subject: don't insert into the device table for remote cross-signing keys
 (#6956)

---
 changelog.d/6956.misc                              |  1 +
 .../storage/data_stores/main/end_to_end_keys.py    | 33 ++++++++++++----------
 2 files changed, 19 insertions(+), 15 deletions(-)
 create mode 100644 changelog.d/6956.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6956.misc b/changelog.d/6956.misc
new file mode 100644
index 0000000000..5cb0894182
--- /dev/null
+++ b/changelog.d/6956.misc
@@ -0,0 +1 @@
+Don't record remote cross-signing keys in the `devices` table.
diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/data_stores/main/end_to_end_keys.py
index e551606f9d..001a53f9b4 100644
--- a/synapse/storage/data_stores/main/end_to_end_keys.py
+++ b/synapse/storage/data_stores/main/end_to_end_keys.py
@@ -680,11 +680,6 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
                 'user_signing' for a user-signing key
             key (dict): the key data
         """
-        # the cross-signing keys need to occupy the same namespace as devices,
-        # since signatures are identified by device ID.  So add an entry to the
-        # device table to make sure that we don't have a collision with device
-        # IDs
-
         # the 'key' dict will look something like:
         # {
         #   "user_id": "@alice:example.com",
@@ -701,16 +696,24 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
         # The "keys" property must only have one entry, which will be the public
         # key, so we just grab the first value in there
         pubkey = next(iter(key["keys"].values()))
-        self.db.simple_insert_txn(
-            txn,
-            "devices",
-            values={
-                "user_id": user_id,
-                "device_id": pubkey,
-                "display_name": key_type + " signing key",
-                "hidden": True,
-            },
-        )
+
+        # The cross-signing keys need to occupy the same namespace as devices,
+        # since signatures are identified by device ID.  So add an entry to the
+        # device table to make sure that we don't have a collision with device
+        # IDs.
+        # We only need to do this for local users, since remote servers should be
+        # responsible for checking this for their own users.
+        if self.hs.is_mine_id(user_id):
+            self.db.simple_insert_txn(
+                txn,
+                "devices",
+                values={
+                    "user_id": user_id,
+                    "device_id": pubkey,
+                    "display_name": key_type + " signing key",
+                    "hidden": True,
+                },
+            )
 
         # and finally, store the key itself
         with self._cross_signing_id_gen.get_next() as stream_id:
-- 
cgit 1.4.1
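
[Editor's note] The new `self.hs.is_mine_id(user_id)` guard writes the hidden collision-avoidance row into `devices` only for local users. A rough sketch of what such a check amounts to (assuming, as the name suggests, that the homeserver object compares the domain part of the ID against its own server name; `HomeServerSketch` is a hypothetical stand-in):

```python
class HomeServerSketch:
    """Minimal stand-in for the homeserver object; `hostname` is assumed."""

    def __init__(self, hostname: str) -> None:
        self.hostname = hostname

    def is_mine_id(self, string: str) -> bool:
        """True if an ID such as "@alice:example.com" belongs to this server."""
        # IDs look like "<sigil><localpart>:<domain>"; split once and compare
        # the domain part against our configured server name.
        return string.split(":", 1)[1] == self.hostname


hs = HomeServerSketch("example.com")
assert hs.is_mine_id("@alice:example.com")
assert not hs.is_mine_id("@bob:other.org")
```

Remote cross-signing keys are still stored below via `_cross_signing_id_gen`; only the `devices` entry is skipped, since the remote server is responsible for its own device-ID namespace.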


From 99eed85a77acdc25b68f4a7b6447a5ffaecebb0d Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Thu, 20 Feb 2020 16:24:04 -0500
Subject: Do not send alias events when creating / upgrading a room (#6941)

Stop emitting room alias update events during room creation/upgrade.
---
 changelog.d/6941.removal      |  1 +
 synapse/handlers/directory.py | 19 ++-----------------
 synapse/handlers/room.py      | 36 +++++++++++++-----------------------
 3 files changed, 16 insertions(+), 40 deletions(-)
 create mode 100644 changelog.d/6941.removal

(limited to 'changelog.d')

diff --git a/changelog.d/6941.removal b/changelog.d/6941.removal
new file mode 100644
index 0000000000..8573be84b3
--- /dev/null
+++ b/changelog.d/6941.removal
@@ -0,0 +1 @@
+Stop sending m.room.aliases events during room creation and upgrade.
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index db2104c5f6..921d887b24 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 
+import collections.abc
 import logging
 import string
 from typing import List
@@ -282,22 +283,6 @@ class DirectoryHandler(BaseHandler):
                 Codes.NOT_FOUND,
             )
 
-    @defer.inlineCallbacks
-    def send_room_alias_update_event(self, requester, room_id):
-        aliases = yield self.store.get_aliases_for_room(room_id)
-
-        yield self.event_creation_handler.create_and_send_nonmember_event(
-            requester,
-            {
-                "type": EventTypes.Aliases,
-                "state_key": self.hs.hostname,
-                "room_id": room_id,
-                "sender": requester.user.to_string(),
-                "content": {"aliases": aliases},
-            },
-            ratelimit=False,
-        )
-
     @defer.inlineCallbacks
     def _update_canonical_alias(self, requester, user_id, room_id, room_alias):
         """
@@ -326,7 +311,7 @@ class DirectoryHandler(BaseHandler):
         alt_aliases = content.pop("alt_aliases", None)
         # If the aliases are not a list (or not found) do not attempt to modify
         # the list.
-        if isinstance(alt_aliases, list):
+        if isinstance(alt_aliases, collections.abc.Sequence):
             send_update = True
             alt_aliases = [alias for alias in alt_aliases if alias != alias_str]
             if alt_aliases:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 49ec2f48bc..76e8f61b74 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -149,7 +149,9 @@ class RoomCreationHandler(BaseHandler):
         return ret
 
     @defer.inlineCallbacks
-    def _upgrade_room(self, requester, old_room_id, new_version):
+    def _upgrade_room(
+        self, requester: Requester, old_room_id: str, new_version: RoomVersion
+    ):
         user_id = requester.user.to_string()
 
         # start by allocating a new room id
@@ -448,19 +450,21 @@ class RoomCreationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def _move_aliases_to_new_room(
-        self, requester, old_room_id, new_room_id, old_room_state
+        self,
+        requester: Requester,
+        old_room_id: str,
+        new_room_id: str,
+        old_room_state: StateMap[str],
     ):
         directory_handler = self.hs.get_handlers().directory_handler
 
         aliases = yield self.store.get_aliases_for_room(old_room_id)
 
         # check to see if we have a canonical alias.
-        canonical_alias = None
+        canonical_alias_event = None
         canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
         if canonical_alias_event_id:
             canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
-            if canonical_alias_event:
-                canonical_alias = canonical_alias_event.content.get("alias", "")
 
         # first we try to remove the aliases from the old room (we suppress sending
         # the room_aliases event until the end).
@@ -488,19 +492,6 @@ class RoomCreationHandler(BaseHandler):
         if not removed_aliases:
             return
 
-        try:
-            # this can fail if, for some reason, our user doesn't have perms to send
-            # m.room.aliases events in the old room (note that we've already checked that
-            # they have perms to send a tombstone event, so that's not terribly likely).
-            #
-            # If that happens, it's regrettable, but we should carry on: it's the same
-            # as when you remove an alias from the directory normally - it just means that
-            # the aliases event gets out of sync with the directory
-            # (cf https://github.com/vector-im/riot-web/issues/2369)
-            yield directory_handler.send_room_alias_update_event(requester, old_room_id)
-        except AuthError as e:
-            logger.warning("Failed to send updated alias event on old room: %s", e)
-
         # we can now add any aliases we successfully removed to the new room.
         for alias in removed_aliases:
             try:
@@ -517,8 +508,10 @@ class RoomCreationHandler(BaseHandler):
                 # checking module decides it shouldn't, or similar.
                 logger.error("Error adding alias %s to new room: %s", alias, e)
 
+        # If a canonical alias event existed for the old room, fire a canonical
+        # alias event for the new room with a copy of the information.
         try:
-            if canonical_alias and (canonical_alias in removed_aliases):
+            if canonical_alias_event:
                 yield self.event_creation_handler.create_and_send_nonmember_event(
                     requester,
                     {
@@ -526,12 +519,10 @@ class RoomCreationHandler(BaseHandler):
                         "state_key": "",
                         "room_id": new_room_id,
                         "sender": requester.user.to_string(),
-                        "content": {"alias": canonical_alias},
+                        "content": canonical_alias_event.content,
                     },
                     ratelimit=False,
                 )
-
-            yield directory_handler.send_room_alias_update_event(requester, new_room_id)
         except SynapseError as e:
             # again I'm not really expecting this to fail, but if it does, I'd rather
             # we returned the new room to the client at this point.
@@ -757,7 +748,6 @@ class RoomCreationHandler(BaseHandler):
 
         if room_alias:
             result["room_alias"] = room_alias.to_string()
-            yield directory_handler.send_room_alias_update_event(requester, room_id)
 
         return result
 
-- 
cgit 1.4.1
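
[Editor's note] The upgrade path now replays the old room's `m.room.canonical_alias` content wholesale instead of rebuilding just the `alias` field, which also preserves `alt_aliases`. An illustrative content dict (field values are made up):

```python
# What canonical_alias_event.content might hold; copying it verbatim keeps
# alt_aliases intact across the upgrade, where the old code only carried
# over the single "alias" string.
content = {
    "alias": "#project:example.com",
    "alt_aliases": ["#project:other.example.com"],
}
```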


From 8f6d9c4cf0c36180ad26bb84cdbb55b503a942e2 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 21 Feb 2020 08:53:01 +0000
Subject: Small grammar fixes to the ACME v1 deprecation notice (#6944)

Some small fixes to the copy in #6907.
---
 INSTALL.md               |  9 ++++-----
 changelog.d/6944.doc     |  1 +
 synapse/handlers/acme.py | 10 +++++-----
 3 files changed, 10 insertions(+), 10 deletions(-)
 create mode 100644 changelog.d/6944.doc

(limited to 'changelog.d')

diff --git a/INSTALL.md b/INSTALL.md
index 9fe767704b..aa5eb882bb 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -420,11 +420,10 @@ so, you will need to edit `homeserver.yaml`, as follows:
   Note that, as pointed out in that document, this feature will not
   work with installs set up after November 2020. 
   
-  If you are using your
-  own certificate, be sure to use a `.pem` file that includes the full
-  certificate chain including any intermediate certificates (for
-  instance, if using certbot, use `fullchain.pem` as your certificate,
-  not `cert.pem`).
+  If you are using your own certificate, be sure to use a `.pem` file that
+  includes the full certificate chain including any intermediate certificates
+  (for instance, if using certbot, use `fullchain.pem` as your certificate, not
+  `cert.pem`).
 
 For a more detailed guide to configuring your server for federation, see
 [federate.md](docs/federate.md)
diff --git a/changelog.d/6944.doc b/changelog.d/6944.doc
new file mode 100644
index 0000000000..eb0c534b56
--- /dev/null
+++ b/changelog.d/6944.doc
@@ -0,0 +1 @@
+Small grammatical fixes to the ACME v1 deprecation notice.
\ No newline at end of file
diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py
index 250faa997b..a2d7959abe 100644
--- a/synapse/handlers/acme.py
+++ b/synapse/handlers/acme.py
@@ -27,11 +27,11 @@ logger = logging.getLogger(__name__)
 
 ACME_REGISTER_FAIL_ERROR = """
 --------------------------------------------------------------------------------
-Failed to register with the ACME provider. This is likely happening because the install
-is new, and ACME v1 has been deprecated by Let's Encrypt and is disabled for installs set
-up after November 2019.
-At the moment, Synapse doesn't support ACME v2. For more info and alternative solution,
-check out https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
+Failed to register with the ACME provider. This is likely happening because the installation
+is new, and ACME v1 has been deprecated by Let's Encrypt and disabled for
+new installations since November 2019.
+At the moment, Synapse doesn't support ACME v2. For more information and alternative
+solutions, please read https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
 --------------------------------------------------------------------------------"""
 
 
-- 
cgit 1.4.1


From 9c1b83b0078aa9cc1bb902e14d3f7302625ba099 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Fri, 21 Feb 2020 08:56:04 +0000
Subject: 1.11.0

---
 CHANGES.md           | 9 +++++++++
 changelog.d/6944.doc | 1 -
 debian/changelog     | 6 ++++++
 synapse/__init__.py  | 2 +-
 4 files changed, 16 insertions(+), 2 deletions(-)
 delete mode 100644 changelog.d/6944.doc

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index fabf909fa3..ff681762cd 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,12 @@
+Synapse 1.11.0 (2020-02-21)
+===========================
+
+Improved Documentation
+----------------------
+
+- Small grammatical fixes to the ACME v1 deprecation notice. ([\#6944](https://github.com/matrix-org/synapse/issues/6944))
+
+
 Synapse 1.11.0rc1 (2020-02-19)
 ==============================
 
diff --git a/changelog.d/6944.doc b/changelog.d/6944.doc
deleted file mode 100644
index eb0c534b56..0000000000
--- a/changelog.d/6944.doc
+++ /dev/null
@@ -1 +0,0 @@
-Small grammatical fixes to the ACME v1 deprecation notice.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index 90314d36af..fbb44cb94b 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.11.0) stable; urgency=medium
+
+  * New synapse release 1.11.0.
+
+ -- Synapse Packaging team   Fri, 21 Feb 2020 08:54:34 +0000
+
 matrix-synapse-py3 (1.10.1) stable; urgency=medium
 
   * New synapse release 1.10.1.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 076a297b87..3406ce634f 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.11.0rc1"
+__version__ = "1.11.0"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 1fcb9a1a7ab2cb4833ea6c823e8250199a0b3d95 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Fri, 21 Feb 2020 09:06:18 +0000
Subject: changelog

---
 changelog.d/6967.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6967.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6967.bugfix b/changelog.d/6967.bugfix
new file mode 100644
index 0000000000..b65f80cf1d
--- /dev/null
+++ b/changelog.d/6967.bugfix
@@ -0,0 +1 @@
+Fix an issue affecting worker-based deployments where replication would stop working after joining a large room, necessitating a full restart.
-- 
cgit 1.4.1


From 509e381afa8c656e72f5fef3d651a9819794174a Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Fri, 21 Feb 2020 07:15:07 -0500
Subject: Clarify list/set/dict/tuple comprehensions and enforce via flake8
 (#6957)

Ensure good comprehension hygiene using flake8-comprehensions.
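
[Editor's note] The diff below is largely mechanical. As a hedged illustration of the rewrites this lint pushes for (rule codes quoted from memory, e.g. C401/C408), the before/after pairs look like:

```python
rows = [("a.example.com",), ("b.example.com",)]
data = {2: 3, 5: 7}

# C401: generator passed to set() -- rewrite as a set comprehension.
hosts_before = set(r[0] for r in rows)
hosts_after = {r[0] for r in rows}

# C408: dict()/list() calls -- prefer literals.
initial_state_before = dict()
initial_state_after = {}

# C407-style: builtins like sum() accept generators; skip the throwaway list.
total_before = sum([x * y for x, y in data.items()])
total_after = sum(x * y for x, y in data.items())

assert hosts_before == hosts_after
assert total_before == total_after == 41
```

Each pair is behaviour-preserving; the comprehension forms avoid building an intermediate container and read more directly.
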
---
 CONTRIBUTING.md                                    |  2 +-
 changelog.d/6957.misc                              |  1 +
 docs/code_style.md                                 |  2 +-
 scripts-dev/convert_server_keys.py                 |  2 +-
 synapse/app/_base.py                               |  2 +-
 synapse/app/federation_sender.py                   |  4 +--
 synapse/app/pusher.py                              |  2 +-
 synapse/config/server.py                           |  4 +--
 synapse/config/tls.py                              |  2 +-
 synapse/crypto/keyring.py                          |  6 ++--
 synapse/federation/send_queue.py                   |  4 +--
 synapse/groups/groups_server.py                    |  2 +-
 synapse/handlers/device.py                         |  2 +-
 synapse/handlers/directory.py                      |  4 +--
 synapse/handlers/federation.py                     | 18 +++++-----
 synapse/handlers/presence.py                       |  6 ++--
 synapse/handlers/receipts.py                       |  2 +-
 synapse/handlers/room.py                           |  2 +-
 synapse/handlers/search.py                         |  8 ++---
 synapse/handlers/sync.py                           | 22 ++++++------
 synapse/handlers/typing.py                         |  4 +--
 synapse/logging/utils.py                           |  2 +-
 synapse/metrics/__init__.py                        |  2 +-
 synapse/metrics/background_process_metrics.py      |  4 +--
 synapse/push/bulk_push_rule_evaluator.py           |  8 ++---
 synapse/push/emailpusher.py                        |  2 +-
 synapse/push/mailer.py                             | 20 +++++------
 synapse/push/pusherpool.py                         |  2 +-
 synapse/rest/admin/_base.py                        |  4 +--
 synapse/rest/client/v1/push_rule.py                |  6 ++--
 synapse/rest/client/v1/pusher.py                   |  4 +--
 synapse/rest/client/v2_alpha/sync.py               |  2 +-
 synapse/rest/key/v2/remote_key_resource.py         |  2 +-
 synapse/rest/media/v1/_base.py                     | 40 ++++++++++------------
 synapse/state/v1.py                                | 10 +++---
 synapse/state/v2.py                                |  8 ++---
 synapse/storage/_base.py                           |  2 +-
 synapse/storage/background_updates.py              |  2 +-
 synapse/storage/data_stores/main/appservice.py     | 14 ++++----
 synapse/storage/data_stores/main/client_ips.py     |  4 +--
 synapse/storage/data_stores/main/devices.py        | 13 ++++---
 .../storage/data_stores/main/event_federation.py   |  2 +-
 synapse/storage/data_stores/main/events.py         |  8 ++---
 .../storage/data_stores/main/events_bg_updates.py  |  2 +-
 synapse/storage/data_stores/main/events_worker.py  |  6 ++--
 synapse/storage/data_stores/main/push_rule.py      |  8 ++---
 synapse/storage/data_stores/main/receipts.py       |  4 +--
 synapse/storage/data_stores/main/roommember.py     |  4 +--
 synapse/storage/data_stores/main/state.py          |  8 ++---
 synapse/storage/data_stores/main/stream.py         |  8 ++---
 .../storage/data_stores/main/user_erasure_store.py |  4 +--
 synapse/storage/data_stores/state/store.py         |  4 +--
 synapse/storage/database.py                        |  4 +--
 synapse/storage/persist_events.py                  |  8 ++---
 synapse/storage/prepare_database.py                |  6 ++--
 synapse/util/frozenutils.py                        |  2 +-
 synapse/visibility.py                              |  4 +--
 tests/config/test_generate.py                      |  2 +-
 tests/federation/test_federation_server.py         |  2 +-
 tests/handlers/test_presence.py                    |  4 +--
 tests/handlers/test_typing.py                      |  6 ++--
 tests/handlers/test_user_directory.py              | 12 +++----
 tests/push/test_email.py                           |  6 ++--
 tests/push/test_http.py                            |  8 ++---
 tests/rest/client/v2_alpha/test_sync.py            | 28 ++++++++-------
 tests/storage/test__base.py                        |  4 +--
 tests/storage/test_appservice.py                   | 36 +++++++++----------
 tests/storage/test_cleanup_extrems.py              | 10 +++---
 tests/storage/test_event_metrics.py                | 36 +++++++++----------
 tests/storage/test_state.py                        |  2 +-
 tests/test_state.py                                | 18 +++-------
 tests/util/test_stream_change_cache.py             | 18 +++-------
 tox.ini                                            |  1 +
 73 files changed, 251 insertions(+), 276 deletions(-)
 create mode 100644 changelog.d/6957.misc

(limited to 'changelog.d')

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4b01b6ac8c..253a0ca648 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -60,7 +60,7 @@ python 3.6 and to install each tool:
 
 ```
 # Install the dependencies
-pip install -U black flake8 isort
+pip install -U black flake8 flake8-comprehensions isort
 
 # Run the linter script
 ./scripts-dev/lint.sh
diff --git a/changelog.d/6957.misc b/changelog.d/6957.misc
new file mode 100644
index 0000000000..4f98030110
--- /dev/null
+++ b/changelog.d/6957.misc
@@ -0,0 +1 @@
+Use flake8-comprehensions to enforce good hygiene of list/set/dict comprehensions.
diff --git a/docs/code_style.md b/docs/code_style.md
index 71aecd41f7..6ef6f80290 100644
--- a/docs/code_style.md
+++ b/docs/code_style.md
@@ -30,7 +30,7 @@ The necessary tools are detailed below.
 
     Install `flake8` with:
 
-        pip install --upgrade flake8
+        pip install --upgrade flake8 flake8-comprehensions
 
     Check all application and test code with:
 
diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py
index 179be61c30..06b4c1e2ff 100644
--- a/scripts-dev/convert_server_keys.py
+++ b/scripts-dev/convert_server_keys.py
@@ -103,7 +103,7 @@ def main():
 
     yaml.safe_dump(result, sys.stdout, default_flow_style=False)
 
-    rows = list(row for server, json in result.items() for row in rows_v2(server, json))
+    rows = [row for server, json in result.items() for row in rows_v2(server, json)]
 
     cursor = connection.cursor()
     cursor.executemany(
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 109b1e2fb5..9ffd23c6df 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -141,7 +141,7 @@ def start_reactor(
 
 def quit_with_error(error_string):
     message_lines = error_string.split("\n")
-    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
+    line_length = max(len(l) for l in message_lines if len(l) < 80) + 2
     sys.stderr.write("*" * line_length + "\n")
     for line in message_lines:
         sys.stderr.write(" %s\n" % (line.rstrip(),))
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index 63a91f1177..b7fcf80ddc 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -262,7 +262,7 @@ class FederationSenderHandler(object):
 
         # ... as well as device updates and messages
         elif stream_name == DeviceListsStream.NAME:
-            hosts = set(row.destination for row in rows)
+            hosts = {row.destination for row in rows}
             for host in hosts:
                 self.federation_sender.send_device_messages(host)
 
@@ -270,7 +270,7 @@ class FederationSenderHandler(object):
             # The to_device stream includes stuff to be pushed to both local
             # clients and remote servers, so we ignore entities that start with
             # '@' (since they'll be local users rather than destinations).
-            hosts = set(row.entity for row in rows if not row.entity.startswith("@"))
+            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
             for host in hosts:
                 self.federation_sender.send_device_messages(host)
 
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index e46b6ac598..84e9f8d5e2 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -158,7 +158,7 @@ class PusherReplicationHandler(ReplicationClientHandler):
                 yield self.pusher_pool.on_new_notifications(token, token)
             elif stream_name == "receipts":
                 yield self.pusher_pool.on_new_receipts(
-                    token, token, set(row.room_id for row in rows)
+                    token, token, {row.room_id for row in rows}
                 )
         except Exception:
             logger.exception("Error poking pushers")
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 0ec1b0fadd..7525765fee 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -1066,12 +1066,12 @@ KNOWN_RESOURCES = (
 
 
 def _check_resource_config(listeners):
-    resource_names = set(
+    resource_names = {
         res_name
         for listener in listeners
         for res in listener.get("resources", [])
         for res_name in res.get("names", [])
-    )
+    }
 
     for resource in resource_names:
         if resource not in KNOWN_RESOURCES:
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 97a12d51f6..a65538562b 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -260,7 +260,7 @@ class TlsConfig(Config):
                 crypto.FILETYPE_ASN1, self.tls_certificate
             )
             sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
-            sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
+            sha256_fingerprints = {f["sha256"] for f in self.tls_fingerprints}
             if sha256_fingerprint not in sha256_fingerprints:
                 self.tls_fingerprints.append({"sha256": sha256_fingerprint})
 
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 6fe5a6a26a..983f0ead8c 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -326,9 +326,7 @@ class Keyring(object):
             verify_requests (list[VerifyJsonRequest]): list of verify requests
         """
 
-        remaining_requests = set(
-            (rq for rq in verify_requests if not rq.key_ready.called)
-        )
+        remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called}
 
         @defer.inlineCallbacks
         def do_iterations():
@@ -396,7 +394,7 @@ class Keyring(object):
 
         results = yield fetcher.get_keys(missing_keys)
 
-        completed = list()
+        completed = []
         for verify_request in remaining_requests:
             server_name = verify_request.server_name
 
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 001bb304ae..876fb0e245 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -129,9 +129,9 @@ class FederationRemoteSendQueue(object):
             for key in keys[:i]:
                 del self.presence_changed[key]
 
-            user_ids = set(
+            user_ids = {
                 user_id for uids in self.presence_changed.values() for user_id in uids
-            )
+            }
 
             keys = self.presence_destinations.keys()
             i = self.presence_destinations.bisect_left(position_to_delete)
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index c106abae21..4f0dc0a209 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -608,7 +608,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         user_results = yield self.store.get_users_in_group(
             group_id, include_private=True
         )
-        if user_id in [user_result["user_id"] for user_result in user_results]:
+        if user_id in (user_result["user_id"] for user_result in user_results):
             raise SynapseError(400, "User already in group")
 
         content = {
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 50cea3f378..a514c30714 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -742,6 +742,6 @@ class DeviceListUpdater(object):
 
         # We clobber the seen updates since we've re-synced from a given
         # point.
-        self._seen_updates[user_id] = set([stream_id])
+        self._seen_updates[user_id] = {stream_id}
 
         defer.returnValue(result)
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 921d887b24..0b23ca919a 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -72,7 +72,7 @@ class DirectoryHandler(BaseHandler):
         # TODO(erikj): Check if there is a current association.
         if not servers:
             users = yield self.state.get_current_users_in_room(room_id)
-            servers = set(get_domain_from_id(u) for u in users)
+            servers = {get_domain_from_id(u) for u in users}
 
         if not servers:
             raise SynapseError(400, "Failed to get server list")
@@ -255,7 +255,7 @@ class DirectoryHandler(BaseHandler):
             )
 
         users = yield self.state.get_current_users_in_room(room_id)
-        extra_servers = set(get_domain_from_id(u) for u in users)
+        extra_servers = {get_domain_from_id(u) for u in users}
         servers = set(extra_servers) | set(servers)
 
         # If this server is in the list of servers, return it first.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index eb20ef4aec..a689065f89 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -659,11 +659,11 @@ class FederationHandler(BaseHandler):
         # this can happen if a remote server claims that the state or
         # auth_events at an event in room A are actually events in room B
 
-        bad_events = list(
+        bad_events = [
             (event_id, event.room_id)
             for event_id, event in fetched_events.items()
             if event.room_id != room_id
-        )
+        ]
 
         for bad_event_id, bad_room_id in bad_events:
             # This is a bogus situation, but since we may only discover it a long time
@@ -856,7 +856,7 @@ class FederationHandler(BaseHandler):
 
         # Don't bother processing events we already have.
         seen_events = await self.store.have_events_in_timeline(
-            set(e.event_id for e in events)
+            {e.event_id for e in events}
         )
 
         events = [e for e in events if e.event_id not in seen_events]
@@ -866,7 +866,7 @@ class FederationHandler(BaseHandler):
 
         event_map = {e.event_id: e for e in events}
 
-        event_ids = set(e.event_id for e in events)
+        event_ids = {e.event_id for e in events}
 
         # build a list of events whose prev_events weren't in the batch.
         # (XXX: this will include events whose prev_events we already have; that doesn't
@@ -892,13 +892,13 @@ class FederationHandler(BaseHandler):
             state_events.update({s.event_id: s for s in state})
             events_to_state[e_id] = state
 
-        required_auth = set(
+        required_auth = {
             a_id
             for event in events
             + list(state_events.values())
             + list(auth_events.values())
             for a_id in event.auth_event_ids()
-        )
+        }
         auth_events.update(
             {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map}
         )
@@ -1247,7 +1247,7 @@ class FederationHandler(BaseHandler):
     async def on_event_auth(self, event_id: str) -> List[EventBase]:
         event = await self.store.get_event(event_id)
         auth = await self.store.get_auth_chain(
-            [auth_id for auth_id in event.auth_event_ids()], include_given=True
+            list(event.auth_event_ids()), include_given=True
         )
         return list(auth)
 
@@ -2152,7 +2152,7 @@ class FederationHandler(BaseHandler):
 
         # Now get the current auth_chain for the event.
         local_auth_chain = await self.store.get_auth_chain(
-            [auth_id for auth_id in event.auth_event_ids()], include_given=True
+            list(event.auth_event_ids()), include_given=True
         )
 
         # TODO: Check if we would now reject event_id. If so we need to tell
@@ -2654,7 +2654,7 @@ class FederationHandler(BaseHandler):
             member_handler = self.hs.get_room_member_handler()
             yield member_handler.send_membership_event(None, event, context)
         else:
-            destinations = set(x.split(":", 1)[-1] for x in (sender_user_id, room_id))
+            destinations = {x.split(":", 1)[-1] for x in (sender_user_id, room_id)}
             yield self.federation_client.forward_third_party_invite(
                 destinations, room_id, event_dict
             )
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 202aa9294f..0d6cf2b008 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -313,7 +313,7 @@ class PresenceHandler(object):
                 notified_presence_counter.inc(len(to_notify))
                 yield self._persist_and_notify(list(to_notify.values()))
 
-            self.unpersisted_users_changes |= set(s.user_id for s in new_states)
+            self.unpersisted_users_changes |= {s.user_id for s in new_states}
             self.unpersisted_users_changes -= set(to_notify.keys())
 
             to_federation_ping = {
@@ -698,7 +698,7 @@ class PresenceHandler(object):
         updates = yield self.current_state_for_users(target_user_ids)
         updates = list(updates.values())
 
-        for user_id in set(target_user_ids) - set(u.user_id for u in updates):
+        for user_id in set(target_user_ids) - {u.user_id for u in updates}:
             updates.append(UserPresenceState.default(user_id))
 
         now = self.clock.time_msec()
@@ -886,7 +886,7 @@ class PresenceHandler(object):
             hosts = yield self.state.get_current_hosts_in_room(room_id)
 
             # Filter out ourselves.
-            hosts = set(host for host in hosts if host != self.server_name)
+            hosts = {host for host in hosts if host != self.server_name}
 
             self.federation.send_presence_to_destinations(
                 states=[state], destinations=hosts
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 9283c039e3..8bc100db42 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -94,7 +94,7 @@ class ReceiptsHandler(BaseHandler):
             # no new receipts
             return False
 
-        affected_room_ids = list(set([r.room_id for r in receipts]))
+        affected_room_ids = list({r.room_id for r in receipts})
 
         self.notifier.on_new_event("receipt_key", max_batch_id, rooms=affected_room_ids)
         # Note that the min here shouldn't be relied upon to be accurate.
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 76e8f61b74..8ee870f0bb 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -355,7 +355,7 @@ class RoomCreationHandler(BaseHandler):
             # If so, mark the new room as non-federatable as well
             creation_content["m.federate"] = False
 
-        initial_state = dict()
+        initial_state = {}
 
         # Replicate relevant room events
         types_to_copy = (
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 110097eab9..ec1542d416 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -184,7 +184,7 @@ class SearchHandler(BaseHandler):
             membership_list=[Membership.JOIN],
             # membership_list=[Membership.JOIN, Membership.LEAVE, Membership.Ban],
         )
-        room_ids = set(r.room_id for r in rooms)
+        room_ids = {r.room_id for r in rooms}
 
         # If doing a subset of all rooms seearch, check if any of the rooms
         # are from an upgraded room, and search their contents as well
@@ -374,12 +374,12 @@ class SearchHandler(BaseHandler):
                 ).to_string()
 
                 if include_profile:
-                    senders = set(
+                    senders = {
                         ev.sender
                         for ev in itertools.chain(
                             res["events_before"], [event], res["events_after"]
                         )
-                    )
+                    }
 
                     if res["events_after"]:
                         last_event_id = res["events_after"][-1].event_id
@@ -421,7 +421,7 @@ class SearchHandler(BaseHandler):
 
         state_results = {}
         if include_state:
-            rooms = set(e.room_id for e in allowed_events)
+            rooms = {e.room_id for e in allowed_events}
             for room_id in rooms:
                 state = yield self.state_handler.get_current_state(room_id)
                 state_results[room_id] = list(state.values())
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 4324bc702e..669dbc8a48 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -682,11 +682,9 @@ class SyncHandler(object):
 
         # FIXME: order by stream ordering rather than as returned by SQL
         if joined_user_ids or invited_user_ids:
-            summary["m.heroes"] = sorted(
-                [user_id for user_id in (joined_user_ids + invited_user_ids)]
-            )[0:5]
+            summary["m.heroes"] = sorted(joined_user_ids + invited_user_ids)[0:5]
         else:
-            summary["m.heroes"] = sorted([user_id for user_id in gone_user_ids])[0:5]
+            summary["m.heroes"] = sorted(gone_user_ids)[0:5]
 
         if not sync_config.filter_collection.lazy_load_members():
             return summary
@@ -697,9 +695,9 @@ class SyncHandler(object):
 
         # track which members the client should already know about via LL:
         # Ones which are already in state...
-        existing_members = set(
+        existing_members = {
             user_id for (typ, user_id) in state.keys() if typ == EventTypes.Member
-        )
+        }
 
         # ...or ones which are in the timeline...
         for ev in batch.events:
@@ -773,10 +771,10 @@ class SyncHandler(object):
                 # We only request state for the members needed to display the
                 # timeline:
 
-                members_to_fetch = set(
+                members_to_fetch = {
                     event.sender  # FIXME: we also care about invite targets etc.
                     for event in batch.events
-                )
+                }
 
                 if full_state:
                     # always make sure we LL ourselves so we know we're in the room
@@ -1993,10 +1991,10 @@ def _calculate_state(
         )
     }
 
-    c_ids = set(e for e in itervalues(current))
-    ts_ids = set(e for e in itervalues(timeline_start))
-    p_ids = set(e for e in itervalues(previous))
-    tc_ids = set(e for e in itervalues(timeline_contains))
+    c_ids = set(itervalues(current))
+    ts_ids = set(itervalues(timeline_start))
+    p_ids = set(itervalues(previous))
+    tc_ids = set(itervalues(timeline_contains))
 
     # If we are lazyloading room members, we explicitly add the membership events
     # for the senders in the timeline into the state block returned by /sync,
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 5406618431..391bceb0c4 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -198,7 +198,7 @@ class TypingHandler(object):
                 now=now, obj=member, then=now + FEDERATION_PING_INTERVAL
             )
 
-            for domain in set(get_domain_from_id(u) for u in users):
+            for domain in {get_domain_from_id(u) for u in users}:
                 if domain != self.server_name:
                     logger.debug("sending typing update to %s", domain)
                     self.federation.build_and_send_edu(
@@ -231,7 +231,7 @@ class TypingHandler(object):
             return
 
         users = yield self.state.get_current_users_in_room(room_id)
-        domains = set(get_domain_from_id(u) for u in users)
+        domains = {get_domain_from_id(u) for u in users}
 
         if self.server_name in domains:
             logger.info("Got typing update from %s: %r", user_id, content)
diff --git a/synapse/logging/utils.py b/synapse/logging/utils.py
index 6073fc2725..0c2527bd86 100644
--- a/synapse/logging/utils.py
+++ b/synapse/logging/utils.py
@@ -148,7 +148,7 @@ def trace_function(f):
             pathname=pathname,
             lineno=lineno,
             msg=msg,
-            args=tuple(),
+            args=(),
             exc_info=None,
         )
 
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 0b45e1f52a..0dba997a23 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -240,7 +240,7 @@ class BucketCollector(object):
         res.append(["+Inf", sum(data.values())])
 
         metric = HistogramMetricFamily(
-            self.name, "", buckets=res, sum_value=sum([x * y for x, y in data.items()])
+            self.name, "", buckets=res, sum_value=sum(x * y for x, y in data.items())
         )
         yield metric
 
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index c53d2a0d40..b65bcd8806 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -80,13 +80,13 @@ _background_process_db_sched_duration = Counter(
 # map from description to a counter, so that we can name our logcontexts
 # incrementally. (It actually duplicates _background_process_start_count, but
 # it's much simpler to do so than to try to combine them.)
-_background_process_counts = dict()  # type: dict[str, int]
+_background_process_counts = {}  # type: dict[str, int]
 
 # map from description to the currently running background processes.
 #
 # it's kept as a dict of sets rather than a big set so that we can keep track
 # of process descriptions that no longer have any active processes.
-_background_processes = dict()  # type: dict[str, set[_BackgroundProcess]]
+_background_processes = {}  # type: dict[str, set[_BackgroundProcess]]
 
 # A lock that covers the above dicts
 _bg_metrics_lock = threading.Lock()
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 7d9f5a38d9..433ca2f416 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -400,11 +400,11 @@ class RulesForRoom(object):
         if logger.isEnabledFor(logging.DEBUG):
             logger.debug("Found members %r: %r", self.room_id, members.values())
 
-        interested_in_user_ids = set(
+        interested_in_user_ids = {
             user_id
             for user_id, membership in itervalues(members)
             if membership == Membership.JOIN
-        )
+        }
 
         logger.debug("Joined: %r", interested_in_user_ids)
 
@@ -412,9 +412,9 @@ class RulesForRoom(object):
             interested_in_user_ids, on_invalidate=self.invalidate_all_cb
         )
 
-        user_ids = set(
+        user_ids = {
             uid for uid, have_pusher in iteritems(if_users_with_pushers) if have_pusher
-        )
+        }
 
         logger.debug("With pushers: %r", user_ids)
 
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index 8c818a86bf..ba4551d619 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -204,7 +204,7 @@ class EmailPusher(object):
                 yield self.send_notification(unprocessed, reason)
 
                 yield self.save_last_stream_ordering_and_success(
-                    max([ea["stream_ordering"] for ea in unprocessed])
+                    max(ea["stream_ordering"] for ea in unprocessed)
                 )
 
                 # we update the throttle on all the possible unprocessed push actions
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index b13b646bfd..4ccaf178ce 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -526,12 +526,10 @@ class Mailer(object):
                     # If the room doesn't have a name, say who the messages
                     # are from explicitly to avoid, "messages in the Bob room"
                     sender_ids = list(
-                        set(
-                            [
-                                notif_events[n["event_id"]].sender
-                                for n in notifs_by_room[room_id]
-                            ]
-                        )
+                        {
+                            notif_events[n["event_id"]].sender
+                            for n in notifs_by_room[room_id]
+                        }
                     )
 
                     member_events = yield self.store.get_events(
@@ -558,12 +556,10 @@ class Mailer(object):
                 # If the reason room doesn't have a name, say who the messages
                 # are from explicitly to avoid, "messages in the Bob room"
                 sender_ids = list(
-                    set(
-                        [
-                            notif_events[n["event_id"]].sender
-                            for n in notifs_by_room[reason["room_id"]]
-                        ]
-                    )
+                    {
+                        notif_events[n["event_id"]].sender
+                        for n in notifs_by_room[reason["room_id"]]
+                    }
                 )
 
                 member_events = yield self.store.get_events(
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index b9dca5bc63..01789a9fb4 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -191,7 +191,7 @@ class PusherPool:
                 min_stream_id - 1, max_stream_id
             )
             # This returns a tuple, user_id is at index 3
-            users_affected = set([r[3] for r in updated_receipts])
+            users_affected = {r[3] for r in updated_receipts}
 
             for u in users_affected:
                 if u in self.pushers:
diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py
index 459482eb6d..a96f75ce26 100644
--- a/synapse/rest/admin/_base.py
+++ b/synapse/rest/admin/_base.py
@@ -29,7 +29,7 @@ def historical_admin_path_patterns(path_regex):
     Note that this should only be used for existing endpoints: new ones should just
     register for the /_synapse/admin path.
     """
-    return list(
+    return [
         re.compile(prefix + path_regex)
         for prefix in (
             "^/_synapse/admin/v1",
@@ -37,7 +37,7 @@ def historical_admin_path_patterns(path_regex):
             "^/_matrix/client/unstable/admin",
             "^/_matrix/client/r0/admin",
         )
-    )
+    ]
 
 
 def admin_patterns(path_regex: str):
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index 4f74600239..9fd4908136 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -49,7 +49,7 @@ class PushRuleRestServlet(RestServlet):
         if self._is_worker:
             raise Exception("Cannot handle PUT /push_rules on worker")
 
-        spec = _rule_spec_from_path([x for x in path.split("/")])
+        spec = _rule_spec_from_path(path.split("/"))
         try:
             priority_class = _priority_class_from_spec(spec)
         except InvalidRuleException as e:
@@ -110,7 +110,7 @@ class PushRuleRestServlet(RestServlet):
         if self._is_worker:
             raise Exception("Cannot handle DELETE /push_rules on worker")
 
-        spec = _rule_spec_from_path([x for x in path.split("/")])
+        spec = _rule_spec_from_path(path.split("/"))
 
         requester = await self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
@@ -138,7 +138,7 @@ class PushRuleRestServlet(RestServlet):
 
         rules = format_push_rules_for_user(requester.user, rules)
 
-        path = [x for x in path.split("/")][1:]
+        path = path.split("/")[1:]
 
         if path == []:
             # we're a reference impl: pedantry is our job.
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index 6f6b7aed6e..550a2f1b44 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -54,9 +54,9 @@ class PushersRestServlet(RestServlet):
 
         pushers = await self.hs.get_datastore().get_pushers_by_user_id(user.to_string())
 
-        filtered_pushers = list(
+        filtered_pushers = [
             {k: v for k, v in p.items() if k in ALLOWED_KEYS} for p in pushers
-        )
+        ]
 
         return 200, {"pushers": filtered_pushers}
 
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index d8292ce29f..8fa68dd37f 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -72,7 +72,7 @@ class SyncRestServlet(RestServlet):
     """
 
     PATTERNS = client_patterns("/sync$")
-    ALLOWED_PRESENCE = set(["online", "offline", "unavailable"])
+    ALLOWED_PRESENCE = {"online", "offline", "unavailable"}
 
     def __init__(self, hs):
         super(SyncRestServlet, self).__init__()
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 9d6813a047..4b6d030a57 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -149,7 +149,7 @@ class RemoteKey(DirectServeResource):
 
         time_now_ms = self.clock.time_msec()
 
-        cache_misses = dict()  # type: Dict[str, Set[str]]
+        cache_misses = {}  # type: Dict[str, Set[str]]
         for (server_name, key_id, from_server), results in cached.items():
             results = [(result["ts_added_ms"], result) for result in results]
 
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index 65bbf00073..ba28dd089d 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -135,27 +135,25 @@ def add_file_headers(request, media_type, file_size, upload_name):
 
 # separators as defined in RFC2616. SP and HT are handled separately.
 # see _can_encode_filename_as_token.
-_FILENAME_SEPARATOR_CHARS = set(
-    (
-        "(",
-        ")",
-        "<",
-        ">",
-        "@",
-        ",",
-        ";",
-        ":",
-        "\\",
-        '"',
-        "/",
-        "[",
-        "]",
-        "?",
-        "=",
-        "{",
-        "}",
-    )
-)
+_FILENAME_SEPARATOR_CHARS = {
+    "(",
+    ")",
+    "<",
+    ">",
+    "@",
+    ",",
+    ";",
+    ":",
+    "\\",
+    '"',
+    "/",
+    "[",
+    "]",
+    "?",
+    "=",
+    "{",
+    "}",
+}
 
 
 def _can_encode_filename_as_token(x):
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index 24b7c0faef..9bf98d06f2 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -69,9 +69,9 @@ def resolve_events_with_store(
 
     unconflicted_state, conflicted_state = _seperate(state_sets)
 
-    needed_events = set(
+    needed_events = {
         event_id for event_ids in itervalues(conflicted_state) for event_id in event_ids
-    )
+    }
     needed_event_count = len(needed_events)
     if event_map is not None:
         needed_events -= set(iterkeys(event_map))
@@ -261,11 +261,11 @@ def _resolve_state_events(conflicted_state, auth_events):
 
 
 def _resolve_auth_events(events, auth_events):
-    reverse = [i for i in reversed(_ordered_events(events))]
+    reverse = list(reversed(_ordered_events(events)))
 
-    auth_keys = set(
+    auth_keys = {
         key for event in events for key in event_auth.auth_types_for_event(event)
-    )
+    }
 
     new_auth_events = {}
     for key in auth_keys:
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 75fe58305a..0ffe6d8c14 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -105,7 +105,7 @@ def resolve_events_with_store(
                 % (room_id, event.event_id, event.room_id,)
             )
 
-    full_conflicted_set = set(eid for eid in full_conflicted_set if eid in event_map)
+    full_conflicted_set = {eid for eid in full_conflicted_set if eid in event_map}
 
     logger.debug("%d full_conflicted_set entries", len(full_conflicted_set))
 
@@ -233,7 +233,7 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store):
 
     auth_sets = []
     for state_set in state_sets:
-        auth_ids = set(
+        auth_ids = {
             eid
             for key, eid in iteritems(state_set)
             if (
@@ -246,7 +246,7 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store):
                 )
             )
             and eid not in common
-        )
+        }
 
         auth_chain = yield state_res_store.get_auth_chain(auth_ids, common)
         auth_ids.update(auth_chain)
@@ -275,7 +275,7 @@ def _seperate(state_sets):
     conflicted_state = {}
 
     for key in set(itertools.chain.from_iterable(state_sets)):
-        event_ids = set(state_set.get(key) for state_set in state_sets)
+        event_ids = {state_set.get(key) for state_set in state_sets}
         if len(event_ids) == 1:
             unconflicted_state[key] = event_ids.pop()
         else:
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index da3b99f93d..13de5f1f62 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -56,7 +56,7 @@ class SQLBaseStore(metaclass=ABCMeta):
             members_changed (iterable[str]): The user_ids of members that have
                 changed
         """
-        for host in set(get_domain_from_id(u) for u in members_changed):
+        for host in {get_domain_from_id(u) for u in members_changed}:
             self._attempt_to_invalidate_cache("is_host_joined", (room_id, host))
             self._attempt_to_invalidate_cache("was_host_joined", (room_id, host))
 
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index bd547f35cf..eb1a7e5002 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -189,7 +189,7 @@ class BackgroundUpdater(object):
                 keyvalues=None,
                 retcols=("update_name", "depends_on"),
             )
-            in_flight = set(update["update_name"] for update in updates)
+            in_flight = {update["update_name"] for update in updates}
             for update in updates:
                 if update["depends_on"] not in in_flight:
                     self._background_update_queue.append(update["update_name"])
diff --git a/synapse/storage/data_stores/main/appservice.py b/synapse/storage/data_stores/main/appservice.py
index b2f39649fd..efbc06c796 100644
--- a/synapse/storage/data_stores/main/appservice.py
+++ b/synapse/storage/data_stores/main/appservice.py
@@ -135,7 +135,7 @@ class ApplicationServiceTransactionWorkerStore(
             may be empty.
         """
         results = yield self.db.simple_select_list(
-            "application_services_state", dict(state=state), ["as_id"]
+            "application_services_state", {"state": state}, ["as_id"]
         )
         # NB: This assumes this class is linked with ApplicationServiceStore
         as_list = self.get_app_services()
@@ -158,7 +158,7 @@ class ApplicationServiceTransactionWorkerStore(
         """
         result = yield self.db.simple_select_one(
             "application_services_state",
-            dict(as_id=service.id),
+            {"as_id": service.id},
             ["state"],
             allow_none=True,
             desc="get_appservice_state",
@@ -177,7 +177,7 @@ class ApplicationServiceTransactionWorkerStore(
             A Deferred which resolves when the state was set successfully.
         """
         return self.db.simple_upsert(
-            "application_services_state", dict(as_id=service.id), dict(state=state)
+            "application_services_state", {"as_id": service.id}, {"state": state}
         )
 
     def create_appservice_txn(self, service, events):
@@ -253,13 +253,15 @@ class ApplicationServiceTransactionWorkerStore(
             self.db.simple_upsert_txn(
                 txn,
                 "application_services_state",
-                dict(as_id=service.id),
-                dict(last_txn=txn_id),
+                {"as_id": service.id},
+                {"last_txn": txn_id},
             )
 
             # Delete txn
             self.db.simple_delete_txn(
-                txn, "application_services_txns", dict(txn_id=txn_id, as_id=service.id)
+                txn,
+                "application_services_txns",
+                {"txn_id": txn_id, "as_id": service.id},
             )
 
         return self.db.runInteraction(
diff --git a/synapse/storage/data_stores/main/client_ips.py b/synapse/storage/data_stores/main/client_ips.py
index 13f4c9c72e..e1ccb27142 100644
--- a/synapse/storage/data_stores/main/client_ips.py
+++ b/synapse/storage/data_stores/main/client_ips.py
@@ -530,7 +530,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
             ((row["access_token"], row["ip"]), (row["user_agent"], row["last_seen"]))
             for row in rows
         )
-        return list(
+        return [
             {
                 "access_token": access_token,
                 "ip": ip,
@@ -538,7 +538,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
                 "last_seen": last_seen,
             }
             for (access_token, ip), (user_agent, last_seen) in iteritems(results)
-        )
+        ]
 
     @wrap_as_background_process("prune_old_user_ips")
     async def _prune_old_user_ips(self):
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py
index b7617efb80..d55733a4cd 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/data_stores/main/devices.py
@@ -137,7 +137,7 @@ class DeviceWorkerStore(SQLBaseStore):
 
         # get the cross-signing keys of the users in the list, so that we can
         # determine which of the device changes were cross-signing keys
-        users = set(r[0] for r in updates)
+        users = {r[0] for r in updates}
         master_key_by_user = {}
         self_signing_key_by_user = {}
         for user in users:
@@ -446,7 +446,7 @@ class DeviceWorkerStore(SQLBaseStore):
             a set of user_ids and results_map is a mapping of
             user_id -> device_id -> device_info
         """
-        user_ids = set(user_id for user_id, _ in query_list)
+        user_ids = {user_id for user_id, _ in query_list}
         user_map = yield self.get_device_list_last_stream_id_for_remotes(list(user_ids))
 
         # We go and check if any of the users need to have their device lists
@@ -454,10 +454,9 @@ class DeviceWorkerStore(SQLBaseStore):
         users_needing_resync = yield self.get_user_ids_requiring_device_list_resync(
             user_ids
         )
-        user_ids_in_cache = (
-            set(user_id for user_id, stream_id in user_map.items() if stream_id)
-            - users_needing_resync
-        )
+        user_ids_in_cache = {
+            user_id for user_id, stream_id in user_map.items() if stream_id
+        } - users_needing_resync
         user_ids_not_in_cache = user_ids - user_ids_in_cache
 
         results = {}
@@ -604,7 +603,7 @@ class DeviceWorkerStore(SQLBaseStore):
             rows = yield self.db.execute(
                 "get_users_whose_signatures_changed", None, sql, user_id, from_key
             )
-            return set(user for row in rows for user in json.loads(row[0]))
+            return {user for row in rows for user in json.loads(row[0])}
         else:
             return set()
 
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index 750ec1b70d..49a7b8b433 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -426,7 +426,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
                     query, (room_id, event_id, False, limit - len(event_results))
                 )
 
-                new_results = set(t[0] for t in txn) - seen_events
+                new_results = {t[0] for t in txn} - seen_events
 
                 new_front |= new_results
                 seen_events |= new_results
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index c9d0d68c3a..8ae23df00a 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -145,7 +145,7 @@ class EventsStore(
             return txn.fetchall()
 
         res = yield self.db.runInteraction("read_forward_extremities", fetch)
-        self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))
+        self._current_forward_extremities_amount = c_counter([x[0] for x in res])
 
     @_retry_on_integrity_error
     @defer.inlineCallbacks
@@ -598,11 +598,11 @@ class EventsStore(
             # We find out which membership events we may have deleted
             # and which we have added, then we invalidate the caches for all
             # those users.
-            members_changed = set(
+            members_changed = {
                 state_key
                 for ev_type, state_key in itertools.chain(to_delete, to_insert)
                 if ev_type == EventTypes.Member
-            )
+            }
 
             for member in members_changed:
                 txn.call_after(
@@ -1615,7 +1615,7 @@ class EventsStore(
         """
         )
 
-        referenced_state_groups = set(sg for sg, in txn)
+        referenced_state_groups = {sg for sg, in txn}
         logger.info(
             "[purge] found %i referenced state groups", len(referenced_state_groups)
         )
diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py
index 5177b71016..f54c8b1ee0 100644
--- a/synapse/storage/data_stores/main/events_bg_updates.py
+++ b/synapse/storage/data_stores/main/events_bg_updates.py
@@ -402,7 +402,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
                     keyvalues={},
                     retcols=("room_id",),
                 )
-                room_ids = set(row["room_id"] for row in rows)
+                room_ids = {row["room_id"] for row in rows}
                 for room_id in room_ids:
                     txn.call_after(
                         self.get_latest_event_ids_in_room.invalidate, (room_id,)
diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py
index 7251e819f5..47a3a26072 100644
--- a/synapse/storage/data_stores/main/events_worker.py
+++ b/synapse/storage/data_stores/main/events_worker.py
@@ -494,9 +494,9 @@ class EventsWorkerStore(SQLBaseStore):
         """
         with Measure(self._clock, "_fetch_event_list"):
             try:
-                events_to_fetch = set(
+                events_to_fetch = {
                     event_id for events, _ in event_list for event_id in events
-                )
+                }
 
                 row_dict = self.db.new_transaction(
                     conn, "do_fetch", [], [], self._fetch_event_rows, events_to_fetch
@@ -804,7 +804,7 @@ class EventsWorkerStore(SQLBaseStore):
             desc="have_events_in_timeline",
         )
 
-        return set(r["event_id"] for r in rows)
+        return {r["event_id"] for r in rows}
 
     @defer.inlineCallbacks
     def have_seen_events(self, event_ids):
diff --git a/synapse/storage/data_stores/main/push_rule.py b/synapse/storage/data_stores/main/push_rule.py
index e2673ae073..62ac88d9f2 100644
--- a/synapse/storage/data_stores/main/push_rule.py
+++ b/synapse/storage/data_stores/main/push_rule.py
@@ -276,21 +276,21 @@ class PushRulesWorkerStore(
         # We ignore app service users for now. This is so that we don't fill
         # up the `get_if_users_have_pushers` cache with AS entries that we
         # know don't have pushers, nor even read receipts.
-        local_users_in_room = set(
+        local_users_in_room = {
             u
             for u in users_in_room
             if self.hs.is_mine_id(u)
             and not self.get_if_app_services_interested_in_user(u)
-        )
+        }
 
         # users in the room who have pushers need to get push rules run because
         # that's how their pushers work
         if_users_with_pushers = yield self.get_if_users_have_pushers(
             local_users_in_room, on_invalidate=cache_context.invalidate
         )
-        user_ids = set(
+        user_ids = {
             uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher
-        )
+        }
 
         users_with_receipts = yield self.get_users_with_read_receipts_in_room(
             room_id, on_invalidate=cache_context.invalidate
diff --git a/synapse/storage/data_stores/main/receipts.py b/synapse/storage/data_stores/main/receipts.py
index 96e54d145e..0d932a0672 100644
--- a/synapse/storage/data_stores/main/receipts.py
+++ b/synapse/storage/data_stores/main/receipts.py
@@ -58,7 +58,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
     @cachedInlineCallbacks()
     def get_users_with_read_receipts_in_room(self, room_id):
         receipts = yield self.get_receipts_for_room(room_id, "m.read")
-        return set(r["user_id"] for r in receipts)
+        return {r["user_id"] for r in receipts}
 
     @cached(num_args=2)
     def get_receipts_for_room(self, room_id, receipt_type):
@@ -283,7 +283,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
                 args.append(limit)
             txn.execute(sql, args)
 
-            return list(r[0:5] + (json.loads(r[5]),) for r in txn)
+            return [r[0:5] + (json.loads(r[5]),) for r in txn]
 
         return self.db.runInteraction(
             "get_all_updated_receipts", get_all_updated_receipts_txn
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index d5ced05701..d5bd0cb5cf 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -465,7 +465,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
             txn.execute(sql % (clause,), args)
 
-            return set(row[0] for row in txn)
+            return {row[0] for row in txn}
 
         return await self.db.runInteraction(
             "get_users_server_still_shares_room_with",
@@ -826,7 +826,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
                 GROUP BY room_id, user_id;
             """
             txn.execute(sql, (user_id,))
-            return set(row[0] for row in txn if row[1] == 0)
+            return {row[0] for row in txn if row[1] == 0}
 
         return self.db.runInteraction(
             "get_forgotten_rooms_for_user", _get_forgotten_rooms_for_user_txn
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index 3d34103e67..3a3b9a8e72 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -321,7 +321,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             desc="get_referenced_state_groups",
         )
 
-        return set(row["state_group"] for row in rows)
+        return {row["state_group"] for row in rows}
 
 
 class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
@@ -367,7 +367,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
             """
 
             txn.execute(sql, (last_room_id, batch_size))
-            room_ids = list(row[0] for row in txn)
+            room_ids = [row[0] for row in txn]
             if not room_ids:
                 return True, set()
 
@@ -384,7 +384,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
 
             txn.execute(sql, (last_room_id, room_ids[-1], "%:" + self.server_name))
 
-            joined_room_ids = set(row[0] for row in txn)
+            joined_room_ids = {row[0] for row in txn}
 
             left_rooms = set(room_ids) - joined_room_ids
 
@@ -404,7 +404,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
                 retcols=("state_key",),
             )
 
-            potentially_left_users = set(row["state_key"] for row in rows)
+            potentially_left_users = {row["state_key"] for row in rows}
 
             # Now lets actually delete the rooms from the DB.
             self.db.simple_delete_many_txn(
diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py
index 056b25b13a..ada5cce6c2 100644
--- a/synapse/storage/data_stores/main/stream.py
+++ b/synapse/storage/data_stores/main/stream.py
@@ -346,11 +346,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             from_key (str): The room_key portion of a StreamToken
         """
         from_key = RoomStreamToken.parse_stream_token(from_key).stream
-        return set(
+        return {
             room_id
             for room_id in room_ids
             if self._events_stream_cache.has_entity_changed(room_id, from_key)
-        )
+        }
 
     @defer.inlineCallbacks
     def get_room_events_stream_for_room(
@@ -679,11 +679,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         )
 
         events_before = yield self.get_events_as_list(
-            [e for e in results["before"]["event_ids"]], get_prev_content=True
+            list(results["before"]["event_ids"]), get_prev_content=True
         )
 
         events_after = yield self.get_events_as_list(
-            [e for e in results["after"]["event_ids"]], get_prev_content=True
+            list(results["after"]["event_ids"]), get_prev_content=True
         )
 
         return {
diff --git a/synapse/storage/data_stores/main/user_erasure_store.py b/synapse/storage/data_stores/main/user_erasure_store.py
index af8025bc17..ec6b8a4ffd 100644
--- a/synapse/storage/data_stores/main/user_erasure_store.py
+++ b/synapse/storage/data_stores/main/user_erasure_store.py
@@ -63,9 +63,9 @@ class UserErasureWorkerStore(SQLBaseStore):
             retcols=("user_id",),
             desc="are_users_erased",
         )
-        erased_users = set(row["user_id"] for row in rows)
+        erased_users = {row["user_id"] for row in rows}
 
-        res = dict((u, u in erased_users) for u in user_ids)
+        res = {u: u in erased_users for u in user_ids}
         return res
 
 
diff --git a/synapse/storage/data_stores/state/store.py b/synapse/storage/data_stores/state/store.py
index c4ee9b7ccb..57a5267663 100644
--- a/synapse/storage/data_stores/state/store.py
+++ b/synapse/storage/data_stores/state/store.py
@@ -520,11 +520,11 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             retcols=("state_group",),
         )
 
-        remaining_state_groups = set(
+        remaining_state_groups = {
             row["state_group"]
             for row in rows
             if row["state_group"] not in state_groups_to_delete
-        )
+        }
 
         logger.info(
             "[purge] de-delta-ing %i remaining state groups",
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 6dcb5c04da..1953614401 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -554,8 +554,8 @@ class Database(object):
         Returns:
             A list of dicts where the key is the column header.
         """
-        col_headers = list(intern(str(column[0])) for column in cursor.description)
-        results = list(dict(zip(col_headers, row)) for row in cursor)
+        col_headers = [intern(str(column[0])) for column in cursor.description]
+        results = [dict(zip(col_headers, row)) for row in cursor]
         return results
 
     def execute(self, desc, decoder, query, *args):
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index b950550f23..0f9ac1cf09 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -602,14 +602,14 @@ class EventsPersistenceStorage(object):
             event_id_to_state_group.update(event_to_groups)
 
         # State groups of old_latest_event_ids
-        old_state_groups = set(
+        old_state_groups = {
             event_id_to_state_group[evid] for evid in old_latest_event_ids
-        )
+        }
 
         # State groups of new_latest_event_ids
-        new_state_groups = set(
+        new_state_groups = {
             event_id_to_state_group[evid] for evid in new_latest_event_ids
-        )
+        }
 
         # If the old and new groups are the same then we don't need to do
         # anything.
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index c285ef52a0..fc69c32a0a 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -345,9 +345,9 @@ def _upgrade_existing_database(
                     "Could not open delta dir for version %d: %s" % (v, directory)
                 )
 
-        duplicates = set(
+        duplicates = {
             file_name for file_name, count in file_name_counter.items() if count > 1
-        )
+        }
         if duplicates:
             # We don't support using the same file name in the same delta version.
             raise PrepareDatabaseException(
@@ -454,7 +454,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams)
         ),
         (modname,),
     )
-    applied_deltas = set(d for d, in cur)
+    applied_deltas = {d for d, in cur}
     for (name, stream) in names_and_streams:
         if name in applied_deltas:
             continue
diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py
index 635b897d6c..f2ccd5e7c6 100644
--- a/synapse/util/frozenutils.py
+++ b/synapse/util/frozenutils.py
@@ -30,7 +30,7 @@ def freeze(o):
         return o
 
     try:
-        return tuple([freeze(i) for i in o])
+        return tuple(freeze(i) for i in o)
     except TypeError:
         pass
 
diff --git a/synapse/visibility.py b/synapse/visibility.py
index d0abd8f04f..e60d9756b7 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -75,7 +75,7 @@ def filter_events_for_client(
     """
     # Filter out events that have been soft failed so that we don't relay them
     # to clients.
-    events = list(e for e in events if not e.internal_metadata.is_soft_failed())
+    events = [e for e in events if not e.internal_metadata.is_soft_failed()]
 
     types = ((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id))
     event_id_to_state = yield storage.state.get_state_for_events(
@@ -97,7 +97,7 @@ def filter_events_for_client(
     erased_senders = yield storage.main.are_users_erased((e.sender for e in events))
 
     if apply_retention_policies:
-        room_ids = set(e.room_id for e in events)
+        room_ids = {e.room_id for e in events}
         retention_policies = {}
 
         for room_id in room_ids:
diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py
index 2684e662de..463855ecc8 100644
--- a/tests/config/test_generate.py
+++ b/tests/config/test_generate.py
@@ -48,7 +48,7 @@ class ConfigGenerationTestCase(unittest.TestCase):
             )
 
         self.assertSetEqual(
-            set(["homeserver.yaml", "lemurs.win.log.config", "lemurs.win.signing.key"]),
+            {"homeserver.yaml", "lemurs.win.log.config", "lemurs.win.signing.key"},
             set(os.listdir(self.dir)),
         )
 
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index e7d8699040..296dc887be 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -83,7 +83,7 @@ class StateQueryTests(unittest.FederatingHomeserverTestCase):
             )
         )
 
-        self.assertEqual(members, set(["@user:other.example.com", u1]))
+        self.assertEqual(members, {"@user:other.example.com", u1})
         self.assertEqual(len(channel.json_body["pdus"]), 6)
 
     def test_needs_to_be_in_room(self):
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index c171038df8..64915bafcd 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -338,7 +338,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         )
 
         new_state = handle_timeout(
-            state, is_mine=True, syncing_user_ids=set([user_id]), now=now
+            state, is_mine=True, syncing_user_ids={user_id}, now=now
         )
 
         self.assertIsNotNone(new_state)
@@ -579,7 +579,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
         )
         self.assertEqual(expected_state.state, PresenceState.ONLINE)
         self.federation_sender.send_presence_to_destinations.assert_called_once_with(
-            destinations=set(("server2", "server3")), states=[expected_state]
+            destinations={"server2", "server3"}, states=[expected_state]
         )
 
     def _add_new_user(self, room_id, user_id):
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 140cc0a3c2..07b204666e 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -129,12 +129,12 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         hs.get_auth().check_user_in_room = check_user_in_room
 
         def get_joined_hosts_for_room(room_id):
-            return set(member.domain for member in self.room_members)
+            return {member.domain for member in self.room_members}
 
         self.datastore.get_joined_hosts_for_room = get_joined_hosts_for_room
 
         def get_current_users_in_room(room_id):
-            return set(str(u) for u in self.room_members)
+            return {str(u) for u in self.room_members}
 
         hs.get_state_handler().get_current_users_in_room = get_current_users_in_room
 
@@ -257,7 +257,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
 
         member = RoomMember(ROOM_ID, U_APPLE.to_string())
         self.handler._member_typing_until[member] = 1002000
-        self.handler._room_typing[ROOM_ID] = set([U_APPLE.to_string()])
+        self.handler._room_typing[ROOM_ID] = {U_APPLE.to_string()}
 
         self.assertEquals(self.event_source.get_current_key(), 0)
 
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 0a4765fff4..7b92bdbc47 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -114,7 +114,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         public_users = self.get_users_in_public_rooms()
 
         self.assertEqual(
-            self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)])
+            self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)}
         )
         self.assertEqual(public_users, [])
 
@@ -169,7 +169,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         public_users = self.get_users_in_public_rooms()
 
         self.assertEqual(
-            self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)])
+            self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)}
         )
         self.assertEqual(public_users, [])
 
@@ -226,7 +226,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         public_users = self.get_users_in_public_rooms()
 
         self.assertEqual(
-            self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)])
+            self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)}
         )
         self.assertEqual(public_users, [])
 
@@ -358,12 +358,12 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         public_users = self.get_users_in_public_rooms()
 
         # User 1 and User 2 are in the same public room
-        self.assertEqual(set(public_users), set([(u1, room), (u2, room)]))
+        self.assertEqual(set(public_users), {(u1, room), (u2, room)})
 
         # User 1 and User 3 share private rooms
         self.assertEqual(
             self._compress_shared(shares_private),
-            set([(u1, u3, private_room), (u3, u1, private_room)]),
+            {(u1, u3, private_room), (u3, u1, private_room)},
         )
 
     def test_initial_share_all_users(self):
@@ -398,7 +398,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
 
         # No users share rooms
         self.assertEqual(public_users, [])
-        self.assertEqual(self._compress_shared(shares_private), set([]))
+        self.assertEqual(self._compress_shared(shares_private), set())
 
         # Despite not sharing a room, search_all_users means we get a search
         # result.
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index 80187406bc..83032cc9ea 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -163,7 +163,7 @@ class EmailPusherTests(HomeserverTestCase):
 
         # Get the stream ordering before it gets sent
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
+            self.hs.get_datastore().get_pushers_by({"user_name": self.user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
@@ -174,7 +174,7 @@ class EmailPusherTests(HomeserverTestCase):
 
         # It hasn't succeeded yet, so the stream ordering shouldn't have moved
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
+            self.hs.get_datastore().get_pushers_by({"user_name": self.user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
@@ -192,7 +192,7 @@ class EmailPusherTests(HomeserverTestCase):
 
         # The stream ordering has increased
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
+            self.hs.get_datastore().get_pushers_by({"user_name": self.user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index fe3441f081..baf9c785f4 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -102,7 +102,7 @@ class HTTPPusherTests(HomeserverTestCase):
 
         # Get the stream ordering before it gets sent
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+            self.hs.get_datastore().get_pushers_by({"user_name": user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
@@ -113,7 +113,7 @@ class HTTPPusherTests(HomeserverTestCase):
 
         # It hasn't succeeded yet, so the stream ordering shouldn't have moved
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+            self.hs.get_datastore().get_pushers_by({"user_name": user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
@@ -132,7 +132,7 @@ class HTTPPusherTests(HomeserverTestCase):
 
         # The stream ordering has increased
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+            self.hs.get_datastore().get_pushers_by({"user_name": user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
@@ -152,7 +152,7 @@ class HTTPPusherTests(HomeserverTestCase):
 
         # The stream ordering has increased, again
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+            self.hs.get_datastore().get_pushers_by({"user_name": user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
index 9c13a13786..fa3a3ec1bd 100644
--- a/tests/rest/client/v2_alpha/test_sync.py
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -40,16 +40,14 @@ class FilterTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(channel.code, 200)
         self.assertTrue(
-            set(
-                [
-                    "next_batch",
-                    "rooms",
-                    "presence",
-                    "account_data",
-                    "to_device",
-                    "device_lists",
-                ]
-            ).issubset(set(channel.json_body.keys()))
+            {
+                "next_batch",
+                "rooms",
+                "presence",
+                "account_data",
+                "to_device",
+                "device_lists",
+            }.issubset(set(channel.json_body.keys()))
         )
 
     def test_sync_presence_disabled(self):
@@ -63,9 +61,13 @@ class FilterTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(channel.code, 200)
         self.assertTrue(
-            set(
-                ["next_batch", "rooms", "account_data", "to_device", "device_lists"]
-            ).issubset(set(channel.json_body.keys()))
+            {
+                "next_batch",
+                "rooms",
+                "account_data",
+                "to_device",
+                "device_lists",
+            }.issubset(set(channel.json_body.keys()))
         )
 
 
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py
index d491ea2924..e37260a820 100644
--- a/tests/storage/test__base.py
+++ b/tests/storage/test__base.py
@@ -373,7 +373,7 @@ class UpsertManyTests(unittest.HomeserverTestCase):
         )
         self.assertEqual(
             set(self._dump_to_tuple(res)),
-            set([(1, "user1", "hello"), (2, "user2", "there")]),
+            {(1, "user1", "hello"), (2, "user2", "there")},
         )
 
         # Update only user2
@@ -400,5 +400,5 @@ class UpsertManyTests(unittest.HomeserverTestCase):
         )
         self.assertEqual(
             set(self._dump_to_tuple(res)),
-            set([(1, "user1", "hello"), (2, "user2", "bleb")]),
+            {(1, "user1", "hello"), (2, "user2", "bleb")},
         )
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index fd52512696..31710949a8 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -69,14 +69,14 @@ class ApplicationServiceStoreTestCase(unittest.TestCase):
                 pass
 
     def _add_appservice(self, as_token, id, url, hs_token, sender):
-        as_yaml = dict(
-            url=url,
-            as_token=as_token,
-            hs_token=hs_token,
-            id=id,
-            sender_localpart=sender,
-            namespaces={},
-        )
+        as_yaml = {
+            "url": url,
+            "as_token": as_token,
+            "hs_token": hs_token,
+            "id": id,
+            "sender_localpart": sender,
+            "namespaces": {},
+        }
         # use the token as the filename
         with open(as_token, "w") as outfile:
             outfile.write(yaml.dump(as_yaml))
@@ -135,14 +135,14 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         )
 
     def _add_service(self, url, as_token, id):
-        as_yaml = dict(
-            url=url,
-            as_token=as_token,
-            hs_token="something",
-            id=id,
-            sender_localpart="a_sender",
-            namespaces={},
-        )
+        as_yaml = {
+            "url": url,
+            "as_token": as_token,
+            "hs_token": "something",
+            "id": id,
+            "sender_localpart": "a_sender",
+            "namespaces": {},
+        }
         # use the token as the filename
         with open(as_token, "w") as outfile:
             outfile.write(yaml.dump(as_yaml))
@@ -384,8 +384,8 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         )
         self.assertEquals(2, len(services))
         self.assertEquals(
-            set([self.as_list[2]["id"], self.as_list[0]["id"]]),
-            set([services[0].id, services[1].id]),
+            {self.as_list[2]["id"], self.as_list[0]["id"]},
+            {services[0].id, services[1].id},
         )
 
 
diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py
index 029ac26454..0e04b2cf92 100644
--- a/tests/storage/test_cleanup_extrems.py
+++ b/tests/storage/test_cleanup_extrems.py
@@ -134,7 +134,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
         latest_event_ids = self.get_success(
             self.store.get_latest_event_ids_in_room(self.room_id)
         )
-        self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b)))
+        self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b})
 
         # Run the background update and check it did the right thing
         self.run_background_update()
@@ -172,7 +172,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
         latest_event_ids = self.get_success(
             self.store.get_latest_event_ids_in_room(self.room_id)
         )
-        self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b)))
+        self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b})
 
         # Run the background update and check it did the right thing
         self.run_background_update()
@@ -227,9 +227,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
         latest_event_ids = self.get_success(
             self.store.get_latest_event_ids_in_room(self.room_id)
         )
-        self.assertEqual(
-            set(latest_event_ids), set((event_id_a, event_id_b, event_id_c))
-        )
+        self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b, event_id_c})
 
         # Run the background update and check it did the right thing
         self.run_background_update()
@@ -237,7 +235,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
         latest_event_ids = self.get_success(
             self.store.get_latest_event_ids_in_room(self.room_id)
         )
-        self.assertEqual(set(latest_event_ids), set([event_id_b, event_id_c]))
+        self.assertEqual(set(latest_event_ids), {event_id_b, event_id_c})
 
 
 class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py
index f26ff57a18..a7b7fd36d3 100644
--- a/tests/storage/test_event_metrics.py
+++ b/tests/storage/test_event_metrics.py
@@ -59,24 +59,22 @@ class ExtremStatisticsTestCase(HomeserverTestCase):
             )
         )
 
-        expected = set(
-            [
-                b'synapse_forward_extremities_bucket{le="1.0"} 0.0',
-                b'synapse_forward_extremities_bucket{le="2.0"} 2.0',
-                b'synapse_forward_extremities_bucket{le="3.0"} 2.0',
-                b'synapse_forward_extremities_bucket{le="5.0"} 2.0',
-                b'synapse_forward_extremities_bucket{le="7.0"} 3.0',
-                b'synapse_forward_extremities_bucket{le="10.0"} 3.0',
-                b'synapse_forward_extremities_bucket{le="15.0"} 3.0',
-                b'synapse_forward_extremities_bucket{le="20.0"} 3.0',
-                b'synapse_forward_extremities_bucket{le="50.0"} 3.0',
-                b'synapse_forward_extremities_bucket{le="100.0"} 3.0',
-                b'synapse_forward_extremities_bucket{le="200.0"} 3.0',
-                b'synapse_forward_extremities_bucket{le="500.0"} 3.0',
-                b'synapse_forward_extremities_bucket{le="+Inf"} 3.0',
-                b"synapse_forward_extremities_count 3.0",
-                b"synapse_forward_extremities_sum 10.0",
-            ]
-        )
+        expected = {
+            b'synapse_forward_extremities_bucket{le="1.0"} 0.0',
+            b'synapse_forward_extremities_bucket{le="2.0"} 2.0',
+            b'synapse_forward_extremities_bucket{le="3.0"} 2.0',
+            b'synapse_forward_extremities_bucket{le="5.0"} 2.0',
+            b'synapse_forward_extremities_bucket{le="7.0"} 3.0',
+            b'synapse_forward_extremities_bucket{le="10.0"} 3.0',
+            b'synapse_forward_extremities_bucket{le="15.0"} 3.0',
+            b'synapse_forward_extremities_bucket{le="20.0"} 3.0',
+            b'synapse_forward_extremities_bucket{le="50.0"} 3.0',
+            b'synapse_forward_extremities_bucket{le="100.0"} 3.0',
+            b'synapse_forward_extremities_bucket{le="200.0"} 3.0',
+            b'synapse_forward_extremities_bucket{le="500.0"} 3.0',
+            b'synapse_forward_extremities_bucket{le="+Inf"} 3.0',
+            b"synapse_forward_extremities_count 3.0",
+            b"synapse_forward_extremities_sum 10.0",
+        }
 
         self.assertEqual(items, expected)
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
index 04d58fbf24..0b88308ff4 100644
--- a/tests/storage/test_state.py
+++ b/tests/storage/test_state.py
@@ -394,7 +394,7 @@ class StateStoreTestCase(tests.unittest.TestCase):
         ) = self.state_datastore._state_group_cache.get(group)
 
         self.assertEqual(is_all, False)
-        self.assertEqual(known_absent, set([(e1.type, e1.state_key)]))
+        self.assertEqual(known_absent, {(e1.type, e1.state_key)})
         self.assertDictEqual(state_dict_ids, {(e1.type, e1.state_key): e1.event_id})
 
         ############################################
diff --git a/tests/test_state.py b/tests/test_state.py
index d1578fe581..66f22f6813 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -254,9 +254,7 @@ class StateTestCase(unittest.TestCase):
         ctx_d = context_store["D"]
 
         prev_state_ids = yield ctx_d.get_prev_state_ids()
-        self.assertSetEqual(
-            {"START", "A", "C"}, {e_id for e_id in prev_state_ids.values()}
-        )
+        self.assertSetEqual({"START", "A", "C"}, set(prev_state_ids.values()))
 
         self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event)
         self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group)
@@ -313,9 +311,7 @@ class StateTestCase(unittest.TestCase):
         ctx_e = context_store["E"]
 
         prev_state_ids = yield ctx_e.get_prev_state_ids()
-        self.assertSetEqual(
-            {"START", "A", "B", "C"}, {e for e in prev_state_ids.values()}
-        )
+        self.assertSetEqual({"START", "A", "B", "C"}, set(prev_state_ids.values()))
         self.assertEqual(ctx_c.state_group, ctx_e.state_group_before_event)
         self.assertEqual(ctx_e.state_group_before_event, ctx_e.state_group)
 
@@ -388,9 +384,7 @@ class StateTestCase(unittest.TestCase):
         ctx_d = context_store["D"]
 
         prev_state_ids = yield ctx_d.get_prev_state_ids()
-        self.assertSetEqual(
-            {"A1", "A2", "A3", "A5", "B"}, {e for e in prev_state_ids.values()}
-        )
+        self.assertSetEqual({"A1", "A2", "A3", "A5", "B"}, set(prev_state_ids.values()))
 
         self.assertEqual(ctx_b.state_group, ctx_d.state_group_before_event)
         self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group)
@@ -482,7 +476,7 @@ class StateTestCase(unittest.TestCase):
         current_state_ids = yield context.get_current_state_ids()
 
         self.assertEqual(
-            set([e.event_id for e in old_state]), set(current_state_ids.values())
+            {e.event_id for e in old_state}, set(current_state_ids.values())
         )
 
         self.assertEqual(group_name, context.state_group)
@@ -513,9 +507,7 @@ class StateTestCase(unittest.TestCase):
 
         prev_state_ids = yield context.get_prev_state_ids()
 
-        self.assertEqual(
-            set([e.event_id for e in old_state]), set(prev_state_ids.values())
-        )
+        self.assertEqual({e.event_id for e in old_state}, set(prev_state_ids.values()))
 
         self.assertIsNotNone(context.state_group)
 
diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py
index f2be63706b..72a9de5370 100644
--- a/tests/util/test_stream_change_cache.py
+++ b/tests/util/test_stream_change_cache.py
@@ -67,7 +67,7 @@ class StreamChangeCacheTests(unittest.TestCase):
         # If we update an existing entity, it keeps the two existing entities
         cache.entity_has_changed("bar@baz.net", 5)
         self.assertEqual(
-            set(["bar@baz.net", "user@elsewhere.org"]), set(cache._entity_to_key)
+            {"bar@baz.net", "user@elsewhere.org"}, set(cache._entity_to_key)
         )
 
     def test_get_all_entities_changed(self):
@@ -137,7 +137,7 @@ class StreamChangeCacheTests(unittest.TestCase):
             cache.get_entities_changed(
                 ["user@foo.com", "bar@baz.net", "user@elsewhere.org"], stream_pos=2
             ),
-            set(["bar@baz.net", "user@elsewhere.org"]),
+            {"bar@baz.net", "user@elsewhere.org"},
         )
 
         # Query all the entries mid-way through the stream, but include one
@@ -153,7 +153,7 @@ class StreamChangeCacheTests(unittest.TestCase):
                 ],
                 stream_pos=2,
             ),
-            set(["bar@baz.net", "user@elsewhere.org"]),
+            {"bar@baz.net", "user@elsewhere.org"},
         )
 
         # Query all the entries, but before the first known point. We will get
@@ -168,21 +168,13 @@ class StreamChangeCacheTests(unittest.TestCase):
                 ],
                 stream_pos=0,
             ),
-            set(
-                [
-                    "user@foo.com",
-                    "bar@baz.net",
-                    "user@elsewhere.org",
-                    "not@here.website",
-                ]
-            ),
+            {"user@foo.com", "bar@baz.net", "user@elsewhere.org", "not@here.website"},
         )
 
         # Query a subset of the entries mid-way through the stream. We should
         # only get back the subset.
         self.assertEqual(
-            cache.get_entities_changed(["bar@baz.net"], stream_pos=2),
-            set(["bar@baz.net"]),
+            cache.get_entities_changed(["bar@baz.net"], stream_pos=2), {"bar@baz.net"},
         )
 
     def test_max_pos(self):
diff --git a/tox.ini b/tox.ini
index b9132a3177..b715ea0bff 100644
--- a/tox.ini
+++ b/tox.ini
@@ -123,6 +123,7 @@ skip_install = True
 basepython = python3.6
 deps =
     flake8
+    flake8-comprehensions
     black==19.10b0  # We pin so that our tests don't start failing on new releases of black.
 commands =
     python -m black --check --diff .
-- 
cgit 1.4.1
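
For context, every hunk in the commit above applies the same mechanical rewrite, now enforced in CI by the flake8-comprehensions plugin added to tox.ini: redundant set()/list()/dict() calls wrapped around generators or keyword arguments become comprehensions or literals. A minimal runnable sketch of the pattern, with hypothetical names:

    def get_domain(user_id):
        # Hypothetical helper: "@alice:example.com" -> "example.com".
        return user_id.split(":", 1)[1]

    members = ["@alice:example.com", "@bob:example.org"]

    # Before: flagged by flake8-comprehensions as C401.
    hosts_old = set(get_domain(u) for u in members)

    # After: the equivalent set comprehension.
    hosts = {get_domain(u) for u in members}

    # dict(...) keyword calls become dict literals in the same spirit.
    keyvalues_old = dict(user_id="@alice:example.com", hidden=False)
    keyvalues = {"user_id": "@alice:example.com", "hidden": False}

    assert hosts == hosts_old == {"example.com", "example.org"}
    assert keyvalues == keyvalues_old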


From 7936d2a96e4781ad7d1ae27f78b65c8eb8d5c3f5 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Fri, 21 Feb 2020 07:18:33 -0500
Subject: Publishing/removing a room from the directory now requires the power
 level needed to modify the canonical alias.

---
 changelog.d/6965.feature |  1 +
 synapse/api/auth.py      | 10 +++++-----
 2 files changed, 6 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/6965.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6965.feature b/changelog.d/6965.feature
new file mode 100644
index 0000000000..6ad9956e40
--- /dev/null
+++ b/changelog.d/6965.feature
@@ -0,0 +1 @@
+Publishing/removing a room from the room directory now requires the user to have a power level capable of modifying the canonical alias, instead of the room aliases.
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index f576d65388..5ca18b4301 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -538,13 +538,13 @@ class Auth(object):
         return defer.succeed(auth_ids)
 
     @defer.inlineCallbacks
-    def check_can_change_room_list(self, room_id, user):
+    def check_can_change_room_list(self, room_id: str, user: UserID):
         """Check if the user is allowed to edit the room's entry in the
         published room list.
 
         Args:
-            room_id (str)
-            user (UserID)
+            room_id
+            user
         """
 
         is_admin = yield self.is_server_admin(user)
@@ -556,7 +556,7 @@ class Auth(object):
 
         # We currently require the user is a "moderator" in the room. We do this
         # by checking if they would (theoretically) be able to change the
-        # m.room.aliases events
+        # m.room.canonical_alias events
         power_level_event = yield self.state.get_current_state(
             room_id, EventTypes.PowerLevels, ""
         )
@@ -566,7 +566,7 @@ class Auth(object):
             auth_events[(EventTypes.PowerLevels, "")] = power_level_event
 
         send_level = event_auth.get_send_level(
-            EventTypes.Aliases, "", power_level_event
+            EventTypes.CanonicalAlias, "", power_level_event
         )
         user_level = event_auth.get_user_power_level(user_id, auth_events)
 
-- 
cgit 1.4.1
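
The check above now keys off the power level required to send m.room.canonical_alias instead of m.room.aliases. A simplified sketch of the arithmetic under default Matrix power-level semantics; the real code goes through event_auth.get_send_level and get_user_power_level:

    def can_edit_room_directory(user_level, power_levels):
        # m.room.canonical_alias is a state event, so when it has no
        # explicit entry under "events" the required send level falls
        # back to state_default (50 when unspecified).
        send_level = power_levels.get("events", {}).get(
            "m.room.canonical_alias",
            power_levels.get("state_default", 50),
        )
        return user_level >= send_level

    # A level-50 moderator can edit the listing; a level-0 user cannot.
    assert can_edit_room_directory(50, {})
    assert not can_edit_room_directory(0, {})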


From fcf45994881d652571d965547b2287d796f798fc Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Fri, 21 Feb 2020 12:40:23 -0500
Subject: Stop returning aliases as part of the room list. (#6970)

---
 changelog.d/6970.removal      | 1 +
 synapse/handlers/room_list.py | 9 ---------
 2 files changed, 1 insertion(+), 9 deletions(-)
 create mode 100644 changelog.d/6970.removal

(limited to 'changelog.d')

diff --git a/changelog.d/6970.removal b/changelog.d/6970.removal
new file mode 100644
index 0000000000..89bd363b95
--- /dev/null
+++ b/changelog.d/6970.removal
@@ -0,0 +1 @@
+The room list endpoint no longer returns a list of aliases.
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index c615206df1..0b7d3da680 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -216,15 +216,6 @@ class RoomListHandler(BaseHandler):
                         direction_is_forward=False,
                     ).to_token()
 
-        for room in results:
-            # populate search result entries with additional fields, namely
-            # 'aliases'
-            room_id = room["room_id"]
-
-            aliases = yield self.store.get_aliases_for_room(room_id)
-            if aliases:
-                room["aliases"] = aliases
-
         response["chunk"] = results
 
         response["total_room_count_estimate"] = yield self.store.count_public_rooms(
-- 
cgit 1.4.1


From 7b0e2d961ce9b70ed2d41f27f624ab752af26400 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Fri, 21 Feb 2020 18:44:03 +0100
Subject: Change displayname of user as admin in rooms (#6876)

---
 changelog.d/6572.bugfix     |  1 +
 synapse/handlers/profile.py | 12 +++++++++++-
 2 files changed, 12 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6572.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6572.bugfix b/changelog.d/6572.bugfix
new file mode 100644
index 0000000000..4f708f409f
--- /dev/null
+++ b/changelog.d/6572.bugfix
@@ -0,0 +1 @@
+When a user's profile is updated via the admin API, also generate a displayname/avatar update for that user in each room.
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index f9579d69ee..50ce0c585b 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -28,7 +28,7 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import UserID, get_domain_from_id
+from synapse.types import UserID, create_requester, get_domain_from_id
 
 from ._base import BaseHandler
 
@@ -165,6 +165,12 @@ class BaseProfileHandler(BaseHandler):
         if new_displayname == "":
             new_displayname = None
 
+        # If the admin changes the display name of a user, the requesting user cannot send
+        # the join event to update the displayname in the rooms.
+        # This must be done by the target user themselves.
+        if by_admin:
+            requester = create_requester(target_user)
+
         yield self.store.set_profile_displayname(target_user.localpart, new_displayname)
 
         if self.hs.config.user_directory_search_all_users:
@@ -217,6 +223,10 @@ class BaseProfileHandler(BaseHandler):
                 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,)
             )
 
+        # Same as in set_displayname above.
+        if by_admin:
+            requester = create_requester(target_user)
+
         yield self.store.set_profile_avatar_url(target_user.localpart, new_avatar_url)
 
         if self.hs.config.user_directory_search_all_users:
-- 
cgit 1.4.1
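
The crux of the commit above: when by_admin is set, the handler swaps in a requester acting as the target user before sending the per-room profile updates, since an admin cannot emit membership events on another user's behalf. A minimal sketch of that pattern using the real create_requester helper from synapse.types (the surrounding handler code is simplified away):

    from synapse.types import create_requester

    def effective_requester(requester, target_user, by_admin):
        # Room-level displayname/avatar updates ride on the target user's
        # own membership events, so act as that user when an admin asks.
        if by_admin:
            return create_requester(target_user)
        return requester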


From af6c3895015580d04c5affd95321c75802c3cb62 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Fri, 21 Feb 2020 12:50:48 -0500
Subject: No longer use room alias events to calculate room names for push
 notifications. (#6966)

---
 changelog.d/6966.removal          |  1 +
 synapse/push/presentable_names.py | 36 ++++++++++++++----------------------
 2 files changed, 15 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/6966.removal

(limited to 'changelog.d')

diff --git a/changelog.d/6966.removal b/changelog.d/6966.removal
new file mode 100644
index 0000000000..69673d9139
--- /dev/null
+++ b/changelog.d/6966.removal
@@ -0,0 +1 @@
+Synapse no longer uses room alias events to calculate room names for email notifications.
diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py
index 16a7e8e31d..0644a13cfc 100644
--- a/synapse/push/presentable_names.py
+++ b/synapse/push/presentable_names.py
@@ -18,6 +18,8 @@ import re
 
 from twisted.internet import defer
 
+from synapse.api.constants import EventTypes
+
 logger = logging.getLogger(__name__)
 
 # intentionally looser than what aliases we allow to be registered since
@@ -50,17 +52,17 @@ def calculate_room_name(
         (string or None) A human readable name for the room.
     """
     # does it have a name?
-    if ("m.room.name", "") in room_state_ids:
+    if (EventTypes.Name, "") in room_state_ids:
         m_room_name = yield store.get_event(
-            room_state_ids[("m.room.name", "")], allow_none=True
+            room_state_ids[(EventTypes.Name, "")], allow_none=True
         )
         if m_room_name and m_room_name.content and m_room_name.content["name"]:
             return m_room_name.content["name"]
 
     # does it have a canonical alias?
-    if ("m.room.canonical_alias", "") in room_state_ids:
+    if (EventTypes.CanonicalAlias, "") in room_state_ids:
         canon_alias = yield store.get_event(
-            room_state_ids[("m.room.canonical_alias", "")], allow_none=True
+            room_state_ids[(EventTypes.CanonicalAlias, "")], allow_none=True
         )
         if (
             canon_alias
@@ -74,32 +76,22 @@ def calculate_room_name(
     # for an event type, so rearrange the data structure
     room_state_bytype_ids = _state_as_two_level_dict(room_state_ids)
 
-    # right then, any aliases at all?
-    if "m.room.aliases" in room_state_bytype_ids:
-        m_room_aliases = room_state_bytype_ids["m.room.aliases"]
-        for alias_id in m_room_aliases.values():
-            alias_event = yield store.get_event(alias_id, allow_none=True)
-            if alias_event and alias_event.content.get("aliases"):
-                the_aliases = alias_event.content["aliases"]
-                if len(the_aliases) > 0 and _looks_like_an_alias(the_aliases[0]):
-                    return the_aliases[0]
-
     if not fallback_to_members:
         return None
 
     my_member_event = None
-    if ("m.room.member", user_id) in room_state_ids:
+    if (EventTypes.Member, user_id) in room_state_ids:
         my_member_event = yield store.get_event(
-            room_state_ids[("m.room.member", user_id)], allow_none=True
+            room_state_ids[(EventTypes.Member, user_id)], allow_none=True
         )
 
     if (
         my_member_event is not None
         and my_member_event.content["membership"] == "invite"
     ):
-        if ("m.room.member", my_member_event.sender) in room_state_ids:
+        if (EventTypes.Member, my_member_event.sender) in room_state_ids:
             inviter_member_event = yield store.get_event(
-                room_state_ids[("m.room.member", my_member_event.sender)],
+                room_state_ids[(EventTypes.Member, my_member_event.sender)],
                 allow_none=True,
             )
             if inviter_member_event:
@@ -114,9 +106,9 @@ def calculate_room_name(
 
     # we're going to have to generate a name based on who's in the room,
     # so find out who is in the room that isn't the user.
-    if "m.room.member" in room_state_bytype_ids:
+    if EventTypes.Member in room_state_bytype_ids:
         member_events = yield store.get_events(
-            list(room_state_bytype_ids["m.room.member"].values())
+            list(room_state_bytype_ids[EventTypes.Member].values())
         )
         all_members = [
             ev
@@ -138,9 +130,9 @@ def calculate_room_name(
             # self-chat, peeked room with 1 participant,
             # or inbound invite, or outbound 3PID invite.
             if all_members[0].sender == user_id:
-                if "m.room.third_party_invite" in room_state_bytype_ids:
+                if EventTypes.ThirdPartyInvite in room_state_bytype_ids:
                     third_party_invites = room_state_bytype_ids[
-                        "m.room.third_party_invite"
+                        EventTypes.ThirdPartyInvite
                     ].values()
 
                     if len(third_party_invites) > 0:
-- 
cgit 1.4.1
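
For reference, the EventTypes constants swapped in above are just the event-type strings that were previously inlined; the values below are as defined in synapse.api.constants:

    from synapse.api.constants import EventTypes

    # The refactor is mechanical: each string literal maps to a constant.
    assert EventTypes.Name == "m.room.name"
    assert EventTypes.CanonicalAlias == "m.room.canonical_alias"
    assert EventTypes.Member == "m.room.member"
    assert EventTypes.ThirdPartyInvite == "m.room.third_party_invite"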


From 4c2ed3f20ef5361ea04da9c678d157d8735ca120 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 24 Feb 2020 15:18:38 +0000
Subject: Fix minor issues with email config (#6962)

 * Give `notif_template_html`, `notif_template_text` default values (fixes #6960)
 * Don't complain if `smtp_host` and `smtp_port` are unset, since they have sensible defaults (fixes #6961)
 * Set the example for `enable_notifs` to `True`, for consistency and because it's more useful
 * Raise errors as ConfigError rather than RuntimeError for nicer formatting
---
 changelog.d/6962.bugfix       |  1 +
 docs/sample_config.yaml       |  9 +++---
 synapse/config/emailconfig.py | 66 ++++++++++++++++++++-----------------------
 3 files changed, 36 insertions(+), 40 deletions(-)
 create mode 100644 changelog.d/6962.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6962.bugfix b/changelog.d/6962.bugfix
new file mode 100644
index 0000000000..9f5229d400
--- /dev/null
+++ b/changelog.d/6962.bugfix
@@ -0,0 +1 @@
+Fix a couple of bugs in email configuration handling.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 8a036071e1..54cbe840d5 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1409,10 +1409,6 @@ email:
   #
   #require_transport_security: true
 
-  # Enable sending emails for messages that the user has missed
-  #
-  #enable_notifs: false
-
   # notif_from defines the "From" address to use when sending emails.
   # It must be set if email sending is enabled.
   #
@@ -1430,6 +1426,11 @@ email:
   #
   #app_name: my_branded_matrix_server
 
+  # Uncomment the following to enable sending emails for messages that the user
+  # has missed. Disabled by default.
+  #
+  #enable_notifs: true
+
   # Uncomment the following to disable automatic subscription to email
   # notifications for new users. Enabled by default.
   #
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 74853f9faa..f31fc85ec8 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -27,6 +27,12 @@ import pkg_resources
 
 from ._base import Config, ConfigError
 
+MISSING_PASSWORD_RESET_CONFIG_ERROR = """\
+Password reset emails are enabled on this homeserver due to a partial
+'email' block. However, the following required keys are missing:
+    %s
+"""
+
 
 class EmailConfig(Config):
     section = "email"
@@ -142,24 +148,18 @@ class EmailConfig(Config):
             bleach
 
         if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
-            required = ["smtp_host", "smtp_port", "notif_from"]
-
             missing = []
-            for k in required:
-                if k not in email_config:
-                    missing.append("email." + k)
+            if not self.email_notif_from:
+                missing.append("email.notif_from")
 
             # public_baseurl is required to build password reset and validation links that
             # will be emailed to users
             if config.get("public_baseurl") is None:
                 missing.append("public_baseurl")
 
-            if len(missing) > 0:
-                raise RuntimeError(
-                    "Password resets emails are configured to be sent from "
-                    "this homeserver due to a partial 'email' block. "
-                    "However, the following required keys are missing: %s"
-                    % (", ".join(missing),)
+            if missing:
+                raise ConfigError(
+                    MISSING_PASSWORD_RESET_CONFIG_ERROR % (", ".join(missing),)
                 )
 
             # These email templates have placeholders in them, and thus must be
@@ -245,32 +245,25 @@ class EmailConfig(Config):
             )
 
         if self.email_enable_notifs:
-            required = [
-                "smtp_host",
-                "smtp_port",
-                "notif_from",
-                "notif_template_html",
-                "notif_template_text",
-            ]
-
             missing = []
-            for k in required:
-                if k not in email_config:
-                    missing.append(k)
-
-            if len(missing) > 0:
-                raise RuntimeError(
-                    "email.enable_notifs is True but required keys are missing: %s"
-                    % (", ".join(["email." + k for k in missing]),)
-                )
+            if not self.email_notif_from:
+                missing.append("email.notif_from")
 
             if config.get("public_baseurl") is None:
-                raise RuntimeError(
-                    "email.enable_notifs is True but no public_baseurl is set"
+                missing.append("public_baseurl")
+
+            if missing:
+                raise ConfigError(
+                    "email.enable_notifs is True but required keys are missing: %s"
+                    % (", ".join(missing),)
                 )
 
-            self.email_notif_template_html = email_config["notif_template_html"]
-            self.email_notif_template_text = email_config["notif_template_text"]
+            self.email_notif_template_html = email_config.get(
+                "notif_template_html", "notif_mail.html"
+            )
+            self.email_notif_template_text = email_config.get(
+                "notif_template_text", "notif_mail.txt"
+            )
 
             for f in self.email_notif_template_text, self.email_notif_template_html:
                 p = os.path.join(self.email_template_dir, f)
@@ -323,10 +316,6 @@ class EmailConfig(Config):
           #
           #require_transport_security: true
 
-          # Enable sending emails for messages that the user has missed
-          #
-          #enable_notifs: false
-
           # notif_from defines the "From" address to use when sending emails.
           # It must be set if email sending is enabled.
           #
@@ -344,6 +333,11 @@ class EmailConfig(Config):
           #
           #app_name: my_branded_matrix_server
 
+          # Uncomment the following to enable sending emails for messages that the user
+          # has missed. Disabled by default.
+          #
+          #enable_notifs: true
+
           # Uncomment the following to disable automatic subscription to email
           # notifications for new users. Enabled by default.
           #
-- 
cgit 1.4.1
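
The validation pattern this commit settles on is worth isolating: give optional keys their defaults via dict.get(), collect *every* missing required key before raising, and raise ConfigError so the failure is reported as a configuration problem rather than a stack trace. A minimal standalone sketch of that pattern, using hypothetical names rather than Synapse's actual EmailConfig class:

    from typing import Optional


    class ConfigError(Exception):
        """Raised for malformed configuration so it can be reported cleanly."""


    def check_notif_config(email_config: dict, public_baseurl: Optional[str]) -> dict:
        # smtp_host/smtp_port are no longer required: they have sensible defaults.
        smtp_host = email_config.get("smtp_host", "localhost")
        smtp_port = email_config.get("smtp_port", 25)

        # Collect *all* missing keys before raising, so the user can fix their
        # config in one pass instead of replaying the error key by key.
        missing = []
        if not email_config.get("notif_from"):
            missing.append("email.notif_from")
        if public_baseurl is None:
            missing.append("public_baseurl")

        if missing:
            raise ConfigError(
                "email.enable_notifs is True but required keys are missing: %s"
                % (", ".join(missing),)
            )

        # The notification templates get defaults too, mirroring the change above.
        return {
            "smtp_host": smtp_host,
            "smtp_port": smtp_port,
            "notif_template_html": email_config.get("notif_template_html", "notif_mail.html"),
            "notif_template_text": email_config.get("notif_template_text", "notif_mail.txt"),
        }
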


From a301934f4610ffce490fbb925aaa898aac2829bc Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 24 Feb 2020 15:46:41 +0000
Subject: Upsert room version when we join over federation (#6968)

This is intended as a precursor to storing room versions when we receive an
invite over federation, but has the happy side-effect of fixing #3374 at last.

In short: replace the store_room call wrapped in a try/except with a proper
upsert which updates the right columns.
---
 changelog.d/6968.bugfix                  |  1 +
 synapse/handlers/federation.py           | 22 ++++++++++++----------
 synapse/storage/data_stores/main/room.py | 17 +++++++++++++++++
 3 files changed, 30 insertions(+), 10 deletions(-)
 create mode 100644 changelog.d/6968.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6968.bugfix b/changelog.d/6968.bugfix
new file mode 100644
index 0000000000..9965bfc0c3
--- /dev/null
+++ b/changelog.d/6968.bugfix
@@ -0,0 +1 @@
+Fix `duplicate key` error which was logged when rejoining a room over federation.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index a689065f89..fb0a586eaa 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1323,16 +1323,18 @@ class FederationHandler(BaseHandler):
 
             logger.debug("do_invite_join event: %s", event)
 
-            try:
-                await self.store.store_room(
-                    room_id=room_id,
-                    room_creator_user_id="",
-                    is_public=False,
-                    room_version=room_version_obj,
-                )
-            except Exception:
-                # FIXME
-                pass
+            # if this is the first time we've joined this room, it's time to add
+            # a row to `rooms` with the correct room version. If there's already a
+            # row there, we should override it, since it may have been populated
+            # based on an invite request which lied about the room version.
+            #
+            # federation_client.send_join has already checked that the room
+            # version in the received create event is the same as room_version_obj,
+            # so we can rely on it now.
+            #
+            await self.store.upsert_room_on_join(
+                room_id=room_id, room_version=room_version_obj,
+            )
 
             await self._persist_auth_tree(
                 origin, auth_chain, state, event, room_version_obj
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index 9a17e336ba..70137dfbe4 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -954,6 +954,23 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
 
         self.config = hs.config
 
+    async def upsert_room_on_join(self, room_id: str, room_version: RoomVersion):
+        """Ensure that the room is stored in the table
+
+        Called when we join a room over federation, and overwrites any room version
+        currently in the table.
+        """
+        await self.db.simple_upsert(
+            desc="upsert_room_on_join",
+            table="rooms",
+            keyvalues={"room_id": room_id},
+            values={"room_version": room_version.identifier},
+            insertion_values={"is_public": False, "creator": ""},
+            # rooms has a unique constraint on room_id, so no need to lock when doing an
+            # emulated upsert.
+            lock=False,
+        )
+
     @defer.inlineCallbacks
     def store_room(
         self,
-- 
cgit 1.4.1
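
The new upsert_room_on_join leans on the unique constraint on rooms.room_id: insert a fresh row on first join, otherwise overwrite only room_version, leaving the insertion-only columns (creator, is_public) untouched. A minimal sqlite3 sketch of the same semantics (hypothetical schema, not Synapse's Database helpers; needs SQLite 3.24+ for ON CONFLICT):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE rooms (room_id TEXT PRIMARY KEY,"
        " room_version TEXT, is_public BOOLEAN, creator TEXT)"
    )


    def upsert_room_on_join(room_id: str, room_version: str) -> None:
        # First join: insert the row. Subsequent joins: overwrite room_version
        # only (it may have been populated from an invite that lied about the
        # room version), leaving the insertion-only columns alone.
        conn.execute(
            "INSERT INTO rooms (room_id, room_version, is_public, creator)"
            " VALUES (?, ?, 0, '')"
            " ON CONFLICT (room_id) DO UPDATE SET room_version = excluded.room_version",
            (room_id, room_version),
        )


    upsert_room_on_join("!room:example.org", "1")
    upsert_room_on_join("!room:example.org", "5")  # replaces the stale version
    print(conn.execute("SELECT room_id, room_version FROM rooms").fetchall())
    # [('!room:example.org', '5')]
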


From 691659568fa57f6afd9918886efc72b9e7081d8f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 24 Feb 2020 17:20:45 +0000
Subject: Remove redundant store_room call (#6979)

`_process_received_pdu` is only called by `on_receive_pdu`, which ignores any
events for unknown rooms, so this is redundant.
---
 changelog.d/6979.misc          |  1 +
 synapse/handlers/federation.py | 23 -----------------------
 2 files changed, 1 insertion(+), 23 deletions(-)
 create mode 100644 changelog.d/6979.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6979.misc b/changelog.d/6979.misc
new file mode 100644
index 0000000000..c57b398c2f
--- /dev/null
+++ b/changelog.d/6979.misc
@@ -0,0 +1 @@
+Remove redundant `store_room` call from `FederationHandler._process_received_pdu`.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index fb0a586eaa..c2e6ee266d 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -41,7 +41,6 @@ from synapse.api.errors import (
     FederationDeniedError,
     FederationError,
     RequestSendFailed,
-    StoreError,
     SynapseError,
 )
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
@@ -707,28 +706,6 @@ class FederationHandler(BaseHandler):
         except AuthError as e:
             raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
 
-        room = await self.store.get_room(room_id)
-
-        if not room:
-            try:
-                prev_state_ids = await context.get_prev_state_ids()
-                create_event = await self.store.get_event(
-                    prev_state_ids[(EventTypes.Create, "")]
-                )
-
-                room_version_id = create_event.content.get(
-                    "room_version", RoomVersions.V1.identifier
-                )
-
-                await self.store.store_room(
-                    room_id=room_id,
-                    room_creator_user_id="",
-                    is_public=False,
-                    room_version=KNOWN_ROOM_VERSIONS[room_version_id],
-                )
-            except StoreError:
-                logger.exception("Failed to store room.")
-
         if event.type == EventTypes.Member:
             if event.membership == Membership.JOIN:
                 # Only fire user_joined_room if the user has actually
-- 
cgit 1.4.1
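
The reasoning is easiest to see from the call graph. A heavily condensed sketch (hypothetical method bodies, not the real handler) of why the deleted block could never run:

    class FederationHandlerSketch:
        """Condensed control flow showing why the deleted block was dead code."""

        def __init__(self, store):
            self.store = store

        async def on_receive_pdu(self, origin, pdu):
            # Events for rooms we know nothing about are dropped up here...
            room = await self.store.get_room(pdu.room_id)
            if room is None:
                return

            await self._process_received_pdu(origin, pdu)

        async def _process_received_pdu(self, origin, pdu):
            # ...so by the time control reaches this method, the `rooms` row
            # is guaranteed to exist, and the removed "store the room if it
            # is missing" branch could never be taken.
            ...
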


From 4aea0bd292bf9e33d166fcfd632159cfe35050dd Mon Sep 17 00:00:00 2001
From: Fridtjof Mund <2780577+fridtjof@users.noreply.github.com>
Date: Tue, 25 Feb 2020 11:48:13 +0100
Subject: contrib/docker: remove quotes for POSTGRES_INITDB_ARGS (#6984)

I made a mistake in https://github.com/matrix-org/synapse/pull/6921 - the quotes break the postgres container's startup script (or docker-compose), which makes initdb fail: https://github.com/matrix-org/synapse/pull/6921#issuecomment-590657154

Signed-off-by: Fridtjof Mund 
---
 changelog.d/6984.docker           | 1 +
 contrib/docker/docker-compose.yml | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6984.docker

(limited to 'changelog.d')

diff --git a/changelog.d/6984.docker b/changelog.d/6984.docker
new file mode 100644
index 0000000000..84a55e1267
--- /dev/null
+++ b/changelog.d/6984.docker
@@ -0,0 +1 @@
+Fix `POSTGRES_INITDB_ARGS` in the `contrib/docker/docker-compose.yml` example docker-compose configuration.
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index 5df29379c8..453b305053 100644
--- a/contrib/docker/docker-compose.yml
+++ b/contrib/docker/docker-compose.yml
@@ -58,7 +58,7 @@ services:
       - POSTGRES_PASSWORD=changeme
       # ensure the database gets created correctly
       # https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
-      - POSTGRES_INITDB_ARGS="--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
+      - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
     volumes:
       # You may store the database tables in a local folder..
       - ./schemas:/var/lib/postgresql/data
-- 
cgit 1.4.1
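
The underlying gotcha: list-style environment entries in docker-compose are split on the first '=' and the remainder reaches the container verbatim, with no shell-style quote stripping, so the double quotes became part of the value that initdb saw. A tiny illustrative Python sketch of that parsing rule (not docker-compose's actual implementation):

    def parse_env_entry(entry: str):
        # List-form entries are "KEY=VALUE"; everything after the first '='
        # is the literal value -- no shell-style quote stripping happens.
        key, _, value = entry.partition("=")
        return key, value


    broken = parse_env_entry(
        'POSTGRES_INITDB_ARGS="--encoding=UTF-8 --lc-collate=C --lc-ctype=C"'
    )
    fixed = parse_env_entry(
        "POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
    )

    print(broken[1])  # "--encoding=UTF-8 --lc-collate=C --lc-ctype=C" (quotes kept!)
    print(fixed[1])   # --encoding=UTF-8 --lc-collate=C --lc-ctype=C
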


From bbf8886a05be6a929556d6f09a1b6ce053a3c403 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 25 Feb 2020 16:56:55 +0000
Subject: Merge worker apps into one. (#6964)

---
 changelog.d/6964.misc                       |   1 +
 synapse/app/appservice.py                   | 156 +----
 synapse/app/client_reader.py                | 190 +-----
 synapse/app/event_creator.py                | 186 +-----
 synapse/app/federation_reader.py            | 172 +-----
 synapse/app/federation_sender.py            | 303 +--------
 synapse/app/frontend_proxy.py               | 236 +------
 synapse/app/generic_worker.py               | 917 ++++++++++++++++++++++++++++
 synapse/app/media_repository.py             | 157 +----
 synapse/app/pusher.py                       | 209 +------
 synapse/app/synchrotron.py                  | 449 +-------------
 synapse/app/user_dir.py                     | 211 +------
 synapse/replication/slave/storage/events.py |  20 +
 synapse/storage/data_stores/main/pusher.py  | 156 ++---
 tests/app/test_frontend_proxy.py            |  12 +-
 tests/app/test_openid_listener.py           |   4 +-
 16 files changed, 1052 insertions(+), 2327 deletions(-)
 create mode 100644 changelog.d/6964.misc
 create mode 100644 synapse/app/generic_worker.py

(limited to 'changelog.d')

diff --git a/changelog.d/6964.misc b/changelog.d/6964.misc
new file mode 100644
index 0000000000..ec5c004bbe
--- /dev/null
+++ b/changelog.d/6964.misc
@@ -0,0 +1 @@
+Merge worker apps together.
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index 2217d4a4fb..add43147b3 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -13,161 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
-import sys
-
-from twisted.internet import defer, reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse import events
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.directory import DirectoryStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.server import HomeServer
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.appservice")
-
-
-class AppserviceSlaveStore(
-    DirectoryStore,
-    SlavedEventStore,
-    SlavedApplicationServiceStore,
-    SlavedRegistrationStore,
-):
-    pass
-
-
-class AppserviceServer(HomeServer):
-    DATASTORE_CLASS = AppserviceSlaveStore
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-
-        root_resource = create_resource_tree(resources, NoResource())
-
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-        )
-
-        logger.info("Synapse appservice now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
-
-        self.get_tcp_replication().start_replication(self)
-
-    def build_tcp_replication(self):
-        return ASReplicationHandler(self)
 
+import sys
 
-class ASReplicationHandler(ReplicationClientHandler):
-    def __init__(self, hs):
-        super(ASReplicationHandler, self).__init__(hs.get_datastore())
-        self.appservice_handler = hs.get_application_service_handler()
-
-    async def on_rdata(self, stream_name, token, rows):
-        await super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
-
-        if stream_name == "events":
-            max_stream_id = self.store.get_room_max_stream_ordering()
-            run_in_background(self._notify_app_services, max_stream_id)
-
-    @defer.inlineCallbacks
-    def _notify_app_services(self, room_stream_id):
-        try:
-            yield self.appservice_handler.notify_interested_services(room_stream_id)
-        except Exception:
-            logger.exception("Error notifying application services of event")
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config("Synapse appservice", config_options)
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    assert config.worker_app == "synapse.app.appservice"
-
-    events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    if config.notify_appservices:
-        sys.stderr.write(
-            "\nThe appservices must be disabled in the main synapse process"
-            "\nbefore they can be run in a separate worker."
-            "\nPlease add ``notify_appservices: false`` to the main config"
-            "\n"
-        )
-        sys.exit(1)
-
-    # Force the pushers to start since they will be disabled in the main config
-    config.notify_appservices = True
-
-    ps = AppserviceServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-    )
-
-    setup_logging(ps, config, use_worker_options=True)
-
-    ps.setup()
-    reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, ps, config.worker_listeners
-    )
-
-    _base.start_worker_reactor("synapse-appservice", config)
-
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext
 
 if __name__ == "__main__":
     with LoggingContext("main"):
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 7fa91a3b11..add43147b3 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -13,195 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
-import sys
-
-from twisted.internet import reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse import events
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.http.server import JsonResource
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
-from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.directory import DirectoryStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.groups import SlavedGroupServerStore
-from synapse.replication.slave.storage.keys import SlavedKeyStore
-from synapse.replication.slave.storage.profile import SlavedProfileStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
-from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.transactions import SlavedTransactionStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.rest.client.v1.login import LoginRestServlet
-from synapse.rest.client.v1.push_rule import PushRuleRestServlet
-from synapse.rest.client.v1.room import (
-    JoinedRoomMemberListRestServlet,
-    PublicRoomListRestServlet,
-    RoomEventContextServlet,
-    RoomMemberListRestServlet,
-    RoomMessageListRestServlet,
-    RoomStateRestServlet,
-)
-from synapse.rest.client.v1.voip import VoipRestServlet
-from synapse.rest.client.v2_alpha import groups
-from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
-from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
-from synapse.rest.client.v2_alpha.register import RegisterRestServlet
-from synapse.rest.client.versions import VersionsRestServlet
-from synapse.server import HomeServer
-from synapse.storage.data_stores.main.monthly_active_users import (
-    MonthlyActiveUsersWorkerStore,
-)
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.client_reader")
-
-
-class ClientReaderSlavedStore(
-    SlavedDeviceInboxStore,
-    SlavedDeviceStore,
-    SlavedReceiptsStore,
-    SlavedPushRuleStore,
-    SlavedGroupServerStore,
-    SlavedAccountDataStore,
-    SlavedEventStore,
-    SlavedKeyStore,
-    RoomStore,
-    DirectoryStore,
-    SlavedApplicationServiceStore,
-    SlavedRegistrationStore,
-    SlavedTransactionStore,
-    SlavedProfileStore,
-    SlavedClientIpStore,
-    MonthlyActiveUsersWorkerStore,
-    BaseSlavedStore,
-):
-    pass
-
-
-class ClientReaderServer(HomeServer):
-    DATASTORE_CLASS = ClientReaderSlavedStore
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-                elif name == "client":
-                    resource = JsonResource(self, canonical_json=False)
-
-                    PublicRoomListRestServlet(self).register(resource)
-                    RoomMemberListRestServlet(self).register(resource)
-                    JoinedRoomMemberListRestServlet(self).register(resource)
-                    RoomStateRestServlet(self).register(resource)
-                    RoomEventContextServlet(self).register(resource)
-                    RoomMessageListRestServlet(self).register(resource)
-                    RegisterRestServlet(self).register(resource)
-                    LoginRestServlet(self).register(resource)
-                    ThreepidRestServlet(self).register(resource)
-                    KeyQueryServlet(self).register(resource)
-                    KeyChangesServlet(self).register(resource)
-                    VoipRestServlet(self).register(resource)
-                    PushRuleRestServlet(self).register(resource)
-                    VersionsRestServlet(self).register(resource)
-
-                    groups.register_servlets(self, resource)
-
-                    resources.update({"/_matrix/client": resource})
-
-        root_resource = create_resource_tree(resources, NoResource())
 
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-        )
-
-        logger.info("Synapse client reader now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
-
-        self.get_tcp_replication().start_replication(self)
-
-    def build_tcp_replication(self):
-        return ReplicationClientHandler(self.get_datastore())
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config("Synapse client reader", config_options)
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    assert config.worker_app == "synapse.app.client_reader"
-
-    events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    ss = ClientReaderServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-    )
-
-    setup_logging(ss, config, use_worker_options=True)
-
-    ss.setup()
-    reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, ss, config.worker_listeners
-    )
-
-    _base.start_worker_reactor("synapse-client-reader", config)
+import sys
 
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext
 
 if __name__ == "__main__":
     with LoggingContext("main"):
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index 58e5b354f6..e9c098c4e7 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -13,191 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
-import sys
-
-from twisted.internet import reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse import events
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.http.server import JsonResource
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.directory import DirectoryStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.profile import SlavedProfileStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
-from synapse.replication.slave.storage.pushers import SlavedPusherStore
-from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.transactions import SlavedTransactionStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.rest.client.v1.profile import (
-    ProfileAvatarURLRestServlet,
-    ProfileDisplaynameRestServlet,
-    ProfileRestServlet,
-)
-from synapse.rest.client.v1.room import (
-    JoinRoomAliasServlet,
-    RoomMembershipRestServlet,
-    RoomSendEventRestServlet,
-    RoomStateEventRestServlet,
-)
-from synapse.server import HomeServer
-from synapse.storage.data_stores.main.monthly_active_users import (
-    MonthlyActiveUsersWorkerStore,
-)
-from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.event_creator")
-
-
-class EventCreatorSlavedStore(
-    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
-    # rather than going via the correct worker.
-    UserDirectoryStore,
-    DirectoryStore,
-    SlavedTransactionStore,
-    SlavedProfileStore,
-    SlavedAccountDataStore,
-    SlavedPusherStore,
-    SlavedReceiptsStore,
-    SlavedPushRuleStore,
-    SlavedDeviceStore,
-    SlavedClientIpStore,
-    SlavedApplicationServiceStore,
-    SlavedEventStore,
-    SlavedRegistrationStore,
-    RoomStore,
-    MonthlyActiveUsersWorkerStore,
-    BaseSlavedStore,
-):
-    pass
-
-
-class EventCreatorServer(HomeServer):
-    DATASTORE_CLASS = EventCreatorSlavedStore
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-                elif name == "client":
-                    resource = JsonResource(self, canonical_json=False)
-                    RoomSendEventRestServlet(self).register(resource)
-                    RoomMembershipRestServlet(self).register(resource)
-                    RoomStateEventRestServlet(self).register(resource)
-                    JoinRoomAliasServlet(self).register(resource)
-                    ProfileAvatarURLRestServlet(self).register(resource)
-                    ProfileDisplaynameRestServlet(self).register(resource)
-                    ProfileRestServlet(self).register(resource)
-                    resources.update(
-                        {
-                            "/_matrix/client/r0": resource,
-                            "/_matrix/client/unstable": resource,
-                            "/_matrix/client/v2_alpha": resource,
-                            "/_matrix/client/api/v1": resource,
-                        }
-                    )
-
-        root_resource = create_resource_tree(resources, NoResource())
-
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-        )
-
-        logger.info("Synapse event creator now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
 
-        self.get_tcp_replication().start_replication(self)
-
-    def build_tcp_replication(self):
-        return ReplicationClientHandler(self.get_datastore())
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config("Synapse event creator", config_options)
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    assert config.worker_app == "synapse.app.event_creator"
-
-    assert config.worker_replication_http_port is not None
-
-    # This should only be done on the user directory worker or the master
-    config.update_user_directory = False
-
-    events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    ss = EventCreatorServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-    )
-
-    setup_logging(ss, config, use_worker_options=True)
-
-    ss.setup()
-    reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, ss, config.worker_listeners
-    )
-
-    _base.start_worker_reactor("synapse-event-creator", config)
+import sys
 
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext
 
 if __name__ == "__main__":
     with LoggingContext("main"):
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index d055d11b23..add43147b3 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -13,177 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
-import sys
-
-from twisted.internet import reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse import events
-from synapse.api.urls import FEDERATION_PREFIX, SERVER_KEY_V2_PREFIX
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.federation.transport.server import TransportLayerServer
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.directory import DirectoryStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.groups import SlavedGroupServerStore
-from synapse.replication.slave.storage.keys import SlavedKeyStore
-from synapse.replication.slave.storage.profile import SlavedProfileStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
-from synapse.replication.slave.storage.pushers import SlavedPusherStore
-from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.transactions import SlavedTransactionStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.rest.key.v2 import KeyApiV2Resource
-from synapse.server import HomeServer
-from synapse.storage.data_stores.main.monthly_active_users import (
-    MonthlyActiveUsersWorkerStore,
-)
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.federation_reader")
-
-
-class FederationReaderSlavedStore(
-    SlavedAccountDataStore,
-    SlavedProfileStore,
-    SlavedApplicationServiceStore,
-    SlavedPusherStore,
-    SlavedPushRuleStore,
-    SlavedReceiptsStore,
-    SlavedEventStore,
-    SlavedKeyStore,
-    SlavedRegistrationStore,
-    SlavedGroupServerStore,
-    SlavedDeviceStore,
-    RoomStore,
-    DirectoryStore,
-    SlavedTransactionStore,
-    MonthlyActiveUsersWorkerStore,
-    BaseSlavedStore,
-):
-    pass
-
-
-class FederationReaderServer(HomeServer):
-    DATASTORE_CLASS = FederationReaderSlavedStore
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-                elif name == "federation":
-                    resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
-                if name == "openid" and "federation" not in res["names"]:
-                    # Only load the openid resource separately if federation resource
-                    # is not specified since federation resource includes openid
-                    # resource.
-                    resources.update(
-                        {
-                            FEDERATION_PREFIX: TransportLayerServer(
-                                self, servlet_groups=["openid"]
-                            )
-                        }
-                    )
-
-                if name in ["keys", "federation"]:
-                    resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
-
-        root_resource = create_resource_tree(resources, NoResource())
-
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-            reactor=self.get_reactor(),
-        )
 
-        logger.info("Synapse federation reader now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
-
-        self.get_tcp_replication().start_replication(self)
-
-    def build_tcp_replication(self):
-        return ReplicationClientHandler(self.get_datastore())
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config(
-            "Synapse federation reader", config_options
-        )
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    assert config.worker_app == "synapse.app.federation_reader"
-
-    events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    ss = FederationReaderServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-    )
-
-    setup_logging(ss, config, use_worker_options=True)
-
-    ss.setup()
-    reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, ss, config.worker_listeners
-    )
-
-    _base.start_worker_reactor("synapse-federation-reader", config)
+import sys
 
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext
 
 if __name__ == "__main__":
     with LoggingContext("main"):
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index b7fcf80ddc..add43147b3 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -13,308 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
-import sys
-
-from twisted.internet import defer, reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse import events
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.federation import send_queue
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.presence import SlavedPresenceStore
-from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.transactions import SlavedTransactionStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.replication.tcp.streams._base import (
-    DeviceListsStream,
-    ReceiptsStream,
-    ToDeviceStream,
-)
-from synapse.server import HomeServer
-from synapse.storage.database import Database
-from synapse.types import ReadReceipt
-from synapse.util.async_helpers import Linearizer
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.federation_sender")
-
-
-class FederationSenderSlaveStore(
-    SlavedDeviceInboxStore,
-    SlavedTransactionStore,
-    SlavedReceiptsStore,
-    SlavedEventStore,
-    SlavedRegistrationStore,
-    SlavedDeviceStore,
-    SlavedPresenceStore,
-):
-    def __init__(self, database: Database, db_conn, hs):
-        super(FederationSenderSlaveStore, self).__init__(database, db_conn, hs)
-
-        # We pull out the current federation stream position now so that we
-        # always have a known value for the federation position in memory so
-        # that we don't have to bounce via a deferred once when we start the
-        # replication streams.
-        self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)
-
-    def _get_federation_out_pos(self, db_conn):
-        sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
-        sql = self.database_engine.convert_param_style(sql)
-
-        txn = db_conn.cursor()
-        txn.execute(sql, ("federation",))
-        rows = txn.fetchall()
-        txn.close()
-
-        return rows[0][0] if rows else -1
-
-
-class FederationSenderServer(HomeServer):
-    DATASTORE_CLASS = FederationSenderSlaveStore
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-
-        root_resource = create_resource_tree(resources, NoResource())
-
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-        )
-
-        logger.info("Synapse federation_sender now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
-
-        self.get_tcp_replication().start_replication(self)
-
-    def build_tcp_replication(self):
-        return FederationSenderReplicationHandler(self)
-
-
-class FederationSenderReplicationHandler(ReplicationClientHandler):
-    def __init__(self, hs):
-        super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
-        self.send_handler = FederationSenderHandler(hs, self)
-
-    async def on_rdata(self, stream_name, token, rows):
-        await super(FederationSenderReplicationHandler, self).on_rdata(
-            stream_name, token, rows
-        )
-        self.send_handler.process_replication_rows(stream_name, token, rows)
-
-    def get_streams_to_replicate(self):
-        args = super(
-            FederationSenderReplicationHandler, self
-        ).get_streams_to_replicate()
-        args.update(self.send_handler.stream_positions())
-        return args
-
-    def on_remote_server_up(self, server: str):
-        """Called when get a new REMOTE_SERVER_UP command."""
-
-        # Let's wake up the transaction queue for the server in case we have
-        # pending stuff to send to it.
-        self.send_handler.wake_destination(server)
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config(
-            "Synapse federation sender", config_options
-        )
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
 
-    assert config.worker_app == "synapse.app.federation_sender"
-
-    events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    if config.send_federation:
-        sys.stderr.write(
-            "\nThe send_federation must be disabled in the main synapse process"
-            "\nbefore they can be run in a separate worker."
-            "\nPlease add ``send_federation: false`` to the main config"
-            "\n"
-        )
-        sys.exit(1)
-
-    # Force the pushers to start since they will be disabled in the main config
-    config.send_federation = True
-
-    ss = FederationSenderServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-    )
-
-    setup_logging(ss, config, use_worker_options=True)
-
-    ss.setup()
-    reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, ss, config.worker_listeners
-    )
-
-    _base.start_worker_reactor("synapse-federation-sender", config)
-
-
-class FederationSenderHandler(object):
-    """Processes the replication stream and forwards the appropriate entries
-    to the federation sender.
-    """
-
-    def __init__(self, hs: FederationSenderServer, replication_client):
-        self.store = hs.get_datastore()
-        self._is_mine_id = hs.is_mine_id
-        self.federation_sender = hs.get_federation_sender()
-        self.replication_client = replication_client
-
-        self.federation_position = self.store.federation_out_pos_startup
-        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")
-
-        self._last_ack = self.federation_position
-
-        self._room_serials = {}
-        self._room_typing = {}
-
-    def on_start(self):
-        # There may be some events that are persisted but haven't been sent,
-        # so send them now.
-        self.federation_sender.notify_new_events(
-            self.store.get_room_max_stream_ordering()
-        )
-
-    def wake_destination(self, server: str):
-        self.federation_sender.wake_destination(server)
-
-    def stream_positions(self):
-        return {"federation": self.federation_position}
-
-    def process_replication_rows(self, stream_name, token, rows):
-        # The federation stream contains things that we want to send out, e.g.
-        # presence, typing, etc.
-        if stream_name == "federation":
-            send_queue.process_rows_for_federation(self.federation_sender, rows)
-            run_in_background(self.update_token, token)
-
-        # We also need to poke the federation sender when new events happen
-        elif stream_name == "events":
-            self.federation_sender.notify_new_events(token)
-
-        # ... and when new receipts happen
-        elif stream_name == ReceiptsStream.NAME:
-            run_as_background_process(
-                "process_receipts_for_federation", self._on_new_receipts, rows
-            )
-
-        # ... as well as device updates and messages
-        elif stream_name == DeviceListsStream.NAME:
-            hosts = {row.destination for row in rows}
-            for host in hosts:
-                self.federation_sender.send_device_messages(host)
-
-        elif stream_name == ToDeviceStream.NAME:
-            # The to_device stream includes stuff to be pushed to both local
-            # clients and remote servers, so we ignore entities that start with
-            # '@' (since they'll be local users rather than destinations).
-            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
-            for host in hosts:
-                self.federation_sender.send_device_messages(host)
-
-    @defer.inlineCallbacks
-    def _on_new_receipts(self, rows):
-        """
-        Args:
-            rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]):
-                new receipts to be processed
-        """
-        for receipt in rows:
-            # we only want to send on receipts for our own users
-            if not self._is_mine_id(receipt.user_id):
-                continue
-            receipt_info = ReadReceipt(
-                receipt.room_id,
-                receipt.receipt_type,
-                receipt.user_id,
-                [receipt.event_id],
-                receipt.data,
-            )
-            yield self.federation_sender.send_read_receipt(receipt_info)
-
-    @defer.inlineCallbacks
-    def update_token(self, token):
-        try:
-            self.federation_position = token
-
-            # We linearize here to ensure we don't have races updating the token
-            with (yield self._fed_position_linearizer.queue(None)):
-                if self._last_ack < self.federation_position:
-                    yield self.store.update_federation_out_pos(
-                        "federation", self.federation_position
-                    )
-
-                    # We ACK this token over replication so that the master can drop
-                    # its in memory queues
-                    self.replication_client.send_federation_ack(
-                        self.federation_position
-                    )
-                    self._last_ack = self.federation_position
-        except Exception:
-            logger.exception("Error updating federation stream position")
+import sys
 
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext
 
 if __name__ == "__main__":
     with LoggingContext("main"):
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index 30e435eead..add43147b3 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -13,241 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
-import sys
-
-from twisted.internet import defer, reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse import events
-from synapse.api.errors import HttpResponseException, SynapseError
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.http.server import JsonResource
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.rest.client.v2_alpha._base import client_patterns
-from synapse.server import HomeServer
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.frontend_proxy")
-
-
-class PresenceStatusStubServlet(RestServlet):
-    PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")
-
-    def __init__(self, hs):
-        super(PresenceStatusStubServlet, self).__init__()
-        self.http_client = hs.get_simple_http_client()
-        self.auth = hs.get_auth()
-        self.main_uri = hs.config.worker_main_http_uri
-
-    @defer.inlineCallbacks
-    def on_GET(self, request, user_id):
-        # Pass through the auth headers, if any, in case the access token
-        # is there.
-        auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
-        headers = {"Authorization": auth_headers}
-
-        try:
-            result = yield self.http_client.get_json(
-                self.main_uri + request.uri.decode("ascii"), headers=headers
-            )
-        except HttpResponseException as e:
-            raise e.to_synapse_error()
-
-        return 200, result
-
-    @defer.inlineCallbacks
-    def on_PUT(self, request, user_id):
-        yield self.auth.get_user_by_req(request)
-        return 200, {}
-
-
-class KeyUploadServlet(RestServlet):
-    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
-
-    def __init__(self, hs):
-        """
-        Args:
-            hs (synapse.server.HomeServer): server
-        """
-        super(KeyUploadServlet, self).__init__()
-        self.auth = hs.get_auth()
-        self.store = hs.get_datastore()
-        self.http_client = hs.get_simple_http_client()
-        self.main_uri = hs.config.worker_main_http_uri
-
-    @defer.inlineCallbacks
-    def on_POST(self, request, device_id):
-        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
-        user_id = requester.user.to_string()
-        body = parse_json_object_from_request(request)
-
-        if device_id is not None:
-            # passing the device_id here is deprecated; however, we allow it
-            # for now for compatibility with older clients.
-            if requester.device_id is not None and device_id != requester.device_id:
-                logger.warning(
-                    "Client uploading keys for a different device "
-                    "(logged in as %s, uploading for %s)",
-                    requester.device_id,
-                    device_id,
-                )
-        else:
-            device_id = requester.device_id
-
-        if device_id is None:
-            raise SynapseError(
-                400, "To upload keys, you must pass device_id when authenticating"
-            )
-
-        if body:
-            # They're actually trying to upload something, proxy to main synapse.
-            # Pass through the auth headers, if any, in case the access token
-            # is there.
-            auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
-            headers = {"Authorization": auth_headers}
-            result = yield self.http_client.post_json_get_json(
-                self.main_uri + request.uri.decode("ascii"), body, headers=headers
-            )
-
-            return 200, result
-        else:
-            # Just interested in counts.
-            result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
-            return 200, {"one_time_key_counts": result}
-
-
-class FrontendProxySlavedStore(
-    SlavedDeviceStore,
-    SlavedClientIpStore,
-    SlavedApplicationServiceStore,
-    SlavedRegistrationStore,
-    BaseSlavedStore,
-):
-    pass
 
+import sys
 
-class FrontendProxyServer(HomeServer):
-    DATASTORE_CLASS = FrontendProxySlavedStore
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-                elif name == "client":
-                    resource = JsonResource(self, canonical_json=False)
-                    KeyUploadServlet(self).register(resource)
-
-                    # If presence is disabled, use the stub servlet that does
-                    # not allow sending presence
-                    if not self.config.use_presence:
-                        PresenceStatusStubServlet(self).register(resource)
-
-                    resources.update(
-                        {
-                            "/_matrix/client/r0": resource,
-                            "/_matrix/client/unstable": resource,
-                            "/_matrix/client/v2_alpha": resource,
-                            "/_matrix/client/api/v1": resource,
-                        }
-                    )
-
-        root_resource = create_resource_tree(resources, NoResource())
-
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-            reactor=self.get_reactor(),
-        )
-
-        logger.info("Synapse client reader now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
-
-        self.get_tcp_replication().start_replication(self)
-
-    def build_tcp_replication(self):
-        return ReplicationClientHandler(self.get_datastore())
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config("Synapse frontend proxy", config_options)
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    assert config.worker_app == "synapse.app.frontend_proxy"
-
-    assert config.worker_main_http_uri is not None
-
-    events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    ss = FrontendProxyServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-    )
-
-    setup_logging(ss, config, use_worker_options=True)
-
-    ss.setup()
-    reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, ss, config.worker_listeners
-    )
-
-    _base.start_worker_reactor("synapse-frontend-proxy", config)
-
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext
 
 if __name__ == "__main__":
     with LoggingContext("main"):
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
new file mode 100644
index 0000000000..30efd39092
--- /dev/null
+++ b/synapse/app/generic_worker.py
@@ -0,0 +1,917 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import contextlib
+import logging
+import sys
+
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
+
+import synapse
+import synapse.events
+from synapse.api.constants import EventTypes
+from synapse.api.errors import HttpResponseException, SynapseError
+from synapse.api.urls import (
+    CLIENT_API_PREFIX,
+    FEDERATION_PREFIX,
+    LEGACY_MEDIA_PREFIX,
+    MEDIA_PREFIX,
+    SERVER_KEY_V2_PREFIX,
+)
+from synapse.app import _base
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.federation import send_queue
+from synapse.federation.transport.server import TransportLayerServer
+from synapse.handlers.presence import PresenceHandler, get_interested_parties
+from synapse.http.server import JsonResource
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseSite
+from synapse.logging.context import LoggingContext, run_in_background
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
+from synapse.replication.slave.storage.directory import DirectoryStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.filtering import SlavedFilteringStore
+from synapse.replication.slave.storage.groups import SlavedGroupServerStore
+from synapse.replication.slave.storage.keys import SlavedKeyStore
+from synapse.replication.slave.storage.presence import SlavedPresenceStore
+from synapse.replication.slave.storage.profile import SlavedProfileStore
+from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
+from synapse.replication.slave.storage.pushers import SlavedPusherStore
+from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.slave.storage.transactions import SlavedTransactionStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.replication.tcp.streams._base import (
+    DeviceListsStream,
+    ReceiptsStream,
+    ToDeviceStream,
+)
+from synapse.replication.tcp.streams.events import EventsStreamEventRow, EventsStreamRow
+from synapse.rest.admin import register_servlets_for_media_repo
+from synapse.rest.client.v1 import events
+from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
+from synapse.rest.client.v1.login import LoginRestServlet
+from synapse.rest.client.v1.profile import (
+    ProfileAvatarURLRestServlet,
+    ProfileDisplaynameRestServlet,
+    ProfileRestServlet,
+)
+from synapse.rest.client.v1.push_rule import PushRuleRestServlet
+from synapse.rest.client.v1.room import (
+    JoinedRoomMemberListRestServlet,
+    JoinRoomAliasServlet,
+    PublicRoomListRestServlet,
+    RoomEventContextServlet,
+    RoomInitialSyncRestServlet,
+    RoomMemberListRestServlet,
+    RoomMembershipRestServlet,
+    RoomMessageListRestServlet,
+    RoomSendEventRestServlet,
+    RoomStateEventRestServlet,
+    RoomStateRestServlet,
+)
+from synapse.rest.client.v1.voip import VoipRestServlet
+from synapse.rest.client.v2_alpha import groups, sync, user_directory
+from synapse.rest.client.v2_alpha._base import client_patterns
+from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
+from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
+from synapse.rest.client.v2_alpha.register import RegisterRestServlet
+from synapse.rest.client.versions import VersionsRestServlet
+from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.server import HomeServer
+from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
+from synapse.storage.data_stores.main.monthly_active_users import (
+    MonthlyActiveUsersWorkerStore,
+)
+from synapse.storage.data_stores.main.presence import UserPresenceState
+from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
+from synapse.types import ReadReceipt
+from synapse.util.async_helpers import Linearizer
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.manhole import manhole
+from synapse.util.stringutils import random_string
+from synapse.util.versionstring import get_version_string
+
+logger = logging.getLogger("synapse.app.generic_worker")
+
+
+class PresenceStatusStubServlet(RestServlet):
+    """If presence is disabled this servlet can be used to stub out setting
+    presence status, while proxying the getters to the master instance.
+    """
+
+    PATTERNS = client_patterns("/presence/(?P[^/]*)/status")
+
+    def __init__(self, hs):
+        super(PresenceStatusStubServlet, self).__init__()
+        self.http_client = hs.get_simple_http_client()
+        self.auth = hs.get_auth()
+        self.main_uri = hs.config.worker_main_http_uri
+
+    async def on_GET(self, request, user_id):
+        # Pass through the auth headers, if any, in case the access token
+        # is there.
+        auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
+        headers = {"Authorization": auth_headers}
+
+        try:
+            result = await self.http_client.get_json(
+                self.main_uri + request.uri.decode("ascii"), headers=headers
+            )
+        except HttpResponseException as e:
+            raise e.to_synapse_error()
+
+        return 200, result
+
+    async def on_PUT(self, request, user_id):
+        await self.auth.get_user_by_req(request)
+        return 200, {}
+
+
+class KeyUploadServlet(RestServlet):
+    """An implementation of the `KeyUploadServlet` that responds to read only
+    requests, but otherwise proxies through to the master instance.
+    """
+
+    PATTERNS = client_patterns("/keys/upload(/(?P[^/]+))?$")
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(KeyUploadServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastore()
+        self.http_client = hs.get_simple_http_client()
+        self.main_uri = hs.config.worker_main_http_uri
+
+    async def on_POST(self, request, device_id):
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
+        user_id = requester.user.to_string()
+        body = parse_json_object_from_request(request)
+
+        if device_id is not None:
+            # passing the device_id here is deprecated; however, we allow it
+            # for now for compatibility with older clients.
+            if requester.device_id is not None and device_id != requester.device_id:
+                logger.warning(
+                    "Client uploading keys for a different device "
+                    "(logged in as %s, uploading for %s)",
+                    requester.device_id,
+                    device_id,
+                )
+        else:
+            device_id = requester.device_id
+
+        if device_id is None:
+            raise SynapseError(
+                400, "To upload keys, you must pass device_id when authenticating"
+            )
+
+        if body:
+            # They're actually trying to upload something, proxy to main synapse.
+            # Pass through the auth headers, if any, in case the access token
+            # is there.
+            auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
+            headers = {"Authorization": auth_headers}
+            result = await self.http_client.post_json_get_json(
+                self.main_uri + request.uri.decode("ascii"), body, headers=headers
+            )
+
+            return 200, result
+        else:
+            # Just interested in counts.
+            result = await self.store.count_e2e_one_time_keys(user_id, device_id)
+            return 200, {"one_time_key_counts": result}
+
+
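+# How often (in ms) we flush "stopped syncing" notifications to the master,
+# and the grace period before a user who stopped syncing is reported offline.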
+UPDATE_SYNCING_USERS_MS = 10 * 1000
+
+
+class GenericWorkerPresence(object):
+    def __init__(self, hs):
+        self.hs = hs
+        self.is_mine_id = hs.is_mine_id
+        self.http_client = hs.get_simple_http_client()
+        self.store = hs.get_datastore()
+        self.user_to_num_current_syncs = {}
+        self.clock = hs.get_clock()
+        self.notifier = hs.get_notifier()
+
+        active_presence = self.store.take_presence_startup_info()
+        self.user_to_current_state = {state.user_id: state for state in active_presence}
+
+        # user_id -> last_sync_ms. Lists the users that have stopped syncing
+        # but we haven't notified the master of that yet
+        self.users_going_offline = {}
+
+        self._send_stop_syncing_loop = self.clock.looping_call(
+            self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
+        )
+
+        self.process_id = random_string(16)
+        logger.info("Presence process_id is %r", self.process_id)
+
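+    # Tell the master, over the replication connection, whether a user has
+    # started or stopped syncing; this is a no-op when presence is disabled.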
+    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
+        if self.hs.config.use_presence:
+            self.hs.get_tcp_replication().send_user_sync(
+                user_id, is_syncing, last_sync_ms
+            )
+
+    def mark_as_coming_online(self, user_id):
+        """A user has started syncing. Send a UserSync to the master, unless they
+        had recently stopped syncing.
+
+        Args:
+            user_id (str)
+        """
+        going_offline = self.users_going_offline.pop(user_id, None)
+        if not going_offline:
+            # The master currently thinks they're offline, so tell it they
+            # have started syncing. (If they were merely "going offline" we
+            # never told the master, so popping the entry above is enough.)
+            self.send_user_sync(user_id, True, self.clock.time_msec())
+
+    def mark_as_going_offline(self, user_id):
+        """A user has stopped syncing. We wait before notifying the master as
+        its likely they'll come back soon. This allows us to avoid sending
+        a stopped syncing immediately followed by a started syncing notification
+        to the master
+
+        Args:
+            user_id (str)
+        """
+        self.users_going_offline[user_id] = self.clock.time_msec()
+
+    def send_stop_syncing(self):
+        """Check if there are any users who have stopped syncing a while ago
+        and haven't come back yet. If there are, poke the master about them.
+        """
+        now = self.clock.time_msec()
+        for user_id, last_sync_ms in list(self.users_going_offline.items()):
+            if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
+                self.users_going_offline.pop(user_id, None)
+                self.send_user_sync(user_id, False, last_sync_ms)
+
+    def set_state(self, user, state, ignore_status_msg=False):
+        # TODO: How's this supposed to work?
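+        # Workers cannot write presence state, so accept the call but drop
+        # the update rather than erroring.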
+        return defer.succeed(None)
+
+    get_states = __func__(PresenceHandler.get_states)
+    get_state = __func__(PresenceHandler.get_state)
+    current_state_for_users = __func__(PresenceHandler.current_state_for_users)
+
+    def user_syncing(self, user_id, affect_presence):
+        if affect_presence:
+            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
+            self.user_to_num_current_syncs[user_id] = curr_sync + 1
+
+            # If we went from no in-flight syncs to some, notify replication
+            if self.user_to_num_current_syncs[user_id] == 1:
+                self.mark_as_coming_online(user_id)
+
+        def _end():
+            # We check that the user_id is in user_to_num_current_syncs because
+            # user_to_num_current_syncs may have been cleared if we are
+            # shutting down.
+            if affect_presence and user_id in self.user_to_num_current_syncs:
+                self.user_to_num_current_syncs[user_id] -= 1
+
+                # If we went from one in-flight sync to none, notify replication
+                if self.user_to_num_current_syncs[user_id] == 0:
+                    self.mark_as_going_offline(user_id)
+
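+        # Hand the caller a context manager: it is held open for the duration
+        # of the sync request, and _end() runs on exit (even on error) so the
+        # in-flight count is always decremented.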
+        @contextlib.contextmanager
+        def _user_syncing():
+            try:
+                yield
+            finally:
+                _end()
+
+        return defer.succeed(_user_syncing())
+
+    @defer.inlineCallbacks
+    def notify_from_replication(self, states, stream_id):
+        parties = yield get_interested_parties(self.store, states)
+        room_ids_to_states, users_to_states = parties
+
+        self.notifier.on_new_event(
+            "presence_key",
+            stream_id,
+            rooms=room_ids_to_states.keys(),
+            users=users_to_states.keys(),
+        )
+
+    @defer.inlineCallbacks
+    def process_replication_rows(self, token, rows):
+        states = [
+            UserPresenceState(
+                row.user_id,
+                row.state,
+                row.last_active_ts,
+                row.last_federation_update_ts,
+                row.last_user_sync_ts,
+                row.status_msg,
+                row.currently_active,
+            )
+            for row in rows
+        ]
+
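+        # Update the local cache, then wake up any notifier streams that care
+        # about these users' presence.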
+        for state in states:
+            self.user_to_current_state[state.user_id] = state
+
+        stream_id = token
+        yield self.notify_from_replication(states, stream_id)
+
+    def get_currently_syncing_users(self):
+        if self.hs.config.use_presence:
+            return [
+                user_id
+                for user_id, count in self.user_to_num_current_syncs.items()
+                if count > 0
+            ]
+        else:
+            return set()
+
+
+class GenericWorkerTyping(object):
+    def __init__(self, hs):
+        self._latest_room_serial = 0
+        self._reset()
+
+    def _reset(self):
+        """
+        Reset the typing handler's data caches.
+        """
+        # map room IDs to serial numbers
+        self._room_serials = {}
+        # map room IDs to sets of users currently typing
+        self._room_typing = {}
+
+    def stream_positions(self):
+        # We must update this typing token from the response of the previous
+        # sync. In particular, the stream id may "reset" back to zero/a low
+        # value which we *must* use for the next replication request.
+        return {"typing": self._latest_room_serial}
+
+    def process_replication_rows(self, token, rows):
+        if self._latest_room_serial > token:
+            # The master has gone backwards. To prevent inconsistent data, just
+            # clear everything.
+            self._reset()
+
+        # Set the latest serial token to whatever the server gave us.
+        self._latest_room_serial = token
+
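+        # Replace our cached view of who is typing in each room with the
+        # authoritative rows from the master.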
+        for row in rows:
+            self._room_serials[row.room_id] = token
+            self._room_typing[row.room_id] = row.user_ids
+
+
+class GenericWorkerSlavedStore(
+    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
+    # rather than going via the correct worker.
+    UserDirectoryStore,
+    SlavedDeviceInboxStore,
+    SlavedDeviceStore,
+    SlavedReceiptsStore,
+    SlavedPushRuleStore,
+    SlavedGroupServerStore,
+    SlavedAccountDataStore,
+    SlavedPusherStore,
+    SlavedEventStore,
+    SlavedKeyStore,
+    RoomStore,
+    DirectoryStore,
+    SlavedApplicationServiceStore,
+    SlavedRegistrationStore,
+    SlavedTransactionStore,
+    SlavedProfileStore,
+    SlavedClientIpStore,
+    SlavedPresenceStore,
+    SlavedFilteringStore,
+    MonthlyActiveUsersWorkerStore,
+    MediaRepositoryStore,
+    BaseSlavedStore,
+):
+    def __init__(self, database, db_conn, hs):
+        super(GenericWorkerSlavedStore, self).__init__(database, db_conn, hs)
+
+        # We pull out the current federation stream position now so that we
+        # always have a known value for the federation position in memory so
+        # that we don't have to bounce via a deferred once when we start the
+        # replication streams.
+        self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)
+
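+    # This runs during store construction, before the database pool is
+    # available, so we query the raw connection directly.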
+    def _get_federation_out_pos(self, db_conn):
+        sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
+        sql = self.database_engine.convert_param_style(sql)
+
+        txn = db_conn.cursor()
+        txn.execute(sql, ("federation",))
+        rows = txn.fetchall()
+        txn.close()
+
+        return rows[0][0] if rows else -1
+
+
+class GenericWorkerServer(HomeServer):
+    DATASTORE_CLASS = GenericWorkerSlavedStore
+
+    def _listen_http(self, listener_config):
+        port = listener_config["port"]
+        bind_addresses = listener_config["bind_addresses"]
+        site_tag = listener_config.get("tag", port)
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "metrics":
+                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
+                elif name == "client":
+                    resource = JsonResource(self, canonical_json=False)
+
+                    PublicRoomListRestServlet(self).register(resource)
+                    RoomMemberListRestServlet(self).register(resource)
+                    JoinedRoomMemberListRestServlet(self).register(resource)
+                    RoomStateRestServlet(self).register(resource)
+                    RoomEventContextServlet(self).register(resource)
+                    RoomMessageListRestServlet(self).register(resource)
+                    RegisterRestServlet(self).register(resource)
+                    LoginRestServlet(self).register(resource)
+                    ThreepidRestServlet(self).register(resource)
+                    KeyQueryServlet(self).register(resource)
+                    KeyChangesServlet(self).register(resource)
+                    VoipRestServlet(self).register(resource)
+                    PushRuleRestServlet(self).register(resource)
+                    VersionsRestServlet(self).register(resource)
+                    RoomSendEventRestServlet(self).register(resource)
+                    RoomMembershipRestServlet(self).register(resource)
+                    RoomStateEventRestServlet(self).register(resource)
+                    JoinRoomAliasServlet(self).register(resource)
+                    ProfileAvatarURLRestServlet(self).register(resource)
+                    ProfileDisplaynameRestServlet(self).register(resource)
+                    ProfileRestServlet(self).register(resource)
+                    KeyUploadServlet(self).register(resource)
+
+                    sync.register_servlets(self, resource)
+                    events.register_servlets(self, resource)
+                    InitialSyncRestServlet(self).register(resource)
+                    RoomInitialSyncRestServlet(self).register(resource)
+
+                    user_directory.register_servlets(self, resource)
+
+                    # If presence is disabled, use the stub servlet that does
+                    # not allow sending presence
+                    if not self.config.use_presence:
+                        PresenceStatusStubServlet(self).register(resource)
+
+                    groups.register_servlets(self, resource)
+
+                    resources.update({CLIENT_API_PREFIX: resource})
+                elif name == "federation":
+                    resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
+                elif name == "media":
+                    media_repo = self.get_media_repository_resource()
+
+                    # We need to serve the admin servlets for media on the
+                    # worker.
+                    admin_resource = JsonResource(self, canonical_json=False)
+                    register_servlets_for_media_repo(self, admin_resource)
+
+                    resources.update(
+                        {
+                            MEDIA_PREFIX: media_repo,
+                            LEGACY_MEDIA_PREFIX: media_repo,
+                            "/_synapse/admin": admin_resource,
+                        }
+                    )
+
+                if name == "openid" and "federation" not in res["names"]:
+                    # Only load the openid resource separately if federation resource
+                    # is not specified since federation resource includes openid
+                    # resource.
+                    resources.update(
+                        {
+                            FEDERATION_PREFIX: TransportLayerServer(
+                                self, servlet_groups=["openid"]
+                            )
+                        }
+                    )
+
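+                # Both the "keys" and "federation" listeners need to serve
+                # the server key API, since remote servers fetch our signing
+                # keys when validating federation traffic.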
+                if name in ["keys", "federation"]:
+                    resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            ),
+            reactor=self.get_reactor(),
+        )
+
+        logger.info("Synapse worker now listening on port %d", port)
+
+    def start_listening(self, listeners):
+        for listener in listeners:
+            if listener["type"] == "http":
+                self._listen_http(listener)
+            elif listener["type"] == "manhole":
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix", password="rabbithole", globals={"hs": self}
+                    ),
+                )
+            elif listener["type"] == "metrics":
+                if not self.get_config().enable_metrics:
+                    logger.warning(
+                        (
+                            "Metrics listener configured, but "
+                            "enable_metrics is not True!"
+                        )
+                    )
+                else:
+                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
+            else:
+                logger.warning("Unrecognized listener type: %s", listener["type"])
+
+        self.get_tcp_replication().start_replication(self)
+
+    def remove_pusher(self, app_id, push_key, user_id):
+        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
+
+    def build_tcp_replication(self):
+        return GenericWorkerReplicationHandler(self)
+
+    def build_presence_handler(self):
+        return GenericWorkerPresence(self)
+
+    def build_typing_handler(self):
+        return GenericWorkerTyping(self)
+
+
+class GenericWorkerReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(GenericWorkerReplicationHandler, self).__init__(hs.get_datastore())
+
+        self.store = hs.get_datastore()
+        self.typing_handler = hs.get_typing_handler()
+        # NB this is a GenericWorkerPresence, not a normal PresenceHandler
+        self.presence_handler = hs.get_presence_handler()
+        self.notifier = hs.get_notifier()
+
+        self.notify_pushers = hs.config.start_pushers
+        self.pusher_pool = hs.get_pusherpool()
+
+        if hs.config.send_federation:
+            self.send_handler = FederationSenderHandler(hs, self)
+        else:
+            self.send_handler = None
+
+    async def on_rdata(self, stream_name, token, rows):
+        await super(GenericWorkerReplicationHandler, self).on_rdata(
+            stream_name, token, rows
+        )
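+        # Fan out the notifications in the background so that the replication
+        # connection isn't blocked while we process the rows.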
+        run_in_background(self.process_and_notify, stream_name, token, rows)
+
+    def get_streams_to_replicate(self):
+        args = super(GenericWorkerReplicationHandler, self).get_streams_to_replicate()
+        args.update(self.typing_handler.stream_positions())
+        if self.send_handler:
+            args.update(self.send_handler.stream_positions())
+        return args
+
+    def get_currently_syncing_users(self):
+        return self.presence_handler.get_currently_syncing_users()
+
+    async def process_and_notify(self, stream_name, token, rows):
+        try:
+            if self.send_handler:
+                self.send_handler.process_replication_rows(stream_name, token, rows)
+
+            if stream_name == "events":
+                # We shouldn't get multiple rows per token for events stream, so
+                # we don't need to optimise this for multiple rows.
+                for row in rows:
+                    if row.type != EventsStreamEventRow.TypeId:
+                        continue
+                    assert isinstance(row, EventsStreamRow)
+
+                    event = await self.store.get_event(
+                        row.data.event_id, allow_rejected=True
+                    )
+                    if event.rejected_reason:
+                        continue
+
+                    extra_users = ()
+                    if event.type == EventTypes.Member:
+                        extra_users = (event.state_key,)
+                    max_token = self.store.get_room_max_stream_ordering()
+                    self.notifier.on_new_room_event(
+                        event, token, max_token, extra_users
+                    )
+
+                await self.pusher_pool.on_new_notifications(token, token)
+            elif stream_name == "push_rules":
+                self.notifier.on_new_event(
+                    "push_rules_key", token, users=[row.user_id for row in rows]
+                )
+            elif stream_name in ("account_data", "tag_account_data"):
+                self.notifier.on_new_event(
+                    "account_data_key", token, users=[row.user_id for row in rows]
+                )
+            elif stream_name == "receipts":
+                self.notifier.on_new_event(
+                    "receipt_key", token, rooms=[row.room_id for row in rows]
+                )
+                await self.pusher_pool.on_new_receipts(
+                    token, token, {row.room_id for row in rows}
+                )
+            elif stream_name == "typing":
+                self.typing_handler.process_replication_rows(token, rows)
+                self.notifier.on_new_event(
+                    "typing_key", token, rooms=[row.room_id for row in rows]
+                )
+            elif stream_name == "to_device":
+                entities = [row.entity for row in rows if row.entity.startswith("@")]
+                if entities:
+                    self.notifier.on_new_event("to_device_key", token, users=entities)
+            elif stream_name == "device_lists":
+                all_room_ids = set()
+                for row in rows:
+                    room_ids = await self.store.get_rooms_for_user(row.user_id)
+                    all_room_ids.update(room_ids)
+                self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids)
+            elif stream_name == "presence":
+                await self.presence_handler.process_replication_rows(token, rows)
+            elif stream_name == "receipts":
+                self.notifier.on_new_event(
+                    "groups_key", token, users=[row.user_id for row in rows]
+                )
+            elif stream_name == "pushers":
+                for row in rows:
+                    if row.deleted:
+                        self.stop_pusher(row.user_id, row.app_id, row.pushkey)
+                    else:
+                        await self.start_pusher(row.user_id, row.app_id, row.pushkey)
+        except Exception:
+            logger.exception("Error processing replication")
+
+    def stop_pusher(self, user_id, app_id, pushkey):
+        if not self.notify_pushers:
+            return
+
+        key = "%s:%s" % (app_id, pushkey)
+        pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
+        pusher = pushers_for_user.pop(key, None)
+        if pusher is None:
+            return
+        logger.info("Stopping pusher %r / %r", user_id, key)
+        pusher.on_stop()
+
+    async def start_pusher(self, user_id, app_id, pushkey):
+        if not self.notify_pushers:
+            return
+
+        key = "%s:%s" % (app_id, pushkey)
+        logger.info("Starting pusher %r / %r", user_id, key)
+        return await self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
+
+    def on_remote_server_up(self, server: str):
+        """Called when get a new REMOTE_SERVER_UP command."""
+
+        # Let's wake up the transaction queue for the server in case we have
+        # pending stuff to send to it.
+        if self.send_handler:
+            self.send_handler.wake_destination(server)
+
+
+class FederationSenderHandler(object):
+    """Processes the replication stream and forwards the appropriate entries
+    to the federation sender.
+    """
+
+    def __init__(self, hs: GenericWorkerServer, replication_client):
+        self.store = hs.get_datastore()
+        self._is_mine_id = hs.is_mine_id
+        self.federation_sender = hs.get_federation_sender()
+        self.replication_client = replication_client
+
+        self.federation_position = self.store.federation_out_pos_startup
+        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")
+
+        self._last_ack = self.federation_position
+
+    def on_start(self):
+        # There may be some events that are persisted but haven't been sent,
+        # so send them now.
+        self.federation_sender.notify_new_events(
+            self.store.get_room_max_stream_ordering()
+        )
+
+    def wake_destination(self, server: str):
+        self.federation_sender.wake_destination(server)
+
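+    # Reported to the master on (re)connect so that the federation stream
+    # resumes from the last position we persisted.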
+    def stream_positions(self):
+        return {"federation": self.federation_position}
+
+    def process_replication_rows(self, stream_name, token, rows):
+        # The federation stream contains things that we want to send out, e.g.
+        # presence, typing, etc.
+        if stream_name == "federation":
+            send_queue.process_rows_for_federation(self.federation_sender, rows)
+            run_in_background(self.update_token, token)
+
+        # We also need to poke the federation sender when new events happen
+        elif stream_name == "events":
+            self.federation_sender.notify_new_events(token)
+
+        # ... and when new receipts happen
+        elif stream_name == ReceiptsStream.NAME:
+            run_as_background_process(
+                "process_receipts_for_federation", self._on_new_receipts, rows
+            )
+
+        # ... as well as device updates and messages
+        elif stream_name == DeviceListsStream.NAME:
+            hosts = {row.destination for row in rows}
+            for host in hosts:
+                self.federation_sender.send_device_messages(host)
+
+        elif stream_name == ToDeviceStream.NAME:
+            # The to_device stream includes stuff to be pushed to both local
+            # clients and remote servers, so we ignore entities that start with
+            # '@' (since they'll be local users rather than destinations).
+            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
+            for host in hosts:
+                self.federation_sender.send_device_messages(host)
+
+    async def _on_new_receipts(self, rows):
+        """
+        Args:
+            rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]):
+                new receipts to be processed
+        """
+        for receipt in rows:
+            # we only want to send on receipts for our own users
+            if not self._is_mine_id(receipt.user_id):
+                continue
+            receipt_info = ReadReceipt(
+                receipt.room_id,
+                receipt.receipt_type,
+                receipt.user_id,
+                [receipt.event_id],
+                receipt.data,
+            )
+            await self.federation_sender.send_read_receipt(receipt_info)
+
+    async def update_token(self, token):
+        try:
+            self.federation_position = token
+
+            # We linearize here to ensure we don't have races updating the token
+            with (await self._fed_position_linearizer.queue(None)):
+                if self._last_ack < self.federation_position:
+                    await self.store.update_federation_out_pos(
+                        "federation", self.federation_position
+                    )
+
+                    # We ACK this token over replication so that the master can drop
+                    # its in memory queues
+                    self.replication_client.send_federation_ack(
+                        self.federation_position
+                    )
+                    self._last_ack = self.federation_position
+        except Exception:
+            logger.exception("Error updating federation stream position")
+
+
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config("Synapse worker", config_options)
+    except ConfigError as e:
+        sys.stderr.write("\n" + str(e) + "\n")
+        sys.exit(1)
+
+    # For backwards compatibility, allow any of the old worker app names.
+    assert config.worker_app in (
+        "synapse.app.appservice",
+        "synapse.app.client_reader",
+        "synapse.app.event_creator",
+        "synapse.app.federation_reader",
+        "synapse.app.federation_sender",
+        "synapse.app.frontend_proxy",
+        "synapse.app.generic_worker",
+        "synapse.app.media_repository",
+        "synapse.app.pusher",
+        "synapse.app.synchrotron",
+        "synapse.app.user_dir",
+    )
+
+    if config.worker_app == "synapse.app.appservice":
+        if config.notify_appservices:
+            sys.stderr.write(
+                "\nThe appservices must be disabled in the main synapse process"
+                "\nbefore they can be run in a separate worker."
+                "\nPlease add ``notify_appservices: false`` to the main config"
+                "\n"
+            )
+            sys.exit(1)
+
+        # Force the appservices to start since they will be disabled in the main config
+        config.notify_appservices = True
+
+    if config.worker_app == "synapse.app.pusher":
+        if config.start_pushers:
+            sys.stderr.write(
+                "\nThe pushers must be disabled in the main synapse process"
+                "\nbefore they can be run in a separate worker."
+                "\nPlease add ``start_pushers: false`` to the main config"
+                "\n"
+            )
+            sys.exit(1)
+
+        # Force the pushers to start since they will be disabled in the main config
+        config.start_pushers = True
+
+    if config.worker_app == "synapse.app.user_dir":
+        if config.update_user_directory:
+            sys.stderr.write(
+                "\nThe update_user_directory must be disabled in the main synapse process"
+                "\nbefore they can be run in a separate worker."
+                "\nPlease add ``update_user_directory: false`` to the main config"
+                "\n"
+            )
+            sys.exit(1)
+
+        # Force the user directory updates to run since they will be disabled in the main config
+        config.update_user_directory = True
+
+    if config.worker_app == "synapse.app.federation_sender":
+        if config.send_federation:
+            sys.stderr.write(
+                "\nThe send_federation must be disabled in the main synapse process"
+                "\nbefore they can be run in a separate worker."
+                "\nPlease add ``send_federation: false`` to the main config"
+                "\n"
+            )
+            sys.exit(1)
+
+        # Force the federation sender to start since it will be disabled in the main config
+        config.send_federation = True
+
+    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
+    ss = GenericWorkerServer(
+        config.server_name,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+    )
+
+    setup_logging(ss, config, use_worker_options=True)
+
+    ss.setup()
+    reactor.addSystemEventTrigger(
+        "before", "startup", _base.start, ss, config.worker_listeners
+    )
+
+    _base.start_worker_reactor("synapse-generic-worker", config)
+
+
+if __name__ == "__main__":
+    with LoggingContext("main"):
+        start(sys.argv[1:])
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index 5b5832214a..add43147b3 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -13,162 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
-import sys
-
-from twisted.internet import reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse import events
-from synapse.api.urls import LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.http.server import JsonResource
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.transactions import SlavedTransactionStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.rest.admin import register_servlets_for_media_repo
-from synapse.server import HomeServer
-from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.media_repository")
-
-
-class MediaRepositorySlavedStore(
-    RoomStore,
-    SlavedApplicationServiceStore,
-    SlavedRegistrationStore,
-    SlavedClientIpStore,
-    SlavedTransactionStore,
-    BaseSlavedStore,
-    MediaRepositoryStore,
-):
-    pass
-
-
-class MediaRepositoryServer(HomeServer):
-    DATASTORE_CLASS = MediaRepositorySlavedStore
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-                elif name == "media":
-                    media_repo = self.get_media_repository_resource()
-
-                    # We need to serve the admin servlets for media on the
-                    # worker.
-                    admin_resource = JsonResource(self, canonical_json=False)
-                    register_servlets_for_media_repo(self, admin_resource)
-
-                    resources.update(
-                        {
-                            MEDIA_PREFIX: media_repo,
-                            LEGACY_MEDIA_PREFIX: media_repo,
-                            "/_synapse/admin": admin_resource,
-                        }
-                    )
-
-        root_resource = create_resource_tree(resources, NoResource())
-
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-        )
 
-        logger.info("Synapse media repository now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
-
-        self.get_tcp_replication().start_replication(self)
-
-    def build_tcp_replication(self):
-        return ReplicationClientHandler(self.get_datastore())
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config(
-            "Synapse media repository", config_options
-        )
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    assert config.worker_app == "synapse.app.media_repository"
-
-    if config.enable_media_repo:
-        _base.quit_with_error(
-            "enable_media_repo must be disabled in the main synapse process\n"
-            "before the media repo can be run in a separate worker.\n"
-            "Please add ``enable_media_repo: false`` to the main config\n"
-        )
-
-    events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    ss = MediaRepositoryServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-    )
-
-    setup_logging(ss, config, use_worker_options=True)
-
-    ss.setup()
-    reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, ss, config.worker_listeners
-    )
-
-    _base.start_worker_reactor("synapse-media-repository", config)
+import sys
 
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext
 
 if __name__ == "__main__":
     with LoggingContext("main"):
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 84e9f8d5e2..add43147b3 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -13,213 +13,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
-import sys
-
-from twisted.internet import defer, reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse import events
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.replication.slave.storage._base import __func__
-from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.pushers import SlavedPusherStore
-from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.server import HomeServer
-from synapse.storage import DataStore
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.pusher")
-
-
-class PusherSlaveStore(
-    SlavedEventStore,
-    SlavedPusherStore,
-    SlavedReceiptsStore,
-    SlavedAccountDataStore,
-    RoomStore,
-):
-    update_pusher_last_stream_ordering_and_success = __func__(
-        DataStore.update_pusher_last_stream_ordering_and_success
-    )
-
-    update_pusher_failing_since = __func__(DataStore.update_pusher_failing_since)
-
-    update_pusher_last_stream_ordering = __func__(
-        DataStore.update_pusher_last_stream_ordering
-    )
-
-    get_throttle_params_by_room = __func__(DataStore.get_throttle_params_by_room)
-
-    set_throttle_params = __func__(DataStore.set_throttle_params)
-
-    get_time_of_last_push_action_before = __func__(
-        DataStore.get_time_of_last_push_action_before
-    )
-
-    get_profile_displayname = __func__(DataStore.get_profile_displayname)
-
-
-class PusherServer(HomeServer):
-    DATASTORE_CLASS = PusherSlaveStore
-
-    def remove_pusher(self, app_id, push_key, user_id):
-        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-
-        root_resource = create_resource_tree(resources, NoResource())
-
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-        )
-
-        logger.info("Synapse pusher now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
-
-        self.get_tcp_replication().start_replication(self)
 
-    def build_tcp_replication(self):
-        return PusherReplicationHandler(self)
-
-
-class PusherReplicationHandler(ReplicationClientHandler):
-    def __init__(self, hs):
-        super(PusherReplicationHandler, self).__init__(hs.get_datastore())
-
-        self.pusher_pool = hs.get_pusherpool()
-
-    async def on_rdata(self, stream_name, token, rows):
-        await super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
-        run_in_background(self.poke_pushers, stream_name, token, rows)
-
-    @defer.inlineCallbacks
-    def poke_pushers(self, stream_name, token, rows):
-        try:
-            if stream_name == "pushers":
-                for row in rows:
-                    if row.deleted:
-                        yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
-                    else:
-                        yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
-            elif stream_name == "events":
-                yield self.pusher_pool.on_new_notifications(token, token)
-            elif stream_name == "receipts":
-                yield self.pusher_pool.on_new_receipts(
-                    token, token, {row.room_id for row in rows}
-                )
-        except Exception:
-            logger.exception("Error poking pushers")
-
-    def stop_pusher(self, user_id, app_id, pushkey):
-        key = "%s:%s" % (app_id, pushkey)
-        pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
-        pusher = pushers_for_user.pop(key, None)
-        if pusher is None:
-            return
-        logger.info("Stopping pusher %r / %r", user_id, key)
-        pusher.on_stop()
-
-    def start_pusher(self, user_id, app_id, pushkey):
-        key = "%s:%s" % (app_id, pushkey)
-        logger.info("Starting pusher %r / %r", user_id, key)
-        return self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config("Synapse pusher", config_options)
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    assert config.worker_app == "synapse.app.pusher"
-
-    events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    if config.start_pushers:
-        sys.stderr.write(
-            "\nThe pushers must be disabled in the main synapse process"
-            "\nbefore they can be run in a separate worker."
-            "\nPlease add ``start_pushers: false`` to the main config"
-            "\n"
-        )
-        sys.exit(1)
-
-    # Force the pushers to start since they will be disabled in the main config
-    config.start_pushers = True
-
-    ps = PusherServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-    )
-
-    setup_logging(ps, config, use_worker_options=True)
-
-    ps.setup()
-
-    def start():
-        _base.start(ps, config.worker_listeners)
-        ps.get_pusherpool().start()
-
-    reactor.addSystemEventTrigger("before", "startup", start)
-
-    _base.start_worker_reactor("synapse-pusher", config)
+import sys
 
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext
 
 if __name__ == "__main__":
     with LoggingContext("main"):
-        ps = start(sys.argv[1:])
+        start(sys.argv[1:])
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 8982c0676e..add43147b3 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -13,454 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import contextlib
-import logging
-import sys
-
-from six import iteritems
-
-from twisted.internet import defer, reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse.api.constants import EventTypes
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.handlers.presence import PresenceHandler, get_interested_parties
-from synapse.http.server import JsonResource
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
-from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
-from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.groups import SlavedGroupServerStore
-from synapse.replication.slave.storage.presence import SlavedPresenceStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
-from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.replication.tcp.streams.events import EventsStreamEventRow, EventsStreamRow
-from synapse.rest.client.v1 import events
-from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
-from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
-from synapse.rest.client.v2_alpha import sync
-from synapse.server import HomeServer
-from synapse.storage.data_stores.main.monthly_active_users import (
-    MonthlyActiveUsersWorkerStore,
-)
-from synapse.storage.data_stores.main.presence import UserPresenceState
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.stringutils import random_string
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.synchrotron")
-
-
-class SynchrotronSlavedStore(
-    SlavedReceiptsStore,
-    SlavedAccountDataStore,
-    SlavedApplicationServiceStore,
-    SlavedRegistrationStore,
-    SlavedFilteringStore,
-    SlavedPresenceStore,
-    SlavedGroupServerStore,
-    SlavedDeviceInboxStore,
-    SlavedDeviceStore,
-    SlavedPushRuleStore,
-    SlavedEventStore,
-    SlavedClientIpStore,
-    RoomStore,
-    MonthlyActiveUsersWorkerStore,
-    BaseSlavedStore,
-):
-    pass
-
-
-UPDATE_SYNCING_USERS_MS = 10 * 1000
-
-
-class SynchrotronPresence(object):
-    def __init__(self, hs):
-        self.hs = hs
-        self.is_mine_id = hs.is_mine_id
-        self.http_client = hs.get_simple_http_client()
-        self.store = hs.get_datastore()
-        self.user_to_num_current_syncs = {}
-        self.clock = hs.get_clock()
-        self.notifier = hs.get_notifier()
-
-        active_presence = self.store.take_presence_startup_info()
-        self.user_to_current_state = {state.user_id: state for state in active_presence}
-
-        # user_id -> last_sync_ms. Lists the users that have stopped syncing
-        # but we haven't notified the master of that yet
-        self.users_going_offline = {}
-
-        self._send_stop_syncing_loop = self.clock.looping_call(
-            self.send_stop_syncing, 10 * 1000
-        )
-
-        self.process_id = random_string(16)
-        logger.info("Presence process_id is %r", self.process_id)
-
-    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
-        if self.hs.config.use_presence:
-            self.hs.get_tcp_replication().send_user_sync(
-                user_id, is_syncing, last_sync_ms
-            )
-
-    def mark_as_coming_online(self, user_id):
-        """A user has started syncing. Send a UserSync to the master, unless they
-        had recently stopped syncing.
-
-        Args:
-            user_id (str)
-        """
-        going_offline = self.users_going_offline.pop(user_id, None)
-        if not going_offline:
-            # Safe to skip because we haven't yet told the master they were offline
-            self.send_user_sync(user_id, True, self.clock.time_msec())
-
-    def mark_as_going_offline(self, user_id):
-        """A user has stopped syncing. We wait before notifying the master as
-        its likely they'll come back soon. This allows us to avoid sending
-        a stopped syncing immediately followed by a started syncing notification
-        to the master
-
-        Args:
-            user_id (str)
-        """
-        self.users_going_offline[user_id] = self.clock.time_msec()
-
-    def send_stop_syncing(self):
-        """Check if there are any users who have stopped syncing a while ago
-        and haven't come back yet. If there are poke the master about them.
-        """
-        now = self.clock.time_msec()
-        for user_id, last_sync_ms in list(self.users_going_offline.items()):
-            if now - last_sync_ms > 10 * 1000:
-                self.users_going_offline.pop(user_id, None)
-                self.send_user_sync(user_id, False, last_sync_ms)
-
-    def set_state(self, user, state, ignore_status_msg=False):
-        # TODO: How's this supposed to work?
-        return defer.succeed(None)
-
-    get_states = __func__(PresenceHandler.get_states)
-    get_state = __func__(PresenceHandler.get_state)
-    current_state_for_users = __func__(PresenceHandler.current_state_for_users)
-
-    def user_syncing(self, user_id, affect_presence):
-        if affect_presence:
-            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
-            self.user_to_num_current_syncs[user_id] = curr_sync + 1
-
-            # If we went from no in flight sync to some, notify replication
-            if self.user_to_num_current_syncs[user_id] == 1:
-                self.mark_as_coming_online(user_id)
-
-        def _end():
-            # We check that the user_id is in user_to_num_current_syncs because
-            # user_to_num_current_syncs may have been cleared if we are
-            # shutting down.
-            if affect_presence and user_id in self.user_to_num_current_syncs:
-                self.user_to_num_current_syncs[user_id] -= 1
-
-                # If we went from one in-flight sync to none, notify replication
-                if self.user_to_num_current_syncs[user_id] == 0:
-                    self.mark_as_going_offline(user_id)
-
-        @contextlib.contextmanager
-        def _user_syncing():
-            try:
-                yield
-            finally:
-                _end()
-
-        return defer.succeed(_user_syncing())
-
-    @defer.inlineCallbacks
-    def notify_from_replication(self, states, stream_id):
-        parties = yield get_interested_parties(self.store, states)
-        room_ids_to_states, users_to_states = parties
-
-        self.notifier.on_new_event(
-            "presence_key",
-            stream_id,
-            rooms=room_ids_to_states.keys(),
-            users=users_to_states.keys(),
-        )
-
-    @defer.inlineCallbacks
-    def process_replication_rows(self, token, rows):
-        states = [
-            UserPresenceState(
-                row.user_id,
-                row.state,
-                row.last_active_ts,
-                row.last_federation_update_ts,
-                row.last_user_sync_ts,
-                row.status_msg,
-                row.currently_active,
-            )
-            for row in rows
-        ]
-
-        for state in states:
-            self.user_to_current_state[state.user_id] = state
-
-        stream_id = token
-        yield self.notify_from_replication(states, stream_id)
-
-    def get_currently_syncing_users(self):
-        if self.hs.config.use_presence:
-            return [
-                user_id
-                for user_id, count in iteritems(self.user_to_num_current_syncs)
-                if count > 0
-            ]
-        else:
-            return set()
-
 
-class SynchrotronTyping(object):
-    def __init__(self, hs):
-        self._latest_room_serial = 0
-        self._reset()
-
-    def _reset(self):
-        """
-        Reset the typing handler's data caches.
-        """
-        # map room IDs to serial numbers
-        self._room_serials = {}
-        # map room IDs to sets of users currently typing
-        self._room_typing = {}
-
-    def stream_positions(self):
-        # We must update this typing token from the response of the previous
-        # sync. In particular, the stream id may "reset" back to zero/a low
-        # value which we *must* use for the next replication request.
-        return {"typing": self._latest_room_serial}
-
-    def process_replication_rows(self, token, rows):
-        if self._latest_room_serial > token:
-            # The master has gone backwards. To prevent inconsistent data, just
-            # clear everything.
-            self._reset()
-
-        # Set the latest serial token to whatever the server gave us.
-        self._latest_room_serial = token
-
-        for row in rows:
-            self._room_serials[row.room_id] = token
-            self._room_typing[row.room_id] = row.user_ids
-
-
-class SynchrotronApplicationService(object):
-    def notify_interested_services(self, event):
-        pass
-
-
-class SynchrotronServer(HomeServer):
-    DATASTORE_CLASS = SynchrotronSlavedStore
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-                elif name == "client":
-                    resource = JsonResource(self, canonical_json=False)
-                    sync.register_servlets(self, resource)
-                    events.register_servlets(self, resource)
-                    InitialSyncRestServlet(self).register(resource)
-                    RoomInitialSyncRestServlet(self).register(resource)
-                    resources.update(
-                        {
-                            "/_matrix/client/r0": resource,
-                            "/_matrix/client/unstable": resource,
-                            "/_matrix/client/v2_alpha": resource,
-                            "/_matrix/client/api/v1": resource,
-                        }
-                    )
-
-        root_resource = create_resource_tree(resources, NoResource())
-
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-        )
-
-        logger.info("Synapse synchrotron now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
-
-        self.get_tcp_replication().start_replication(self)
-
-    def build_tcp_replication(self):
-        return SyncReplicationHandler(self)
-
-    def build_presence_handler(self):
-        return SynchrotronPresence(self)
-
-    def build_typing_handler(self):
-        return SynchrotronTyping(self)
-
-
-class SyncReplicationHandler(ReplicationClientHandler):
-    def __init__(self, hs):
-        super(SyncReplicationHandler, self).__init__(hs.get_datastore())
-
-        self.store = hs.get_datastore()
-        self.typing_handler = hs.get_typing_handler()
-        # NB this is a SynchrotronPresence, not a normal PresenceHandler
-        self.presence_handler = hs.get_presence_handler()
-        self.notifier = hs.get_notifier()
-
-    async def on_rdata(self, stream_name, token, rows):
-        await super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
-        run_in_background(self.process_and_notify, stream_name, token, rows)
-
-    def get_streams_to_replicate(self):
-        args = super(SyncReplicationHandler, self).get_streams_to_replicate()
-        args.update(self.typing_handler.stream_positions())
-        return args
-
-    def get_currently_syncing_users(self):
-        return self.presence_handler.get_currently_syncing_users()
-
-    async def process_and_notify(self, stream_name, token, rows):
-        try:
-            if stream_name == "events":
-                # We shouldn't get multiple rows per token for events stream, so
-                # we don't need to optimise this for multiple rows.
-                for row in rows:
-                    if row.type != EventsStreamEventRow.TypeId:
-                        continue
-                    assert isinstance(row, EventsStreamRow)
-
-                    event = await self.store.get_event(
-                        row.data.event_id, allow_rejected=True
-                    )
-                    if event.rejected_reason:
-                        continue
-
-                    extra_users = ()
-                    if event.type == EventTypes.Member:
-                        extra_users = (event.state_key,)
-                    max_token = self.store.get_room_max_stream_ordering()
-                    self.notifier.on_new_room_event(
-                        event, token, max_token, extra_users
-                    )
-            elif stream_name == "push_rules":
-                self.notifier.on_new_event(
-                    "push_rules_key", token, users=[row.user_id for row in rows]
-                )
-            elif stream_name in ("account_data", "tag_account_data"):
-                self.notifier.on_new_event(
-                    "account_data_key", token, users=[row.user_id for row in rows]
-                )
-            elif stream_name == "receipts":
-                self.notifier.on_new_event(
-                    "receipt_key", token, rooms=[row.room_id for row in rows]
-                )
-            elif stream_name == "typing":
-                self.typing_handler.process_replication_rows(token, rows)
-                self.notifier.on_new_event(
-                    "typing_key", token, rooms=[row.room_id for row in rows]
-                )
-            elif stream_name == "to_device":
-                entities = [row.entity for row in rows if row.entity.startswith("@")]
-                if entities:
-                    self.notifier.on_new_event("to_device_key", token, users=entities)
-            elif stream_name == "device_lists":
-                all_room_ids = set()
-                for row in rows:
-                    room_ids = await self.store.get_rooms_for_user(row.user_id)
-                    all_room_ids.update(room_ids)
-                self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids)
-            elif stream_name == "presence":
-                await self.presence_handler.process_replication_rows(token, rows)
-            elif stream_name == "groups":
-                self.notifier.on_new_event(
-                    "groups_key", token, users=[row.user_id for row in rows]
-                )
-        except Exception:
-            logger.exception("Error processing replication")
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config("Synapse synchrotron", config_options)
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    assert config.worker_app == "synapse.app.synchrotron"
-
-    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    ss = SynchrotronServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-        application_service_handler=SynchrotronApplicationService(),
-    )
-
-    setup_logging(ss, config, use_worker_options=True)
-
-    ss.setup()
-    reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, ss, config.worker_listeners
-    )
-
-    _base.start_worker_reactor("synapse-synchrotron", config)
+import sys
 
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext
 
 if __name__ == "__main__":
     with LoggingContext("main"):
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index ba536d6f04..503d44f687 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -14,217 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
 import sys
 
-from twisted.internet import defer, reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse import events
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.http.server import JsonResource
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.replication.tcp.streams.events import (
-    EventsStream,
-    EventsStreamCurrentStateRow,
-)
-from synapse.rest.client.v2_alpha import user_directory
-from synapse.server import HomeServer
-from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
-from synapse.storage.database import Database
-from synapse.util.caches.stream_change_cache import StreamChangeCache
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.user_dir")
-
-
-class UserDirectorySlaveStore(
-    SlavedEventStore,
-    SlavedApplicationServiceStore,
-    SlavedRegistrationStore,
-    SlavedClientIpStore,
-    UserDirectoryStore,
-    BaseSlavedStore,
-):
-    def __init__(self, database: Database, db_conn, hs):
-        super(UserDirectorySlaveStore, self).__init__(database, db_conn, hs)
-
-        events_max = self._stream_id_gen.get_current_token()
-        curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(
-            db_conn,
-            "current_state_delta_stream",
-            entity_column="room_id",
-            stream_column="stream_id",
-            max_value=events_max,  # As we share the stream id with events token
-            limit=1000,
-        )
-        self._curr_state_delta_stream_cache = StreamChangeCache(
-            "_curr_state_delta_stream_cache",
-            min_curr_state_delta_id,
-            prefilled_cache=curr_state_delta_prefill,
-        )
-
-    def stream_positions(self):
-        result = super(UserDirectorySlaveStore, self).stream_positions()
-        return result
-
-    def process_replication_rows(self, stream_name, token, rows):
-        if stream_name == EventsStream.NAME:
-            self._stream_id_gen.advance(token)
-            for row in rows:
-                if row.type != EventsStreamCurrentStateRow.TypeId:
-                    continue
-                self._curr_state_delta_stream_cache.entity_has_changed(
-                    row.data.room_id, token
-                )
-        return super(UserDirectorySlaveStore, self).process_replication_rows(
-            stream_name, token, rows
-        )
-
-
-class UserDirectoryServer(HomeServer):
-    DATASTORE_CLASS = UserDirectorySlaveStore
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-                elif name == "client":
-                    resource = JsonResource(self, canonical_json=False)
-                    user_directory.register_servlets(self, resource)
-                    resources.update(
-                        {
-                            "/_matrix/client/r0": resource,
-                            "/_matrix/client/unstable": resource,
-                            "/_matrix/client/v2_alpha": resource,
-                            "/_matrix/client/api/v1": resource,
-                        }
-                    )
-
-        root_resource = create_resource_tree(resources, NoResource())
-
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-        )
-
-        logger.info("Synapse user_dir now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
-
-        self.get_tcp_replication().start_replication(self)
-
-    def build_tcp_replication(self):
-        return UserDirectoryReplicationHandler(self)
-
-
-class UserDirectoryReplicationHandler(ReplicationClientHandler):
-    def __init__(self, hs):
-        super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
-        self.user_directory = hs.get_user_directory_handler()
-
-    async def on_rdata(self, stream_name, token, rows):
-        await super(UserDirectoryReplicationHandler, self).on_rdata(
-            stream_name, token, rows
-        )
-        if stream_name == EventsStream.NAME:
-            run_in_background(self._notify_directory)
-
-    @defer.inlineCallbacks
-    def _notify_directory(self):
-        try:
-            yield self.user_directory.notify_new_event()
-        except Exception:
-            logger.exception("Error notifiying user directory of state update")
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config("Synapse user directory", config_options)
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    assert config.worker_app == "synapse.app.user_dir"
-
-    events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    if config.update_user_directory:
-        sys.stderr.write(
-            "\nThe update_user_directory must be disabled in the main synapse process"
-            "\nbefore they can be run in a separate worker."
-            "\nPlease add ``update_user_directory: false`` to the main config"
-            "\n"
-        )
-        sys.exit(1)
-
-    # Force the user directory updates to start since they will be disabled in the main config
-    config.update_user_directory = True
-
-    ss = UserDirectoryServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-    )
-
-    setup_logging(ss, config, use_worker_options=True)
-
-    ss.setup()
-    reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, ss, config.worker_listeners
-    )
-
-    _base.start_worker_reactor("synapse-user-dir", config)
-
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext
 
 if __name__ == "__main__":
     with LoggingContext("main"):
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index 3aa6cb8b96..e73342c657 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -32,6 +32,7 @@ from synapse.storage.data_stores.main.state import StateGroupWorkerStore
 from synapse.storage.data_stores.main.stream import StreamWorkerStore
 from synapse.storage.data_stores.main.user_erasure_store import UserErasureWorkerStore
 from synapse.storage.database import Database
+from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 from ._base import BaseSlavedStore
 from ._slaved_id_tracker import SlavedIdTracker
@@ -68,6 +69,21 @@ class SlavedEventStore(
 
         super(SlavedEventStore, self).__init__(database, db_conn, hs)
 
+        events_max = self._stream_id_gen.get_current_token()
+        curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(
+            db_conn,
+            "current_state_delta_stream",
+            entity_column="room_id",
+            stream_column="stream_id",
+            max_value=events_max,  # As we share the stream id with events token
+            limit=1000,
+        )
+        self._curr_state_delta_stream_cache = StreamChangeCache(
+            "_curr_state_delta_stream_cache",
+            min_curr_state_delta_id,
+            prefilled_cache=curr_state_delta_prefill,
+        )
+
     # Cached functions can't be accessed through a class instance so we need
     # to reach inside the __dict__ to extract them.
 
@@ -120,6 +136,10 @@ class SlavedEventStore(
                 backfilled=False,
             )
         elif row.type == EventsStreamCurrentStateRow.TypeId:
+            self._curr_state_delta_stream_cache.entity_has_changed(
+                row.data.room_id, token
+            )
+
             if data.type == EventTypes.Member:
                 self.get_rooms_for_user_with_stream_ordering.invalidate(
                     (data.state_key,)
diff --git a/synapse/storage/data_stores/main/pusher.py b/synapse/storage/data_stores/main/pusher.py
index 6b03233262..547b9d69cb 100644
--- a/synapse/storage/data_stores/main/pusher.py
+++ b/synapse/storage/data_stores/main/pusher.py
@@ -197,6 +197,84 @@ class PusherWorkerStore(SQLBaseStore):
 
         return result
 
+    @defer.inlineCallbacks
+    def update_pusher_last_stream_ordering(
+        self, app_id, pushkey, user_id, last_stream_ordering
+    ):
+        yield self.db.simple_update_one(
+            "pushers",
+            {"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
+            {"last_stream_ordering": last_stream_ordering},
+            desc="update_pusher_last_stream_ordering",
+        )
+
+    @defer.inlineCallbacks
+    def update_pusher_last_stream_ordering_and_success(
+        self, app_id, pushkey, user_id, last_stream_ordering, last_success
+    ):
+        """Update the last stream ordering position we've processed up to for
+        the given pusher.
+
+        Args:
+            app_id (str)
+            pushkey (str)
+            user_id (str)
+            last_stream_ordering (int)
+            last_success (int)
+
+        Returns:
+            Deferred[bool]: True if the pusher still exists; False if it has been deleted.
+        """
+        updated = yield self.db.simple_update(
+            table="pushers",
+            keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
+            updatevalues={
+                "last_stream_ordering": last_stream_ordering,
+                "last_success": last_success,
+            },
+            desc="update_pusher_last_stream_ordering_and_success",
+        )
+
+        return bool(updated)
+
+    @defer.inlineCallbacks
+    def update_pusher_failing_since(self, app_id, pushkey, user_id, failing_since):
+        yield self.db.simple_update(
+            table="pushers",
+            keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
+            updatevalues={"failing_since": failing_since},
+            desc="update_pusher_failing_since",
+        )
+
+    @defer.inlineCallbacks
+    def get_throttle_params_by_room(self, pusher_id):
+        res = yield self.db.simple_select_list(
+            "pusher_throttle",
+            {"pusher": pusher_id},
+            ["room_id", "last_sent_ts", "throttle_ms"],
+            desc="get_throttle_params_by_room",
+        )
+
+        params_by_room = {}
+        for row in res:
+            params_by_room[row["room_id"]] = {
+                "last_sent_ts": row["last_sent_ts"],
+                "throttle_ms": row["throttle_ms"],
+            }
+
+        return params_by_room
+
+    @defer.inlineCallbacks
+    def set_throttle_params(self, pusher_id, room_id, params):
+        # no need to lock because `pusher_throttle` has a primary key on
+        # (pusher, room_id) so simple_upsert will retry
+        yield self.db.simple_upsert(
+            "pusher_throttle",
+            {"pusher": pusher_id, "room_id": room_id},
+            params,
+            desc="set_throttle_params",
+            lock=False,
+        )
+
 
 class PusherStore(PusherWorkerStore):
     def get_pushers_stream_token(self):
@@ -282,81 +360,3 @@ class PusherStore(PusherWorkerStore):
 
         with self._pushers_id_gen.get_next() as stream_id:
             yield self.db.runInteraction("delete_pusher", delete_pusher_txn, stream_id)
-
-    @defer.inlineCallbacks
-    def update_pusher_last_stream_ordering(
-        self, app_id, pushkey, user_id, last_stream_ordering
-    ):
-        yield self.db.simple_update_one(
-            "pushers",
-            {"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
-            {"last_stream_ordering": last_stream_ordering},
-            desc="update_pusher_last_stream_ordering",
-        )
-
-    @defer.inlineCallbacks
-    def update_pusher_last_stream_ordering_and_success(
-        self, app_id, pushkey, user_id, last_stream_ordering, last_success
-    ):
-        """Update the last stream ordering position we've processed up to for
-        the given pusher.
-
-        Args:
-            app_id (str)
-            pushkey (str)
-            last_stream_ordering (int)
-            last_success (int)
-
-        Returns:
-            Deferred[bool]: True if the pusher still exists; False if it has been deleted.
-        """
-        updated = yield self.db.simple_update(
-            table="pushers",
-            keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
-            updatevalues={
-                "last_stream_ordering": last_stream_ordering,
-                "last_success": last_success,
-            },
-            desc="update_pusher_last_stream_ordering_and_success",
-        )
-
-        return bool(updated)
-
-    @defer.inlineCallbacks
-    def update_pusher_failing_since(self, app_id, pushkey, user_id, failing_since):
-        yield self.db.simple_update(
-            table="pushers",
-            keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
-            updatevalues={"failing_since": failing_since},
-            desc="update_pusher_failing_since",
-        )
-
-    @defer.inlineCallbacks
-    def get_throttle_params_by_room(self, pusher_id):
-        res = yield self.db.simple_select_list(
-            "pusher_throttle",
-            {"pusher": pusher_id},
-            ["room_id", "last_sent_ts", "throttle_ms"],
-            desc="get_throttle_params_by_room",
-        )
-
-        params_by_room = {}
-        for row in res:
-            params_by_room[row["room_id"]] = {
-                "last_sent_ts": row["last_sent_ts"],
-                "throttle_ms": row["throttle_ms"],
-            }
-
-        return params_by_room
-
-    @defer.inlineCallbacks
-    def set_throttle_params(self, pusher_id, room_id, params):
-        # no need to lock because `pusher_throttle` has a primary key on
-        # (pusher, room_id) so simple_upsert will retry
-        yield self.db.simple_upsert(
-            "pusher_throttle",
-            {"pusher": pusher_id, "room_id": room_id},
-            params,
-            desc="set_throttle_params",
-            lock=False,
-        )
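
An aside on the "no need to lock" comment in set_throttle_params above: an
upsert keyed on the table's primary key is resolved atomically by the
database, so no application-level lock is required. A sketch of the same idea
against an in-memory SQLite database (schema simplified; the upsert syntax
needs SQLite >= 3.24):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE pusher_throttle ("
    "pusher INTEGER, room_id TEXT, last_sent_ts INTEGER, throttle_ms INTEGER, "
    "PRIMARY KEY (pusher, room_id))"
)

def set_throttle_params(pusher_id, room_id, last_sent_ts, throttle_ms):
    # ON CONFLICT on the primary key makes this an atomic insert-or-update,
    # so concurrent writers cannot race each other into duplicate rows.
    conn.execute(
        "INSERT INTO pusher_throttle VALUES (?, ?, ?, ?) "
        "ON CONFLICT (pusher, room_id) DO UPDATE SET "
        "last_sent_ts = excluded.last_sent_ts, throttle_ms = excluded.throttle_ms",
        (pusher_id, room_id, last_sent_ts, throttle_ms),
    )

set_throttle_params(1, "!room:a", 1000, 60000)
set_throttle_params(1, "!room:a", 2000, 30000)  # updates in place, no duplicate
assert conn.execute("SELECT COUNT(*) FROM pusher_throttle").fetchone()[0] == 1
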
diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py
index 8bdbc608a9..160e55aca9 100644
--- a/tests/app/test_frontend_proxy.py
+++ b/tests/app/test_frontend_proxy.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.app.frontend_proxy import FrontendProxyServer
+from synapse.app.generic_worker import GenericWorkerServer
 
 from tests.unittest import HomeserverTestCase
 
@@ -22,7 +22,7 @@ class FrontendProxyTests(HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
 
         hs = self.setup_test_homeserver(
-            http_client=None, homeserverToUse=FrontendProxyServer
+            http_client=None, homeserverToUse=GenericWorkerServer
         )
 
         return hs
@@ -46,9 +46,7 @@ class FrontendProxyTests(HomeserverTestCase):
         # Grab the resource from the site that was told to listen
         self.assertEqual(len(self.reactor.tcpServers), 1)
         site = self.reactor.tcpServers[0][1]
-        self.resource = (
-            site.resource.children[b"_matrix"].children[b"client"].children[b"r0"]
-        )
+        self.resource = site.resource.children[b"_matrix"].children[b"client"]
 
         request, channel = self.make_request("PUT", "presence/a/status")
         self.render(request)
@@ -76,9 +74,7 @@ class FrontendProxyTests(HomeserverTestCase):
         # Grab the resource from the site that was told to listen
         self.assertEqual(len(self.reactor.tcpServers), 1)
         site = self.reactor.tcpServers[0][1]
-        self.resource = (
-            site.resource.children[b"_matrix"].children[b"client"].children[b"r0"]
-        )
+        self.resource = site.resource.children[b"_matrix"].children[b"client"]
 
         request, channel = self.make_request("PUT", "presence/a/status")
         self.render(request)
diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py
index 48792d1480..1fe048048b 100644
--- a/tests/app/test_openid_listener.py
+++ b/tests/app/test_openid_listener.py
@@ -16,7 +16,7 @@ from mock import Mock, patch
 
 from parameterized import parameterized
 
-from synapse.app.federation_reader import FederationReaderServer
+from synapse.app.generic_worker import GenericWorkerServer
 from synapse.app.homeserver import SynapseHomeServer
 
 from tests.unittest import HomeserverTestCase
@@ -25,7 +25,7 @@ from tests.unittest import HomeserverTestCase
 class FederationReaderOpenIDListenerTests(HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
         hs = self.setup_test_homeserver(
-            http_client=None, homeserverToUse=FederationReaderServer
+            http_client=None, homeserverToUse=GenericWorkerServer
         )
         return hs
 
-- 
cgit 1.4.1
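
A note on the StreamChangeCache that the events.py hunk above prefills: it
lets a worker answer "has this room's state changed since stream position X?"
without a database round trip. A minimal sketch of the idea, with simplified
names and none of Synapse's metrics or eviction logic:

class MiniStreamChangeCache:
    """Remembers the latest stream position at which each entity changed.

    For positions older than anything we remember, we must answer "maybe
    changed" and let the caller fall back to the database.
    """

    def __init__(self, earliest_known_pos, prefilled=None):
        self._earliest_known_pos = earliest_known_pos
        self._entity_to_pos = dict(prefilled or {})

    def entity_has_changed(self, entity_id, pos):
        # Called as replication rows arrive for this entity.
        if pos > self._entity_to_pos.get(entity_id, self._earliest_known_pos):
            self._entity_to_pos[entity_id] = pos

    def has_entity_changed(self, entity_id, since_pos):
        if since_pos < self._earliest_known_pos:
            return True  # cache can't prove otherwise; caller hits the DB
        return self._entity_to_pos.get(entity_id, self._earliest_known_pos) > since_pos


cache = MiniStreamChangeCache(earliest_known_pos=100, prefilled={"!room:a": 150})
assert cache.has_entity_changed("!room:a", since_pos=120)       # changed at 150
assert not cache.has_entity_changed("!other:a", since_pos=120)  # no change seen
assert cache.has_entity_changed("!other:a", since_pos=50)       # before horizon
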


From e66f099ca952ef47944c7bba3fd942f98245d39f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 25 Feb 2020 17:46:00 +0000
Subject: Sanity-check database before running upgrades (#6982)

Some of the database deltas rely on `config.server_name` being set correctly,
so we should check that it is before running the deltas.

Fixes #6870.
---
 changelog.d/6982.feature                     |  1 +
 synapse/storage/data_stores/main/__init__.py | 34 ++++++++++++++++------------
 synapse/storage/prepare_database.py          | 15 ++++++++++--
 3 files changed, 33 insertions(+), 17 deletions(-)
 create mode 100644 changelog.d/6982.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6982.feature b/changelog.d/6982.feature
new file mode 100644
index 0000000000..934cc5141a
--- /dev/null
+++ b/changelog.d/6982.feature
@@ -0,0 +1 @@
+Check that server_name is correctly set before running database updates.
diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py
index 2700cca822..acca079f23 100644
--- a/synapse/storage/data_stores/main/__init__.py
+++ b/synapse/storage/data_stores/main/__init__.py
@@ -20,6 +20,7 @@ import logging
 import time
 
 from synapse.api.constants import PresenceState
+from synapse.config.homeserver import HomeServerConfig
 from synapse.storage.database import Database
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import (
@@ -117,16 +118,6 @@ class DataStore(
         self._clock = hs.get_clock()
         self.database_engine = database.engine
 
-        all_users_native = are_all_users_on_domain(
-            db_conn.cursor(), database.engine, hs.hostname
-        )
-        if not all_users_native:
-            raise Exception(
-                "Found users in database not native to %s!\n"
-                "You cannot changed a synapse server_name after it's been configured"
-                % (hs.hostname,)
-            )
-
         self._stream_id_gen = StreamIdGenerator(
             db_conn,
             "events",
@@ -567,13 +558,26 @@ class DataStore(
         )
 
 
-def are_all_users_on_domain(txn, database_engine, domain):
+def check_database_before_upgrade(cur, database_engine, config: HomeServerConfig):
+    """Called before upgrading an existing database to check that it is broadly sane
+    compared with the configuration.
+    """
+    domain = config.server_name
+
     sql = database_engine.convert_param_style(
         "SELECT COUNT(*) FROM users WHERE name NOT LIKE ?"
     )
     pat = "%:" + domain
-    txn.execute(sql, (pat,))
-    num_not_matching = txn.fetchall()[0][0]
+    cur.execute(sql, (pat,))
+    num_not_matching = cur.fetchall()[0][0]
     if num_not_matching == 0:
-        return True
-    return False
+        return
+
+    raise Exception(
+        "Found users in database not native to %s!\n"
+        "You cannot changed a synapse server_name after it's been configured"
+        % (domain,)
+    )
+
+
+__all__ = ["DataStore", "check_database_before_upgrade"]
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index fc69c32a0a..6cb7d4b922 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -278,13 +278,17 @@ def _upgrade_existing_database(
             the current_version wasn't generated by applying those delta files.
         database_engine (DatabaseEngine)
         config (synapse.config.homeserver.HomeServerConfig|None):
-            application config, or None if we are connecting to an existing
-            database which we expect to be configured already
+            None if we are initialising a blank database, otherwise the application
+            config
         data_stores (list[str]): The names of the data stores to instantiate
             on the given database.
         is_empty (bool): Is this a blank database? I.e. do we need to run the
             upgrade portions of the delta scripts.
     """
+    if is_empty:
+        assert not applied_delta_files
+    else:
+        assert config
 
     if current_version > SCHEMA_VERSION:
         raise ValueError(
@@ -292,6 +296,13 @@ def _upgrade_existing_database(
             + "new for the server to understand"
         )
 
+    # some of the deltas assume that config.server_name is set correctly, so now
+    # is a good time to run the sanity check.
+    if not is_empty and "main" in data_stores:
+        from synapse.storage.data_stores.main import check_database_before_upgrade
+
+        check_database_before_upgrade(cur, database_engine, config)
+
     start_ver = current_version
     if not upgraded:
         start_ver += 1
-- 
cgit 1.4.1
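
The essence of the change above: run a cheap invariant check on the existing
data before any schema delta is allowed to rely on it. A self-contained sketch
of the same users/server_name check, using the sqlite3 stdlib module with the
table reduced to the one column the query needs:

import sqlite3

def check_users_match_server_name(conn, server_name):
    # Every local user ID should end in ":<server_name>"; any that don't
    # suggest the configured server_name was changed after the fact.
    cur = conn.execute(
        "SELECT COUNT(*) FROM users WHERE name NOT LIKE ?", ("%:" + server_name,)
    )
    (num_not_matching,) = cur.fetchone()
    if num_not_matching:
        raise RuntimeError(
            "Found %d users in database not native to %s; refusing to upgrade"
            % (num_not_matching, server_name)
        )

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (name TEXT)")
conn.execute("INSERT INTO users VALUES ('@alice:example.com')")
check_users_match_server_name(conn, "example.com")  # passes silently
# check_users_match_server_name(conn, "other.org")  # would raise
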


From 8c75b621bfe03725cc8da071516ebc66d3872760 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 26 Feb 2020 12:22:55 +0000
Subject: Ensure 'deactivated' parameter is a boolean on user admin API, Fix
 error handling of call to deactivate user (#6990)

---
 changelog.d/6990.bugfix         |  1 +
 synapse/rest/admin/users.py     | 11 +++++---
 synapse/rest/client/v1/login.py |  1 +
 tests/rest/admin/test_user.py   | 59 +++++++++++++++++++++++++++++++++++++++++
 4 files changed, 68 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/6990.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6990.bugfix b/changelog.d/6990.bugfix
new file mode 100644
index 0000000000..8c1c48f4d4
--- /dev/null
+++ b/changelog.d/6990.bugfix
@@ -0,0 +1 @@
+Prevent user from setting 'deactivated' to anything other than a bool on the v2 PUT /users Admin API.
\ No newline at end of file
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 2107b5dc56..c5b461a236 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -228,13 +228,16 @@ class UserRestServletV2(RestServlet):
                     )
 
             if "deactivated" in body:
-                deactivate = bool(body["deactivated"])
+                deactivate = body["deactivated"]
+                if not isinstance(deactivate, bool):
+                    raise SynapseError(
+                        400, "'deactivated' parameter is not of type boolean"
+                    )
+
                 if deactivate and not user["deactivated"]:
-                    result = await self.deactivate_account_handler.deactivate_account(
+                    await self.deactivate_account_handler.deactivate_account(
                         target_user.to_string(), False
                     )
-                    if not result:
-                        raise SynapseError(500, "Could not deactivate user")
 
             user = await self.admin_handler.get_user(target_user)
             return 200, user
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 1294e080dc..2c99536678 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -599,6 +599,7 @@ class SSOAuthHandler(object):
         redirect_url = self._add_login_token_to_redirect_url(
             client_redirect_url, login_token
         )
+        # Redirect the client to the URL, which now carries the login token
         request.redirect(redirect_url)
         finish_request(request)
 
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 490ce8f55d..cbe4a6a51f 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -507,3 +507,62 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(1, channel.json_body["admin"])
         self.assertEqual(0, channel.json_body["is_guest"])
         self.assertEqual(1, channel.json_body["deactivated"])
+
+    def test_accidental_deactivation_prevention(self):
+        """
+        Ensure an account can't accidentally be deactivated by using a str value
+        for the deactivated body parameter
+        """
+        self.hs.config.registration_shared_secret = None
+
+        # Create user
+        body = json.dumps({"password": "abc123"})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("bob", channel.json_body["displayname"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("bob", channel.json_body["displayname"])
+        self.assertEqual(0, channel.json_body["deactivated"])
+
+        # Change password (and use a str for deactivate instead of a bool)
+        body = json.dumps({"password": "abc123", "deactivated": "false"})  # oops!
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Check user is not deactivated
+        request, channel = self.make_request(
+            "GET", self.url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("bob", channel.json_body["displayname"])
+
+        # Ensure they're still alive
+        self.assertEqual(0, channel.json_body["deactivated"])
-- 
cgit 1.4.1
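
The bug being guarded against here is a Python truthiness footgun:
bool("false") is True, since any non-empty string is truthy, so the old
bool(body["deactivated"]) coercion would happily deactivate an account. A
minimal sketch of the strict check (the helper name is illustrative, not
Synapse's API):

def get_bool_param(body: dict, key: str) -> bool:
    """Fetch a JSON body parameter, insisting it is a real JSON boolean."""
    value = body[key]
    # bool("false") is True -- any non-empty string is truthy -- so an
    # explicit isinstance check is the only safe way to accept this field.
    if not isinstance(value, bool):
        raise ValueError("%r parameter is not of type boolean" % (key,))
    return value

assert get_bool_param({"deactivated": True}, "deactivated") is True
try:
    get_bool_param({"deactivated": "false"}, "deactivated")
except ValueError:
    pass  # the string "false" is rejected instead of being coerced to True
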


From 7728d87fd7e1af17dd6b0c619cbfecb1fadb624f Mon Sep 17 00:00:00 2001
From: Uday Bansal <43824981+udaybansal19@users.noreply.github.com>
Date: Wed, 26 Feb 2020 20:47:03 +0530
Subject: Updated warning for incorrect database collation/ctype (#6985)

Signed-off-by: Uday Bansal <43824981+udaybansal19@users.noreply.github.com>
---
 changelog.d/6985.misc               |  1 +
 synapse/storage/engines/postgres.py | 10 +++++++---
 2 files changed, 8 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6985.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6985.misc b/changelog.d/6985.misc
new file mode 100644
index 0000000000..ba367fa9af
--- /dev/null
+++ b/changelog.d/6985.misc
@@ -0,0 +1 @@
+Update warning for incorrect database collation/ctype to include link to documentation.
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index a077345960..53b3f372b0 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -53,7 +53,7 @@ class PostgresEngine(object):
             if rows and rows[0][0] != "UTF8":
                 raise IncorrectDatabaseSetup(
                     "Database has incorrect encoding: '%s' instead of 'UTF8'\n"
-                    "See docs/postgres.rst for more information." % (rows[0][0],)
+                    "See docs/postgres.md for more information." % (rows[0][0],)
                 )
 
             txn.execute(
@@ -62,12 +62,16 @@ class PostgresEngine(object):
             collation, ctype = txn.fetchone()
             if collation != "C":
                 logger.warning(
-                    "Database has incorrect collation of %r. Should be 'C'", collation
+                    "Database has incorrect collation of %r. Should be 'C'\n"
+                    "See docs/postgres.md for more information.",
+                    collation,
                 )
 
             if ctype != "C":
                 logger.warning(
-                    "Database has incorrect ctype of %r. Should be 'C'", ctype
+                    "Database has incorrect ctype of %r. Should be 'C'\n"
+                    "See docs/postgres.md for more information.",
+                    ctype,
                 )
 
     def check_new_database(self, txn):
-- 
cgit 1.4.1
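
For reference, the check that emits those warnings boils down to a single
catalogue query. A hedged standalone version, assuming psycopg2 is installed
(the connection call is commented out; fill in real credentials to run it):

import logging

import psycopg2  # any DB-API driver with pyformat parameters works the same

logger = logging.getLogger(__name__)

def check_collation_and_ctype(conn, dbname):
    with conn.cursor() as txn:
        txn.execute(
            "SELECT datcollate, datctype FROM pg_database WHERE datname = %s",
            (dbname,),
        )
        collation, ctype = txn.fetchone()
    for name, value in (("collation", collation), ("ctype", ctype)):
        if value != "C":
            # Non-'C' collations can change sort order across libc upgrades,
            # silently corrupting indexes -- hence the loud warning.
            logger.warning(
                "Database has incorrect %s of %r. Should be 'C'\n"
                "See docs/postgres.md for more information.",
                name,
                value,
            )

# conn = psycopg2.connect(dbname="synapse", user="synapse_user")
# check_collation_and_ctype(conn, "synapse")
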


From 1f773eec912e4908ab60f7823f5c0a024261af4d Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 26 Feb 2020 15:33:26 +0000
Subject: Port PresenceHandler to async/await (#6991)

---
 changelog.d/6991.misc               |   1 +
 synapse/handlers/message.py         |   5 +-
 synapse/handlers/presence.py        | 192 ++++++++++++++++--------------------
 synapse/replication/tcp/resource.py |   6 +-
 synapse/server.pyi                  |   5 +
 tests/handlers/test_presence.py     |  18 ++--
 tox.ini                             |   1 +
 7 files changed, 113 insertions(+), 115 deletions(-)
 create mode 100644 changelog.d/6991.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6991.misc b/changelog.d/6991.misc
new file mode 100644
index 0000000000..5130f4e8af
--- /dev/null
+++ b/changelog.d/6991.misc
@@ -0,0 +1 @@
+Port `synapse.handlers.presence` to async/await.
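
The handler diffs below all follow the same mechanical recipe: drop
@defer.inlineCallbacks, declare the function as a native coroutine, and
replace each yield with await. A toy before/after, assuming Twisted (whose
Deferreds are directly awaitable) and eliding error handling:

from twisted.internet import defer

# Old style: a generator-based coroutine driven by Twisted's inlineCallbacks.
@defer.inlineCallbacks
def bump_active_time_old(presence, user):
    yield presence.bump_presence_active_time(user)

# New style: a native coroutine. Since Deferreds are awaitable, the body is
# a mechanical yield -> await rewrite and callers can treat both the same.
async def bump_active_time_new(presence, user):
    await presence.bump_presence_active_time(user)
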
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index d6be280952..a0103addd3 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -1016,11 +1016,10 @@ class EventCreationHandler(object):
             # matters as sometimes presence code can take a while.
             run_in_background(self._bump_active_time, requester.user)
 
-    @defer.inlineCallbacks
-    def _bump_active_time(self, user):
+    async def _bump_active_time(self, user):
         try:
             presence = self.hs.get_presence_handler()
-            yield presence.bump_presence_active_time(user)
+            await presence.bump_presence_active_time(user)
         except Exception:
             logger.exception("Error bumping presence active time")
 
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 0d6cf2b008..5526015ddb 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -24,11 +24,12 @@ The methods that define policy are:
 
 import logging
 from contextlib import contextmanager
-from typing import Dict, Set
+from typing import Dict, List, Set
 
 from six import iteritems, itervalues
 
 from prometheus_client import Counter
+from typing_extensions import ContextManager
 
 from twisted.internet import defer
 
@@ -42,10 +43,14 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.presence import UserPresenceState
 from synapse.types import UserID, get_domain_from_id
 from synapse.util.async_helpers import Linearizer
-from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.util.caches.descriptors import cached
 from synapse.util.metrics import Measure
 from synapse.util.wheel_timer import WheelTimer
 
+MYPY = False
+if MYPY:
+    import synapse.server
+
 logger = logging.getLogger(__name__)
 
 
@@ -97,7 +102,6 @@ assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
 class PresenceHandler(object):
     def __init__(self, hs: "synapse.server.HomeServer"):
         self.hs = hs
-        self.is_mine = hs.is_mine
         self.is_mine_id = hs.is_mine_id
         self.server_name = hs.hostname
         self.clock = hs.get_clock()
@@ -150,7 +154,7 @@ class PresenceHandler(object):
 
         # Set of users who have presence in the `user_to_current_state` that
         # have not yet been persisted
-        self.unpersisted_users_changes = set()
+        self.unpersisted_users_changes = set()  # type: Set[str]
 
         hs.get_reactor().addSystemEventTrigger(
             "before",
@@ -160,12 +164,11 @@ class PresenceHandler(object):
             self._on_shutdown,
         )
 
-        self.serial_to_user = {}
         self._next_serial = 1
 
         # Keeps track of the number of *ongoing* syncs on this process. While
         # this is non zero a user will never go offline.
-        self.user_to_num_current_syncs = {}
+        self.user_to_num_current_syncs = {}  # type: Dict[str, int]
 
         # Keeps track of the number of *ongoing* syncs on other processes.
         # While any sync is ongoing on another process the user will never
@@ -213,8 +216,7 @@ class PresenceHandler(object):
         self._event_pos = self.store.get_current_events_token()
         self._event_processing = False
 
-    @defer.inlineCallbacks
-    def _on_shutdown(self):
+    async def _on_shutdown(self):
         """Gets called when shutting down. This lets us persist any updates that
         we haven't yet persisted, e.g. updates that only changes some internal
         timers. This allows changes to persist across startup without having to
@@ -235,7 +237,7 @@ class PresenceHandler(object):
 
         if self.unpersisted_users_changes:
 
-            yield self.store.update_presence(
+            await self.store.update_presence(
                 [
                     self.user_to_current_state[user_id]
                     for user_id in self.unpersisted_users_changes
@@ -243,8 +245,7 @@ class PresenceHandler(object):
             )
         logger.info("Finished _on_shutdown")
 
-    @defer.inlineCallbacks
-    def _persist_unpersisted_changes(self):
+    async def _persist_unpersisted_changes(self):
         """We periodically persist the unpersisted changes, as otherwise they
         may stack up and slow down shutdown times.
         """
@@ -253,12 +254,11 @@ class PresenceHandler(object):
 
         if unpersisted:
             logger.info("Persisting %d unpersisted presence updates", len(unpersisted))
-            yield self.store.update_presence(
+            await self.store.update_presence(
                 [self.user_to_current_state[user_id] for user_id in unpersisted]
             )
 
-    @defer.inlineCallbacks
-    def _update_states(self, new_states):
+    async def _update_states(self, new_states):
         """Updates presence of users. Sets the appropriate timeouts. Pokes
         the notifier and federation if and only if the changed presence state
         should be sent to clients/servers.
@@ -267,7 +267,7 @@ class PresenceHandler(object):
 
         with Measure(self.clock, "presence_update_states"):
 
-            # NOTE: We purposefully don't yield between now and when we've
+            # NOTE: We purposefully don't await between now and when we've
             # calculated what we want to do with the new states, to avoid races.
 
             to_notify = {}  # Changes we want to notify everyone about
@@ -311,7 +311,7 @@ class PresenceHandler(object):
 
             if to_notify:
                 notified_presence_counter.inc(len(to_notify))
-                yield self._persist_and_notify(list(to_notify.values()))
+                await self._persist_and_notify(list(to_notify.values()))
 
             self.unpersisted_users_changes |= {s.user_id for s in new_states}
             self.unpersisted_users_changes -= set(to_notify.keys())
@@ -326,7 +326,7 @@ class PresenceHandler(object):
 
                 self._push_to_remotes(to_federation_ping.values())
 
-    def _handle_timeouts(self):
+    async def _handle_timeouts(self):
         """Checks the presence of users that have timed out and updates as
         appropriate.
         """
@@ -368,10 +368,9 @@ class PresenceHandler(object):
             now=now,
         )
 
-        return self._update_states(changes)
+        return await self._update_states(changes)
 
-    @defer.inlineCallbacks
-    def bump_presence_active_time(self, user):
+    async def bump_presence_active_time(self, user):
         """We've seen the user do something that indicates they're interacting
         with the app.
         """
@@ -383,16 +382,17 @@ class PresenceHandler(object):
 
         bump_active_time_counter.inc()
 
-        prev_state = yield self.current_state_for_user(user_id)
+        prev_state = await self.current_state_for_user(user_id)
 
         new_fields = {"last_active_ts": self.clock.time_msec()}
         if prev_state.state == PresenceState.UNAVAILABLE:
             new_fields["state"] = PresenceState.ONLINE
 
-        yield self._update_states([prev_state.copy_and_replace(**new_fields)])
+        await self._update_states([prev_state.copy_and_replace(**new_fields)])
 
-    @defer.inlineCallbacks
-    def user_syncing(self, user_id, affect_presence=True):
+    async def user_syncing(
+        self, user_id: str, affect_presence: bool = True
+    ) -> ContextManager[None]:
         """Returns a context manager that should surround any stream requests
         from the user.
 
@@ -415,11 +415,11 @@ class PresenceHandler(object):
             curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
             self.user_to_num_current_syncs[user_id] = curr_sync + 1
 
-            prev_state = yield self.current_state_for_user(user_id)
+            prev_state = await self.current_state_for_user(user_id)
             if prev_state.state == PresenceState.OFFLINE:
                 # If they're currently offline then bring them online, otherwise
                 # just update the last sync times.
-                yield self._update_states(
+                await self._update_states(
                     [
                         prev_state.copy_and_replace(
                             state=PresenceState.ONLINE,
@@ -429,7 +429,7 @@ class PresenceHandler(object):
                     ]
                 )
             else:
-                yield self._update_states(
+                await self._update_states(
                     [
                         prev_state.copy_and_replace(
                             last_user_sync_ts=self.clock.time_msec()
@@ -437,13 +437,12 @@ class PresenceHandler(object):
                     ]
                 )
 
-        @defer.inlineCallbacks
-        def _end():
+        async def _end():
             try:
                 self.user_to_num_current_syncs[user_id] -= 1
 
-                prev_state = yield self.current_state_for_user(user_id)
-                yield self._update_states(
+                prev_state = await self.current_state_for_user(user_id)
+                await self._update_states(
                     [
                         prev_state.copy_and_replace(
                             last_user_sync_ts=self.clock.time_msec()
@@ -480,8 +479,7 @@ class PresenceHandler(object):
         else:
             return set()
 
-    @defer.inlineCallbacks
-    def update_external_syncs_row(
+    async def update_external_syncs_row(
         self, process_id, user_id, is_syncing, sync_time_msec
     ):
         """Update the syncing users for an external process as a delta.
@@ -494,8 +492,8 @@ class PresenceHandler(object):
             is_syncing (bool): Whether or not the user is now syncing
             sync_time_msec(int): Time in ms when the user was last syncing
         """
-        with (yield self.external_sync_linearizer.queue(process_id)):
-            prev_state = yield self.current_state_for_user(user_id)
+        with (await self.external_sync_linearizer.queue(process_id)):
+            prev_state = await self.current_state_for_user(user_id)
 
             process_presence = self.external_process_to_current_syncs.setdefault(
                 process_id, set()
@@ -525,25 +523,24 @@ class PresenceHandler(object):
                 process_presence.discard(user_id)
 
             if updates:
-                yield self._update_states(updates)
+                await self._update_states(updates)
 
             self.external_process_last_updated_ms[process_id] = self.clock.time_msec()
 
-    @defer.inlineCallbacks
-    def update_external_syncs_clear(self, process_id):
+    async def update_external_syncs_clear(self, process_id):
         """Marks all users that had been marked as syncing by a given process
         as offline.
 
         Used when the process has stopped/disappeared.
         """
-        with (yield self.external_sync_linearizer.queue(process_id)):
+        with (await self.external_sync_linearizer.queue(process_id)):
             process_presence = self.external_process_to_current_syncs.pop(
                 process_id, set()
             )
-            prev_states = yield self.current_state_for_users(process_presence)
+            prev_states = await self.current_state_for_users(process_presence)
             time_now_ms = self.clock.time_msec()
 
-            yield self._update_states(
+            await self._update_states(
                 [
                     prev_state.copy_and_replace(last_user_sync_ts=time_now_ms)
                     for prev_state in itervalues(prev_states)
@@ -551,15 +548,13 @@ class PresenceHandler(object):
             )
             self.external_process_last_updated_ms.pop(process_id, None)
 
-    @defer.inlineCallbacks
-    def current_state_for_user(self, user_id):
+    async def current_state_for_user(self, user_id):
         """Get the current presence state for a user.
         """
-        res = yield self.current_state_for_users([user_id])
+        res = await self.current_state_for_users([user_id])
         return res[user_id]
 
-    @defer.inlineCallbacks
-    def current_state_for_users(self, user_ids):
+    async def current_state_for_users(self, user_ids):
         """Get the current presence state for multiple users.
 
         Returns:
@@ -574,7 +569,7 @@ class PresenceHandler(object):
         if missing:
             # There are things not in our in memory cache. Lets pull them out of
             # the database.
-            res = yield self.store.get_presence_for_users(missing)
+            res = await self.store.get_presence_for_users(missing)
             states.update(res)
 
             missing = [user_id for user_id, state in iteritems(states) if not state]
@@ -587,14 +582,13 @@ class PresenceHandler(object):
 
         return states
 
-    @defer.inlineCallbacks
-    def _persist_and_notify(self, states):
+    async def _persist_and_notify(self, states):
         """Persist states in the database, poke the notifier and send to
         interested remote servers
         """
-        stream_id, max_token = yield self.store.update_presence(states)
+        stream_id, max_token = await self.store.update_presence(states)
 
-        parties = yield get_interested_parties(self.store, states)
+        parties = await get_interested_parties(self.store, states)
         room_ids_to_states, users_to_states = parties
 
         self.notifier.on_new_event(
@@ -606,9 +600,8 @@ class PresenceHandler(object):
 
         self._push_to_remotes(states)
 
-    @defer.inlineCallbacks
-    def notify_for_states(self, state, stream_id):
-        parties = yield get_interested_parties(self.store, [state])
+    async def notify_for_states(self, state, stream_id):
+        parties = await get_interested_parties(self.store, [state])
         room_ids_to_states, users_to_states = parties
 
         self.notifier.on_new_event(
@@ -626,8 +619,7 @@ class PresenceHandler(object):
         """
         self.federation.send_presence(states)
 
-    @defer.inlineCallbacks
-    def incoming_presence(self, origin, content):
+    async def incoming_presence(self, origin, content):
         """Called when we receive a `m.presence` EDU from a remote server.
         """
         now = self.clock.time_msec()
@@ -670,21 +662,19 @@ class PresenceHandler(object):
             new_fields["status_msg"] = push.get("status_msg", None)
             new_fields["currently_active"] = push.get("currently_active", False)
 
-            prev_state = yield self.current_state_for_user(user_id)
+            prev_state = await self.current_state_for_user(user_id)
             updates.append(prev_state.copy_and_replace(**new_fields))
 
         if updates:
             federation_presence_counter.inc(len(updates))
-            yield self._update_states(updates)
+            await self._update_states(updates)
 
-    @defer.inlineCallbacks
-    def get_state(self, target_user, as_event=False):
-        results = yield self.get_states([target_user.to_string()], as_event=as_event)
+    async def get_state(self, target_user, as_event=False):
+        results = await self.get_states([target_user.to_string()], as_event=as_event)
 
         return results[0]
 
-    @defer.inlineCallbacks
-    def get_states(self, target_user_ids, as_event=False):
+    async def get_states(self, target_user_ids, as_event=False):
         """Get the presence state for users.
 
         Args:
@@ -695,7 +685,7 @@ class PresenceHandler(object):
             list
         """
 
-        updates = yield self.current_state_for_users(target_user_ids)
+        updates = await self.current_state_for_users(target_user_ids)
         updates = list(updates.values())
 
         for user_id in set(target_user_ids) - {u.user_id for u in updates}:
@@ -713,8 +703,7 @@ class PresenceHandler(object):
         else:
             return updates
 
-    @defer.inlineCallbacks
-    def set_state(self, target_user, state, ignore_status_msg=False):
+    async def set_state(self, target_user, state, ignore_status_msg=False):
         """Set the presence state of the user.
         """
         status_msg = state.get("status_msg", None)
@@ -730,7 +719,7 @@ class PresenceHandler(object):
 
         user_id = target_user.to_string()
 
-        prev_state = yield self.current_state_for_user(user_id)
+        prev_state = await self.current_state_for_user(user_id)
 
         new_fields = {"state": presence}
 
@@ -741,16 +730,15 @@ class PresenceHandler(object):
         if presence == PresenceState.ONLINE:
             new_fields["last_active_ts"] = self.clock.time_msec()
 
-        yield self._update_states([prev_state.copy_and_replace(**new_fields)])
+        await self._update_states([prev_state.copy_and_replace(**new_fields)])
 
-    @defer.inlineCallbacks
-    def is_visible(self, observed_user, observer_user):
+    async def is_visible(self, observed_user, observer_user):
         """Returns whether a user can see another user's presence.
         """
-        observer_room_ids = yield self.store.get_rooms_for_user(
+        observer_room_ids = await self.store.get_rooms_for_user(
             observer_user.to_string()
         )
-        observed_room_ids = yield self.store.get_rooms_for_user(
+        observed_room_ids = await self.store.get_rooms_for_user(
             observed_user.to_string()
         )
 
@@ -759,8 +747,7 @@ class PresenceHandler(object):
 
         return False
 
-    @defer.inlineCallbacks
-    def get_all_presence_updates(self, last_id, current_id):
+    async def get_all_presence_updates(self, last_id, current_id):
         """
         Gets a list of presence update rows between the given stream ids.
         Each row has:
@@ -775,7 +762,7 @@ class PresenceHandler(object):
         """
         # TODO(markjh): replicate the unpersisted changes.
         # This could use the in-memory stores for recent changes.
-        rows = yield self.store.get_all_presence_updates(last_id, current_id)
+        rows = await self.store.get_all_presence_updates(last_id, current_id)
         return rows
 
     def notify_new_event(self):
@@ -786,20 +773,18 @@ class PresenceHandler(object):
         if self._event_processing:
             return
 
-        @defer.inlineCallbacks
-        def _process_presence():
+        async def _process_presence():
             assert not self._event_processing
 
             self._event_processing = True
             try:
-                yield self._unsafe_process()
+                await self._unsafe_process()
             finally:
                 self._event_processing = False
 
         run_as_background_process("presence.notify_new_event", _process_presence)
 
-    @defer.inlineCallbacks
-    def _unsafe_process(self):
+    async def _unsafe_process(self):
         # Loop round handling deltas until we're up to date
         while True:
             with Measure(self.clock, "presence_delta"):
@@ -812,10 +797,10 @@ class PresenceHandler(object):
                     self._event_pos,
                     room_max_stream_ordering,
                 )
-                max_pos, deltas = yield self.store.get_current_state_deltas(
+                max_pos, deltas = await self.store.get_current_state_deltas(
                     self._event_pos, room_max_stream_ordering
                 )
-                yield self._handle_state_delta(deltas)
+                await self._handle_state_delta(deltas)
 
                 self._event_pos = max_pos
 
@@ -824,8 +809,7 @@ class PresenceHandler(object):
                     max_pos
                 )
 
-    @defer.inlineCallbacks
-    def _handle_state_delta(self, deltas):
+    async def _handle_state_delta(self, deltas):
         """Process current state deltas to find new joins that need to be
         handled.
         """
@@ -846,13 +830,13 @@ class PresenceHandler(object):
                 # joins.
                 continue
 
-            event = yield self.store.get_event(event_id, allow_none=True)
+            event = await self.store.get_event(event_id, allow_none=True)
             if not event or event.content.get("membership") != Membership.JOIN:
                 # We only care about joins
                 continue
 
             if prev_event_id:
-                prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+                prev_event = await self.store.get_event(prev_event_id, allow_none=True)
                 if (
                     prev_event
                     and prev_event.content.get("membership") == Membership.JOIN
@@ -860,10 +844,9 @@ class PresenceHandler(object):
                     # Ignore changes to join events.
                     continue
 
-            yield self._on_user_joined_room(room_id, state_key)
+            await self._on_user_joined_room(room_id, state_key)
 
-    @defer.inlineCallbacks
-    def _on_user_joined_room(self, room_id, user_id):
+    async def _on_user_joined_room(self, room_id, user_id):
         """Called when we detect a user joining the room via the current state
         delta stream.
 
@@ -882,8 +865,8 @@ class PresenceHandler(object):
             # TODO: We should be able to filter the hosts down to those that
             # haven't previously seen the user
 
-            state = yield self.current_state_for_user(user_id)
-            hosts = yield self.state.get_current_hosts_in_room(room_id)
+            state = await self.current_state_for_user(user_id)
+            hosts = await self.state.get_current_hosts_in_room(room_id)
 
             # Filter out ourselves.
             hosts = {host for host in hosts if host != self.server_name}
@@ -903,10 +886,10 @@ class PresenceHandler(object):
             # TODO: Check that this is actually a new server joining the
             # room.
 
-            user_ids = yield self.state.get_current_users_in_room(room_id)
+            user_ids = await self.state.get_current_users_in_room(room_id)
             user_ids = list(filter(self.is_mine_id, user_ids))
 
-            states = yield self.current_state_for_users(user_ids)
+            states = await self.current_state_for_users(user_ids)
 
             # Filter out old presence, i.e. offline presence states where
             # the user hasn't been active for a week. We can change this
@@ -996,9 +979,8 @@ class PresenceEventSource(object):
         self.store = hs.get_datastore()
         self.state = hs.get_state_handler()
 
-    @defer.inlineCallbacks
     @log_function
-    def get_new_events(
+    async def get_new_events(
         self,
         user,
         from_key,
@@ -1045,7 +1027,7 @@ class PresenceEventSource(object):
             presence = self.get_presence_handler()
             stream_change_cache = self.store.presence_stream_cache
 
-            users_interested_in = yield self._get_interested_in(user, explicit_room_id)
+            users_interested_in = await self._get_interested_in(user, explicit_room_id)
 
             user_ids_changed = set()
             changed = None
@@ -1071,7 +1053,7 @@ class PresenceEventSource(object):
                 else:
                     user_ids_changed = users_interested_in
 
-            updates = yield presence.current_state_for_users(user_ids_changed)
+            updates = await presence.current_state_for_users(user_ids_changed)
 
         if include_offline:
             return (list(updates.values()), max_token)
@@ -1084,11 +1066,11 @@ class PresenceEventSource(object):
     def get_current_key(self):
         return self.store.get_current_presence_token()
 
-    def get_pagination_rows(self, user, pagination_config, key):
-        return self.get_new_events(user, from_key=None, include_offline=False)
+    async def get_pagination_rows(self, user, pagination_config, key):
+        return await self.get_new_events(user, from_key=None, include_offline=False)
 
-    @cachedInlineCallbacks(num_args=2, cache_context=True)
-    def _get_interested_in(self, user, explicit_room_id, cache_context):
+    @cached(num_args=2, cache_context=True)
+    async def _get_interested_in(self, user, explicit_room_id, cache_context):
         """Returns the set of users that the given user should see presence
         updates for
         """
@@ -1096,13 +1078,13 @@ class PresenceEventSource(object):
         users_interested_in = set()
         users_interested_in.add(user_id)  # So that we receive our own presence
 
-        users_who_share_room = yield self.store.get_users_who_share_room_with_user(
+        users_who_share_room = await self.store.get_users_who_share_room_with_user(
             user_id, on_invalidate=cache_context.invalidate
         )
         users_interested_in.update(users_who_share_room)
 
         if explicit_room_id:
-            user_ids = yield self.store.get_users_in_room(
+            user_ids = await self.store.get_users_in_room(
                 explicit_room_id, on_invalidate=cache_context.invalidate
             )
             users_interested_in.update(user_ids)
@@ -1277,8 +1259,8 @@ def get_interested_parties(store, states):
         2-tuple: `(room_ids_to_states, users_to_states)`,
         with each item being a dict of `entity_name` -> `[UserPresenceState]`
     """
-    room_ids_to_states = {}
-    users_to_states = {}
+    room_ids_to_states = {}  # type: Dict[str, List[UserPresenceState]]
+    users_to_states = {}  # type: Dict[str, List[UserPresenceState]]
     for state in states:
         room_ids = yield store.get_rooms_for_user(state.user_id)
         for room_id in room_ids:
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index ce60ae2e07..ce9d1fae12 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -323,7 +323,11 @@ class ReplicationStreamer(object):
 
         # We need to tell the presence handler that the connection has been
         # lost so that it can handle any ongoing syncs on that connection.
-        self.presence_handler.update_external_syncs_clear(connection.conn_id)
+        run_as_background_process(
+            "update_external_syncs_clear",
+            self.presence_handler.update_external_syncs_clear,
+            connection.conn_id,
+        )
 
 
 def _batch_updates(updates):
diff --git a/synapse/server.pyi b/synapse/server.pyi
index 40eabfe5d9..3844f0e12f 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -3,6 +3,7 @@ import twisted.internet
 import synapse.api.auth
 import synapse.config.homeserver
 import synapse.crypto.keyring
+import synapse.federation.federation_server
 import synapse.federation.sender
 import synapse.federation.transport.client
 import synapse.handlers
@@ -107,5 +108,9 @@ class HomeServer(object):
         self,
     ) -> synapse.replication.tcp.client.ReplicationClientHandler:
         pass
+    def get_federation_registry(
+        self,
+    ) -> synapse.federation.federation_server.FederationHandlerRegistry:
+        pass
     def is_mine_id(self, domain_id: str) -> bool:
         pass
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index 64915bafcd..05ea40a7de 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -494,8 +494,10 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
         self.helper.join(room_id, "@test2:server")
 
         # Mark test2 as online, test will be offline with a last_active of 0
-        self.presence_handler.set_state(
-            UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE}
+        self.get_success(
+            self.presence_handler.set_state(
+                UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE}
+            )
         )
         self.reactor.pump([0])  # Wait for presence updates to be handled
 
@@ -543,14 +545,18 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
         room_id = self.helper.create_room_as(self.user_id)
 
         # Mark test as online
-        self.presence_handler.set_state(
-            UserID.from_string("@test:server"), {"presence": PresenceState.ONLINE}
+        self.get_success(
+            self.presence_handler.set_state(
+                UserID.from_string("@test:server"), {"presence": PresenceState.ONLINE}
+            )
         )
 
         # Mark test2 as online, test will be offline with a last_active of 0.
         # Note we don't join them to the room yet
-        self.presence_handler.set_state(
-            UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE}
+        self.get_success(
+            self.presence_handler.set_state(
+                UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE}
+            )
         )
 
         # Add servers to the room
diff --git a/tox.ini b/tox.ini
index b715ea0bff..4ccfde01b5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -183,6 +183,7 @@ commands = mypy \
             synapse/events/spamcheck.py \
             synapse/federation/sender \
             synapse/federation/transport \
+            synapse/handlers/presence.py \
             synapse/handlers/sync.py \
             synapse/handlers/ui_auth \
             synapse/logging/ \
-- 
cgit 1.4.1
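
The patch above is largely a mechanical conversion of the presence handler from
Twisted's `@defer.inlineCallbacks` generators to native `async`/`await`
coroutines. A minimal sketch of the pattern (the handler and store names below
are illustrative, not Synapse's real classes): `await` accepts Deferreds
directly, while callers that still need a Deferred (reactor glue, or the
`run_as_background_process` call added to `ReplicationStreamer` above) can
wrap the coroutine with `defer.ensureDeferred`.

    # Sketch only: hypothetical stand-ins for the presence handler and store.
    from twisted.internet import defer, task


    class FakeStore:
        """Stands in for the database layer; returns Deferreds like adbapi."""

        def update_presence(self, states):
            return defer.succeed((len(states), "stream-token"))


    class PresenceLikeHandler:
        def __init__(self):
            self.store = FakeStore()

        @defer.inlineCallbacks
        def persist_old(self, states):
            # Old style: a generator that pauses on each yielded Deferred.
            stream_id, _token = yield self.store.update_presence(states)
            return stream_id

        async def persist_new(self, states):
            # New style: Deferreds are awaitable, so `await` replaces
            # `yield` and the function becomes a native coroutine.
            stream_id, _token = await self.store.update_presence(states)
            return stream_id


    def main(reactor):
        handler = PresenceLikeHandler()
        # Callers that need a Deferred bridge the coroutine explicitly:
        d = defer.ensureDeferred(handler.persist_new(["some-state"]))
        d.addCallback(lambda n: print("persisted", n, "state(s)"))
        return d


    task.react(main)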


From 380122866f8cf7b891c95f10a60c83537ef6c780 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Wed, 26 Feb 2020 11:32:13 -0500
Subject: Cast a coroutine into a Deferred in the federation base (#6996)

Properly convert a coroutine into a Deferred in federation_base to fix an error when joining a room.
---
 changelog.d/6996.bugfix               |  1 +
 synapse/federation/federation_base.py | 14 ++++++++------
 2 files changed, 9 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/6996.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6996.bugfix b/changelog.d/6996.bugfix
new file mode 100644
index 0000000000..765d376c7c
--- /dev/null
+++ b/changelog.d/6996.bugfix
@@ -0,0 +1 @@
+Fix bug which caused an error when joining a room, with `'coroutine' object has no attribute 'event_id'`.
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index eea64c1c9f..9fff65716a 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -96,12 +96,14 @@ class FederationBase(object):
 
             if not res and pdu.origin != origin:
                 try:
-                    res = yield self.get_pdu(
-                        destinations=[pdu.origin],
-                        event_id=pdu.event_id,
-                        room_version=room_version,
-                        outlier=outlier,
-                        timeout=10000,
+                    res = yield defer.ensureDeferred(
+                        self.get_pdu(
+                            destinations=[pdu.origin],
+                            event_id=pdu.event_id,
+                            room_version=room_version,
+                            outlier=outlier,
+                            timeout=10000,
+                        )
                     )
                 except SynapseError:
                     pass
-- 
cgit 1.4.1
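
For context on the fix above: `@defer.inlineCallbacks` only knows how to
resolve Deferreds. Once `get_pdu` became a coroutine function, `yield
self.get_pdu(...)` handed back the raw coroutine object, which flowed
downstream until something tried to read `res.event_id`, hence the error in
the changelog. A toy reproduction of the failure mode and the
`ensureDeferred` fix (the names here are illustrative, not Synapse's):

    from twisted.internet import defer, task


    async def get_pdu_like():
        # Stands in for the now-async get_pdu; returns a dict-like "event".
        return {"event_id": "$abc"}


    @defer.inlineCallbacks
    def broken():
        # `yield` passes non-Deferred values straight through, so `res`
        # here is a coroutine object, not the event: the reported bug.
        res = yield get_pdu_like()
        return res


    @defer.inlineCallbacks
    def fixed():
        # ensureDeferred schedules the coroutine and hands back a Deferred
        # that inlineCallbacks knows how to wait on.
        res = yield defer.ensureDeferred(get_pdu_like())
        return res


    def main(reactor):
        d = fixed()
        d.addCallback(lambda res: print("event_id:", res["event_id"]))
        return d


    task.react(main)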


From 3e99528f2bfaa686c4708fb8efcddce935b2397d Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 26 Feb 2020 16:58:33 +0000
Subject: Store room version on invite (#6983)

When we get an invite over federation, store the room version in the rooms table.

The general idea here is that, when we pull the invite out again, we'll want to know what room_version it belongs to (so that we can later redact it if need be). So we need to store it somewhere...
---
 changelog.d/6983.misc                    |  1 +
 synapse/handlers/federation.py           | 12 +++++++++++
 synapse/replication/http/_base.py        |  2 +-
 synapse/replication/http/federation.py   | 36 +++++++++++++++++++++++++++++++-
 synapse/storage/data_stores/main/room.py | 20 ++++++++++++++++++
 tests/app/test_openid_listener.py        |  8 +++++++
 tests/handlers/test_typing.py            |  1 +
 7 files changed, 78 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6983.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6983.misc b/changelog.d/6983.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6983.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index c2e6ee266d..38ab6a8fc3 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -60,6 +60,7 @@ from synapse.replication.http.devices import ReplicationUserDevicesResyncRestSer
 from synapse.replication.http.federation import (
     ReplicationCleanRoomRestServlet,
     ReplicationFederationSendEventsRestServlet,
+    ReplicationStoreRoomOnInviteRestServlet,
 )
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
 from synapse.state import StateResolutionStore, resolve_events_with_store
@@ -160,8 +161,12 @@ class FederationHandler(BaseHandler):
             self._user_device_resync = ReplicationUserDevicesResyncRestServlet.make_client(
                 hs
             )
+            self._maybe_store_room_on_invite = ReplicationStoreRoomOnInviteRestServlet.make_client(
+                hs
+            )
         else:
             self._device_list_updater = hs.get_device_handler().device_list_updater
+            self._maybe_store_room_on_invite = self.store.maybe_store_room_on_invite
 
         # When joining a room we need to queue any events for that room up
         self.room_queues = {}
@@ -1537,6 +1542,13 @@ class FederationHandler(BaseHandler):
         if event.state_key == self._server_notices_mxid:
             raise SynapseError(http_client.FORBIDDEN, "Cannot invite this user")
 
+        # keep a record of the room version, if we don't yet know it.
+        # (this may get overwritten if we later get a different room version in a
+        # join dance).
+        await self._maybe_store_room_on_invite(
+            room_id=event.room_id, room_version=room_version
+        )
+
         event.internal_metadata.outlier = True
         event.internal_metadata.out_of_band_membership = True
 
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 444eb7b7f4..1be1ccbdf3 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -44,7 +44,7 @@ class ReplicationEndpoint(object):
     """Helper base class for defining new replication HTTP endpoints.
 
     This creates an endpoint under `/_synapse/replication/:NAME/:PATH_ARGS..`
-    (with an `/:txn_id` prefix for cached requests.), where NAME is a name,
+    (with a `/:txn_id` suffix for cached requests), where NAME is a name,
     PATH_ARGS are a tuple of parameters to be encoded in the URL.
 
     For example, if `NAME` is "send_event" and `PATH_ARGS` is `("event_id",)`,
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index 49a3251372..8794720101 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -17,6 +17,7 @@ import logging
 
 from twisted.internet import defer
 
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import event_type_from_format_version
 from synapse.events.snapshot import EventContext
 from synapse.http.servlet import parse_json_object_from_request
@@ -211,7 +212,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
 
     Request format:
 
-        POST /_synapse/replication/fed_query/:fed_cleanup_room/:txn_id
+        POST /_synapse/replication/fed_cleanup_room/:room_id/:txn_id
 
         {}
     """
@@ -238,8 +239,41 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
         return 200, {}
 
 
+class ReplicationStoreRoomOnInviteRestServlet(ReplicationEndpoint):
+    """Called to clean up any data in DB for a given room, ready for the
+    server to join the room.
+
+    Request format:
+
+        POST /_synapse/replication/store_room_on_invite/:room_id/:txn_id
+
+        {
+            "room_version": "1",
+        }
+    """
+
+    NAME = "store_room_on_invite"
+    PATH_ARGS = ("room_id",)
+
+    def __init__(self, hs):
+        super().__init__(hs)
+
+        self.store = hs.get_datastore()
+
+    @staticmethod
+    def _serialize_payload(room_id, room_version):
+        return {"room_version": room_version.identifier}
+
+    async def _handle_request(self, request, room_id):
+        content = parse_json_object_from_request(request)
+        room_version = KNOWN_ROOM_VERSIONS[content["room_version"]]
+        await self.store.maybe_store_room_on_invite(room_id, room_version)
+        return 200, {}
+
+
 def register_servlets(hs, http_server):
     ReplicationFederationSendEventsRestServlet(hs).register(http_server)
     ReplicationFederationSendEduRestServlet(hs).register(http_server)
     ReplicationGetQueryRestServlet(hs).register(http_server)
     ReplicationCleanRoomRestServlet(hs).register(http_server)
+    ReplicationStoreRoomOnInviteRestServlet(hs).register(http_server)
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index 70137dfbe4..e6c10c6316 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -1020,6 +1020,26 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
             logger.error("store_room with room_id=%s failed: %s", room_id, e)
             raise StoreError(500, "Problem creating room.")
 
+    async def maybe_store_room_on_invite(self, room_id: str, room_version: RoomVersion):
+        """
+        When we receive an invite over federation, store the version of the room if we
+        don't already know the room version.
+        """
+        await self.db.simple_upsert(
+            desc="maybe_store_room_on_invite",
+            table="rooms",
+            keyvalues={"room_id": room_id},
+            values={},
+            insertion_values={
+                "room_version": room_version.identifier,
+                "is_public": False,
+                "creator": "",
+            },
+            # rooms has a unique constraint on room_id, so no need to lock when doing an
+            # emulated upsert.
+            lock=False,
+        )
+
     @defer.inlineCallbacks
     def set_room_is_public(self, room_id, is_public):
         def set_room_is_public_txn(txn, next_id):
diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py
index 1fe048048b..89fcc3889a 100644
--- a/tests/app/test_openid_listener.py
+++ b/tests/app/test_openid_listener.py
@@ -29,6 +29,14 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase):
         )
         return hs
 
+    def default_config(self, name="test"):
+        conf = super().default_config(name)
+        # we're using FederationReaderServer, which uses a SlavedStore, so we
+        # have to tell the FederationHandler not to try to access stuff that is only
+        # in the primary store.
+        conf["worker_app"] = "yes"
+        return conf
+
     @parameterized.expand(
         [
             (["federation"], "auth_fail"),
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 07b204666e..51e2b37218 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -74,6 +74,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
                 "set_received_txn_response",
                 "get_destination_retry_timings",
                 "get_devices_by_remote",
+                "maybe_store_room_on_invite",
                 # Bits that user_directory needs
                 "get_user_directory_stream_pos",
                 "get_current_state_deltas",
-- 
cgit 1.4.1
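
The storage side of the patch above relies on `simple_upsert` called with
empty `values` and populated `insertion_values`, i.e. "insert the row if it's
missing, otherwise leave it alone". A rough sketch of the equivalent SQL
semantics, using plain sqlite3 here (Synapse's native/emulated upsert handling
differs in detail, and `ON CONFLICT ... DO NOTHING` needs SQLite 3.24+):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE rooms (room_id TEXT PRIMARY KEY,"
        " room_version TEXT, is_public BOOLEAN, creator TEXT)"
    )


    def maybe_store_room_on_invite(room_id: str, room_version: str) -> None:
        # values={} plus insertion_values={...} in simple_upsert boils down
        # to: insert if absent, never overwrite a known room version.
        conn.execute(
            "INSERT INTO rooms (room_id, room_version, is_public, creator)"
            " VALUES (?, ?, ?, ?) ON CONFLICT (room_id) DO NOTHING",
            (room_id, room_version, False, ""),
        )


    maybe_store_room_on_invite("!a:example.org", "5")
    maybe_store_room_on_invite("!a:example.org", "1")  # no-op: already known
    print(conn.execute("SELECT room_version FROM rooms").fetchone())  # ('5',)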


From 132b673dbefa42eb7669a11522426f26e225ac05 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 27 Feb 2020 11:53:40 +0000
Subject: Add some type annotations in `synapse.storage` (#6987)

I cracked, and added some type definitions in synapse.storage.
---
 changelog.d/6987.misc               |   1 +
 synapse/storage/database.py         | 143 +++++++++++++++++++++---------------
 synapse/storage/engines/__init__.py |  28 +++----
 synapse/storage/engines/_base.py    |  87 ++++++++++++++++++++++
 synapse/storage/engines/postgres.py |  12 +--
 synapse/storage/engines/sqlite.py   |  13 ++--
 synapse/storage/types.py            |  65 ++++++++++++++++
 tox.ini                             |   5 +-
 8 files changed, 270 insertions(+), 84 deletions(-)
 create mode 100644 changelog.d/6987.misc
 create mode 100644 synapse/storage/types.py

(limited to 'changelog.d')

diff --git a/changelog.d/6987.misc b/changelog.d/6987.misc
new file mode 100644
index 0000000000..7ff74cda55
--- /dev/null
+++ b/changelog.d/6987.misc
@@ -0,0 +1 @@
+Add some type annotations to the database storage classes.
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 1953614401..609db40616 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -15,9 +15,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-import sys
 import time
-from typing import Iterable, Tuple
+from time import monotonic as monotonic_time
+from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple
 
 from six import iteritems, iterkeys, itervalues
 from six.moves import intern, range
@@ -32,24 +32,14 @@ from synapse.config.database import DatabaseConnectionConfig
 from synapse.logging.context import LoggingContext, make_deferred_yieldable
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.background_updates import BackgroundUpdater
-from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
+from synapse.storage.types import Connection, Cursor
 from synapse.util.stringutils import exception_to_unicode
 
-# import a function which will return a monotonic time, in seconds
-try:
-    # on python 3, use time.monotonic, since time.clock can go backwards
-    from time import monotonic as monotonic_time
-except ImportError:
-    # ... but python 2 doesn't have it
-    from time import clock as monotonic_time
-
 logger = logging.getLogger(__name__)
 
-try:
-    MAX_TXN_ID = sys.maxint - 1
-except AttributeError:
-    # python 3 does not have a maximum int value
-    MAX_TXN_ID = 2 ** 63 - 1
+# python 3 does not have a maximum int value
+MAX_TXN_ID = 2 ** 63 - 1
 
 sql_logger = logging.getLogger("synapse.storage.SQL")
 transaction_logger = logging.getLogger("synapse.storage.txn")
@@ -77,7 +67,7 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
 
 
 def make_pool(
-    reactor, db_config: DatabaseConnectionConfig, engine
+    reactor, db_config: DatabaseConnectionConfig, engine: BaseDatabaseEngine
 ) -> adbapi.ConnectionPool:
     """Get the connection pool for the database.
     """
@@ -90,7 +80,9 @@ def make_pool(
     )
 
 
-def make_conn(db_config: DatabaseConnectionConfig, engine):
+def make_conn(
+    db_config: DatabaseConnectionConfig, engine: BaseDatabaseEngine
+) -> Connection:
     """Make a new connection to the database and return it.
 
     Returns:
@@ -107,20 +99,27 @@ def make_conn(db_config: DatabaseConnectionConfig, engine):
     return db_conn
 
 
-class LoggingTransaction(object):
+# The type of entry which goes on our after_callbacks and exception_callbacks lists.
+#
+# Python 3.5.2 doesn't support Callable with an ellipsis, so we wrap it in quotes so
+# that mypy sees the type but the runtime python doesn't.
+_CallbackListEntry = Tuple["Callable[..., None]", Iterable[Any], Dict[str, Any]]
+
+
+class LoggingTransaction:
     """An object that almost-transparently proxies for the 'txn' object
     passed to the constructor. Adds logging and metrics to the .execute()
     method.
 
     Args:
         txn: The database transcation object to wrap.
-        name (str): The name of this transactions for logging.
-        database_engine (Sqlite3Engine|PostgresEngine)
-        after_callbacks(list|None): A list that callbacks will be appended to
+        name: The name of this transaction, for logging.
+        database_engine
+        after_callbacks: A list that callbacks will be appended to
             that have been added by `call_after` which should be run on
             successful completion of the transaction. None indicates that no
             callbacks should be allowed to be scheduled to run.
-        exception_callbacks(list|None): A list that callbacks will be appended
+        exception_callbacks: A list that callbacks will be appended
             to that have been added by `call_on_exception` which should be run
             if transaction ends with an error. None indicates that no callbacks
             should be allowed to be scheduled to run.
@@ -135,46 +134,67 @@ class LoggingTransaction(object):
     ]
 
     def __init__(
-        self, txn, name, database_engine, after_callbacks=None, exception_callbacks=None
+        self,
+        txn: Cursor,
+        name: str,
+        database_engine: BaseDatabaseEngine,
+        after_callbacks: Optional[List[_CallbackListEntry]] = None,
+        exception_callbacks: Optional[List[_CallbackListEntry]] = None,
     ):
-        object.__setattr__(self, "txn", txn)
-        object.__setattr__(self, "name", name)
-        object.__setattr__(self, "database_engine", database_engine)
-        object.__setattr__(self, "after_callbacks", after_callbacks)
-        object.__setattr__(self, "exception_callbacks", exception_callbacks)
+        self.txn = txn
+        self.name = name
+        self.database_engine = database_engine
+        self.after_callbacks = after_callbacks
+        self.exception_callbacks = exception_callbacks
 
-    def call_after(self, callback, *args, **kwargs):
+    def call_after(self, callback: "Callable[..., None]", *args, **kwargs):
         """Call the given callback on the main twisted thread after the
         transaction has finished. Used to invalidate the caches on the
         correct thread.
         """
+        # if self.after_callbacks is None, that means that whatever constructed the
+        # LoggingTransaction isn't expecting there to be any callbacks; assert that
+        # is not the case.
+        assert self.after_callbacks is not None
         self.after_callbacks.append((callback, args, kwargs))
 
-    def call_on_exception(self, callback, *args, **kwargs):
+    def call_on_exception(self, callback: "Callable[..., None]", *args, **kwargs):
+        # if self.exception_callbacks is None, that means that whatever constructed the
+        # LoggingTransaction isn't expecting there to be any callbacks; assert that
+        # is not the case.
+        assert self.exception_callbacks is not None
         self.exception_callbacks.append((callback, args, kwargs))
 
-    def __getattr__(self, name):
-        return getattr(self.txn, name)
+    def fetchall(self) -> List[Tuple]:
+        return self.txn.fetchall()
 
-    def __setattr__(self, name, value):
-        setattr(self.txn, name, value)
+    def fetchone(self) -> Tuple:
+        return self.txn.fetchone()
 
-    def __iter__(self):
+    def __iter__(self) -> Iterator[Tuple]:
         return self.txn.__iter__()
 
+    @property
+    def rowcount(self) -> int:
+        return self.txn.rowcount
+
+    @property
+    def description(self) -> Any:
+        return self.txn.description
+
     def execute_batch(self, sql, args):
         if isinstance(self.database_engine, PostgresEngine):
-            from psycopg2.extras import execute_batch
+            from psycopg2.extras import execute_batch  # type: ignore
 
             self._do_execute(lambda *x: execute_batch(self.txn, *x), sql, args)
         else:
             for val in args:
                 self.execute(sql, val)
 
-    def execute(self, sql, *args):
+    def execute(self, sql: str, *args: Any):
         self._do_execute(self.txn.execute, sql, *args)
 
-    def executemany(self, sql, *args):
+    def executemany(self, sql: str, *args: Any):
         self._do_execute(self.txn.executemany, sql, *args)
 
     def _make_sql_one_line(self, sql):
@@ -207,6 +227,9 @@ class LoggingTransaction(object):
             sql_logger.debug("[SQL time] {%s} %f sec", self.name, secs)
             sql_query_timer.labels(sql.split()[0]).observe(secs)
 
+    def close(self):
+        self.txn.close()
+
 
 class PerformanceCounters(object):
     def __init__(self):
@@ -251,7 +274,9 @@ class Database(object):
 
     _TXN_ID = 0
 
-    def __init__(self, hs, database_config: DatabaseConnectionConfig, engine):
+    def __init__(
+        self, hs, database_config: DatabaseConnectionConfig, engine: BaseDatabaseEngine
+    ):
         self.hs = hs
         self._clock = hs.get_clock()
         self._database_config = database_config
@@ -259,9 +284,9 @@ class Database(object):
 
         self.updates = BackgroundUpdater(hs, self)
 
-        self._previous_txn_total_time = 0
-        self._current_txn_total_time = 0
-        self._previous_loop_ts = 0
+        self._previous_txn_total_time = 0.0
+        self._current_txn_total_time = 0.0
+        self._previous_loop_ts = 0.0
 
         # TODO(paul): These can eventually be removed once the metrics code
         #   is running in mainline, and we have some nice monitoring frontends
@@ -463,23 +488,23 @@ class Database(object):
             sql_txn_timer.labels(desc).observe(duration)
 
     @defer.inlineCallbacks
-    def runInteraction(self, desc, func, *args, **kwargs):
+    def runInteraction(self, desc: str, func: Callable, *args: Any, **kwargs: Any):
         """Starts a transaction on the database and runs a given function
 
         Arguments:
-            desc (str): description of the transaction, for logging and metrics
-            func (func): callback function, which will be called with a
+            desc: description of the transaction, for logging and metrics
+            func: callback function, which will be called with a
                 database transaction (twisted.enterprise.adbapi.Transaction) as
                 its first argument, followed by `args` and `kwargs`.
 
-            args (list): positional args to pass to `func`
-            kwargs (dict): named args to pass to `func`
+            args: positional args to pass to `func`
+            kwargs: named args to pass to `func`
 
         Returns:
             Deferred: The result of func
         """
-        after_callbacks = []
-        exception_callbacks = []
+        after_callbacks = []  # type: List[_CallbackListEntry]
+        exception_callbacks = []  # type: List[_CallbackListEntry]
 
         if LoggingContext.current_context() == LoggingContext.sentinel:
             logger.warning("Starting db txn '%s' from sentinel context", desc)
@@ -505,15 +530,15 @@ class Database(object):
         return result
 
     @defer.inlineCallbacks
-    def runWithConnection(self, func, *args, **kwargs):
+    def runWithConnection(self, func: Callable, *args: Any, **kwargs: Any):
         """Wraps the .runWithConnection() method on the underlying db_pool.
 
         Arguments:
-            func (func): callback function, which will be called with a
+            func: callback function, which will be called with a
                 database connection (twisted.enterprise.adbapi.Connection) as
                 its first argument, followed by `args` and `kwargs`.
-            args (list): positional args to pass to `func`
-            kwargs (dict): named args to pass to `func`
+            args: positional args to pass to `func`
+            kwargs: named args to pass to `func`
 
         Returns:
             Deferred: The result of func
@@ -800,7 +825,7 @@ class Database(object):
                 return False
 
         # We didn't find any existing rows, so insert a new one
-        allvalues = {}
+        allvalues = {}  # type: Dict[str, Any]
         allvalues.update(keyvalues)
         allvalues.update(values)
         allvalues.update(insertion_values)
@@ -829,7 +854,7 @@ class Database(object):
         Returns:
             None
         """
-        allvalues = {}
+        allvalues = {}  # type: Dict[str, Any]
         allvalues.update(keyvalues)
         allvalues.update(insertion_values)
 
@@ -916,7 +941,7 @@ class Database(object):
         Returns:
             None
         """
-        allnames = []
+        allnames = []  # type: List[str]
         allnames.extend(key_names)
         allnames.extend(value_names)
 
@@ -1100,7 +1125,7 @@ class Database(object):
             keyvalues : dict of column names and values to select the rows with
             retcols : list of strings giving the names of the columns to return
         """
-        results = []
+        results = []  # type: List[Dict[str, Any]]
 
         if not iterable:
             return results
@@ -1439,7 +1464,7 @@ class Database(object):
             raise ValueError("order_direction must be one of 'ASC' or 'DESC'.")
 
         where_clause = "WHERE " if filters or keyvalues else ""
-        arg_list = []
+        arg_list = []  # type: List[Any]
         if filters:
             where_clause += " AND ".join("%s LIKE ?" % (k,) for k in filters)
             arg_list += list(filters.values())
diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py
index 9d2d519922..035f9ea6e9 100644
--- a/synapse/storage/engines/__init__.py
+++ b/synapse/storage/engines/__init__.py
@@ -12,29 +12,31 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-import importlib
 import platform
 
-from ._base import IncorrectDatabaseSetup
+from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup
 from .postgres import PostgresEngine
 from .sqlite import Sqlite3Engine
 
-SUPPORTED_MODULE = {"sqlite3": Sqlite3Engine, "psycopg2": PostgresEngine}
-
 
-def create_engine(database_config):
+def create_engine(database_config) -> BaseDatabaseEngine:
     name = database_config["name"]
-    engine_class = SUPPORTED_MODULE.get(name, None)
 
-    if engine_class:
+    if name == "sqlite3":
+        import sqlite3
+
+        return Sqlite3Engine(sqlite3, database_config)
+
+    if name == "psycopg2":
         # pypy requires psycopg2cffi rather than psycopg2
-        if name == "psycopg2" and platform.python_implementation() == "PyPy":
-            name = "psycopg2cffi"
-        module = importlib.import_module(name)
-        return engine_class(module, database_config)
+        if platform.python_implementation() == "PyPy":
+            import psycopg2cffi as psycopg2  # type: ignore
+        else:
+            import psycopg2  # type: ignore
+
+        return PostgresEngine(psycopg2, database_config)
 
     raise RuntimeError("Unsupported database engine '%s'" % (name,))
 
 
-__all__ = ["create_engine", "IncorrectDatabaseSetup"]
+__all__ = ["create_engine", "BaseDatabaseEngine", "IncorrectDatabaseSetup"]
diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
index ec5a4d198b..ab0bbe4bd3 100644
--- a/synapse/storage/engines/_base.py
+++ b/synapse/storage/engines/_base.py
@@ -12,7 +12,94 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import abc
+from typing import Generic, TypeVar
+
+from synapse.storage.types import Connection
 
 
 class IncorrectDatabaseSetup(RuntimeError):
     pass
+
+
+ConnectionType = TypeVar("ConnectionType", bound=Connection)
+
+
+class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
+    def __init__(self, module, database_config: dict):
+        self.module = module
+
+    @property
+    @abc.abstractmethod
+    def single_threaded(self) -> bool:
+        ...
+
+    @property
+    @abc.abstractmethod
+    def can_native_upsert(self) -> bool:
+        """
+        Do we support native UPSERTs?
+        """
+        ...
+
+    @property
+    @abc.abstractmethod
+    def supports_tuple_comparison(self) -> bool:
+        """
+        Do we support comparing tuples, i.e. `(a, b) > (c, d)`?
+        """
+        ...
+
+    @property
+    @abc.abstractmethod
+    def supports_using_any_list(self) -> bool:
+        """
+        Do we support using `a = ANY(?)` and passing a list
+        """
+        ...
+
+    @abc.abstractmethod
+    def check_database(
+        self, db_conn: ConnectionType, allow_outdated_version: bool = False
+    ) -> None:
+        ...
+
+    @abc.abstractmethod
+    def check_new_database(self, txn) -> None:
+        """Gets called when setting up a brand new database. This allows us to
+        apply stricter checks on new databases versus existing databases.
+        """
+        ...
+
+    @abc.abstractmethod
+    def convert_param_style(self, sql: str) -> str:
+        ...
+
+    @abc.abstractmethod
+    def on_new_connection(self, db_conn: ConnectionType) -> None:
+        ...
+
+    @abc.abstractmethod
+    def is_deadlock(self, error: Exception) -> bool:
+        ...
+
+    @abc.abstractmethod
+    def is_connection_closed(self, conn: ConnectionType) -> bool:
+        ...
+
+    @abc.abstractmethod
+    def lock_table(self, txn, table: str) -> None:
+        ...
+
+    @abc.abstractmethod
+    def get_next_state_group_id(self, txn) -> int:
+        """Returns an int that can be used as a new state_group ID
+        """
+        ...
+
+    @property
+    @abc.abstractmethod
+    def server_version(self) -> str:
+        """Gets a string giving the server version. For example: '3.22.0'
+        """
+        ...
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 53b3f372b0..6c7d08a6f2 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -15,16 +15,14 @@
 
 import logging
 
-from ._base import IncorrectDatabaseSetup
+from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup
 
 logger = logging.getLogger(__name__)
 
 
-class PostgresEngine(object):
-    single_threaded = False
-
+class PostgresEngine(BaseDatabaseEngine):
     def __init__(self, database_module, database_config):
-        self.module = database_module
+        super().__init__(database_module, database_config)
         self.module.extensions.register_type(self.module.extensions.UNICODE)
 
         # Disables passing `bytes` to txn.execute, c.f. #6186. If you do
@@ -36,6 +34,10 @@ class PostgresEngine(object):
         self.synchronous_commit = database_config.get("synchronous_commit", True)
         self._version = None  # unknown as yet
 
+    @property
+    def single_threaded(self) -> bool:
+        return False
+
     def check_database(self, db_conn, allow_outdated_version: bool = False):
         # Get the version of PostgreSQL that we're using. As per the psycopg2
         # docs: The number is formed by converting the major, minor, and
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index 641e490697..2bfeefd54e 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -12,16 +12,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import sqlite3
 import struct
 import threading
 
+from synapse.storage.engines import BaseDatabaseEngine
 
-class Sqlite3Engine(object):
-    single_threaded = True
 
+class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
     def __init__(self, database_module, database_config):
-        self.module = database_module
+        super().__init__(database_module, database_config)
 
         database = database_config.get("args", {}).get("database")
         self._is_in_memory = database in (None, ":memory:",)
@@ -31,6 +31,10 @@ class Sqlite3Engine(object):
         self._current_state_group_id = None
         self._current_state_group_id_lock = threading.Lock()
 
+    @property
+    def single_threaded(self) -> bool:
+        return True
+
     @property
     def can_native_upsert(self):
         """
@@ -68,7 +72,6 @@ class Sqlite3Engine(object):
         return sql
 
     def on_new_connection(self, db_conn):
-
         # We need to import here to avoid an import loop.
         from synapse.storage.prepare_database import prepare_database
 
diff --git a/synapse/storage/types.py b/synapse/storage/types.py
new file mode 100644
index 0000000000..daff81c5ee
--- /dev/null
+++ b/synapse/storage/types.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Iterable, Iterator, List, Tuple
+
+from typing_extensions import Protocol
+
+
+"""
+Some very basic protocol definitions for the DB-API2 classes specified in PEP-249
+"""
+
+
+class Cursor(Protocol):
+    def execute(self, sql: str, parameters: Iterable[Any] = ...) -> Any:
+        ...
+
+    def executemany(self, sql: str, parameters: Iterable[Iterable[Any]]) -> Any:
+        ...
+
+    def fetchall(self) -> List[Tuple]:
+        ...
+
+    def fetchone(self) -> Tuple:
+        ...
+
+    @property
+    def description(self) -> Any:
+        return None
+
+    @property
+    def rowcount(self) -> int:
+        return 0
+
+    def __iter__(self) -> Iterator[Tuple]:
+        ...
+
+    def close(self) -> None:
+        ...
+
+
+class Connection(Protocol):
+    def cursor(self) -> Cursor:
+        ...
+
+    def close(self) -> None:
+        ...
+
+    def commit(self) -> None:
+        ...
+
+    def rollback(self, *args, **kwargs) -> None:
+        ...
diff --git a/tox.ini b/tox.ini
index 4ccfde01b5..6521535137 100644
--- a/tox.ini
+++ b/tox.ini
@@ -168,7 +168,6 @@ commands=
     coverage html
 
 [testenv:mypy]
-basepython = python3.7
 skip_install = True
 deps =
     {[base]deps}
@@ -179,7 +178,8 @@ env =
 extras = all
 commands = mypy \
             synapse/api \
-            synapse/config/ \
+            synapse/appservice \
+            synapse/config \
             synapse/events/spamcheck.py \
             synapse/federation/sender \
             synapse/federation/transport \
@@ -192,6 +192,7 @@ commands = mypy \
             synapse/rest \
             synapse/spam_checker_api \
             synapse/storage/engines \
+            synapse/storage/database.py \
             synapse/streams
 
 # To find all folders that pass mypy you run:
-- 
cgit 1.4.1
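
Note on the new `synapse/storage/types.py` above: `Connection` and `Cursor`
are structural Protocols, so any DB-API2 driver whose objects happen to have
the right methods satisfies them without inheriting from anything. A minimal
standalone sketch (not Synapse code; the `run_statement` helper is
hypothetical) of how this type-checks against the standard library's sqlite3
driver:

    import sqlite3
    from typing import Any

    from typing_extensions import Protocol


    class Connection(Protocol):
        # A structural subset of the PEP-249 connection interface.
        def cursor(self) -> Any: ...
        def commit(self) -> None: ...
        def close(self) -> None: ...


    def run_statement(conn: Connection, sql: str) -> None:
        # Any DB-API2 connection type-checks here; no inheritance needed.
        cur = conn.cursor()
        cur.execute(sql)
        conn.commit()


    db = sqlite3.connect(":memory:")
    run_statement(db, "CREATE TABLE t (x INTEGER)")  # sqlite3.Connection matches
    db.close()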


From b32ac60c22493cd191d63eae5104fa9d69c37495 Mon Sep 17 00:00:00 2001
From: James 
Date: Thu, 27 Feb 2020 23:47:40 +1100
Subject: Expose common commands via the snap run interface to allow easier
 invocation (#6315)

Signed-off-by: James Hebden 
---
 changelog.d/6315.feature |  1 +
 snap/snapcraft.yaml      | 21 ++++++++++++++++-----
 2 files changed, 17 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/6315.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6315.feature b/changelog.d/6315.feature
new file mode 100644
index 0000000000..c5377dd1e9
--- /dev/null
+++ b/changelog.d/6315.feature
@@ -0,0 +1 @@
+Expose the `synctl`, `hash_password` and `generate_config` commands in the snapcraft package. Contributed by @devec0.
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 9e644e8567..6b62b79114 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,20 +1,31 @@
 name: matrix-synapse
 base: core18
-version: git 
+version: git
 summary: Reference Matrix homeserver
 description: |
   Synapse is the reference Matrix homeserver.
   Matrix is a federated and decentralised instant messaging and VoIP system.
 
-grade: stable 
-confinement: strict 
+grade: stable
+confinement: strict
 
 apps:
-  matrix-synapse: 
+  matrix-synapse:
     command: synctl --no-daemonize start $SNAP_COMMON/homeserver.yaml
     stop-command: synctl -c $SNAP_COMMON stop
     plugs: [network-bind, network]
-    daemon: simple 
+    daemon: simple
+  hash-password:
+    command: hash_password
+  generate-config:
+    command: generate_config
+  generate-signing-key:
+    command: generate_signing_key.py
+  register-new-matrix-user:
+    command: register_new_matrix_user
+    plugs: [network]
+  synctl:
+    command: synctl
 parts:
   matrix-synapse:
     source: .
-- 
cgit 1.4.1


From cab4a52535097c5836fe67c5e09e8350d7ccf03c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 27 Feb 2020 13:08:43 +0000
Subject: set worker_app for frontend proxy test (#7003)

to stop the FederationHandler from trying to do master-only work
---
 changelog.d/7003.misc            | 1 +
 tests/app/test_frontend_proxy.py | 5 +++++
 2 files changed, 6 insertions(+)
 create mode 100644 changelog.d/7003.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7003.misc b/changelog.d/7003.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/7003.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py
index 160e55aca9..d3feafa1b7 100644
--- a/tests/app/test_frontend_proxy.py
+++ b/tests/app/test_frontend_proxy.py
@@ -27,6 +27,11 @@ class FrontendProxyTests(HomeserverTestCase):
 
         return hs
 
+    def default_config(self, name="test"):
+        c = super().default_config(name)
+        c["worker_app"] = "synapse.app.frontend_proxy"
+        return c
+
     def test_listen_http_with_presence_enabled(self):
         """
         When presence is on, the stub servlet will not register.
-- 
cgit 1.4.1


From 2201bc979588720bd99880b9cd8df2292b2d483f Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Thu, 27 Feb 2020 16:33:21 +0000
Subject: Don't refuse to start worker if media listener configured. (#7002)

Instead, let's just warn if the worker has a media listener configured but
has the media repository disabled.

Previously, non-media-repository workers would just ignore the media
listener.
---
 changelog.d/7002.misc         |  1 +
 synapse/app/generic_worker.py | 34 ++++++++++++++++++++--------------
 2 files changed, 21 insertions(+), 14 deletions(-)
 create mode 100644 changelog.d/7002.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7002.misc b/changelog.d/7002.misc
new file mode 100644
index 0000000000..ec5c004bbe
--- /dev/null
+++ b/changelog.d/7002.misc
@@ -0,0 +1 @@
+Merge worker apps together.
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 30efd39092..b2c764bfe8 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -494,20 +494,26 @@ class GenericWorkerServer(HomeServer):
                 elif name == "federation":
                     resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
                 elif name == "media":
-                    media_repo = self.get_media_repository_resource()
-
-                    # We need to serve the admin servlets for media on the
-                    # worker.
-                    admin_resource = JsonResource(self, canonical_json=False)
-                    register_servlets_for_media_repo(self, admin_resource)
-
-                    resources.update(
-                        {
-                            MEDIA_PREFIX: media_repo,
-                            LEGACY_MEDIA_PREFIX: media_repo,
-                            "/_synapse/admin": admin_resource,
-                        }
-                    )
+                    if self.config.can_load_media_repo:
+                        media_repo = self.get_media_repository_resource()
+
+                        # We need to serve the admin servlets for media on the
+                        # worker.
+                        admin_resource = JsonResource(self, canonical_json=False)
+                        register_servlets_for_media_repo(self, admin_resource)
+
+                        resources.update(
+                            {
+                                MEDIA_PREFIX: media_repo,
+                                LEGACY_MEDIA_PREFIX: media_repo,
+                                "/_synapse/admin": admin_resource,
+                            }
+                        )
+                    else:
+                        logger.warning(
+                            "A 'media' listener is configured but the media"
+                            " repository is disabled. Ignoring."
+                        )
 
                 if name == "openid" and "federation" not in res["names"]:
                     # Only load the openid resource separately if federation resource
-- 
cgit 1.4.1
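
The change above trades a hard failure for a logged warning: a worker with
the media repository disabled now ignores a configured "media" listener
instead of refusing to start. A condensed sketch of the pattern (hypothetical
names, not the actual GenericWorkerServer code):

    import logging

    logger = logging.getLogger(__name__)


    def build_media_resources(can_load_media_repo: bool) -> dict:
        # Degrade gracefully: register the resource when we can, warn and
        # carry on when we can't, rather than aborting worker startup.
        if can_load_media_repo:
            return {"/_matrix/media": "<media repository resource>"}
        logger.warning(
            "A 'media' listener is configured but the media repository is"
            " disabled. Ignoring."
        )
        return {}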


From 9b06d8f8a62dc5c423aa9a694e0759eaf1c3c77e Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Fri, 28 Feb 2020 10:58:05 +0100
Subject: Fix setting a user as an admin with the new API (#6928)

Fix #6910
---
 changelog.d/6910.bugfix                          |   1 +
 synapse/rest/admin/users.py                      |   6 +-
 synapse/storage/data_stores/main/registration.py |  16 +-
 tests/rest/admin/test_user.py                    | 218 +++++++++++++++++++----
 4 files changed, 199 insertions(+), 42 deletions(-)
 create mode 100644 changelog.d/6910.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6910.bugfix b/changelog.d/6910.bugfix
new file mode 100644
index 0000000000..707f1ff7b5
--- /dev/null
+++ b/changelog.d/6910.bugfix
@@ -0,0 +1 @@
+Fixed setting a user as an admin with the admin API `PUT /_synapse/admin/v2/users/<user_id>`. Contributed by @dklimpel.
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index c5b461a236..80f959248d 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -211,9 +211,7 @@ class UserRestServletV2(RestServlet):
                     if target_user == auth_user and not set_admin_to:
                         raise SynapseError(400, "You may not demote yourself.")
 
-                    await self.admin_handler.set_user_server_admin(
-                        target_user, set_admin_to
-                    )
+                    await self.store.set_server_admin(target_user, set_admin_to)
 
             if "password" in body:
                 if (
@@ -651,6 +649,6 @@ class UserAdminServlet(RestServlet):
         if target_user == auth_user and not set_admin_to:
             raise SynapseError(400, "You may not demote yourself.")
 
-        await self.store.set_user_server_admin(target_user, set_admin_to)
+        await self.store.set_server_admin(target_user, set_admin_to)
 
         return 200, {}
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index 49306642ed..3e53c8568a 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -301,12 +301,16 @@ class RegistrationWorkerStore(SQLBaseStore):
             admin (bool): true iff the user is to be a server admin,
                 false otherwise.
         """
-        return self.db.simple_update_one(
-            table="users",
-            keyvalues={"name": user.to_string()},
-            updatevalues={"admin": 1 if admin else 0},
-            desc="set_server_admin",
-        )
+
+        def set_server_admin_txn(txn):
+            self.db.simple_update_one_txn(
+                txn, "users", {"name": user.to_string()}, {"admin": 1 if admin else 0}
+            )
+            self._invalidate_cache_and_stream(
+                txn, self.get_user_by_id, (user.to_string(),)
+            )
+
+        return self.db.runInteraction("set_server_admin", set_server_admin_txn)
 
     def _query_for_auth(self, txn, token):
         sql = (
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index cbe4a6a51f..6416fb5d2a 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -16,6 +16,7 @@
 import hashlib
 import hmac
 import json
+import urllib.parse
 
 from mock import Mock
 
@@ -371,22 +372,24 @@ class UserRestTestCase(unittest.HomeserverTestCase):
     def prepare(self, reactor, clock, hs):
         self.store = hs.get_datastore()
 
-        self.url = "/_synapse/admin/v2/users/@bob:test"
-
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
 
         self.other_user = self.register_user("user", "pass")
         self.other_user_token = self.login("user", "pass")
+        self.url_other_user = "/_synapse/admin/v2/users/%s" % urllib.parse.quote(
+            self.other_user
+        )
 
     def test_requester_is_no_admin(self):
         """
         If the user is not a server admin, an error is returned.
         """
         self.hs.config.registration_shared_secret = None
+        url = "/_synapse/admin/v2/users/@bob:test"
 
         request, channel = self.make_request(
-            "GET", self.url, access_token=self.other_user_token,
+            "GET", url, access_token=self.other_user_token,
         )
         self.render(request)
 
@@ -394,7 +397,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual("You are not a server admin", channel.json_body["error"])
 
         request, channel = self.make_request(
-            "PUT", self.url, access_token=self.other_user_token, content=b"{}",
+            "PUT", url, access_token=self.other_user_token, content=b"{}",
         )
         self.render(request)
 
@@ -417,24 +420,73 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(404, channel.code, msg=channel.json_body)
         self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"])
 
-    def test_requester_is_admin(self):
+    def test_create_server_admin(self):
         """
-        If the user is a server admin, a new user is created.
+        Check that a new admin user is created successfully.
         """
         self.hs.config.registration_shared_secret = None
+        url = "/_synapse/admin/v2/users/@bob:test"
 
+        # Create user (server admin)
         body = json.dumps(
             {
                 "password": "abc123",
                 "admin": True,
+                "displayname": "Bob's name",
                 "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
             }
         )
 
+        request, channel = self.make_request(
+            "PUT",
+            url,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(True, channel.json_body["admin"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(True, channel.json_body["admin"])
+        self.assertEqual(False, channel.json_body["is_guest"])
+        self.assertEqual(False, channel.json_body["deactivated"])
+
+    def test_create_user(self):
+        """
+        Check that a new regular user is created successfully.
+        """
+        self.hs.config.registration_shared_secret = None
+        url = "/_synapse/admin/v2/users/@bob:test"
+
         # Create user
+        body = json.dumps(
+            {
+                "password": "abc123",
+                "admin": False,
+                "displayname": "Bob's name",
+                "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
+            }
+        )
+
         request, channel = self.make_request(
             "PUT",
-            self.url,
+            url,
             access_token=self.admin_user_tok,
             content=body.encode(encoding="utf_8"),
         )
@@ -442,29 +494,38 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual("@bob:test", channel.json_body["name"])
-        self.assertEqual("bob", channel.json_body["displayname"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
         self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
         self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(False, channel.json_body["admin"])
 
         # Get user
         request, channel = self.make_request(
-            "GET", self.url, access_token=self.admin_user_tok,
+            "GET", url, access_token=self.admin_user_tok,
         )
         self.render(request)
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual("@bob:test", channel.json_body["name"])
-        self.assertEqual("bob", channel.json_body["displayname"])
-        self.assertEqual(1, channel.json_body["admin"])
-        self.assertEqual(0, channel.json_body["is_guest"])
-        self.assertEqual(0, channel.json_body["deactivated"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(False, channel.json_body["admin"])
+        self.assertEqual(False, channel.json_body["is_guest"])
+        self.assertEqual(False, channel.json_body["deactivated"])
+
+    def test_set_password(self):
+        """
+        Test setting a new password for another user.
+        """
+        self.hs.config.registration_shared_secret = None
 
         # Change password
         body = json.dumps({"password": "hahaha"})
 
         request, channel = self.make_request(
             "PUT",
-            self.url,
+            self.url_other_user,
             access_token=self.admin_user_tok,
             content=body.encode(encoding="utf_8"),
         )
@@ -472,41 +533,133 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
 
+    def test_set_displayname(self):
+        """
+        Test setting the displayname of another user.
+        """
+        self.hs.config.registration_shared_secret = None
+
         # Modify user
+        body = json.dumps({"displayname": "foobar"})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url_other_user,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("foobar", channel.json_body["displayname"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_other_user, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("foobar", channel.json_body["displayname"])
+
+    def test_set_threepid(self):
+        """
+        Test setting a threepid for another user.
+        """
+        self.hs.config.registration_shared_secret = None
+
+        # Delete old and add new threepid to user
         body = json.dumps(
-            {
-                "displayname": "foobar",
-                "deactivated": True,
-                "threepids": [{"medium": "email", "address": "bob2@bob.bob"}],
-            }
+            {"threepids": [{"medium": "email", "address": "bob3@bob.bob"}]}
         )
 
         request, channel = self.make_request(
             "PUT",
-            self.url,
+            self.url_other_user,
             access_token=self.admin_user_tok,
             content=body.encode(encoding="utf_8"),
         )
         self.render(request)
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("@bob:test", channel.json_body["name"])
-        self.assertEqual("foobar", channel.json_body["displayname"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_other_user, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"])
+
+    def test_deactivate_user(self):
+        """
+        Test deactivating another user.
+        """
+
+        # Deactivate user
+        body = json.dumps({"deactivated": True})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url_other_user,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(True, channel.json_body["deactivated"])
         # the user is deactivated, the threepid will be deleted
 
         # Get user
         request, channel = self.make_request(
-            "GET", self.url, access_token=self.admin_user_tok,
+            "GET", self.url_other_user, access_token=self.admin_user_tok,
         )
         self.render(request)
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("@bob:test", channel.json_body["name"])
-        self.assertEqual("foobar", channel.json_body["displayname"])
-        self.assertEqual(1, channel.json_body["admin"])
-        self.assertEqual(0, channel.json_body["is_guest"])
-        self.assertEqual(1, channel.json_body["deactivated"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual(True, channel.json_body["deactivated"])
+
+    def test_set_user_as_admin(self):
+        """
+        Test setting the admin flag on a user.
+        """
+        self.hs.config.registration_shared_secret = None
+
+        # Set a user as an admin
+        body = json.dumps({"admin": True})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url_other_user,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual(True, channel.json_body["admin"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_other_user, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual(True, channel.json_body["admin"])
 
     def test_accidental_deactivation_prevention(self):
         """
@@ -514,13 +667,14 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         for the deactivated body parameter
         """
         self.hs.config.registration_shared_secret = None
+        url = "/_synapse/admin/v2/users/@bob:test"
 
         # Create user
         body = json.dumps({"password": "abc123"})
 
         request, channel = self.make_request(
             "PUT",
-            self.url,
+            url,
             access_token=self.admin_user_tok,
             content=body.encode(encoding="utf_8"),
         )
@@ -532,7 +686,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         # Get user
         request, channel = self.make_request(
-            "GET", self.url, access_token=self.admin_user_tok,
+            "GET", url, access_token=self.admin_user_tok,
         )
         self.render(request)
 
@@ -546,7 +700,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         request, channel = self.make_request(
             "PUT",
-            self.url,
+            url,
             access_token=self.admin_user_tok,
             content=body.encode(encoding="utf_8"),
         )
@@ -556,7 +710,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         # Check user is not deactivated
         request, channel = self.make_request(
-            "GET", self.url, access_token=self.admin_user_tok,
+            "GET", url, access_token=self.admin_user_tok,
         )
         self.render(request)
 
-- 
cgit 1.4.1
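
The storage change above moves set_server_admin from a plain
simple_update_one into db.runInteraction, so that the UPDATE and the cache
invalidation for get_user_by_id happen in the same transaction. A generic
sketch of that pattern (the db and cache objects are hypothetical stand-ins
for Synapse's database and cache helpers):

    def set_admin(db, cache, user_id: str, admin: bool):
        def set_admin_txn(txn):
            txn.execute(
                "UPDATE users SET admin = ? WHERE name = ?",
                (1 if admin else 0, user_id),
            )
            # Invalidate inside the same transaction: if the UPDATE rolls
            # back the cached row is kept, and if it commits readers can no
            # longer see the stale cached value.
            cache.invalidate((user_id,))

        return db.runInteraction("set_admin", set_admin_txn)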


From 59ad93d2a415cd07ab6f6afd490d0a5ceeec93a0 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 28 Feb 2020 11:27:37 +0000
Subject: Newsfile

---
 changelog.d/7010.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/7010.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7010.misc b/changelog.d/7010.misc
new file mode 100644
index 0000000000..4ba1f6cdf8
--- /dev/null
+++ b/changelog.d/7010.misc
@@ -0,0 +1 @@
+Change device list streams to have one row per ID.
-- 
cgit 1.4.1


From 12d425900048b29a95b06428f04ed6ecc9e09d15 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Fri, 28 Feb 2020 07:31:07 -0500
Subject:  Add some type annotations to the federation base & client classes
 (#6995)

---
 changelog.d/6995.misc                   |  1 +
 synapse/federation/federation_base.py   | 60 ++++++++++++++++++++-------------
 synapse/federation/federation_client.py | 10 +++---
 tox.ini                                 |  2 ++
 4 files changed, 45 insertions(+), 28 deletions(-)
 create mode 100644 changelog.d/6995.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6995.misc b/changelog.d/6995.misc
new file mode 100644
index 0000000000..884b4cf4ee
--- /dev/null
+++ b/changelog.d/6995.misc
@@ -0,0 +1 @@
+Add some type annotations to the federation base & client classes.
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 9fff65716a..190ea1fba1 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -15,11 +15,13 @@
 # limitations under the License.
 import logging
 from collections import namedtuple
+from typing import Iterable, List
 
 import six
 
 from twisted.internet import defer
-from twisted.internet.defer import DeferredList
+from twisted.internet.defer import Deferred, DeferredList
+from twisted.python.failure import Failure
 
 from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
 from synapse.api.errors import Codes, SynapseError
@@ -29,6 +31,7 @@ from synapse.api.room_versions import (
     RoomVersion,
 )
 from synapse.crypto.event_signing import check_event_content_hash
+from synapse.crypto.keyring import Keyring
 from synapse.events import EventBase, make_event_from_dict
 from synapse.events.utils import prune_event
 from synapse.http.servlet import assert_params_in_dict
@@ -56,7 +59,12 @@ class FederationBase(object):
 
     @defer.inlineCallbacks
     def _check_sigs_and_hash_and_fetch(
-        self, origin, pdus, room_version, outlier=False, include_none=False
+        self,
+        origin: str,
+        pdus: List[EventBase],
+        room_version: str,
+        outlier: bool = False,
+        include_none: bool = False,
     ):
         """Takes a list of PDUs and checks the signatures and hashs of each
         one. If a PDU fails its signature check then we check if we have it in
@@ -69,11 +77,11 @@ class FederationBase(object):
         a new list.
 
         Args:
-            origin (str)
-            pdu (list)
-            room_version (str)
-            outlier (bool): Whether the events are outliers or not
-            include_none (str): Whether to include None in the returned list
+            origin
+            pdus
+            room_version
+            outlier: Whether the events are outliers or not
+            include_none: Whether to include None in the returned list
                 for events that have failed their checks
 
         Returns:
@@ -82,7 +90,7 @@ class FederationBase(object):
         deferreds = self._check_sigs_and_hashes(room_version, pdus)
 
         @defer.inlineCallbacks
-        def handle_check_result(pdu, deferred):
+        def handle_check_result(pdu: EventBase, deferred: Deferred):
             try:
                 res = yield make_deferred_yieldable(deferred)
             except SynapseError:
@@ -96,8 +104,10 @@ class FederationBase(object):
 
             if not res and pdu.origin != origin:
                 try:
+                    # This should not exist in the base implementation; until
+                    # this is fixed, ignore it for typing. See issue #6997.
                     res = yield defer.ensureDeferred(
-                        self.get_pdu(
+                        self.get_pdu(  # type: ignore
                             destinations=[pdu.origin],
                             event_id=pdu.event_id,
                             room_version=room_version,
@@ -127,21 +137,23 @@ class FederationBase(object):
         else:
             return [p for p in valid_pdus if p]
 
-    def _check_sigs_and_hash(self, room_version, pdu):
+    def _check_sigs_and_hash(self, room_version: str, pdu: EventBase) -> Deferred:
         return make_deferred_yieldable(
             self._check_sigs_and_hashes(room_version, [pdu])[0]
         )
 
-    def _check_sigs_and_hashes(self, room_version, pdus):
+    def _check_sigs_and_hashes(
+        self, room_version: str, pdus: List[EventBase]
+    ) -> List[Deferred]:
         """Checks that each of the received events is correctly signed by the
         sending server.
 
         Args:
-            room_version (str): The room version of the PDUs
-            pdus (list[FrozenEvent]): the events to be checked
+            room_version: The room version of the PDUs
+            pdus: the events to be checked
 
         Returns:
-            list[Deferred]: for each input event, a deferred which:
+            For each input event, a deferred which:
               * returns the original event if the checks pass
               * returns a redacted version of the event (if the signature
                 matched but the hash did not)
@@ -152,7 +164,7 @@ class FederationBase(object):
 
         ctx = LoggingContext.current_context()
 
-        def callback(_, pdu):
+        def callback(_, pdu: EventBase):
             with PreserveLoggingContext(ctx):
                 if not check_event_content_hash(pdu):
                     # let's try to distinguish between failures because the event was
@@ -189,7 +201,7 @@ class FederationBase(object):
 
                 return pdu
 
-        def errback(failure, pdu):
+        def errback(failure: Failure, pdu: EventBase):
             failure.trap(SynapseError)
             with PreserveLoggingContext(ctx):
                 logger.warning(
@@ -215,16 +227,18 @@ class PduToCheckSig(
     pass
 
 
-def _check_sigs_on_pdus(keyring, room_version, pdus):
+def _check_sigs_on_pdus(
+    keyring: Keyring, room_version: str, pdus: Iterable[EventBase]
+) -> List[Deferred]:
     """Check that the given events are correctly signed
 
     Args:
-        keyring (synapse.crypto.Keyring): keyring object to do the checks
-        room_version (str): the room version of the PDUs
-        pdus (Collection[EventBase]): the events to be checked
+        keyring: keyring object to do the checks
+        room_version: the room version of the PDUs
+        pdus: the events to be checked
 
     Returns:
-        List[Deferred]: a Deferred for each event in pdus, which will either succeed if
+        A Deferred for each event in pdus, which will either succeed if
            the signatures are valid, or fail (with a SynapseError) if not.
     """
 
@@ -329,7 +343,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
     return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]
 
 
-def _flatten_deferred_list(deferreds):
+def _flatten_deferred_list(deferreds: List[Deferred]) -> Deferred:
     """Given a list of deferreds, either return the single deferred,
     combine into a DeferredList, or return an already resolved deferred.
     """
@@ -341,7 +355,7 @@ def _flatten_deferred_list(deferreds):
         return defer.succeed(None)
 
 
-def _is_invite_via_3pid(event):
+def _is_invite_via_3pid(event: EventBase) -> bool:
     return (
         event.type == EventTypes.Member
         and event.membership == Membership.INVITE
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 4870e39652..b5538bc07a 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -187,7 +187,7 @@ class FederationClient(FederationBase):
 
     async def backfill(
         self, dest: str, room_id: str, limit: int, extremities: Iterable[str]
-    ) -> List[EventBase]:
+    ) -> Optional[List[EventBase]]:
         """Requests some more historic PDUs for the given room from the
         given destination server.
 
@@ -199,9 +199,9 @@ class FederationClient(FederationBase):
         """
         logger.debug("backfill extrem=%s", extremities)
 
-        # If there are no extremeties then we've (probably) reached the start.
+        # If there are no extremities then we've (probably) reached the start.
         if not extremities:
-            return
+            return None
 
         transaction_data = await self.transport_layer.backfill(
             dest, room_id, extremities, limit
@@ -284,7 +284,7 @@ class FederationClient(FederationBase):
                 pdu_list = [
                     event_from_pdu_json(p, room_version, outlier=outlier)
                     for p in transaction_data["pdus"]
-                ]
+                ]  # type: List[EventBase]
 
                 if pdu_list and pdu_list[0]:
                     pdu = pdu_list[0]
@@ -615,7 +615,7 @@ class FederationClient(FederationBase):
             ]
             if auth_chain_create_events != [create_event.event_id]:
                 raise InvalidResponseError(
-                    "Unexpected create event(s) in auth chain"
+                    "Unexpected create event(s) in auth chain: %s"
                     % (auth_chain_create_events,)
                 )
 
diff --git a/tox.ini b/tox.ini
index 6521535137..097ebb8774 100644
--- a/tox.ini
+++ b/tox.ini
@@ -181,6 +181,8 @@ commands = mypy \
             synapse/appservice \
             synapse/config \
             synapse/events/spamcheck.py \
+            synapse/federation/federation_base.py \
+            synapse/federation/federation_client.py \
             synapse/federation/sender \
             synapse/federation/transport \
             synapse/handlers/presence.py \
-- 
cgit 1.4.1
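
With the annotations above, mypy can check Twisted code that fans out one
Deferred per input event. A toy illustration of the List[Deferred] shape
used by _check_sigs_and_hashes (standalone, not Synapse code):

    from typing import List

    from twisted.internet import defer
    from twisted.internet.defer import Deferred


    def check_all(values: List[str]) -> List[Deferred]:
        # One deferred per input; callers can await them individually or
        # combine them with DeferredList, as the federation code does.
        return [defer.succeed(v.upper()) for v in values]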


From e4ffb14d5764d49efc28e7f3970d443eae11f087 Mon Sep 17 00:00:00 2001
From: Uday Bansal <43824981+udaybansal19@users.noreply.github.com>
Date: Sun, 1 Mar 2020 05:07:23 +0530
Subject: Fix last date for ACMEv1 install (#7015)

Support for getting TLS certificates through ACMEv1 ended in November 2019.

Signed-off-by: Uday Bansal <43824981+udaybansal19@users.noreply.github.com>
---
 INSTALL.md            | 2 +-
 changelog.d/7015.misc | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7015.misc

(limited to 'changelog.d')

diff --git a/INSTALL.md b/INSTALL.md
index aa5eb882bb..ffb82bdcc3 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -418,7 +418,7 @@ so, you will need to edit `homeserver.yaml`, as follows:
   for having Synapse automatically provision and renew federation
   certificates through ACME can be found at [ACME.md](docs/ACME.md).
   Note that, as pointed out in that document, this feature will not
-  work with installs set up after November 2020. 
+  work with installs set up after November 2019. 
   
   If you are using your own certificate, be sure to use a `.pem` file that
   includes the full certificate chain including any intermediate certificates
diff --git a/changelog.d/7015.misc b/changelog.d/7015.misc
new file mode 100644
index 0000000000..9709dc606e
--- /dev/null
+++ b/changelog.d/7015.misc
@@ -0,0 +1 @@
+Correct the last date for getting TLS certificates through ACMEv1 in INSTALL.md#tls-certificates to November 2019.
\ No newline at end of file
-- 
cgit 1.4.1


From bbeee33d63c43cb80118c0dccf8abd9d4ac1b8f3 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Fri, 28 Feb 2020 10:58:05 +0100
Subject: Fix setting a user as an admin with the new API (#6928)

Fix #6910
---
 changelog.d/6910.bugfix                          |   1 +
 synapse/rest/admin/users.py                      |   6 +-
 synapse/storage/data_stores/main/registration.py |  16 +-
 tests/rest/admin/test_user.py                    | 209 ++++++++++++++++++++---
 4 files changed, 194 insertions(+), 38 deletions(-)
 create mode 100644 changelog.d/6910.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6910.bugfix b/changelog.d/6910.bugfix
new file mode 100644
index 0000000000..707f1ff7b5
--- /dev/null
+++ b/changelog.d/6910.bugfix
@@ -0,0 +1 @@
+Fixed setting a user as an admin with the admin API `PUT /_synapse/admin/v2/users/<user_id>`. Contributed by @dklimpel.
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 2107b5dc56..064908fbb0 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -211,9 +211,7 @@ class UserRestServletV2(RestServlet):
                     if target_user == auth_user and not set_admin_to:
                         raise SynapseError(400, "You may not demote yourself.")
 
-                    await self.admin_handler.set_user_server_admin(
-                        target_user, set_admin_to
-                    )
+                    await self.store.set_server_admin(target_user, set_admin_to)
 
             if "password" in body:
                 if (
@@ -648,6 +646,6 @@ class UserAdminServlet(RestServlet):
         if target_user == auth_user and not set_admin_to:
             raise SynapseError(400, "You may not demote yourself.")
 
-        await self.store.set_user_server_admin(target_user, set_admin_to)
+        await self.store.set_server_admin(target_user, set_admin_to)
 
         return 200, {}
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index 49306642ed..3e53c8568a 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -301,12 +301,16 @@ class RegistrationWorkerStore(SQLBaseStore):
             admin (bool): true iff the user is to be a server admin,
                 false otherwise.
         """
-        return self.db.simple_update_one(
-            table="users",
-            keyvalues={"name": user.to_string()},
-            updatevalues={"admin": 1 if admin else 0},
-            desc="set_server_admin",
-        )
+
+        def set_server_admin_txn(txn):
+            self.db.simple_update_one_txn(
+                txn, "users", {"name": user.to_string()}, {"admin": 1 if admin else 0}
+            )
+            self._invalidate_cache_and_stream(
+                txn, self.get_user_by_id, (user.to_string(),)
+            )
+
+        return self.db.runInteraction("set_server_admin", set_server_admin_txn)
 
     def _query_for_auth(self, txn, token):
         sql = (
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 490ce8f55d..70688c2494 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -16,6 +16,7 @@
 import hashlib
 import hmac
 import json
+import urllib.parse
 
 from mock import Mock
 
@@ -371,22 +372,24 @@ class UserRestTestCase(unittest.HomeserverTestCase):
     def prepare(self, reactor, clock, hs):
         self.store = hs.get_datastore()
 
-        self.url = "/_synapse/admin/v2/users/@bob:test"
-
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
 
         self.other_user = self.register_user("user", "pass")
         self.other_user_token = self.login("user", "pass")
+        self.url_other_user = "/_synapse/admin/v2/users/%s" % urllib.parse.quote(
+            self.other_user
+        )
 
     def test_requester_is_no_admin(self):
         """
         If the user is not a server admin, an error is returned.
         """
         self.hs.config.registration_shared_secret = None
+        url = "/_synapse/admin/v2/users/@bob:test"
 
         request, channel = self.make_request(
-            "GET", self.url, access_token=self.other_user_token,
+            "GET", url, access_token=self.other_user_token,
         )
         self.render(request)
 
@@ -394,7 +397,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual("You are not a server admin", channel.json_body["error"])
 
         request, channel = self.make_request(
-            "PUT", self.url, access_token=self.other_user_token, content=b"{}",
+            "PUT", url, access_token=self.other_user_token, content=b"{}",
         )
         self.render(request)
 
@@ -417,24 +420,73 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(404, channel.code, msg=channel.json_body)
         self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"])
 
-    def test_requester_is_admin(self):
+    def test_create_server_admin(self):
         """
-        If the user is a server admin, a new user is created.
+        Check that a new admin user is created successfully.
         """
         self.hs.config.registration_shared_secret = None
+        url = "/_synapse/admin/v2/users/@bob:test"
 
+        # Create user (server admin)
         body = json.dumps(
             {
                 "password": "abc123",
                 "admin": True,
+                "displayname": "Bob's name",
                 "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
             }
         )
 
+        request, channel = self.make_request(
+            "PUT",
+            url,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(True, channel.json_body["admin"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(True, channel.json_body["admin"])
+        self.assertEqual(False, channel.json_body["is_guest"])
+        self.assertEqual(False, channel.json_body["deactivated"])
+
+    def test_create_user(self):
+        """
+        Check that a new regular user is created successfully.
+        """
+        self.hs.config.registration_shared_secret = None
+        url = "/_synapse/admin/v2/users/@bob:test"
+
         # Create user
+        body = json.dumps(
+            {
+                "password": "abc123",
+                "admin": False,
+                "displayname": "Bob's name",
+                "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
+            }
+        )
+
         request, channel = self.make_request(
             "PUT",
-            self.url,
+            url,
             access_token=self.admin_user_tok,
             content=body.encode(encoding="utf_8"),
         )
@@ -442,29 +494,38 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual("@bob:test", channel.json_body["name"])
-        self.assertEqual("bob", channel.json_body["displayname"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
         self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
         self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(False, channel.json_body["admin"])
 
         # Get user
         request, channel = self.make_request(
-            "GET", self.url, access_token=self.admin_user_tok,
+            "GET", url, access_token=self.admin_user_tok,
         )
         self.render(request)
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual("@bob:test", channel.json_body["name"])
-        self.assertEqual("bob", channel.json_body["displayname"])
-        self.assertEqual(1, channel.json_body["admin"])
-        self.assertEqual(0, channel.json_body["is_guest"])
-        self.assertEqual(0, channel.json_body["deactivated"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(False, channel.json_body["admin"])
+        self.assertEqual(False, channel.json_body["is_guest"])
+        self.assertEqual(False, channel.json_body["deactivated"])
+
+    def test_set_password(self):
+        """
+        Test setting a new password for another user.
+        """
+        self.hs.config.registration_shared_secret = None
 
         # Change password
         body = json.dumps({"password": "hahaha"})
 
         request, channel = self.make_request(
             "PUT",
-            self.url,
+            self.url_other_user,
             access_token=self.admin_user_tok,
             content=body.encode(encoding="utf_8"),
         )
@@ -472,38 +533,130 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
 
+    def test_set_displayname(self):
+        """
+        Test setting the displayname of another user.
+        """
+        self.hs.config.registration_shared_secret = None
+
         # Modify user
+        body = json.dumps({"displayname": "foobar"})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url_other_user,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("foobar", channel.json_body["displayname"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_other_user, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("foobar", channel.json_body["displayname"])
+
+    def test_set_threepid(self):
+        """
+        Test setting a threepid for another user.
+        """
+        self.hs.config.registration_shared_secret = None
+
+        # Delete old and add new threepid to user
         body = json.dumps(
-            {
-                "displayname": "foobar",
-                "deactivated": True,
-                "threepids": [{"medium": "email", "address": "bob2@bob.bob"}],
-            }
+            {"threepids": [{"medium": "email", "address": "bob3@bob.bob"}]}
         )
 
         request, channel = self.make_request(
             "PUT",
-            self.url,
+            self.url_other_user,
             access_token=self.admin_user_tok,
             content=body.encode(encoding="utf_8"),
         )
         self.render(request)
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("@bob:test", channel.json_body["name"])
-        self.assertEqual("foobar", channel.json_body["displayname"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_other_user, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"])
+
+    def test_deactivate_user(self):
+        """
+        Test deactivating another user.
+        """
+
+        # Deactivate user
+        body = json.dumps({"deactivated": True})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url_other_user,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(True, channel.json_body["deactivated"])
         # the user is deactivated, the threepid will be deleted
 
         # Get user
         request, channel = self.make_request(
-            "GET", self.url, access_token=self.admin_user_tok,
+            "GET", self.url_other_user, access_token=self.admin_user_tok,
         )
         self.render(request)
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("@bob:test", channel.json_body["name"])
-        self.assertEqual("foobar", channel.json_body["displayname"])
-        self.assertEqual(1, channel.json_body["admin"])
-        self.assertEqual(0, channel.json_body["is_guest"])
-        self.assertEqual(1, channel.json_body["deactivated"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual(True, channel.json_body["deactivated"])
+
+    def test_set_user_as_admin(self):
+        """
+        Test setting the admin flag on a user.
+        """
+        self.hs.config.registration_shared_secret = None
+
+        # Set a user as an admin
+        body = json.dumps({"admin": True})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url_other_user,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual(True, channel.json_body["admin"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_other_user, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual(True, channel.json_body["admin"])
-- 
cgit 1.4.1
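
One detail worth noting in the tests above: Matrix user IDs contain "@" and
":", which are not safe in a URL path, so they are percent-encoded with
urllib.parse.quote before being interpolated. For example (standard library
only):

    import urllib.parse

    user_id = "@user:test"
    url = "/_synapse/admin/v2/users/%s" % urllib.parse.quote(user_id)
    # url == "/_synapse/admin/v2/users/%40user%3Atest"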


From 174aaa1d62e54b57499d0606bf0f24bf81c6adf2 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Mon, 2 Mar 2020 14:53:56 +0000
Subject: remove spurious changelog

---
 changelog.d/6910.bugfix | 1 -
 1 file changed, 1 deletion(-)
 delete mode 100644 changelog.d/6910.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6910.bugfix b/changelog.d/6910.bugfix
deleted file mode 100644
index 707f1ff7b5..0000000000
--- a/changelog.d/6910.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fixed setting a user as an admin with the admin API `PUT /_synapse/admin/v2/users/<user_id>`. Contributed by @dklimpel.
-- 
cgit 1.4.1


From 3ab8e9c2932476d18af94b6c60cc3613139148ec Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 2 Mar 2020 16:17:11 +0000
Subject: Fix py35-old CI by using native tox. (#7018)

I'm not really sure how this was going wrong, but this seems like the
right approach anyway.
---
 .buildkite/scripts/test_old_deps.sh | 7 +------
 changelog.d/7018.bugfix             | 1 +
 2 files changed, 2 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/7018.bugfix

(limited to 'changelog.d')

diff --git a/.buildkite/scripts/test_old_deps.sh b/.buildkite/scripts/test_old_deps.sh
index dfd71b2511..cdb77b556c 100755
--- a/.buildkite/scripts/test_old_deps.sh
+++ b/.buildkite/scripts/test_old_deps.sh
@@ -6,12 +6,7 @@
 set -ex
 
 apt-get update
-apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev
-
-# workaround for https://github.com/jaraco/zipp/issues/40
-python3.5 -m pip install 'setuptools>=34.4.0'
-
-python3.5 -m pip install tox
+apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev tox
 
 export LANG="C.UTF-8"
 
diff --git a/changelog.d/7018.bugfix b/changelog.d/7018.bugfix
new file mode 100644
index 0000000000..d1b6c1d464
--- /dev/null
+++ b/changelog.d/7018.bugfix
@@ -0,0 +1 @@
+Fix py35-old CI by using native tox package.
-- 
cgit 1.4.1


From b29474e0aa866a50ec96cd921cc5025fc9718e73 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 2 Mar 2020 16:52:15 +0000
Subject: Always return a deferred from `get_current_state_deltas`. (#7019)

This currently causes the presence notify code to log exceptions when there
are no state changes to process. This doesn't actually cause any problems,
as we'd simply do nothing anyway.
---
 changelog.d/7019.misc                            | 1 +
 synapse/storage/data_stores/main/state_deltas.py | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7019.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7019.misc b/changelog.d/7019.misc
new file mode 100644
index 0000000000..5130f4e8af
--- /dev/null
+++ b/changelog.d/7019.misc
@@ -0,0 +1 @@
+Port `synapse.handlers.presence` to async/await.
diff --git a/synapse/storage/data_stores/main/state_deltas.py b/synapse/storage/data_stores/main/state_deltas.py
index 12c982cb26..725e12507f 100644
--- a/synapse/storage/data_stores/main/state_deltas.py
+++ b/synapse/storage/data_stores/main/state_deltas.py
@@ -15,6 +15,8 @@
 
 import logging
 
+from twisted.internet import defer
+
 from synapse.storage._base import SQLBaseStore
 
 logger = logging.getLogger(__name__)
@@ -56,7 +58,7 @@ class StateDeltasStore(SQLBaseStore):
             # if the CSDs haven't changed between prev_stream_id and now, we
             # know for certain that they haven't changed between prev_stream_id and
             # max_stream_id.
-            return max_stream_id, []
+            return defer.succeed((max_stream_id, []))
 
         def get_current_state_deltas_txn(txn):
             # First we calculate the max stream id that will give us less than
-- 
cgit 1.4.1

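The fix above is about API consistency rather than behaviour: `get_current_state_deltas` sometimes returned a plain tuple and sometimes a Deferred, so callers treating the result as a Deferred logged exceptions on the early-return path. A minimal sketch of the failure mode, using toy functions rather than Synapse's actual storage code:

    from twisted.internet import defer

    def get_deltas_inconsistent(has_changes):
        if not has_changes:
            return (7, [])  # plain tuple: Deferred-style callers break here
        return defer.succeed((8, [{"event_id": "$x"}]))

    def get_deltas_consistent(has_changes):
        if not has_changes:
            # always wrap the early return in a Deferred, mirroring the fix
            return defer.succeed((7, []))
        return defer.succeed((8, [{"event_id": "$x"}]))

    # Callers can now uniformly attach callbacks to the result:
    get_deltas_consistent(False).addCallback(print)  # prints (7, [])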

From 7dcbc33a1be04c46b930699c03c15bc759f4b22c Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Tue, 3 Mar 2020 07:12:45 -0500
Subject: Validate the alt_aliases property of canonical alias events (#6971)

---
 changelog.d/6971.feature           |   1 +
 synapse/api/errors.py              |   1 +
 synapse/handlers/directory.py      |  14 ++--
 synapse/handlers/message.py        |  47 ++++++++++-
 synapse/types.py                   |  15 ++--
 tests/handlers/test_directory.py   |  66 +++++++--------
 tests/rest/client/v1/test_rooms.py | 160 +++++++++++++++++++++++++++++++++++++
 tests/test_types.py                |   2 +-
 8 files changed, 254 insertions(+), 52 deletions(-)
 create mode 100644 changelog.d/6971.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6971.feature b/changelog.d/6971.feature
new file mode 100644
index 0000000000..ccf02a61df
--- /dev/null
+++ b/changelog.d/6971.feature
@@ -0,0 +1 @@
+Validate the alt_aliases property of canonical alias events.
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 0c20601600..616942b057 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -66,6 +66,7 @@ class Codes(object):
     EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
     INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
     USER_DEACTIVATED = "M_USER_DEACTIVATED"
+    BAD_ALIAS = "M_BAD_ALIAS"
 
 
 class CodeMessageException(RuntimeError):
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 0b23ca919a..61eb49059b 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -13,8 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-import collections
 import logging
 import string
 from typing import List
@@ -307,15 +305,17 @@ class DirectoryHandler(BaseHandler):
             send_update = True
             content.pop("alias", "")
 
-        # Filter alt_aliases for the removed alias.
-        alt_aliases = content.pop("alt_aliases", None)
-        # If the aliases are not a list (or not found) do not attempt to modify
-        # the list.
-        if isinstance(alt_aliases, collections.Sequence):
+        # Filter the alt_aliases property for the removed alias. Note that the
+        # value is not modified if alt_aliases is of an unexpected form.
+        alt_aliases = content.get("alt_aliases")
+        if isinstance(alt_aliases, (list, tuple)) and alias_str in alt_aliases:
             send_update = True
             alt_aliases = [alias for alias in alt_aliases if alias != alias_str]
+
             if alt_aliases:
                 content["alt_aliases"] = alt_aliases
+            else:
+                del content["alt_aliases"]
 
         if send_update:
             yield self.event_creation_handler.create_and_send_nonmember_event(
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index a0103addd3..0c84c6cec4 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -888,19 +888,60 @@ class EventCreationHandler(object):
         yield self.base_handler.maybe_kick_guest_users(event, context)
 
         if event.type == EventTypes.CanonicalAlias:
-            # Check the alias is acually valid (at this time at least)
+            # Validate a newly added alias or newly added alt_aliases.
+
+            original_alias = None
+            original_alt_aliases = set()
+
+            original_event_id = event.unsigned.get("replaces_state")
+            if original_event_id:
+                original_event = yield self.store.get_event(original_event_id)
+
+                if original_event:
+                    original_alias = original_event.content.get("alias", None)
+                    original_alt_aliases = original_event.content.get("alt_aliases", [])
+
+            # Check the alias is currently valid (if it has changed).
             room_alias_str = event.content.get("alias", None)
-            if room_alias_str:
+            directory_handler = self.hs.get_handlers().directory_handler
+            if room_alias_str and room_alias_str != original_alias:
                 room_alias = RoomAlias.from_string(room_alias_str)
-                directory_handler = self.hs.get_handlers().directory_handler
                 mapping = yield directory_handler.get_association(room_alias)
 
                 if mapping["room_id"] != event.room_id:
                     raise SynapseError(
                         400,
                         "Room alias %s does not point to the room" % (room_alias_str,),
+                        Codes.BAD_ALIAS,
                     )
 
+            # Check that alt_aliases is the proper form.
+            alt_aliases = event.content.get("alt_aliases", [])
+            if not isinstance(alt_aliases, (list, tuple)):
+                raise SynapseError(
+                    400, "The alt_aliases property must be a list.", Codes.INVALID_PARAM
+                )
+
+            # If the old version of alt_aliases is of an unknown form,
+            # completely replace it.
+            if not isinstance(original_alt_aliases, (list, tuple)):
+                original_alt_aliases = []
+
+            # Check that each alias is currently valid.
+            new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
+            if new_alt_aliases:
+                for alias_str in new_alt_aliases:
+                    room_alias = RoomAlias.from_string(alias_str)
+                    mapping = yield directory_handler.get_association(room_alias)
+
+                    if mapping["room_id"] != event.room_id:
+                        raise SynapseError(
+                            400,
+                            "Room alias %s does not point to the room"
+                            % (alias_str,),
+                            Codes.BAD_ALIAS,
+                        )
+
         federation_handler = self.hs.get_handlers().federation_handler
 
         if event.type == EventTypes.Member:
diff --git a/synapse/types.py b/synapse/types.py
index f3cd465735..acf60baddc 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -23,7 +23,7 @@ import attr
 from signedjson.key import decode_verify_key_bytes
 from unpaddedbase64 import decode_base64
 
-from synapse.api.errors import SynapseError
+from synapse.api.errors import Codes, SynapseError
 
 # define a version of typing.Collection that works on python 3.5
 if sys.version_info[:3] >= (3, 6, 0):
@@ -166,11 +166,13 @@ class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "dom
         return self
 
     @classmethod
-    def from_string(cls, s):
+    def from_string(cls, s: str):
         """Parse the string given by 's' into a structure object."""
         if len(s) < 1 or s[0:1] != cls.SIGIL:
             raise SynapseError(
-                400, "Expected %s string to start with '%s'" % (cls.__name__, cls.SIGIL)
+                400,
+                "Expected %s string to start with '%s'" % (cls.__name__, cls.SIGIL),
+                Codes.INVALID_PARAM,
             )
 
         parts = s[1:].split(":", 1)
@@ -179,6 +181,7 @@ class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "dom
                 400,
                 "Expected %s of the form '%slocalname:domain'"
                 % (cls.__name__, cls.SIGIL),
+                Codes.INVALID_PARAM,
             )
 
         domain = parts[1]
@@ -235,11 +238,13 @@ class GroupID(DomainSpecificString):
     def from_string(cls, s):
         group_id = super(GroupID, cls).from_string(s)
         if not group_id.localpart:
-            raise SynapseError(400, "Group ID cannot be empty")
+            raise SynapseError(400, "Group ID cannot be empty", Codes.INVALID_PARAM)
 
         if contains_invalid_mxid_characters(group_id.localpart):
             raise SynapseError(
-                400, "Group ID can only contain characters a-z, 0-9, or '=_-./'"
+                400,
+                "Group ID can only contain characters a-z, 0-9, or '=_-./'",
+                Codes.INVALID_PARAM,
             )
 
         return group_id
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 27b916aed4..3397cfa485 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -88,6 +88,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         )
 
     def test_delete_alias_not_allowed(self):
+        """Removing an alias should be denied if a user does not have the proper permissions."""
         room_id = "!8765qwer:test"
         self.get_success(
             self.store.create_room_alias_association(self.my_room, room_id, ["test"])
@@ -101,6 +102,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         )
 
     def test_delete_alias(self):
+        """Removing an alias should work when a user does has the proper permissions."""
         room_id = "!8765qwer:test"
         user_id = "@user:test"
         self.get_success(
@@ -159,30 +161,42 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase):
         )
 
         self.test_alias = "#test:test"
-        self.room_alias = RoomAlias.from_string(self.test_alias)
+        self.room_alias = self._add_alias(self.test_alias)
+
+    def _add_alias(self, alias: str) -> RoomAlias:
+        """Add an alias to the test room."""
+        room_alias = RoomAlias.from_string(alias)
 
         # Create a new alias to this room.
         self.get_success(
             self.store.create_room_alias_association(
-                self.room_alias, self.room_id, ["test"], self.admin_user
+                room_alias, self.room_id, ["test"], self.admin_user
             )
         )
+        return room_alias
 
-    def test_remove_alias(self):
-        """Removing an alias that is the canonical alias should remove it there too."""
-        # Set this new alias as the canonical alias for this room
+    def _set_canonical_alias(self, content):
+        """Configure the canonical alias state on the room."""
         self.helper.send_state(
-            self.room_id,
-            "m.room.canonical_alias",
-            {"alias": self.test_alias, "alt_aliases": [self.test_alias]},
-            tok=self.admin_user_tok,
+            self.room_id, "m.room.canonical_alias", content, tok=self.admin_user_tok,
         )
 
-        data = self.get_success(
+    def _get_canonical_alias(self):
+        """Get the canonical alias state of the room."""
+        return self.get_success(
             self.state_handler.get_current_state(
                 self.room_id, EventTypes.CanonicalAlias, ""
             )
         )
+
+    def test_remove_alias(self):
+        """Removing an alias that is the canonical alias should remove it there too."""
+        # Set this new alias as the canonical alias for this room
+        self._set_canonical_alias(
+            {"alias": self.test_alias, "alt_aliases": [self.test_alias]}
+        )
+
+        data = self._get_canonical_alias()
         self.assertEqual(data["content"]["alias"], self.test_alias)
         self.assertEqual(data["content"]["alt_aliases"], [self.test_alias])
 
@@ -193,11 +207,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase):
             )
         )
 
-        data = self.get_success(
-            self.state_handler.get_current_state(
-                self.room_id, EventTypes.CanonicalAlias, ""
-            )
-        )
+        data = self._get_canonical_alias()
         self.assertNotIn("alias", data["content"])
         self.assertNotIn("alt_aliases", data["content"])
 
@@ -205,29 +215,17 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase):
         """Removing an alias listed as in alt_aliases should remove it there too."""
         # Create a second alias.
         other_test_alias = "#test2:test"
-        other_room_alias = RoomAlias.from_string(other_test_alias)
-        self.get_success(
-            self.store.create_room_alias_association(
-                other_room_alias, self.room_id, ["test"], self.admin_user
-            )
-        )
+        other_room_alias = self._add_alias(other_test_alias)
 
         # Set the alias as the canonical alias for this room.
-        self.helper.send_state(
-            self.room_id,
-            "m.room.canonical_alias",
+        self._set_canonical_alias(
             {
                 "alias": self.test_alias,
                 "alt_aliases": [self.test_alias, other_test_alias],
-            },
-            tok=self.admin_user_tok,
+            }
         )
 
-        data = self.get_success(
-            self.state_handler.get_current_state(
-                self.room_id, EventTypes.CanonicalAlias, ""
-            )
-        )
+        data = self._get_canonical_alias()
         self.assertEqual(data["content"]["alias"], self.test_alias)
         self.assertEqual(
             data["content"]["alt_aliases"], [self.test_alias, other_test_alias]
@@ -240,11 +238,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase):
             )
         )
 
-        data = self.get_success(
-            self.state_handler.get_current_state(
-                self.room_id, EventTypes.CanonicalAlias, ""
-            )
-        )
+        data = self._get_canonical_alias()
         self.assertEqual(data["content"]["alias"], self.test_alias)
         self.assertEqual(data["content"]["alt_aliases"], [self.test_alias])
 
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index 2f3df5f88f..7dd86d0c27 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -1821,3 +1821,163 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase):
         )
         self.render(request)
         self.assertEqual(channel.code, expected_code, channel.result)
+
+
+class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        directory.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, homeserver):
+        self.room_owner = self.register_user("room_owner", "test")
+        self.room_owner_tok = self.login("room_owner", "test")
+
+        self.room_id = self.helper.create_room_as(
+            self.room_owner, tok=self.room_owner_tok
+        )
+
+        self.alias = "#alias:test"
+        self._set_alias_via_directory(self.alias)
+
+    def _set_alias_via_directory(self, alias: str, expected_code: int = 200):
+        url = "/_matrix/client/r0/directory/room/" + alias
+        data = {"room_id": self.room_id}
+        request_data = json.dumps(data)
+
+        request, channel = self.make_request(
+            "PUT", url, request_data, access_token=self.room_owner_tok
+        )
+        self.render(request)
+        self.assertEqual(channel.code, expected_code, channel.result)
+
+    def _get_canonical_alias(self, expected_code: int = 200) -> JsonDict:
+        """Calls the endpoint under test. returns the json response object."""
+        request, channel = self.make_request(
+            "GET",
+            "rooms/%s/state/m.room.canonical_alias" % (self.room_id,),
+            access_token=self.room_owner_tok,
+        )
+        self.render(request)
+        self.assertEqual(channel.code, expected_code, channel.result)
+        res = channel.json_body
+        self.assertIsInstance(res, dict)
+        return res
+
+    def _set_canonical_alias(self, content: JsonDict, expected_code: int = 200) -> JsonDict:
+        """Calls the endpoint under test. Returns the JSON response object."""
+        request, channel = self.make_request(
+            "PUT",
+            "rooms/%s/state/m.room.canonical_alias" % (self.room_id,),
+            json.dumps(content),
+            access_token=self.room_owner_tok,
+        )
+        self.render(request)
+        self.assertEqual(channel.code, expected_code, channel.result)
+        res = channel.json_body
+        self.assertIsInstance(res, dict)
+        return res
+
+    def test_canonical_alias(self):
+        """Test a basic alias message."""
+        # There is no canonical alias to start with.
+        self._get_canonical_alias(expected_code=404)
+
+        # Create an alias.
+        self._set_canonical_alias({"alias": self.alias})
+
+        # Canonical alias now exists!
+        res = self._get_canonical_alias()
+        self.assertEqual(res, {"alias": self.alias})
+
+        # Now remove the alias.
+        self._set_canonical_alias({})
+
+        # There is an alias event, but it is empty.
+        res = self._get_canonical_alias()
+        self.assertEqual(res, {})
+
+    def test_alt_aliases(self):
+        """Test a canonical alias message with alt_aliases."""
+        # Create an alias.
+        self._set_canonical_alias({"alt_aliases": [self.alias]})
+
+        # Canonical alias now exists!
+        res = self._get_canonical_alias()
+        self.assertEqual(res, {"alt_aliases": [self.alias]})
+
+        # Now remove the alt_aliases.
+        self._set_canonical_alias({})
+
+        # There is an alias event, but it is empty.
+        res = self._get_canonical_alias()
+        self.assertEqual(res, {})
+
+    def test_alias_alt_aliases(self):
+        """Test a canonical alias message with an alias and alt_aliases."""
+        # Create an alias.
+        self._set_canonical_alias({"alias": self.alias, "alt_aliases": [self.alias]})
+
+        # Canonical alias now exists!
+        res = self._get_canonical_alias()
+        self.assertEqual(res, {"alias": self.alias, "alt_aliases": [self.alias]})
+
+        # Now remove the alias and alt_aliases.
+        self._set_canonical_alias({})
+
+        # There is an alias event, but it is empty.
+        res = self._get_canonical_alias()
+        self.assertEqual(res, {})
+
+    def test_partial_modify(self):
+        """Test removing only the alt_aliases."""
+        # Create an alias.
+        self._set_canonical_alias({"alias": self.alias, "alt_aliases": [self.alias]})
+
+        # Canonical alias now exists!
+        res = self._get_canonical_alias()
+        self.assertEqual(res, {"alias": self.alias, "alt_aliases": [self.alias]})
+
+        # Now remove the alt_aliases.
+        self._set_canonical_alias({"alias": self.alias})
+
+        # The alt_aliases are now gone, but the alias remains.
+        res = self._get_canonical_alias()
+        self.assertEqual(res, {"alias": self.alias})
+
+    def test_add_alias(self):
+        """Test removing only the alt_aliases."""
+        # Create an additional alias.
+        second_alias = "#second:test"
+        self._set_alias_via_directory(second_alias)
+
+        # Add the canonical alias.
+        self._set_canonical_alias({"alias": self.alias, "alt_aliases": [self.alias]})
+
+        # Then add the second alias.
+        self._set_canonical_alias(
+            {"alias": self.alias, "alt_aliases": [self.alias, second_alias]}
+        )
+
+        # Canonical alias now exists!
+        res = self._get_canonical_alias()
+        self.assertEqual(
+            res, {"alias": self.alias, "alt_aliases": [self.alias, second_alias]}
+        )
+
+    def test_bad_data(self):
+        """Invalid data for alt_aliases should cause errors."""
+        self._set_canonical_alias({"alt_aliases": "@bad:test"}, expected_code=400)
+        self._set_canonical_alias({"alt_aliases": None}, expected_code=400)
+        self._set_canonical_alias({"alt_aliases": 0}, expected_code=400)
+        self._set_canonical_alias({"alt_aliases": 1}, expected_code=400)
+        self._set_canonical_alias({"alt_aliases": False}, expected_code=400)
+        self._set_canonical_alias({"alt_aliases": True}, expected_code=400)
+        self._set_canonical_alias({"alt_aliases": {}}, expected_code=400)
+
+    def test_bad_alias(self):
+        """An alias which does not point to the room raises a SynapseError."""
+        self._set_canonical_alias({"alias": "@unknown:test"}, expected_code=400)
+        self._set_canonical_alias({"alt_aliases": ["@unknown:test"]}, expected_code=400)
diff --git a/tests/test_types.py b/tests/test_types.py
index 8d97c751ea..480bea1bdc 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -75,7 +75,7 @@ class GroupIDTestCase(unittest.TestCase):
                 self.fail("Parsing '%s' should raise exception" % id_string)
             except SynapseError as exc:
                 self.assertEqual(400, exc.code)
-                self.assertEqual("M_UNKNOWN", exc.errcode)
+                self.assertEqual("M_INVALID_PARAM", exc.errcode)
 
 
 class MapUsernameTestCase(unittest.TestCase):
-- 
cgit 1.4.1

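The validation added by #6971 reduces to three rules: a changed `alias` must resolve to the event's room, `alt_aliases` must be a list (or tuple), and only aliases that are new relative to the previous canonical alias event are re-checked against the directory. A standalone sketch of that logic, assuming a hypothetical `resolve_alias` callable in place of Synapse's directory handler:

    def validate_canonical_alias(content, original_content, resolve_alias, room_id):
        """Raise ValueError if the canonical alias content fails validation."""
        alias = content.get("alias")
        if alias and alias != original_content.get("alias"):
            if resolve_alias(alias) != room_id:
                raise ValueError("M_BAD_ALIAS: alias does not point to the room")

        alt_aliases = content.get("alt_aliases", [])
        if not isinstance(alt_aliases, (list, tuple)):
            raise ValueError("M_INVALID_PARAM: alt_aliases must be a list")

        original_alt = original_content.get("alt_aliases", [])
        if not isinstance(original_alt, (list, tuple)):
            original_alt = []  # unknown previous form: re-check every entry

        for new_alias in set(alt_aliases) - set(original_alt):
            if resolve_alias(new_alias) != room_id:
                raise ValueError("M_BAD_ALIAS: %s does not point to the room" % new_alias)

    # Toy directory containing a single known alias:
    directory = {"#a:test": "!room:test"}
    validate_canonical_alias(
        {"alias": "#a:test", "alt_aliases": ["#a:test"]}, {}, directory.get, "!room:test"
    )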

From fd983fad968941987314501b67147a264e2e927a Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 3 Mar 2020 14:58:37 +0000
Subject: v1.11.1

---
 CHANGES.md              | 15 +++++++++++++++
 changelog.d/6910.bugfix |  1 -
 changelog.d/6996.bugfix |  1 -
 synapse/__init__.py     |  2 +-
 4 files changed, 16 insertions(+), 3 deletions(-)
 delete mode 100644 changelog.d/6910.bugfix
 delete mode 100644 changelog.d/6996.bugfix

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index ff681762cd..dc9ca05ad1 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,18 @@
+Synapse 1.11.1 (2020-03-03)
+===========================
+
+This release includes a security fix impacting installations using Single Sign-On (i.e. SAML2 or CAS) for authentication. Administrators of such installations are encouraged to upgrade as soon as possible.
+
+The release also includes fixes for a couple of other bugs.
+
+Bugfixes
+--------
+
+- Add a confirmation step to the SSO login flow before redirecting users to the redirect URL. ([b2bd54a2](https://github.com/matrix-org/synapse/commit/b2bd54a2e31d9a248f73fadb184ae9b4cbdb49f9), [65c73cdf](https://github.com/matrix-org/synapse/commit/65c73cdfec1876a9fec2fd2c3a74923cd146fe0b), [a0178df1](https://github.com/matrix-org/synapse/commit/a0178df10422a76fd403b82d2b2a4ed28a9a9d1e))
+- Fixed setting a user as an admin with the admin API `PUT /_synapse/admin/v2/users/<user_id>`. Contributed by @dklimpel. ([\#6910](https://github.com/matrix-org/synapse/issues/6910))
+- Fix bug introduced in Synapse 1.11.0 which sometimes caused errors when joining rooms over federation, with `'coroutine' object has no attribute 'event_id'`. ([\#6996](https://github.com/matrix-org/synapse/issues/6996))
+
+
 Synapse 1.11.0 (2020-02-21)
 ===========================
 
diff --git a/changelog.d/6910.bugfix b/changelog.d/6910.bugfix
deleted file mode 100644
index 707f1ff7b5..0000000000
--- a/changelog.d/6910.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fixed set a user as an admin with the admin API `PUT /_synapse/admin/v2/users/<user_id>`. Contributed by @dklimpel.
diff --git a/changelog.d/6996.bugfix b/changelog.d/6996.bugfix
deleted file mode 100644
index 765d376c7c..0000000000
--- a/changelog.d/6996.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug which caused an error when joining a room, with `'coroutine' object has no attribute 'event_id'`.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 3406ce634f..e56ba89ff4 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.11.0"
+__version__ = "1.11.1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From 8ef8fb2c1c7c4aeb80fce4deea477b37754ce539 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 4 Mar 2020 13:11:04 +0000
Subject: Read the room version from database when fetching events (#6874)

This is a precursor to giving EventBase objects the knowledge of which room version they belong to.
---
 changelog.d/6874.misc                             |  1 +
 synapse/storage/data_stores/main/events_worker.py | 84 ++++++++++++++++++-----
 tests/replication/slave/storage/test_events.py    | 10 +++
 3 files changed, 79 insertions(+), 16 deletions(-)
 create mode 100644 changelog.d/6874.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6874.misc b/changelog.d/6874.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6874.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py
index 47a3a26072..ca237c6f12 100644
--- a/synapse/storage/data_stores/main/events_worker.py
+++ b/synapse/storage/data_stores/main/events_worker.py
@@ -28,9 +28,12 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
 from synapse.api.errors import NotFoundError
-from synapse.api.room_versions import EventFormatVersions
-from synapse.events import FrozenEvent, event_type_from_format_version  # noqa: F401
-from synapse.events.snapshot import EventContext  # noqa: F401
+from synapse.api.room_versions import (
+    KNOWN_ROOM_VERSIONS,
+    EventFormatVersions,
+    RoomVersions,
+)
+from synapse.events import make_event_from_dict
 from synapse.events.utils import prune_event
 from synapse.logging.context import LoggingContext, PreserveLoggingContext
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -580,8 +583,49 @@ class EventsWorkerStore(SQLBaseStore):
                 # of a event format version, so it must be a V1 event.
                 format_version = EventFormatVersions.V1
 
-            original_ev = event_type_from_format_version(format_version)(
+            room_version_id = row["room_version_id"]
+
+            if not room_version_id:
+                # this should only happen for out-of-band membership events
+                if not internal_metadata.get("out_of_band_membership"):
+                    logger.warning(
+                        "Room %s for event %s is unknown", d["room_id"], event_id
+                    )
+                    continue
+
+                # take a wild stab at the room version based on the event format
+                if format_version == EventFormatVersions.V1:
+                    room_version = RoomVersions.V1
+                elif format_version == EventFormatVersions.V2:
+                    room_version = RoomVersions.V3
+                else:
+                    room_version = RoomVersions.V5
+            else:
+                room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
+                if not room_version:
+                    logger.error(
+                        "Event %s in room %s has unknown room version %s",
+                        event_id,
+                        d["room_id"],
+                        room_version_id,
+                    )
+                    continue
+
+                if room_version.event_format != format_version:
+                    logger.error(
+                        "Event %s in room %s with version %s has wrong format: "
+                        "expected %s, was %s",
+                        event_id,
+                        d["room_id"],
+                        room_version_id,
+                        room_version.event_format,
+                        format_version,
+                    )
+                    continue
+
+            original_ev = make_event_from_dict(
                 event_dict=d,
+                room_version=room_version,
                 internal_metadata_dict=internal_metadata,
                 rejected_reason=rejected_reason,
             )
@@ -661,6 +705,12 @@ class EventsWorkerStore(SQLBaseStore):
            of EventFormatVersions. 'None' means the event predates
            EventFormatVersions (so the event is format V1).
 
+         * room_version_id (str|None): The version of the room which contains the event.
+           Hopefully one of RoomVersions.
+
+           Due to historical reasons, there may be a few events in the database which
+           do not have an associated room; in this case None will be returned here.
+
          * rejected_reason (str|None): if the event was rejected, the reason
            why.
 
@@ -676,17 +726,18 @@ class EventsWorkerStore(SQLBaseStore):
         """
         event_dict = {}
         for evs in batch_iter(event_ids, 200):
-            sql = (
-                "SELECT "
-                " e.event_id, "
-                " e.internal_metadata,"
-                " e.json,"
-                " e.format_version, "
-                " rej.reason "
-                " FROM event_json as e"
-                " LEFT JOIN rejections as rej USING (event_id)"
-                " WHERE "
-            )
+            sql = """\
+                SELECT
+                  e.event_id,
+                  e.internal_metadata,
+                  e.json,
+                  e.format_version,
+                  r.room_version,
+                  rej.reason
+                FROM event_json as e
+                  LEFT JOIN rooms r USING (room_id)
+                  LEFT JOIN rejections as rej USING (event_id)
+                WHERE """
 
             clause, args = make_in_list_sql_clause(
                 txn.database_engine, "e.event_id", evs
@@ -701,7 +752,8 @@ class EventsWorkerStore(SQLBaseStore):
                     "internal_metadata": row[1],
                     "json": row[2],
                     "format_version": row[3],
-                    "rejected_reason": row[4],
+                    "room_version_id": row[4],
+                    "rejected_reason": row[5],
                     "redactions": [],
                 }
 
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index d31210fbe4..f0561b30e3 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -15,6 +15,7 @@ import logging
 
 from canonicaljson import encode_canonical_json
 
+from synapse.api.room_versions import RoomVersions
 from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_dict
 from synapse.events.snapshot import EventContext
 from synapse.handlers.room import RoomEventSource
@@ -58,6 +59,15 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
         self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(FrozenEvent)]
         return super(SlavedEventStoreTestCase, self).setUp()
 
+    def prepare(self, *args, **kwargs):
+        super().prepare(*args, **kwargs)
+
+        self.get_success(
+            self.master_store.store_room(
+                ROOM_ID, USER_ID, is_public=False, room_version=RoomVersions.V1,
+            )
+        )
+
     def tearDown(self):
         [unpatch() for unpatch in self.unpatches]
 
-- 
cgit 1.4.1

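One detail of the patch above worth spelling out: when an event row carries no `room_version_id` (historically possible for out-of-band membership events), the store guesses a room version from the event format version. A simplified sketch of that fallback, with plain strings standing in for the `synapse.api.room_versions` constants:

    FORMAT_V1, FORMAT_V2, FORMAT_V3 = 1, 2, 3  # stand-ins for EventFormatVersions

    def guess_room_version(format_version):
        # "take a wild stab", as the patch puts it: format V1 is assumed to be
        # room v1, format V2 is only used by room v3, and anything newer is
        # assumed to be room v5.
        if format_version == FORMAT_V1:
            return "1"
        elif format_version == FORMAT_V2:
            return "3"
        return "5"

    assert guess_room_version(FORMAT_V1) == "1"
    assert guess_room_version(FORMAT_V3) == "5"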

From 13892776ef7e0b1af2f82c9ca53f7bbd1c60d66f Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Wed, 4 Mar 2020 11:30:46 -0500
Subject: Allow deleting an alias if the user has sufficient power level
 (#6986)

---
 changelog.d/6986.feature         |   1 +
 synapse/api/auth.py              |   9 +--
 synapse/handlers/directory.py    | 107 ++++++++++++++++++++++----------
 tests/handlers/test_directory.py | 128 +++++++++++++++++++++++++++++++--------
 tox.ini                          |   1 +
 5 files changed, 182 insertions(+), 64 deletions(-)
 create mode 100644 changelog.d/6986.feature

(limited to 'changelog.d')

diff --git a/changelog.d/6986.feature b/changelog.d/6986.feature
new file mode 100644
index 0000000000..16dea8bd7f
--- /dev/null
+++ b/changelog.d/6986.feature
@@ -0,0 +1 @@
+Users with a power level sufficient to modify the canonical alias of a room can now delete room aliases.
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 5ca18b4301..c1ade1333b 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -539,7 +539,7 @@ class Auth(object):
 
     @defer.inlineCallbacks
     def check_can_change_room_list(self, room_id: str, user: UserID):
-        """Check if the user is allowed to edit the room's entry in the
+        """Determine whether the user is allowed to edit the room's entry in the
         published room list.
 
         Args:
@@ -570,12 +570,7 @@ class Auth(object):
         )
         user_level = event_auth.get_user_power_level(user_id, auth_events)
 
-        if user_level < send_level:
-            raise AuthError(
-                403,
-                "This server requires you to be a moderator in the room to"
-                " edit its room list entry",
-            )
+        return user_level >= send_level
 
     @staticmethod
     def has_access_token(request):
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 61eb49059b..1d842c369b 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -15,7 +15,7 @@
 
 import logging
 import string
-from typing import List
+from typing import Iterable, List, Optional
 
 from twisted.internet import defer
 
@@ -28,6 +28,7 @@ from synapse.api.errors import (
     StoreError,
     SynapseError,
 )
+from synapse.appservice import ApplicationService
 from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id
 
 from ._base import BaseHandler
@@ -55,7 +56,13 @@ class DirectoryHandler(BaseHandler):
         self.spam_checker = hs.get_spam_checker()
 
     @defer.inlineCallbacks
-    def _create_association(self, room_alias, room_id, servers=None, creator=None):
+    def _create_association(
+        self,
+        room_alias: RoomAlias,
+        room_id: str,
+        servers: Optional[Iterable[str]] = None,
+        creator: Optional[str] = None,
+    ):
         # general association creation for both human users and app services
 
         for wchar in string.whitespace:
@@ -81,17 +88,21 @@ class DirectoryHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def create_association(
-        self, requester, room_alias, room_id, servers=None, check_membership=True,
+        self,
+        requester: Requester,
+        room_alias: RoomAlias,
+        room_id: str,
+        servers: Optional[List[str]] = None,
+        check_membership: bool = True,
     ):
         """Attempt to create a new alias
 
         Args:
-            requester (Requester)
-            room_alias (RoomAlias)
-            room_id (str)
-            servers (list[str]|None): List of servers that others servers
-                should try and join via
-            check_membership (bool): Whether to check if the user is in the room
+            requester
+            room_alias
+            room_id
+            servers: Iterable of servers that other servers should try and join via
+            check_membership: Whether to check if the user is in the room
                 before the alias can be set (if the server's config requires it).
 
         Returns:
@@ -145,15 +156,15 @@ class DirectoryHandler(BaseHandler):
         yield self._create_association(room_alias, room_id, servers, creator=user_id)
 
     @defer.inlineCallbacks
-    def delete_association(self, requester, room_alias):
+    def delete_association(self, requester: Requester, room_alias: RoomAlias):
         """Remove an alias from the directory
 
         (this is only meant for human users; AS users should call
         delete_appservice_association)
 
         Args:
-            requester (Requester):
-            room_alias (RoomAlias):
+            requester
+            room_alias
 
         Returns:
             Deferred[unicode]: room id that the alias used to point to
@@ -189,16 +200,16 @@ class DirectoryHandler(BaseHandler):
         room_id = yield self._delete_association(room_alias)
 
         try:
-            yield self._update_canonical_alias(
-                requester, requester.user.to_string(), room_id, room_alias
-            )
+            yield self._update_canonical_alias(requester, user_id, room_id, room_alias)
         except AuthError as e:
             logger.info("Failed to update alias events: %s", e)
 
         return room_id
 
     @defer.inlineCallbacks
-    def delete_appservice_association(self, service, room_alias):
+    def delete_appservice_association(
+        self, service: ApplicationService, room_alias: RoomAlias
+    ):
         if not service.is_interested_in_alias(room_alias.to_string()):
             raise SynapseError(
                 400,
@@ -208,7 +219,7 @@ class DirectoryHandler(BaseHandler):
         yield self._delete_association(room_alias)
 
     @defer.inlineCallbacks
-    def _delete_association(self, room_alias):
+    def _delete_association(self, room_alias: RoomAlias):
         if not self.hs.is_mine(room_alias):
             raise SynapseError(400, "Room alias must be local")
 
@@ -217,7 +228,7 @@ class DirectoryHandler(BaseHandler):
         return room_id
 
     @defer.inlineCallbacks
-    def get_association(self, room_alias):
+    def get_association(self, room_alias: RoomAlias):
         room_id = None
         if self.hs.is_mine(room_alias):
             result = yield self.get_association_from_room_alias(room_alias)
@@ -282,7 +293,9 @@ class DirectoryHandler(BaseHandler):
             )
 
     @defer.inlineCallbacks
-    def _update_canonical_alias(self, requester, user_id, room_id, room_alias):
+    def _update_canonical_alias(
+        self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias
+    ):
         """
         Send an updated canonical alias event if the removed alias was set as
         the canonical alias or listed in the alt_aliases field.
@@ -331,7 +344,7 @@ class DirectoryHandler(BaseHandler):
             )
 
     @defer.inlineCallbacks
-    def get_association_from_room_alias(self, room_alias):
+    def get_association_from_room_alias(self, room_alias: RoomAlias):
         result = yield self.store.get_association_from_room_alias(room_alias)
         if not result:
             # Query AS to see if it exists
@@ -339,7 +352,7 @@ class DirectoryHandler(BaseHandler):
             result = yield as_handler.query_room_alias_exists(room_alias)
         return result
 
-    def can_modify_alias(self, alias, user_id=None):
+    def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None):
         # Any application service "interested" in an alias they are regexing on
         # can modify the alias.
         # Users can only modify the alias if ALL the interested services have
@@ -360,22 +373,42 @@ class DirectoryHandler(BaseHandler):
         return defer.succeed(True)
 
     @defer.inlineCallbacks
-    def _user_can_delete_alias(self, alias, user_id):
+    def _user_can_delete_alias(self, alias: RoomAlias, user_id: str):
+        """Determine whether a user can delete an alias.
+
+        One of the following must be true:
+
+        1. The user created the alias.
+        2. The user is a server administrator.
+        3. The user has a power-level sufficient to send a canonical alias event
+           for the current room.
+
+        """
         creator = yield self.store.get_room_alias_creator(alias.to_string())
 
         if creator is not None and creator == user_id:
             return True
 
-        is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))
-        return is_admin
+        # Resolve the alias to the corresponding room.
+        room_mapping = yield self.get_association(alias)
+        room_id = room_mapping["room_id"]
+        if not room_id:
+            return False
+
+        res = yield self.auth.check_can_change_room_list(
+            room_id, UserID.from_string(user_id)
+        )
+        return res
 
     @defer.inlineCallbacks
-    def edit_published_room_list(self, requester, room_id, visibility):
+    def edit_published_room_list(
+        self, requester: Requester, room_id: str, visibility: str
+    ):
         """Edit the entry of the room in the published room list.
 
         requester
-        room_id (str)
-        visibility (str): "public" or "private"
+        room_id
+        visibility: "public" or "private"
         """
         user_id = requester.user.to_string()
 
@@ -400,7 +433,15 @@ class DirectoryHandler(BaseHandler):
         if room is None:
             raise SynapseError(400, "Unknown room")
 
-        yield self.auth.check_can_change_room_list(room_id, requester.user)
+        can_change_room_list = yield self.auth.check_can_change_room_list(
+            room_id, requester.user
+        )
+        if not can_change_room_list:
+            raise AuthError(
+                403,
+                "This server requires you to be a moderator in the room to"
+                " edit its room list entry",
+            )
 
         making_public = visibility == "public"
         if making_public:
@@ -421,16 +462,16 @@ class DirectoryHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def edit_published_appservice_room_list(
-        self, appservice_id, network_id, room_id, visibility
+        self, appservice_id: str, network_id: str, room_id: str, visibility: str
     ):
         """Add or remove a room from the appservice/network specific public
         room list.
 
         Args:
-            appservice_id (str): ID of the appservice that owns the list
-            network_id (str): The ID of the network the list is associated with
-            room_id (str)
-            visibility (str): either "public" or "private"
+            appservice_id: ID of the appservice that owns the list
+            network_id: The ID of the network the list is associated with
+            room_id
+            visibility: either "public" or "private"
         """
         if visibility not in ["public", "private"]:
             raise SynapseError(400, "Invalid visibility setting")
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 3397cfa485..5e40adba52 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -18,6 +18,7 @@ from mock import Mock
 
 from twisted.internet import defer
 
+import synapse
 import synapse.api.errors
 from synapse.api.constants import EventTypes
 from synapse.config.room_directory import RoomDirectoryConfig
@@ -87,52 +88,131 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
             ignore_backoff=True,
         )
 
-    def test_delete_alias_not_allowed(self):
-        """Removing an alias should be denied if a user does not have the proper permissions."""
-        room_id = "!8765qwer:test"
+    def test_incoming_fed_query(self):
+        self.get_success(
+            self.store.create_room_alias_association(
+                self.your_room, "!8765asdf:test", ["test"]
+            )
+        )
+
+        response = self.get_success(
+            self.handler.on_directory_query({"room_alias": "#your-room:test"})
+        )
+
+        self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response)
+
+
+class TestDeleteAlias(unittest.HomeserverTestCase):
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        directory.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+        self.handler = hs.get_handlers().directory_handler
+        self.state_handler = hs.get_state_handler()
+
+        # Create user
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        # Create a test room
+        self.room_id = self.helper.create_room_as(
+            self.admin_user, tok=self.admin_user_tok
+        )
+
+        self.test_alias = "#test:test"
+        self.room_alias = RoomAlias.from_string(self.test_alias)
+
+        # Create a test user.
+        self.test_user = self.register_user("user", "pass", admin=False)
+        self.test_user_tok = self.login("user", "pass")
+        self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok)
+
+    def _create_alias(self, user):
+        # Create a new alias to this room.
         self.get_success(
-            self.store.create_room_alias_association(self.my_room, room_id, ["test"])
+            self.store.create_room_alias_association(
+                self.room_alias, self.room_id, ["test"], user
+            )
         )
 
+    def test_delete_alias_not_allowed(self):
+        """A user that doesn't meet the expected guidelines cannot delete an alias."""
+        self._create_alias(self.admin_user)
         self.get_failure(
             self.handler.delete_association(
-                create_requester("@user:test"), self.my_room
+                create_requester(self.test_user), self.room_alias
             ),
             synapse.api.errors.AuthError,
         )
 
-    def test_delete_alias(self):
-        """Removing an alias should work when a user does has the proper permissions."""
-        room_id = "!8765qwer:test"
-        user_id = "@user:test"
-        self.get_success(
-            self.store.create_room_alias_association(
-                self.my_room, room_id, ["test"], user_id
+    def test_delete_alias_creator(self):
+        """An alias creator can delete their own alias."""
+        # Create an alias from a different user.
+        self._create_alias(self.test_user)
+
+        # Delete the user's alias.
+        result = self.get_success(
+            self.handler.delete_association(
+                create_requester(self.test_user), self.room_alias
             )
         )
+        self.assertEquals(self.room_id, result)
 
+        # Confirm the alias is gone.
+        self.get_failure(
+            self.handler.get_association(self.room_alias),
+            synapse.api.errors.SynapseError,
+        )
+
+    def test_delete_alias_admin(self):
+        """A server admin can delete an alias created by another user."""
+        # Create an alias from a different user.
+        self._create_alias(self.test_user)
+
+        # Delete the user's alias as the admin.
         result = self.get_success(
-            self.handler.delete_association(create_requester(user_id), self.my_room)
+            self.handler.delete_association(
+                create_requester(self.admin_user), self.room_alias
+            )
         )
-        self.assertEquals(room_id, result)
+        self.assertEquals(self.room_id, result)
 
-        # The alias should not be found.
+        # Confirm the alias is gone.
         self.get_failure(
-            self.handler.get_association(self.my_room), synapse.api.errors.SynapseError
+            self.handler.get_association(self.room_alias),
+            synapse.api.errors.SynapseError,
         )
 
-    def test_incoming_fed_query(self):
-        self.get_success(
-            self.store.create_room_alias_association(
-                self.your_room, "!8765asdf:test", ["test"]
-            )
+    def test_delete_alias_sufficient_power(self):
+        """A user with a sufficient power level should be able to delete an alias."""
+        self._create_alias(self.admin_user)
+
+        # Increase the user's power level.
+        self.helper.send_state(
+            self.room_id,
+            "m.room.power_levels",
+            {"users": {self.test_user: 100}},
+            tok=self.admin_user_tok,
         )
 
-        response = self.get_success(
-            self.handler.on_directory_query({"room_alias": "#your-room:test"})
+        # They can now delete the alias.
+        result = self.get_success(
+            self.handler.delete_association(
+                create_requester(self.test_user), self.room_alias
+            )
         )
+        self.assertEquals(self.room_id, result)
 
-        self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response)
+        # Confirm the alias is gone.
+        self.get_failure(
+            self.handler.get_association(self.room_alias),
+            synapse.api.errors.SynapseError,
+        )
 
 
 class CanonicalAliasTestCase(unittest.HomeserverTestCase):
diff --git a/tox.ini b/tox.ini
index 097ebb8774..7622aa19f1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -185,6 +185,7 @@ commands = mypy \
             synapse/federation/federation_client.py \
             synapse/federation/sender \
             synapse/federation/transport \
+            synapse/handlers/directory.py \
             synapse/handlers/presence.py \
             synapse/handlers/sync.py \
             synapse/handlers/ui_auth \
-- 
cgit 1.4.1

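The permission model introduced in #6986 reduces to a three-way check: alias creator, server admin, or a power level sufficient to send `m.room.canonical_alias`. A condensed sketch using plain values rather than Synapse's handler and auth classes, where `required_level` stands for the room's send level for canonical alias events:

    def user_can_delete_alias(
        user_id, alias_creator, is_server_admin, user_level, required_level
    ):
        # 1. The user created the alias.
        if alias_creator is not None and alias_creator == user_id:
            return True
        # 2. The user is a server administrator.
        if is_server_admin:
            return True
        # 3. The user could send a canonical alias event in the room.
        return user_level >= required_level

    # A moderator (power level 50) can delete someone else's alias...
    assert user_can_delete_alias("@mod:test", "@other:test", False, 50, 50)
    # ...but an ordinary member cannot.
    assert not user_can_delete_alias("@user:test", "@other:test", False, 0, 50)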

From 83b6c69d3d0c6249610ed33a86f3d0526334089c Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 4 Mar 2020 17:29:09 +0000
Subject: Changelog

---
 changelog.d/7035.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/7035.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7035.bugfix b/changelog.d/7035.bugfix
new file mode 100644
index 0000000000..56292dc8ac
--- /dev/null
+++ b/changelog.d/7035.bugfix
@@ -0,0 +1 @@
+Fix a bug causing `org.matrix.dummy_event` to be included in responses from `/sync`.
-- 
cgit 1.4.1


From 78a15b1f9d3ba3aca49dc4332e86203180d5c863 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 5 Mar 2020 15:46:44 +0000
Subject: Store room_versions in EventBase objects (#6875)

This is a bit fiddly because it all has to be done in one fell swoop:

* Wherever we create a new event, pass in the room version (and check it matches the format version)
* When we prune an event, use the room version of the unpruned event to create the pruned version.
* When we pass an event over the replication protocol, pass the room version over alongside it, and use it when deserialising the event again. A toy sketch of this round trip follows below.
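
A toy sketch of that round trip, with hypothetical helpers standing in for the replication servlets (the real code uses `make_event_from_dict` and `KNOWN_ROOM_VERSIONS`, as the diff below shows): the sender serialises the room version identifier next to the event, and the receiver resolves it again before rebuilding the event.

    KNOWN_VERSIONS = {"1": "room v1", "5": "room v5"}  # stand-in registry

    def serialise(event_dict, room_version_id):
        # what the sending worker puts on the wire under this patch
        return {"event": event_dict, "room_version": room_version_id}

    def deserialise(payload):
        # look the identifier up again on the receiving side; in Synapse this
        # becomes make_event_from_dict(event_dict, room_version, ...)
        room_version = KNOWN_VERSIONS[payload["room_version"]]
        return payload["event"], room_version

    event, version = deserialise(serialise({"type": "m.room.message"}, "5"))
    assert version == "room v5"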
---
 changelog.d/6875.misc                  |  1 +
 synapse/events/__init__.py             | 53 +++++++++++++++++++++++++---------
 synapse/events/utils.py                | 14 +++------
 synapse/replication/http/federation.py | 13 ++++++---
 synapse/replication/http/send_event.py | 14 ++++++---
 5 files changed, 63 insertions(+), 32 deletions(-)
 create mode 100644 changelog.d/6875.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6875.misc b/changelog.d/6875.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6875.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 7307116556..533ba327f5 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -15,9 +15,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import abc
 import os
 from distutils.util import strtobool
-from typing import Optional, Type
+from typing import Dict, Optional, Type
 
 import six
 
@@ -199,15 +200,25 @@ class _EventInternalMetadata(object):
         return self._dict.get("redacted", False)
 
 
-class EventBase(object):
+class EventBase(metaclass=abc.ABCMeta):
+    @property
+    @abc.abstractmethod
+    def format_version(self) -> int:
+        """The EventFormatVersion implemented by this event"""
+        ...
+
     def __init__(
         self,
-        event_dict,
-        signatures={},
-        unsigned={},
-        internal_metadata_dict={},
-        rejected_reason=None,
+        event_dict: JsonDict,
+        room_version: RoomVersion,
+        signatures: Dict[str, Dict[str, str]],
+        unsigned: JsonDict,
+        internal_metadata_dict: JsonDict,
+        rejected_reason: Optional[str],
     ):
+        assert room_version.event_format == self.format_version
+
+        self.room_version = room_version
         self.signatures = signatures
         self.unsigned = unsigned
         self.rejected_reason = rejected_reason
@@ -303,7 +314,13 @@ class EventBase(object):
 class FrozenEvent(EventBase):
     format_version = EventFormatVersions.V1  # All events of this type are V1
 
-    def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
+    def __init__(
+        self,
+        event_dict: JsonDict,
+        room_version: RoomVersion,
+        internal_metadata_dict: JsonDict = {},
+        rejected_reason: Optional[str] = None,
+    ):
         event_dict = dict(event_dict)
 
         # Signatures is a dict of dicts, and this is faster than doing a
@@ -326,8 +343,9 @@ class FrozenEvent(EventBase):
 
         self._event_id = event_dict["event_id"]
 
-        super(FrozenEvent, self).__init__(
+        super().__init__(
             frozen_dict,
+            room_version=room_version,
             signatures=signatures,
             unsigned=unsigned,
             internal_metadata_dict=internal_metadata_dict,
@@ -352,7 +370,13 @@ class FrozenEvent(EventBase):
 class FrozenEventV2(EventBase):
     format_version = EventFormatVersions.V2  # All events of this type are V2
 
-    def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
+    def __init__(
+        self,
+        event_dict: JsonDict,
+        room_version: RoomVersion,
+        internal_metadata_dict: JsonDict = {},
+        rejected_reason: Optional[str] = None,
+    ):
         event_dict = dict(event_dict)
 
         # Signatures is a dict of dicts, and this is faster than doing a
@@ -377,8 +401,9 @@ class FrozenEventV2(EventBase):
 
         self._event_id = None
 
-        super(FrozenEventV2, self).__init__(
+        super().__init__(
             frozen_dict,
+            room_version=room_version,
             signatures=signatures,
             unsigned=unsigned,
             internal_metadata_dict=internal_metadata_dict,
@@ -445,7 +470,7 @@ class FrozenEventV3(FrozenEventV2):
         return self._event_id
 
 
-def event_type_from_format_version(format_version: int) -> Type[EventBase]:
+def _event_type_from_format_version(format_version: int) -> Type[EventBase]:
     """Returns the python type to use to construct an Event object for the
     given event format version.
 
@@ -474,5 +499,5 @@ def make_event_from_dict(
     rejected_reason: Optional[str] = None,
 ) -> EventBase:
     """Construct an EventBase from the given event dict"""
-    event_type = event_type_from_format_version(room_version.event_format)
-    return event_type(event_dict, internal_metadata_dict, rejected_reason)
+    event_type = _event_type_from_format_version(room_version.event_format)
+    return event_type(event_dict, room_version, internal_metadata_dict, rejected_reason)
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index f70f5032fb..bc6f98ae3b 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -35,26 +35,20 @@ from . import EventBase
 SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")
 
 
-def prune_event(event):
+def prune_event(event: EventBase) -> EventBase:
     """ Returns a pruned version of the given event, which removes all keys we
     don't know about or think could potentially be dodgy.
 
     This is used when we "redact" an event. We want to remove all fields that
     the user has specified, but we do want to keep necessary information like
     type, state_key etc.
-
-    Args:
-        event (FrozenEvent)
-
-    Returns:
-        FrozenEvent
     """
     pruned_event_dict = prune_event_dict(event.get_dict())
 
-    from . import event_type_from_format_version
+    from . import make_event_from_dict
 
-    pruned_event = event_type_from_format_version(event.format_version)(
-        pruned_event_dict, event.internal_metadata.get_dict()
+    pruned_event = make_event_from_dict(
+        pruned_event_dict, event.room_version, event.internal_metadata.get_dict()
     )
 
     # Mark the event as redacted
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index 8794720101..7e23b565b9 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -18,7 +18,7 @@ import logging
 from twisted.internet import defer
 
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
-from synapse.events import event_type_from_format_version
+from synapse.events import make_event_from_dict
 from synapse.events.snapshot import EventContext
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint
@@ -38,6 +38,9 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
         {
             "events": [{
                 "event": { .. serialized event .. },
+                "room_version": .., // "1", "2", "3", etc: the version of the room
+                                    // containing the event
+                "event_format_version": .., // 1,2,3 etc: the event format version
                 "internal_metadata": { .. serialized internal_metadata .. },
                 "rejected_reason": ..,   // The event.rejected_reason field
                 "context": { .. serialized event context .. },
@@ -73,6 +76,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
             event_payloads.append(
                 {
                     "event": event.get_pdu_json(),
+                    "room_version": event.room_version.identifier,
                     "event_format_version": event.format_version,
                     "internal_metadata": event.internal_metadata.get_dict(),
                     "rejected_reason": event.rejected_reason,
@@ -95,12 +99,13 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
             event_and_contexts = []
             for event_payload in event_payloads:
                 event_dict = event_payload["event"]
-                format_ver = event_payload["event_format_version"]
+                room_ver = KNOWN_ROOM_VERSIONS[event_payload["room_version"]]
                 internal_metadata = event_payload["internal_metadata"]
                 rejected_reason = event_payload["rejected_reason"]
 
-                EventType = event_type_from_format_version(format_ver)
-                event = EventType(event_dict, internal_metadata, rejected_reason)
+                event = make_event_from_dict(
+                    event_dict, room_ver, internal_metadata, rejected_reason
+                )
 
                 context = EventContext.deserialize(
                     self.storage, event_payload["context"]
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index 84b92f16ad..b74b088ff4 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -17,7 +17,8 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.events import event_type_from_format_version
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.events import make_event_from_dict
 from synapse.events.snapshot import EventContext
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint
@@ -37,6 +38,9 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
 
         {
             "event": { .. serialized event .. },
+            "room_version": .., // "1", "2", "3", etc: the version of the room
+                                // containing the event
+            "event_format_version": .., // 1,2,3 etc: the event format version
             "internal_metadata": { .. serialized internal_metadata .. },
             "rejected_reason": ..,   // The event.rejected_reason field
             "context": { .. serialized event context .. },
@@ -77,6 +81,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
 
         payload = {
             "event": event.get_pdu_json(),
+            "room_version": event.room_version.identifier,
             "event_format_version": event.format_version,
             "internal_metadata": event.internal_metadata.get_dict(),
             "rejected_reason": event.rejected_reason,
@@ -93,12 +98,13 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
             content = parse_json_object_from_request(request)
 
             event_dict = content["event"]
-            format_ver = content["event_format_version"]
+            room_ver = KNOWN_ROOM_VERSIONS[content["room_version"]]
             internal_metadata = content["internal_metadata"]
             rejected_reason = content["rejected_reason"]
 
-            EventType = event_type_from_format_version(format_ver)
-            event = EventType(event_dict, internal_metadata, rejected_reason)
+            event = make_event_from_dict(
+                event_dict, room_ver, internal_metadata, rejected_reason
+            )
 
             requester = Requester.deserialize(self.store, content["requester"])
             context = EventContext.deserialize(self.storage, content["context"])
-- 
cgit 1.4.1
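
For callers, the practical effect of the change above is that events are now
built via `make_event_from_dict` with an explicit `RoomVersion`, rather than by
looking up an event class from the format version alone. A minimal sketch of
the new calling convention (the event dict is illustrative, not a complete
PDU):

    from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
    from synapse.events import make_event_from_dict

    room_ver = KNOWN_ROOM_VERSIONS["5"]  # normally derived from the room's create event
    event = make_event_from_dict(
        {"type": "m.room.message", "content": {"body": "hi"}},  # illustrative only
        room_ver,
    )
    assert event.room_version is room_ver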


From 87972f07e5da0760ca5e11e62b1bda8c49f6f606 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Thu, 5 Mar 2020 11:29:56 -0500
Subject: Convert remote key resource REST layer to async/await. (#7020)

---
 changelog.d/7020.misc                      |  1 +
 synapse/rest/key/v2/remote_key_resource.py | 11 ++++-------
 2 files changed, 5 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/7020.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7020.misc b/changelog.d/7020.misc
new file mode 100644
index 0000000000..188b4378cb
--- /dev/null
+++ b/changelog.d/7020.misc
@@ -0,0 +1 @@
+Port `synapse.rest.keys` to async/await.
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 4b6d030a57..ab671f7334 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -18,8 +18,6 @@ from typing import Dict, Set
 from canonicaljson import encode_canonical_json, json
 from signedjson.sign import sign_json
 
-from twisted.internet import defer
-
 from synapse.api.errors import Codes, SynapseError
 from synapse.crypto.keyring import ServerKeyFetcher
 from synapse.http.server import (
@@ -125,8 +123,7 @@ class RemoteKey(DirectServeResource):
 
         await self.query_keys(request, query, query_remote_on_cache_miss=True)
 
-    @defer.inlineCallbacks
-    def query_keys(self, request, query, query_remote_on_cache_miss=False):
+    async def query_keys(self, request, query, query_remote_on_cache_miss=False):
         logger.info("Handling query for keys %r", query)
 
         store_queries = []
@@ -143,7 +140,7 @@ class RemoteKey(DirectServeResource):
             for key_id in key_ids:
                 store_queries.append((server_name, key_id, None))
 
-        cached = yield self.store.get_server_keys_json(store_queries)
+        cached = await self.store.get_server_keys_json(store_queries)
 
         json_results = set()
 
@@ -215,8 +212,8 @@ class RemoteKey(DirectServeResource):
                     json_results.add(bytes(result["key_json"]))
 
         if cache_misses and query_remote_on_cache_miss:
-            yield self.fetcher.get_keys(cache_misses)
-            yield self.query_keys(request, query, query_remote_on_cache_miss=False)
+            await self.fetcher.get_keys(cache_misses)
+            await self.query_keys(request, query, query_remote_on_cache_miss=False)
         else:
             signed_keys = []
             for key_json in json_results:
-- 
cgit 1.4.1
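
The conversion follows the usual Twisted-to-native-coroutine recipe: drop
`@defer.inlineCallbacks`, declare the method with `async def`, and replace each
`yield` on a Deferred with `await` (Deferreds are directly awaitable). A
generic before/after sketch (hypothetical method, not from this module):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def lookup(self, key):
        result = yield self.store.fetch(key)  # yields a Deferred
        return result

    # ... becomes ...

    async def lookup(self, key):
        return await self.store.fetch(key)  # awaits the same Deferred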


From a27056d539724614e960f3da3c2e3443aa8625ad Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Fri, 6 Mar 2020 11:06:47 +0000
Subject: Changelog

---
 changelog.d/7045.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/7045.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7045.misc b/changelog.d/7045.misc
new file mode 100644
index 0000000000..74c1abea56
--- /dev/null
+++ b/changelog.d/7045.misc
@@ -0,0 +1 @@
+Add a type check to `is_verified` when processing room keys.
-- 
cgit 1.4.1


From 1d66dce83e58827aae12080552edeaeb357b1997 Mon Sep 17 00:00:00 2001
From: Neil Johnson 
Date: Fri, 6 Mar 2020 18:14:19 +0000
Subject: Break down monthly active users by appservice_id  (#7030)

* Break down monthly active users by appservice_id and emit via prometheus.

Co-authored-by: Brendan Abolivier 
---
 changelog.d/7030.feature                           |  1 +
 synapse/app/homeserver.py                          | 13 +++++++
 .../data_stores/main/monthly_active_users.py       | 32 ++++++++++++++++-
 tests/storage/test_monthly_active_users.py         | 42 ++++++++++++++++++++++
 4 files changed, 87 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7030.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7030.feature b/changelog.d/7030.feature
new file mode 100644
index 0000000000..fcfdb8d8a1
--- /dev/null
+++ b/changelog.d/7030.feature
@@ -0,0 +1 @@
+Break down monthly active users by `appservice_id` and emit via Prometheus.
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index c2a334a2b0..e0fdddfdc9 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -298,6 +298,11 @@ class SynapseHomeServer(HomeServer):
 
 # Gauges to expose monthly active user control metrics
 current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
+current_mau_by_service_gauge = Gauge(
+    "synapse_admin_mau_current_mau_by_service",
+    "Current MAU by service",
+    ["app_service"],
+)
 max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
 registered_reserved_users_mau_gauge = Gauge(
     "synapse_admin_mau:registered_reserved_users",
@@ -585,12 +590,20 @@ def run(hs):
     @defer.inlineCallbacks
     def generate_monthly_active_users():
         current_mau_count = 0
+        current_mau_count_by_service = {}
         reserved_users = ()
         store = hs.get_datastore()
         if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
             current_mau_count = yield store.get_monthly_active_count()
+            current_mau_count_by_service = (
+                yield store.get_monthly_active_count_by_service()
+            )
             reserved_users = yield store.get_registered_reserved_users()
         current_mau_gauge.set(float(current_mau_count))
+
+        for app_service, count in current_mau_count_by_service.items():
+            current_mau_by_service_gauge.labels(app_service).set(float(count))
+
         registered_reserved_users_mau_gauge.set(float(len(reserved_users)))
         max_mau_gauge.set(float(hs.config.max_mau_value))
 
diff --git a/synapse/storage/data_stores/main/monthly_active_users.py b/synapse/storage/data_stores/main/monthly_active_users.py
index 1507a14e09..925bc5691b 100644
--- a/synapse/storage/data_stores/main/monthly_active_users.py
+++ b/synapse/storage/data_stores/main/monthly_active_users.py
@@ -43,13 +43,40 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
 
         def _count_users(txn):
             sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users"
-
             txn.execute(sql)
             (count,) = txn.fetchone()
             return count
 
         return self.db.runInteraction("count_users", _count_users)
 
+    @cached(num_args=0)
+    def get_monthly_active_count_by_service(self):
+        """Generates current count of monthly active users broken down by service.
+        A service is typically an appservice; native matrix users are counted as `native`.
+        Since the `monthly_active_users` table is populated from the `user_ips` table,
+        `config.track_appservice_user_ips` must be set to `true` for this
+        method to return anything other than native matrix users.
+
+        Returns:
+            Deferred[dict]: dict that includes a mapping between app_service_id
+                and the number of monthly active users for that service.
+
+        """
+
+        def _count_users_by_service(txn):
+            sql = """
+                SELECT COALESCE(appservice_id, 'native'), COALESCE(count(*), 0)
+                FROM monthly_active_users
+                LEFT JOIN users ON monthly_active_users.user_id=users.name
+                GROUP BY appservice_id;
+            """
+
+            txn.execute(sql)
+            result = txn.fetchall()
+            return dict(result)
+
+        return self.db.runInteraction("count_users_by_service", _count_users_by_service)
+
     @defer.inlineCallbacks
     def get_registered_reserved_users(self):
         """Of the reserved threepids defined in config, which are associated
@@ -291,6 +318,9 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
         )
 
         self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ())
+        self._invalidate_cache_and_stream(
+            txn, self.get_monthly_active_count_by_service, ()
+        )
         self._invalidate_cache_and_stream(
             txn, self.user_last_seen_monthly_active, (user_id,)
         )
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 3c78faab45..bc53bf0951 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -303,3 +303,45 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
         self.pump()
 
         self.store.upsert_monthly_active_user.assert_not_called()
+
+    def test_get_monthly_active_count_by_service(self):
+        appservice1_user1 = "@appservice1_user1:example.com"
+        appservice1_user2 = "@appservice1_user2:example.com"
+
+        appservice2_user1 = "@appservice2_user1:example.com"
+        native_user1 = "@native_user1:example.com"
+
+        service1 = "service1"
+        service2 = "service2"
+        native = "native"
+
+        self.store.register_user(
+            user_id=appservice1_user1, password_hash=None, appservice_id=service1
+        )
+        self.store.register_user(
+            user_id=appservice1_user2, password_hash=None, appservice_id=service1
+        )
+        self.store.register_user(
+            user_id=appservice2_user1, password_hash=None, appservice_id=service2
+        )
+        self.store.register_user(user_id=native_user1, password_hash=None)
+        self.pump()
+
+        count = self.store.get_monthly_active_count_by_service()
+        self.assertEqual({}, self.get_success(count))
+
+        self.store.upsert_monthly_active_user(native_user1)
+        self.store.upsert_monthly_active_user(appservice1_user1)
+        self.store.upsert_monthly_active_user(appservice1_user2)
+        self.store.upsert_monthly_active_user(appservice2_user1)
+        self.pump()
+
+        count = self.store.get_monthly_active_count()
+        self.assertEqual(4, self.get_success(count))
+
+        count = self.store.get_monthly_active_count_by_service()
+        result = self.get_success(count)
+
+        self.assertEqual(2, result[service1])
+        self.assertEqual(1, result[service2])
+        self.assertEqual(1, result[native])
-- 
cgit 1.4.1
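
Because the new gauge is labelled, each service becomes its own time series
when Prometheus scrapes the homeserver. Using the counts from the test above
(two users for service1, one for service2, one native), the relevant part of
the scrape output would look roughly like:

    synapse_admin_mau_current_mau_by_service{app_service="native"} 1.0
    synapse_admin_mau_current_mau_by_service{app_service="service1"} 2.0
    synapse_admin_mau_current_mau_by_service{app_service="service2"} 1.0

The values are floats because `generate_monthly_active_users` calls
`current_mau_by_service_gauge.labels(app_service).set(float(count))`.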


From 2bff4457d9a40ffdd8ae1b5d1249a5e78fb8da01 Mon Sep 17 00:00:00 2001
From: Neil Pilgrim 
Date: Sat, 7 Mar 2020 09:57:26 -0800
Subject: Add type hints to logging/context.py (#6309)

* Add type hints to logging/context.py

Signed-off-by: neiljp (Neil Pilgrim) 
---
 changelog.d/6309.misc      |   1 +
 synapse/logging/context.py | 121 +++++++++++++++++++++++++++------------------
 2 files changed, 75 insertions(+), 47 deletions(-)
 create mode 100644 changelog.d/6309.misc

(limited to 'changelog.d')

diff --git a/changelog.d/6309.misc b/changelog.d/6309.misc
new file mode 100644
index 0000000000..1aa7294617
--- /dev/null
+++ b/changelog.d/6309.misc
@@ -0,0 +1 @@
+Add type hints to `logging/context.py`.
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 1b940842f6..1eccc0e83f 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -27,10 +27,15 @@ import inspect
 import logging
 import threading
 import types
-from typing import Any, List
+from typing import TYPE_CHECKING, Optional, Tuple, TypeVar, Union
+
+from typing_extensions import Literal
 
 from twisted.internet import defer, threads
 
+if TYPE_CHECKING:
+    from synapse.logging.scopecontextmanager import _LogContextScope
+
 logger = logging.getLogger(__name__)
 
 try:
@@ -91,7 +96,7 @@ class ContextResourceUsage(object):
         "evt_db_fetch_count",
     ]
 
-    def __init__(self, copy_from=None):
+    def __init__(self, copy_from: "Optional[ContextResourceUsage]" = None) -> None:
         """Create a new ContextResourceUsage
 
         Args:
@@ -101,27 +106,28 @@ class ContextResourceUsage(object):
         if copy_from is None:
             self.reset()
         else:
-            self.ru_utime = copy_from.ru_utime
-            self.ru_stime = copy_from.ru_stime
-            self.db_txn_count = copy_from.db_txn_count
+            # FIXME: mypy can't infer the types set via reset() above, so specify explicitly for now
+            self.ru_utime = copy_from.ru_utime  # type: float
+            self.ru_stime = copy_from.ru_stime  # type: float
+            self.db_txn_count = copy_from.db_txn_count  # type: int
 
-            self.db_txn_duration_sec = copy_from.db_txn_duration_sec
-            self.db_sched_duration_sec = copy_from.db_sched_duration_sec
-            self.evt_db_fetch_count = copy_from.evt_db_fetch_count
+            self.db_txn_duration_sec = copy_from.db_txn_duration_sec  # type: float
+            self.db_sched_duration_sec = copy_from.db_sched_duration_sec  # type: float
+            self.evt_db_fetch_count = copy_from.evt_db_fetch_count  # type: int
 
-    def copy(self):
+    def copy(self) -> "ContextResourceUsage":
         return ContextResourceUsage(copy_from=self)
 
-    def reset(self):
+    def reset(self) -> None:
         self.ru_stime = 0.0
         self.ru_utime = 0.0
         self.db_txn_count = 0
 
-        self.db_txn_duration_sec = 0
-        self.db_sched_duration_sec = 0
+        self.db_txn_duration_sec = 0.0
+        self.db_sched_duration_sec = 0.0
         self.evt_db_fetch_count = 0
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return (
             " "ContextResourceUsage":
         """Add another ContextResourceUsage's stats to this one's.
 
         Args:
@@ -149,7 +155,7 @@ class ContextResourceUsage(object):
         self.evt_db_fetch_count += other.evt_db_fetch_count
         return self
 
-    def __isub__(self, other):
+    def __isub__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
         self.ru_utime -= other.ru_utime
         self.ru_stime -= other.ru_stime
         self.db_txn_count -= other.db_txn_count
@@ -158,17 +164,20 @@ class ContextResourceUsage(object):
         self.evt_db_fetch_count -= other.evt_db_fetch_count
         return self
 
-    def __add__(self, other):
+    def __add__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
         res = ContextResourceUsage(copy_from=self)
         res += other
         return res
 
-    def __sub__(self, other):
+    def __sub__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
         res = ContextResourceUsage(copy_from=self)
         res -= other
         return res
 
 
+LoggingContextOrSentinel = Union["LoggingContext", "LoggingContext.Sentinel"]
+
+
 class LoggingContext(object):
     """Additional context for log formatting. Contexts are scoped within a
     "with" block.
@@ -201,7 +210,14 @@ class LoggingContext(object):
     class Sentinel(object):
         """Sentinel to represent the root context"""
 
-        __slots__ = []  # type: List[Any]
+        __slots__ = ["previous_context", "alive", "request", "scope"]
+
+        def __init__(self) -> None:
+            # Minimal set for compatibility with LoggingContext
+            self.previous_context = None
+            self.alive = None
+            self.request = None
+            self.scope = None
 
         def __str__(self):
             return "sentinel"
@@ -235,7 +251,7 @@ class LoggingContext(object):
 
     sentinel = Sentinel()
 
-    def __init__(self, name=None, parent_context=None, request=None):
+    def __init__(self, name=None, parent_context=None, request=None) -> None:
         self.previous_context = LoggingContext.current_context()
         self.name = name
 
@@ -250,7 +266,7 @@ class LoggingContext(object):
         self.request = None
         self.tag = ""
         self.alive = True
-        self.scope = None
+        self.scope = None  # type: Optional[_LogContextScope]
 
         self.parent_context = parent_context
 
@@ -261,13 +277,13 @@ class LoggingContext(object):
             # the request param overrides the request from the parent context
             self.request = request
 
-    def __str__(self):
+    def __str__(self) -> str:
         if self.request:
             return str(self.request)
         return "%s@%x" % (self.name, id(self))
 
     @classmethod
-    def current_context(cls):
+    def current_context(cls) -> LoggingContextOrSentinel:
         """Get the current logging context from thread local storage
 
         Returns:
@@ -276,7 +292,9 @@ class LoggingContext(object):
         return getattr(cls.thread_local, "current_context", cls.sentinel)
 
     @classmethod
-    def set_current_context(cls, context):
+    def set_current_context(
+        cls, context: LoggingContextOrSentinel
+    ) -> LoggingContextOrSentinel:
         """Set the current logging context in thread local storage
         Args:
             context(LoggingContext): The context to activate.
@@ -291,7 +309,7 @@ class LoggingContext(object):
             context.start()
         return current
 
-    def __enter__(self):
+    def __enter__(self) -> "LoggingContext":
         """Enters this logging context into thread local storage"""
         old_context = self.set_current_context(self)
         if self.previous_context != old_context:
@@ -304,7 +322,7 @@ class LoggingContext(object):
 
         return self
 
-    def __exit__(self, type, value, traceback):
+    def __exit__(self, type, value, traceback) -> None:
         """Restore the logging context in thread local storage to the state it
         was before this context was entered.
         Returns:
@@ -318,7 +336,6 @@ class LoggingContext(object):
                 logger.warning(
                     "Expected logging context %s but found %s", self, current
                 )
-        self.previous_context = None
         self.alive = False
 
         # if we have a parent, pass our CPU usage stats on
@@ -330,7 +347,7 @@ class LoggingContext(object):
             # reset them in case we get entered again
             self._resource_usage.reset()
 
-    def copy_to(self, record):
+    def copy_to(self, record) -> None:
         """Copy logging fields from this context to a log record or
         another LoggingContext
         """
@@ -341,14 +358,14 @@ class LoggingContext(object):
         # we also track the current scope:
         record.scope = self.scope
 
-    def copy_to_twisted_log_entry(self, record):
+    def copy_to_twisted_log_entry(self, record) -> None:
         """
         Copy logging fields from this context to a Twisted log record.
         """
         record["request"] = self.request
         record["scope"] = self.scope
 
-    def start(self):
+    def start(self) -> None:
         if get_thread_id() != self.main_thread:
             logger.warning("Started logcontext %s on different thread", self)
             return
@@ -358,7 +375,7 @@ class LoggingContext(object):
         if not self.usage_start:
             self.usage_start = get_thread_resource_usage()
 
-    def stop(self):
+    def stop(self) -> None:
         if get_thread_id() != self.main_thread:
             logger.warning("Stopped logcontext %s on different thread", self)
             return
@@ -378,7 +395,7 @@ class LoggingContext(object):
 
         self.usage_start = None
 
-    def get_resource_usage(self):
+    def get_resource_usage(self) -> ContextResourceUsage:
         """Get resources used by this logcontext so far.
 
         Returns:
@@ -398,11 +415,13 @@ class LoggingContext(object):
 
         return res
 
-    def _get_cputime(self):
+    def _get_cputime(self) -> Tuple[float, float]:
         """Get the cpu usage time so far
 
         Returns: Tuple[float, float]: seconds in user mode, seconds in system mode
         """
+        assert self.usage_start is not None
+
         current = get_thread_resource_usage()
 
         # Indicate to mypy that we know that self.usage_start is not None.
@@ -430,13 +449,13 @@ class LoggingContext(object):
 
         return utime_delta, stime_delta
 
-    def add_database_transaction(self, duration_sec):
+    def add_database_transaction(self, duration_sec: float) -> None:
         if duration_sec < 0:
             raise ValueError("DB txn time can only be non-negative")
         self._resource_usage.db_txn_count += 1
         self._resource_usage.db_txn_duration_sec += duration_sec
 
-    def add_database_scheduled(self, sched_sec):
+    def add_database_scheduled(self, sched_sec: float) -> None:
         """Record a use of the database pool
 
         Args:
@@ -447,7 +466,7 @@ class LoggingContext(object):
             raise ValueError("DB scheduling time can only be non-negative")
         self._resource_usage.db_sched_duration_sec += sched_sec
 
-    def record_event_fetch(self, event_count):
+    def record_event_fetch(self, event_count: int) -> None:
         """Record a number of events being fetched from the db
 
         Args:
@@ -464,10 +483,10 @@ class LoggingContextFilter(logging.Filter):
             missing fields
     """
 
-    def __init__(self, **defaults):
+    def __init__(self, **defaults) -> None:
         self.defaults = defaults
 
-    def filter(self, record):
+    def filter(self, record) -> Literal[True]:
         """Add each fields from the logging contexts to the record.
         Returns:
             True to include the record in the log output.
@@ -492,12 +511,13 @@ class PreserveLoggingContext(object):
 
     __slots__ = ["current_context", "new_context", "has_parent"]
 
-    def __init__(self, new_context=None):
+    def __init__(self, new_context: Optional[LoggingContext] = None) -> None:
         if new_context is None:
-            new_context = LoggingContext.sentinel
-        self.new_context = new_context
+            self.new_context = LoggingContext.sentinel  # type: LoggingContextOrSentinel
+        else:
+            self.new_context = new_context
 
-    def __enter__(self):
+    def __enter__(self) -> None:
         """Captures the current logging context"""
         self.current_context = LoggingContext.set_current_context(self.new_context)
 
@@ -506,7 +526,7 @@ class PreserveLoggingContext(object):
             if not self.current_context.alive:
                 logger.debug("Entering dead context: %s", self.current_context)
 
-    def __exit__(self, type, value, traceback):
+    def __exit__(self, type, value, traceback) -> None:
         """Restores the current logging context"""
         context = LoggingContext.set_current_context(self.current_context)
 
@@ -525,7 +545,9 @@ class PreserveLoggingContext(object):
                 logger.debug("Restoring dead context: %s", self.current_context)
 
 
-def nested_logging_context(suffix, parent_context=None):
+def nested_logging_context(
+    suffix: str, parent_context: Optional[LoggingContext] = None
+) -> LoggingContext:
     """Creates a new logging context as a child of another.
 
     The nested logging context will have a 'request' made up of the parent context's
@@ -546,10 +568,12 @@ def nested_logging_context(suffix, parent_context=None):
     Returns:
         LoggingContext: new logging context.
     """
-    if parent_context is None:
-        parent_context = LoggingContext.current_context()
+    if parent_context is not None:
+        context = parent_context  # type: LoggingContextOrSentinel
+    else:
+        context = LoggingContext.current_context()
     return LoggingContext(
-        parent_context=parent_context, request=parent_context.request + "-" + suffix
+        parent_context=context, request=str(context.request) + "-" + suffix
     )
 
 
@@ -654,7 +678,10 @@ def make_deferred_yieldable(deferred):
     return deferred
 
 
-def _set_context_cb(result, context):
+ResultT = TypeVar("ResultT")
+
+
+def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT:
     """A callback function which just sets the logging context"""
     LoggingContext.set_current_context(context)
     return result
-- 
cgit 1.4.1
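
The `LoggingContextOrSentinel` alias is what lets call sites that may receive
either a real context or the sentinel type-check without casts, and the
Sentinel gains real attributes via `__slots__` for the same reason. A small
sketch of the pattern this enables (hypothetical helper, assuming the names
from `synapse.logging.context` are imported):

    def describe(ctx: LoggingContextOrSentinel) -> str:
        # Valid for both LoggingContext and Sentinel, since Sentinel now
        # defines `request` as well.
        return "request=%s" % (ctx.request,)

    describe(LoggingContext.current_context())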


From fb078f921b4d49fe3087d89563bce7b8cee0292c Mon Sep 17 00:00:00 2001
From: dklimpel <5740567+dklimpel@users.noreply.github.com>
Date: Sun, 8 Mar 2020 15:19:07 +0100
Subject: changelog

---
 changelog.d/7053.feature       |  1 +
 docs/sample_config.yaml        |  8 ++++++++
 synapse/config/registration.py |  4 ++--
 synapse/handlers/profile.py    | 10 ++++++++--
 4 files changed, 19 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/7053.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7053.feature b/changelog.d/7053.feature
new file mode 100644
index 0000000000..79955b9780
--- /dev/null
+++ b/changelog.d/7053.feature
@@ -0,0 +1 @@
+Add options to disable setting profile info for prevent changes.
\ No newline at end of file
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 54cbe840d5..d646f0cefe 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1057,6 +1057,14 @@ account_threepid_delegates:
     #email: https://example.com     # Delegate email sending to example.com
     #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
 
+# If enabled, don't let users set their own display names/avatars
+# other than for the very first time (unless they are a server admin).
+# Useful when provisioning users based on the contents of a 3rd party
+# directory and to avoid ambiguities.
+#
+#disable_set_displayname: False
+#disable_set_avatar_url: False
+
 # Users who register on this homeserver will automatically be joined
 # to these rooms
 #
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index d9f452dcea..bdbd6f3130 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -338,8 +338,8 @@ class RegistrationConfig(Config):
         # Useful when provisioning users based on the contents of a 3rd party
         # directory and to avoid ambiguities.
         #
-        # disable_set_displayname: False
-        # disable_set_avatar_url: False
+        #disable_set_displayname: False
+        #disable_set_avatar_url: False
 
         # Users who register on this homeserver will automatically be joined
         # to these rooms
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index fb7e84f3b8..445981bf3d 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -160,7 +160,10 @@ class BaseProfileHandler(BaseHandler):
         if not by_admin and self.hs.config.disable_set_displayname:
             profile = yield self.store.get_profileinfo(target_user.localpart)
             if profile.display_name:
-                raise SynapseError(400, "Changing displayname is disabled on this server")
+                raise SynapseError(
+                    400,
+                    "Changing displayname is disabled on this server"
+                )
 
         if len(new_displayname) > MAX_DISPLAYNAME_LEN:
             raise SynapseError(
@@ -226,7 +229,10 @@ class BaseProfileHandler(BaseHandler):
         if not by_admin and self.hs.config.disable_set_avatar_url:
             profile = yield self.store.get_profileinfo(target_user.localpart)
             if profile.avatar_url:
-                raise SynapseError(400, "Changing avatar url is disabled on this server")
+                raise SynapseError(
+                    400,
+                    "Changing avatar url is disabled on this server"
+                )
 
         if len(new_avatar_url) > MAX_AVATAR_URL_LEN:
             raise SynapseError(
-- 
cgit 1.4.1


From 66315d862fdec0ddc1414010626b344d48c14167 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Mon, 9 Mar 2020 07:19:24 -0400
Subject: Update routing of fallback auth in the worker docs. (#7048)

---
 changelog.d/7048.doc | 1 +
 docs/workers.md      | 1 +
 2 files changed, 2 insertions(+)
 create mode 100644 changelog.d/7048.doc

(limited to 'changelog.d')

diff --git a/changelog.d/7048.doc b/changelog.d/7048.doc
new file mode 100644
index 0000000000..c9666f333e
--- /dev/null
+++ b/changelog.d/7048.doc
@@ -0,0 +1 @@
+Document that the fallback auth endpoints must be routed to the same worker node as the register endpoints.
diff --git a/docs/workers.md b/docs/workers.md
index 0d84a58958..cf460283d5 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -273,6 +273,7 @@ Additionally, the following REST endpoints can be handled, but all requests must
 be routed to the same instance:
 
     ^/_matrix/client/(r0|unstable)/register$
+    ^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$
 
 Pagination requests can also be handled, but all requests with the same path
 room must be routed to the same instance. Additionally, care must be taken to
-- 
cgit 1.4.1
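
The requirement here is that any load-balancer rule treats the register and
fallback-auth endpoints as a single bucket routed to one worker. A quick way
to sanity-check a path against the two patterns (illustrative snippet, not
part of Synapse):

    import re

    SAME_WORKER_PATTERNS = [
        r"^/_matrix/client/(r0|unstable)/register$",
        r"^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$",
    ]

    def goes_to_register_worker(path: str) -> bool:
        return any(re.match(p, path) for p in SAME_WORKER_PATTERNS)

    assert goes_to_register_worker("/_matrix/client/r0/register")
    assert goes_to_register_worker("/_matrix/client/r0/auth/m.login.terms/fallback/web")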


From 06eb5cae08272c401a586991fc81f788825f910b Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Mon, 9 Mar 2020 08:58:25 -0400
Subject: Remove special auth and redaction rules for aliases events in
 experimental room ver. (#7037)

---
 changelog.d/7037.feature                   |  1 +
 synapse/api/room_versions.py               |  9 +--
 synapse/crypto/event_signing.py            |  2 +-
 synapse/event_auth.py                      |  8 +--
 synapse/events/utils.py                    | 12 ++--
 synapse/storage/data_stores/main/events.py | 10 +++-
 tests/events/test_utils.py                 | 35 ++++++++++-
 tests/test_event_auth.py                   | 93 +++++++++++++++++++++++++++++-
 8 files changed, 148 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/7037.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7037.feature b/changelog.d/7037.feature
new file mode 100644
index 0000000000..4bc1b3b19f
--- /dev/null
+++ b/changelog.d/7037.feature
@@ -0,0 +1 @@
+Implement updated authorization rules and redaction rules for aliases events, from [MSC2261](https://github.com/matrix-org/matrix-doc/pull/2261) and [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432).
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index cf7ee60d3a..871179749a 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -57,7 +57,7 @@ class RoomVersion(object):
     state_res = attr.ib()  # int; one of the StateResolutionVersions
     enforce_key_validity = attr.ib()  # bool
 
-    # bool: before MSC2260, anyone was allowed to send an aliases event
+    # bool: before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules
     special_case_aliases_auth = attr.ib(type=bool, default=False)
 
 
@@ -102,12 +102,13 @@ class RoomVersions(object):
         enforce_key_validity=True,
         special_case_aliases_auth=True,
     )
-    MSC2260_DEV = RoomVersion(
-        "org.matrix.msc2260",
+    MSC2432_DEV = RoomVersion(
+        "org.matrix.msc2432",
         RoomDisposition.UNSTABLE,
         EventFormatVersions.V3,
         StateResolutionVersions.V2,
         enforce_key_validity=True,
+        special_case_aliases_auth=False,
     )
 
 
@@ -119,6 +120,6 @@ KNOWN_ROOM_VERSIONS = {
         RoomVersions.V3,
         RoomVersions.V4,
         RoomVersions.V5,
-        RoomVersions.MSC2260_DEV,
+        RoomVersions.MSC2432_DEV,
     )
 }  # type: Dict[str, RoomVersion]
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index 5f733c1cf5..0422c43fab 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -140,7 +140,7 @@ def compute_event_signature(
     Returns:
         a dictionary in the same format of an event's signatures field.
     """
-    redact_json = prune_event_dict(event_dict)
+    redact_json = prune_event_dict(room_version, event_dict)
     redact_json.pop("age_ts", None)
     redact_json.pop("unsigned", None)
     if logger.isEnabledFor(logging.DEBUG):
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 472f165044..46beb5334f 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -137,7 +137,7 @@ def check(
             raise AuthError(403, "This room has been marked as unfederatable.")
 
     # 4. If type is m.room.aliases
-    if event.type == EventTypes.Aliases:
+    if event.type == EventTypes.Aliases and room_version_obj.special_case_aliases_auth:
         # 4a. If event has no state_key, reject
         if not event.is_state():
             raise AuthError(403, "Alias event must be a state event")
@@ -152,10 +152,8 @@ def check(
             )
 
         # 4c. Otherwise, allow.
-        # This is removed by https://github.com/matrix-org/matrix-doc/pull/2260
-        if room_version_obj.special_case_aliases_auth:
-            logger.debug("Allowing! %s", event)
-            return
+        logger.debug("Allowing! %s", event)
+        return
 
     if logger.isEnabledFor(logging.DEBUG):
         logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()])
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index bc6f98ae3b..b75b097e5e 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -23,6 +23,7 @@ from frozendict import frozendict
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, RelationTypes
+from synapse.api.room_versions import RoomVersion
 from synapse.util.async_helpers import yieldable_gather_results
 
 from . import EventBase
@@ -43,7 +44,7 @@ def prune_event(event: EventBase) -> EventBase:
     the user has specified, but we do want to keep necessary information like
     type, state_key etc.
     """
-    pruned_event_dict = prune_event_dict(event.get_dict())
+    pruned_event_dict = prune_event_dict(event.room_version, event.get_dict())
 
     from . import make_event_from_dict
 
@@ -57,15 +58,12 @@ def prune_event(event: EventBase) -> EventBase:
     return pruned_event
 
 
-def prune_event_dict(event_dict):
+def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
     """Redacts the event_dict in the same way as `prune_event`, except it
     operates on dicts rather than event objects
 
-    Args:
-        event_dict (dict)
-
     Returns:
-        dict: A copy of the pruned event dict
+        A copy of the pruned event dict
     """
 
     allowed_keys = [
@@ -112,7 +110,7 @@ def prune_event_dict(event_dict):
             "kick",
             "redact",
         )
-    elif event_type == EventTypes.Aliases:
+    elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth:
         add_fields("aliases")
     elif event_type == EventTypes.RoomHistoryVisibility:
         add_fields("history_visibility")
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 8ae23df00a..d593ef47b8 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -1168,7 +1168,11 @@ class EventsStore(
                 and original_event.internal_metadata.is_redacted()
             ):
                 # Redaction was allowed
-                pruned_json = encode_json(prune_event_dict(original_event.get_dict()))
+                pruned_json = encode_json(
+                    prune_event_dict(
+                        original_event.room_version, original_event.get_dict()
+                    )
+                )
             else:
                 # Redaction wasn't allowed
                 pruned_json = None
@@ -1929,7 +1933,9 @@ class EventsStore(
                 return
 
             # Prune the event's dict then convert it to JSON.
-            pruned_json = encode_json(prune_event_dict(event.get_dict()))
+            pruned_json = encode_json(
+                prune_event_dict(event.room_version, event.get_dict())
+            )
 
             # Update the event_json table to replace the event's JSON with the pruned
             # JSON.
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 45d55b9e94..ab5f5ac549 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from synapse.api.room_versions import RoomVersions
 from synapse.events import make_event_from_dict
 from synapse.events.utils import (
     copy_power_levels_contents,
@@ -36,9 +37,9 @@ class PruneEventTestCase(unittest.TestCase):
     """ Asserts that a new event constructed with `evdict` will look like
     `matchdict` when it is redacted. """
 
-    def run_test(self, evdict, matchdict):
+    def run_test(self, evdict, matchdict, **kwargs):
         self.assertEquals(
-            prune_event(make_event_from_dict(evdict)).get_dict(), matchdict
+            prune_event(make_event_from_dict(evdict, **kwargs)).get_dict(), matchdict
         )
 
     def test_minimal(self):
@@ -128,6 +129,36 @@ class PruneEventTestCase(unittest.TestCase):
             },
         )
 
+    def test_alias_event(self):
+        """Alias events have special behavior up through room version 6."""
+        self.run_test(
+            {
+                "type": "m.room.aliases",
+                "event_id": "$test:domain",
+                "content": {"aliases": ["test"]},
+            },
+            {
+                "type": "m.room.aliases",
+                "event_id": "$test:domain",
+                "content": {"aliases": ["test"]},
+                "signatures": {},
+                "unsigned": {},
+            },
+        )
+
+    def test_msc2432_alias_event(self):
+        """After MSC2432, alias events have no special behavior."""
+        self.run_test(
+            {"type": "m.room.aliases", "content": {"aliases": ["test"]}},
+            {
+                "type": "m.room.aliases",
+                "content": {},
+                "signatures": {},
+                "unsigned": {},
+            },
+            room_version=RoomVersions.MSC2432_DEV,
+        )
+
 
 class SerializeEventTestCase(unittest.TestCase):
     def serialize(self, ev, fields):
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index bfa5d6f510..6c2351cf55 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -19,6 +19,7 @@ from synapse import event_auth
 from synapse.api.errors import AuthError
 from synapse.api.room_versions import RoomVersions
 from synapse.events import make_event_from_dict
+from synapse.types import get_domain_from_id
 
 
 class EventAuthTestCase(unittest.TestCase):
@@ -51,7 +52,7 @@ class EventAuthTestCase(unittest.TestCase):
             _random_state_event(joiner),
             auth_events,
             do_sig_check=False,
-        ),
+        )
 
     def test_state_default_level(self):
         """
@@ -87,6 +88,83 @@ class EventAuthTestCase(unittest.TestCase):
             RoomVersions.V1, _random_state_event(king), auth_events, do_sig_check=False,
         )
 
+    def test_alias_event(self):
+        """Alias events have special behavior up through room version 6."""
+        creator = "@creator:example.com"
+        other = "@other:example.com"
+        auth_events = {
+            ("m.room.create", ""): _create_event(creator),
+            ("m.room.member", creator): _join_event(creator),
+        }
+
+        # creator should be able to send aliases
+        event_auth.check(
+            RoomVersions.V1, _alias_event(creator), auth_events, do_sig_check=False,
+        )
+
+        # Reject an event with no state key.
+        with self.assertRaises(AuthError):
+            event_auth.check(
+                RoomVersions.V1,
+                _alias_event(creator, state_key=""),
+                auth_events,
+                do_sig_check=False,
+            )
+
+        # If the domain of the sender does not match the state key, reject.
+        with self.assertRaises(AuthError):
+            event_auth.check(
+                RoomVersions.V1,
+                _alias_event(creator, state_key="test.com"),
+                auth_events,
+                do_sig_check=False,
+            )
+
+        # Note that the member does *not* need to be in the room.
+        event_auth.check(
+            RoomVersions.V1, _alias_event(other), auth_events, do_sig_check=False,
+        )
+
+    def test_msc2432_alias_event(self):
+        """After MSC2432, alias events have no special behavior."""
+        creator = "@creator:example.com"
+        other = "@other:example.com"
+        auth_events = {
+            ("m.room.create", ""): _create_event(creator),
+            ("m.room.member", creator): _join_event(creator),
+        }
+
+        # creator should be able to send aliases
+        event_auth.check(
+            RoomVersions.MSC2432_DEV,
+            _alias_event(creator),
+            auth_events,
+            do_sig_check=False,
+        )
+
+        # No particular checks are done on the state key.
+        event_auth.check(
+            RoomVersions.MSC2432_DEV,
+            _alias_event(creator, state_key=""),
+            auth_events,
+            do_sig_check=False,
+        )
+        event_auth.check(
+            RoomVersions.MSC2432_DEV,
+            _alias_event(creator, state_key="test.com"),
+            auth_events,
+            do_sig_check=False,
+        )
+
+        # Per standard auth rules, the member must be in the room.
+        with self.assertRaises(AuthError):
+            event_auth.check(
+                RoomVersions.MSC2432_DEV,
+                _alias_event(other),
+                auth_events,
+                do_sig_check=False,
+            )
+
 
 # helpers for making events
 
@@ -131,6 +209,19 @@ def _power_levels_event(sender, content):
     )
 
 
+def _alias_event(sender, **kwargs):
+    data = {
+        "room_id": TEST_ROOM_ID,
+        "event_id": _get_event_id(),
+        "type": "m.room.aliases",
+        "sender": sender,
+        "state_key": get_domain_from_id(sender),
+        "content": {"aliases": []},
+    }
+    data.update(**kwargs)
+    return make_event_from_dict(data)
+
+
 def _random_state_event(sender):
     return make_event_from_dict(
         {
-- 
cgit 1.4.1
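
The behavioural difference is easiest to see through `prune_event_dict`, which
now takes the room version: in v1 rooms the `aliases` key survives redaction,
while in org.matrix.msc2432 rooms an m.room.aliases event is redacted like any
other state event. A worked example (event dict trimmed to the keys that
matter here):

    from synapse.api.room_versions import RoomVersions
    from synapse.events.utils import prune_event_dict

    ev = {"type": "m.room.aliases", "content": {"aliases": ["#a:example.com"]}}

    # Old behaviour: the aliases list is kept through redaction.
    assert prune_event_dict(RoomVersions.V1, ev)["content"] == {
        "aliases": ["#a:example.com"]
    }

    # MSC2432 behaviour: the content is stripped entirely.
    assert prune_event_dict(RoomVersions.MSC2432_DEV, ev)["content"] == {}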


From f9e3a3f4d0ceef55d7254ba412982edf0192ccc1 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 9 Mar 2020 14:21:01 +0000
Subject: Changelog

It's the same as in #6964 since it's the most likely cause of the bug
and that change hasn't been released yet.
---
 changelog.d/7055.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/7055.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7055.misc b/changelog.d/7055.misc
new file mode 100644
index 0000000000..ec5c004bbe
--- /dev/null
+++ b/changelog.d/7055.misc
@@ -0,0 +1 @@
+Merge worker apps together.
-- 
cgit 1.4.1


From 885134529ffd95dd118d3228e69f0e3553f5a6a7 Mon Sep 17 00:00:00 2001
From: dklimpel <5740567+dklimpel@users.noreply.github.com>
Date: Mon, 9 Mar 2020 22:09:29 +0100
Subject: updates after review

---
 changelog.d/7053.feature                   |  2 +-
 docs/sample_config.yaml                    | 10 +++++-----
 synapse/config/registration.py             | 16 ++++++++--------
 synapse/handlers/profile.py                |  8 ++++----
 synapse/rest/client/v2_alpha/account.py    | 18 ++++++++++++------
 tests/handlers/test_profile.py             |  6 +++---
 tests/rest/client/v2_alpha/test_account.py | 17 +++++++----------
 7 files changed, 40 insertions(+), 37 deletions(-)

(limited to 'changelog.d')

diff --git a/changelog.d/7053.feature b/changelog.d/7053.feature
index 79955b9780..00f47b2a14 100644
--- a/changelog.d/7053.feature
+++ b/changelog.d/7053.feature
@@ -1 +1 @@
-Add options to disable setting profile info for prevent changes.
\ No newline at end of file
+Add options to prevent users from changing their profile or associated 3PIDs.
\ No newline at end of file
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index d3ecffac7d..8333800a10 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1057,18 +1057,18 @@ account_threepid_delegates:
     #email: https://example.com     # Delegate email sending to example.com
     #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
 
-# If enabled, don't let users set their own display names/avatars
+# If disabled, don't let users set their own display names/avatars
 # other than for the very first time (unless they are a server admin).
 # Useful when provisioning users based on the contents of a 3rd party
 # directory and to avoid ambiguities.
 #
-#disable_set_displayname: false
-#disable_set_avatar_url: false
+#enable_set_displayname: true
+#enable_set_avatar_url: true
 
-# If true, stop users from trying to change the 3PIDs associated with
+# If false, stop users from trying to change the 3PIDs associated with
 # their accounts.
 #
-#disable_3pid_changes: false
+#enable_3pid_changes: true
 
 # Users who register on this homeserver will automatically be joined
 # to these rooms
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 1abc0a79af..d4897ec9b6 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -129,9 +129,9 @@ class RegistrationConfig(Config):
                 raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,))
         self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
 
-        self.disable_set_displayname = config.get("disable_set_displayname", False)
-        self.disable_set_avatar_url = config.get("disable_set_avatar_url", False)
-        self.disable_3pid_changes = config.get("disable_3pid_changes", False)
+        self.enable_set_displayname = config.get("enable_set_displayname", True)
+        self.enable_set_avatar_url = config.get("enable_set_avatar_url", True)
+        self.enable_3pid_changes = config.get("enable_3pid_changes", True)
 
         self.disable_msisdn_registration = config.get(
             "disable_msisdn_registration", False
@@ -334,18 +334,18 @@ class RegistrationConfig(Config):
             #email: https://example.com     # Delegate email sending to example.com
             #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
 
-        # If enabled, don't let users set their own display names/avatars
+        # If disabled, don't let users set their own display names/avatars
         # other than for the very first time (unless they are a server admin).
         # Useful when provisioning users based on the contents of a 3rd party
         # directory and to avoid ambiguities.
         #
-        #disable_set_displayname: false
-        #disable_set_avatar_url: false
+        #enable_set_displayname: true
+        #enable_set_avatar_url: true
 
-        # If true, stop users from trying to change the 3PIDs associated with
+        # If false, stop users from trying to change the 3PIDs associated with
         # their accounts.
         #
-        #disable_3pid_changes: false
+        #enable_3pid_changes: true
 
         # Users who register on this homeserver will automatically be joined
         # to these rooms
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index b049dd8e26..eb85dba015 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -157,11 +157,11 @@ class BaseProfileHandler(BaseHandler):
         if not by_admin and target_user != requester.user:
             raise AuthError(400, "Cannot set another user's displayname")
 
-        if not by_admin and self.hs.config.disable_set_displayname:
+        if not by_admin and not self.hs.config.enable_set_displayname:
             profile = yield self.store.get_profileinfo(target_user.localpart)
             if profile.display_name:
                 raise SynapseError(
-                    400, "Changing displayname is disabled on this server"
+                    400, "Changing display name is disabled on this server", Codes.FORBIDDEN
                 )
 
         if len(new_displayname) > MAX_DISPLAYNAME_LEN:
@@ -225,11 +225,11 @@ class BaseProfileHandler(BaseHandler):
         if not by_admin and target_user != requester.user:
             raise AuthError(400, "Cannot set another user's avatar_url")
 
-        if not by_admin and self.hs.config.disable_set_avatar_url:
+        if not by_admin and not self.hs.config.enable_set_avatar_url:
             profile = yield self.store.get_profileinfo(target_user.localpart)
             if profile.avatar_url:
                 raise SynapseError(
-                    400, "Changing avatar url is disabled on this server"
+                    400, "Changing avatar is disabled on this server", Codes.FORBIDDEN
                 )
 
         if len(new_avatar_url) > MAX_AVATAR_URL_LEN:
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 97bddf36d9..e40136f2f3 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -599,8 +599,10 @@ class ThreepidRestServlet(RestServlet):
         return 200, {"threepids": threepids}
 
     async def on_POST(self, request):
-        if self.hs.config.disable_3pid_changes:
-            raise SynapseError(400, "3PID changes disabled on this server")
+        if not self.hs.config.enable_3pid_changes:
+            raise SynapseError(
+                400, "3PID changes are disabled on this server", Codes.FORBIDDEN
+            )
 
         requester = await self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
@@ -646,8 +648,10 @@ class ThreepidAddRestServlet(RestServlet):
 
     @interactive_auth_handler
     async def on_POST(self, request):
-        if self.hs.config.disable_3pid_changes:
-            raise SynapseError(400, "3PID changes disabled on this server")
+        if not self.hs.config.enable_3pid_changes:
+            raise SynapseError(
+                400, "3PID changes are disabled on this server", Codes.FORBIDDEN
+            )
 
         requester = await self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
@@ -749,8 +753,10 @@ class ThreepidDeleteRestServlet(RestServlet):
         self.auth_handler = hs.get_auth_handler()
 
     async def on_POST(self, request):
-        if self.hs.config.disable_3pid_changes:
-            raise SynapseError(400, "3PID changes disabled on this server")
+        if not self.hs.config.enable_3pid_changes:
+            raise SynapseError(
+                400, "3PID changes are disabled on this server", Codes.FORBIDDEN
+            )
 
         body = parse_json_object_from_request(request)
         assert_params_in_dict(body, ["medium", "address"])
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index f8c0da5ced..e600b9777b 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -93,7 +93,7 @@ class ProfileTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_set_my_name_if_disabled(self):
-        self.hs.config.disable_set_displayname = True
+        self.hs.config.enable_set_displayname = False
 
         # Set first displayname is allowed, if displayname is null
         yield self.store.set_profile_displayname(self.frank.localpart, "Frank")
@@ -164,9 +164,9 @@ class ProfileTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_set_my_avatar_if_disabled(self):
-        self.hs.config.disable_set_avatar_url = True
+        self.hs.config.enable_set_avatar_url = False
 
-        # Set first time avatar is allowed, if displayname is null
+        # Set first time avatar is allowed, if avatar is null
         yield self.store.set_profile_avatar_url(
             self.frank.localpart, "http://my.server/me.png"
         )
diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py
index e178a53335..34e40a36d0 100644
--- a/tests/rest/client/v2_alpha/test_account.py
+++ b/tests/rest/client/v2_alpha/test_account.py
@@ -24,6 +24,7 @@ import pkg_resources
 
 import synapse.rest.admin
 from synapse.api.constants import LoginType, Membership
+from synapse.api.errors import Codes
 from synapse.rest.client.v1 import login, room
 from synapse.rest.client.v2_alpha import account, register
 
@@ -412,7 +413,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
     def test_add_email_if_disabled(self):
         """Test add mail to profile if disabled
         """
-        self.hs.config.disable_3pid_changes = True
+        self.hs.config.enable_3pid_changes = False
 
         client_secret = "foobar"
         session_id = self._request_token(self.email, client_secret)
@@ -438,9 +439,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
         )
         self.render(request)
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(
-            "3PID changes disabled on this server", channel.json_body["error"]
-        )
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
 
         # Get user
         request, channel = self.make_request(
@@ -486,7 +485,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
     def test_delete_email_if_disabled(self):
         """Test delete mail from profile if disabled
         """
-        self.hs.config.disable_3pid_changes = True
+        self.hs.config.enable_3pid_changes = False
 
         # Add a threepid
         self.get_success(
@@ -508,9 +507,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
         self.render(request)
 
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(
-            "3PID changes disabled on this server", channel.json_body["error"]
-        )
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
 
         # Get user
         request, channel = self.make_request(
@@ -547,7 +544,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
         )
         self.render(request)
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("No validated 3pid session found", channel.json_body["error"])
+        self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"])
 
         # Get user
         request, channel = self.make_request(
@@ -582,7 +579,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
         )
         self.render(request)
         self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("No validated 3pid session found", channel.json_body["error"])
+        self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"])
 
         # Get user
         request, channel = self.make_request(
-- 
cgit 1.4.1
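
The hunks above all land on the same pattern: gate the endpoint on a positively-named config flag and reject with a machine-readable errcode, so that tests can assert on errcode rather than on the human-readable message (which is free to be reworded). A minimal sketch of that pattern, using simplified stand-ins rather than Synapse's actual classes:

    # Illustrative sketch only; Codes and SynapseError here are simplified
    # stand-ins for the real classes in synapse.api.errors.
    class Codes:
        FORBIDDEN = "M_FORBIDDEN"

    class SynapseError(Exception):
        def __init__(self, code: int, msg: str, errcode: str = "M_UNKNOWN"):
            super().__init__(msg)
            self.code = code        # HTTP status to return
            self.errcode = errcode  # stable, machine-readable Matrix error code

    def check_3pid_changes_allowed(enable_3pid_changes: bool) -> None:
        if not enable_3pid_changes:
            raise SynapseError(
                400, "3PID changes are disabled on this server", Codes.FORBIDDEN
            )

Asserting on the errcode is what lets the test hunks above replace string comparisons such as "3PID changes disabled on this server" with Codes.FORBIDDEN.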


From 156f2718673f88188627c76952102ef08ea34256 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 10 Mar 2020 14:01:24 +0000
Subject: Changelog

---
 changelog.d/7058.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/7058.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7058.feature b/changelog.d/7058.feature
new file mode 100644
index 0000000000..53ea485e03
--- /dev/null
+++ b/changelog.d/7058.feature
@@ -0,0 +1 @@
+Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process.
-- 
cgit 1.4.1


From 1cde4cf3f15413b941c699ac5048c464a49137a4 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 11 Mar 2020 18:03:56 +0000
Subject: Changelog

---
 changelog.d/7066.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/7066.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7066.bugfix b/changelog.d/7066.bugfix
new file mode 100644
index 0000000000..94bb096287
--- /dev/null
+++ b/changelog.d/7066.bugfix
@@ -0,0 +1 @@
+Fix a bug that would cause Synapse to respond with an error about event visibility if a client tried to request the state of a room at a given token.
-- 
cgit 1.4.1


From e55a240681a2d3adf34eb48198475e9255b53358 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 11 Mar 2020 19:37:04 +0000
Subject: Changelog

---
 changelog.d/7067.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/7067.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7067.feature b/changelog.d/7067.feature
new file mode 100644
index 0000000000..53ea485e03
--- /dev/null
+++ b/changelog.d/7067.feature
@@ -0,0 +1 @@
+Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process.
-- 
cgit 1.4.1


From 77d0a4507b1c8ce3a1195851e87e723287332786 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Thu, 12 Mar 2020 11:36:27 -0400
Subject: Add type annotations and comments to auth handler (#7063)

---
 changelog.d/7063.misc    |   1 +
 synapse/handlers/auth.py | 193 +++++++++++++++++++++++++----------------------
 tox.ini                  |   1 +
 3 files changed, 106 insertions(+), 89 deletions(-)
 create mode 100644 changelog.d/7063.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7063.misc b/changelog.d/7063.misc
new file mode 100644
index 0000000000..e7b1cd3cd8
--- /dev/null
+++ b/changelog.d/7063.misc
@@ -0,0 +1 @@
+Add type annotations and comments to the auth handler.
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 7ca90f91c4..7860f9625e 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -18,10 +18,10 @@ import logging
 import time
 import unicodedata
 import urllib.parse
-from typing import Any
+from typing import Any, Dict, Iterable, List, Optional
 
 import attr
-import bcrypt
+import bcrypt  # type: ignore[import]
 import pymacaroons
 
 from twisted.internet import defer
@@ -45,7 +45,7 @@ from synapse.http.site import SynapseRequest
 from synapse.logging.context import defer_to_thread
 from synapse.module_api import ModuleApi
 from synapse.push.mailer import load_jinja2_templates
-from synapse.types import UserID
+from synapse.types import Requester, UserID
 from synapse.util.caches.expiringcache import ExpiringCache
 
 from ._base import BaseHandler
@@ -63,11 +63,11 @@ class AuthHandler(BaseHandler):
         """
         super(AuthHandler, self).__init__(hs)
 
-        self.checkers = {}  # type: dict[str, UserInteractiveAuthChecker]
+        self.checkers = {}  # type: Dict[str, UserInteractiveAuthChecker]
         for auth_checker_class in INTERACTIVE_AUTH_CHECKERS:
             inst = auth_checker_class(hs)
             if inst.is_enabled():
-                self.checkers[inst.AUTH_TYPE] = inst
+                self.checkers[inst.AUTH_TYPE] = inst  # type: ignore
 
         self.bcrypt_rounds = hs.config.bcrypt_rounds
 
@@ -124,7 +124,9 @@ class AuthHandler(BaseHandler):
         self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist)
 
     @defer.inlineCallbacks
-    def validate_user_via_ui_auth(self, requester, request_body, clientip):
+    def validate_user_via_ui_auth(
+        self, requester: Requester, request_body: Dict[str, Any], clientip: str
+    ):
         """
         Checks that the user is who they claim to be, via a UI auth.
 
@@ -133,11 +135,11 @@ class AuthHandler(BaseHandler):
         that it isn't stolen by re-authenticating them.
 
         Args:
-            requester (Requester): The user, as given by the access token
+            requester: The user, as given by the access token
 
-            request_body (dict): The body of the request sent by the client
+            request_body: The body of the request sent by the client
 
-            clientip (str): The IP address of the client.
+            clientip: The IP address of the client.
 
         Returns:
             defer.Deferred[dict]: the parameters for this request (which may
@@ -208,7 +210,9 @@ class AuthHandler(BaseHandler):
         return self.checkers.keys()
 
     @defer.inlineCallbacks
-    def check_auth(self, flows, clientdict, clientip):
+    def check_auth(
+        self, flows: List[List[str]], clientdict: Dict[str, Any], clientip: str
+    ):
         """
         Takes a dictionary sent by the client in the login / registration
         protocol and handles the User-Interactive Auth flow.
@@ -223,14 +227,14 @@ class AuthHandler(BaseHandler):
         decorator.
 
         Args:
-            flows (list): A list of login flows. Each flow is an ordered list of
-                          strings representing auth-types. At least one full
-                          flow must be completed in order for auth to be successful.
+            flows: A list of login flows. Each flow is an ordered list of
+                   strings representing auth-types. At least one full
+                   flow must be completed in order for auth to be successful.
 
             clientdict: The dictionary from the client root level, not the
                         'auth' key: this method prompts for auth if none is sent.
 
-            clientip (str): The IP address of the client.
+            clientip: The IP address of the client.
 
         Returns:
             defer.Deferred[dict, dict, str]: a deferred tuple of
@@ -250,7 +254,7 @@ class AuthHandler(BaseHandler):
         """
 
         authdict = None
-        sid = None
+        sid = None  # type: Optional[str]
         if clientdict and "auth" in clientdict:
             authdict = clientdict["auth"]
             del clientdict["auth"]
@@ -283,9 +287,9 @@ class AuthHandler(BaseHandler):
         creds = session["creds"]
 
         # check auth type currently being presented
-        errordict = {}
+        errordict = {}  # type: Dict[str, Any]
         if "type" in authdict:
-            login_type = authdict["type"]
+            login_type = authdict["type"]  # type: str
             try:
                 result = yield self._check_auth_dict(authdict, clientip)
                 if result:
@@ -326,7 +330,7 @@ class AuthHandler(BaseHandler):
         raise InteractiveAuthIncompleteError(ret)
 
     @defer.inlineCallbacks
-    def add_oob_auth(self, stagetype, authdict, clientip):
+    def add_oob_auth(self, stagetype: str, authdict: Dict[str, Any], clientip: str):
         """
         Adds the result of out-of-band authentication into an existing auth
         session. Currently used for adding the result of fallback auth.
@@ -348,7 +352,7 @@ class AuthHandler(BaseHandler):
             return True
         return False
 
-    def get_session_id(self, clientdict):
+    def get_session_id(self, clientdict: Dict[str, Any]) -> Optional[str]:
         """
         Gets the session ID for a client given the client dictionary
 
@@ -356,7 +360,7 @@ class AuthHandler(BaseHandler):
             clientdict: The dictionary sent by the client in the request
 
         Returns:
-            str|None: The string session ID the client sent. If the client did
+            The string session ID the client sent. If the client did
                 not send a session ID, returns None.
         """
         sid = None
@@ -366,40 +370,42 @@ class AuthHandler(BaseHandler):
                 sid = authdict["session"]
         return sid
 
-    def set_session_data(self, session_id, key, value):
+    def set_session_data(self, session_id: str, key: str, value: Any) -> None:
         """
         Store a key-value pair into the sessions data associated with this
         request. This data is stored server-side and cannot be modified by
         the client.
 
         Args:
-            session_id (string): The ID of this session as returned from check_auth
-            key (string): The key to store the data under
-            value (any): The data to store
+            session_id: The ID of this session as returned from check_auth
+            key: The key to store the data under
+            value: The data to store
         """
         sess = self._get_session_info(session_id)
         sess.setdefault("serverdict", {})[key] = value
         self._save_session(sess)
 
-    def get_session_data(self, session_id, key, default=None):
+    def get_session_data(
+        self, session_id: str, key: str, default: Optional[Any] = None
+    ) -> Any:
         """
         Retrieve data stored with set_session_data
 
         Args:
-            session_id (string): The ID of this session as returned from check_auth
-            key (string): The key to store the data under
-            default (any): Value to return if the key has not been set
+            session_id: The ID of this session as returned from check_auth
+            key: The key to store the data under
+            default: Value to return if the key has not been set
         """
         sess = self._get_session_info(session_id)
         return sess.setdefault("serverdict", {}).get(key, default)
 
     @defer.inlineCallbacks
-    def _check_auth_dict(self, authdict, clientip):
+    def _check_auth_dict(self, authdict: Dict[str, Any], clientip: str):
         """Attempt to validate the auth dict provided by a client
 
         Args:
-            authdict (object): auth dict provided by the client
-            clientip (str): IP address of the client
+            authdict: auth dict provided by the client
+            clientip: IP address of the client
 
         Returns:
             Deferred: result of the stage verification.
@@ -425,10 +431,10 @@ class AuthHandler(BaseHandler):
         (canonical_id, callback) = yield self.validate_login(user_id, authdict)
         return canonical_id
 
-    def _get_params_recaptcha(self):
+    def _get_params_recaptcha(self) -> dict:
         return {"public_key": self.hs.config.recaptcha_public_key}
 
-    def _get_params_terms(self):
+    def _get_params_terms(self) -> dict:
         return {
             "policies": {
                 "privacy_policy": {
@@ -445,7 +451,9 @@ class AuthHandler(BaseHandler):
             }
         }
 
-    def _auth_dict_for_flows(self, flows, session):
+    def _auth_dict_for_flows(
+        self, flows: List[List[str]], session: Dict[str, Any]
+    ) -> Dict[str, Any]:
         public_flows = []
         for f in flows:
             public_flows.append(f)
@@ -455,7 +463,7 @@ class AuthHandler(BaseHandler):
             LoginType.TERMS: self._get_params_terms,
         }
 
-        params = {}
+        params = {}  # type: Dict[str, Any]
 
         for f in public_flows:
             for stage in f:
@@ -468,7 +476,13 @@ class AuthHandler(BaseHandler):
             "params": params,
         }
 
-    def _get_session_info(self, session_id):
+    def _get_session_info(self, session_id: Optional[str]) -> dict:
+        """
+        Gets or creates a session given a session ID.
+
+        The session can be used to track data across multiple requests, e.g. for
+        interactive authentication.
+        """
         if session_id not in self.sessions:
             session_id = None
 
@@ -481,7 +495,9 @@ class AuthHandler(BaseHandler):
         return self.sessions[session_id]
 
     @defer.inlineCallbacks
-    def get_access_token_for_user_id(self, user_id, device_id, valid_until_ms):
+    def get_access_token_for_user_id(
+        self, user_id: str, device_id: Optional[str], valid_until_ms: Optional[int]
+    ):
         """
         Creates a new access token for the user with the given user ID.
 
@@ -491,11 +507,11 @@ class AuthHandler(BaseHandler):
         The device will be recorded in the table if it is not there already.
 
         Args:
-            user_id (str): canonical User ID
-            device_id (str|None): the device ID to associate with the tokens.
+            user_id: canonical User ID
+            device_id: the device ID to associate with the tokens.
                None to leave the tokens unassociated with a device (deprecated:
                we should always have a device ID)
-            valid_until_ms (int|None): when the token is valid until. None for
+            valid_until_ms: when the token is valid until. None for
                 no expiry.
         Returns:
               The access token for the user's session.
@@ -530,13 +546,13 @@ class AuthHandler(BaseHandler):
         return access_token
 
     @defer.inlineCallbacks
-    def check_user_exists(self, user_id):
+    def check_user_exists(self, user_id: str):
         """
         Checks to see if a user with the given id exists. Will check case
         insensitively, but return None if there are multiple inexact matches.
 
         Args:
-            (unicode|bytes) user_id: complete @user:id
+            user_id: complete @user:id
 
         Returns:
             defer.Deferred: (unicode) canonical_user_id, or None if zero or
@@ -551,7 +567,7 @@ class AuthHandler(BaseHandler):
         return None
 
     @defer.inlineCallbacks
-    def _find_user_id_and_pwd_hash(self, user_id):
+    def _find_user_id_and_pwd_hash(self, user_id: str):
         """Checks to see if a user with the given id exists. Will check case
         insensitively, but will return None if there are multiple inexact
         matches.
@@ -581,7 +597,7 @@ class AuthHandler(BaseHandler):
             )
         return result
 
-    def get_supported_login_types(self):
+    def get_supported_login_types(self) -> Iterable[str]:
         """Get a the login types supported for the /login API
 
         By default this is just 'm.login.password' (unless password_enabled is
@@ -589,20 +605,20 @@ class AuthHandler(BaseHandler):
         other login types.
 
         Returns:
-            Iterable[str]: login types
+            login types
         """
         return self._supported_login_types
 
     @defer.inlineCallbacks
-    def validate_login(self, username, login_submission):
+    def validate_login(self, username: str, login_submission: Dict[str, Any]):
         """Authenticates the user for the /login API
 
         Also used by the user-interactive auth flow to validate
         m.login.password auth types.
 
         Args:
-            username (str): username supplied by the user
-            login_submission (dict): the whole of the login submission
+            username: username supplied by the user
+            login_submission: the whole of the login submission
                 (including 'type' and other relevant fields)
         Returns:
             Deferred[str, func]: canonical user id, and optional callback
@@ -690,13 +706,13 @@ class AuthHandler(BaseHandler):
         raise LoginError(403, "Invalid password", errcode=Codes.FORBIDDEN)
 
     @defer.inlineCallbacks
-    def check_password_provider_3pid(self, medium, address, password):
+    def check_password_provider_3pid(self, medium: str, address: str, password: str):
         """Check if a password provider is able to validate a thirdparty login
 
         Args:
-            medium (str): The medium of the 3pid (ex. email).
-            address (str): The address of the 3pid (ex. jdoe@example.com).
-            password (str): The password of the user.
+            medium: The medium of the 3pid (ex. email).
+            address: The address of the 3pid (ex. jdoe@example.com).
+            password: The password of the user.
 
         Returns:
             Deferred[(str|None, func|None)]: A tuple of `(user_id,
@@ -724,15 +740,15 @@ class AuthHandler(BaseHandler):
         return None, None
 
     @defer.inlineCallbacks
-    def _check_local_password(self, user_id, password):
+    def _check_local_password(self, user_id: str, password: str):
         """Authenticate a user against the local password database.
 
         user_id is checked case insensitively, but will return None if there are
         multiple inexact matches.
 
         Args:
-            user_id (unicode): complete @user:id
-            password (unicode): the provided password
+            user_id: complete @user:id
+            password: the provided password
         Returns:
             Deferred[unicode] the canonical_user_id, or Deferred[None] if
                 unknown user/bad password
@@ -755,7 +771,7 @@ class AuthHandler(BaseHandler):
         return user_id
 
     @defer.inlineCallbacks
-    def validate_short_term_login_token_and_get_user_id(self, login_token):
+    def validate_short_term_login_token_and_get_user_id(self, login_token: str):
         auth_api = self.hs.get_auth()
         user_id = None
         try:
@@ -769,11 +785,11 @@ class AuthHandler(BaseHandler):
         return user_id
 
     @defer.inlineCallbacks
-    def delete_access_token(self, access_token):
+    def delete_access_token(self, access_token: str):
         """Invalidate a single access token
 
         Args:
-            access_token (str): access token to be deleted
+            access_token: access token to be deleted
 
         Returns:
             Deferred
@@ -798,15 +814,17 @@ class AuthHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def delete_access_tokens_for_user(
-        self, user_id, except_token_id=None, device_id=None
+        self,
+        user_id: str,
+        except_token_id: Optional[str] = None,
+        device_id: Optional[str] = None,
     ):
         """Invalidate access tokens belonging to a user
 
         Args:
-            user_id (str):  ID of user the tokens belong to
-            except_token_id (str|None): access_token ID which should *not* be
-                deleted
-            device_id (str|None):  ID of device the tokens are associated with.
+            user_id:  ID of user the tokens belong to
+            except_token_id: access_token ID which should *not* be deleted
+            device_id:  ID of device the tokens are associated with.
                 If None, tokens associated with any device (or no device) will
                 be deleted
         Returns:
@@ -830,7 +848,7 @@ class AuthHandler(BaseHandler):
         )
 
     @defer.inlineCallbacks
-    def add_threepid(self, user_id, medium, address, validated_at):
+    def add_threepid(self, user_id: str, medium: str, address: str, validated_at: int):
         # check if medium has a valid value
         if medium not in ["email", "msisdn"]:
             raise SynapseError(
@@ -856,19 +874,20 @@ class AuthHandler(BaseHandler):
         )
 
     @defer.inlineCallbacks
-    def delete_threepid(self, user_id, medium, address, id_server=None):
+    def delete_threepid(
+        self, user_id: str, medium: str, address: str, id_server: Optional[str] = None
+    ):
         """Attempts to unbind the 3pid on the identity servers and deletes it
         from the local database.
 
         Args:
-            user_id (str)
-            medium (str)
-            address (str)
-            id_server (str|None): Use the given identity server when unbinding
+            user_id: ID of user to remove the 3pid from.
+            medium: The medium of the 3pid being removed: "email" or "msisdn".
+            address: The 3pid address to remove.
+            id_server: Use the given identity server when unbinding
                 any threepids. If None then will attempt to unbind using the
                 identity server specified when binding (if known).
 
-
         Returns:
             Deferred[bool]: Returns True if successfully unbound the 3pid on
             the identity server, False if identity server doesn't support the
@@ -887,17 +906,18 @@ class AuthHandler(BaseHandler):
         yield self.store.user_delete_threepid(user_id, medium, address)
         return result
 
-    def _save_session(self, session):
+    def _save_session(self, session: Dict[str, Any]) -> None:
+        """Update the last used time on the session to now and add it back to the session store."""
         # TODO: Persistent storage
         logger.debug("Saving session %s", session)
         session["last_used"] = self.hs.get_clock().time_msec()
         self.sessions[session["id"]] = session
 
-    def hash(self, password):
+    def hash(self, password: str):
         """Computes a secure hash of password.
 
         Args:
-            password (unicode): Password to hash.
+            password: Password to hash.
 
         Returns:
             Deferred(unicode): Hashed password.
@@ -914,12 +934,12 @@ class AuthHandler(BaseHandler):
 
         return defer_to_thread(self.hs.get_reactor(), _do_hash)
 
-    def validate_hash(self, password, stored_hash):
+    def validate_hash(self, password: str, stored_hash: bytes):
         """Validates that self.hash(password) == stored_hash.
 
         Args:
-            password (unicode): Password to hash.
-            stored_hash (bytes): Expected hash value.
+            password: Password to hash.
+            stored_hash: Expected hash value.
 
         Returns:
             Deferred(bool): Whether self.hash(password) == stored_hash.
@@ -1007,7 +1027,9 @@ class MacaroonGenerator(object):
 
     hs = attr.ib()
 
-    def generate_access_token(self, user_id, extra_caveats=None):
+    def generate_access_token(
+        self, user_id: str, extra_caveats: Optional[List[str]] = None
+    ) -> str:
         extra_caveats = extra_caveats or []
         macaroon = self._generate_base_macaroon(user_id)
         macaroon.add_first_party_caveat("type = access")
@@ -1020,16 +1042,9 @@ class MacaroonGenerator(object):
             macaroon.add_first_party_caveat(caveat)
         return macaroon.serialize()
 
-    def generate_short_term_login_token(self, user_id, duration_in_ms=(2 * 60 * 1000)):
-        """
-
-        Args:
-            user_id (unicode):
-            duration_in_ms (int):
-
-        Returns:
-            unicode
-        """
+    def generate_short_term_login_token(
+        self, user_id: str, duration_in_ms: int = (2 * 60 * 1000)
+    ) -> str:
         macaroon = self._generate_base_macaroon(user_id)
         macaroon.add_first_party_caveat("type = login")
         now = self.hs.get_clock().time_msec()
@@ -1037,12 +1052,12 @@ class MacaroonGenerator(object):
         macaroon.add_first_party_caveat("time < %d" % (expiry,))
         return macaroon.serialize()
 
-    def generate_delete_pusher_token(self, user_id):
+    def generate_delete_pusher_token(self, user_id: str) -> str:
         macaroon = self._generate_base_macaroon(user_id)
         macaroon.add_first_party_caveat("type = delete_pusher")
         return macaroon.serialize()
 
-    def _generate_base_macaroon(self, user_id):
+    def _generate_base_macaroon(self, user_id: str) -> pymacaroons.Macaroon:
         macaroon = pymacaroons.Macaroon(
             location=self.hs.config.server_name,
             identifier="key",
diff --git a/tox.ini b/tox.ini
index 7622aa19f1..8b4c37c2ee 100644
--- a/tox.ini
+++ b/tox.ini
@@ -185,6 +185,7 @@ commands = mypy \
             synapse/federation/federation_client.py \
             synapse/federation/sender \
             synapse/federation/transport \
+            synapse/handlers/auth.py \
             synapse/handlers/directory.py \
             synapse/handlers/presence.py \
             synapse/handlers/sync.py \
-- 
cgit 1.4.1
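
The patch mixes two annotation styles, presumably because inline variable annotations (PEP 526) require Python 3.6, newer than the oldest Python that Synapse supported at the time: function signatures get inline annotations, while assignments fall back to "# type:" comments. A simplified sketch of the convention (not Synapse's actual code):

    from typing import Any, Dict, Optional

    def get_session_data(
        sessions: Dict[str, Dict[str, Any]],
        session_id: str,
        key: str,
        default: Optional[Any] = None,
    ) -> Any:
        # Inline annotations on the signature; comment-style on assignments.
        serverdict = sessions.setdefault(session_id, {})  # type: Dict[str, Any]
        return serverdict.get(key, default)

Adding synapse/handlers/auth.py to the mypy file list in tox.ini opts the module into type checking, presumably run via the project's mypy tox environment (e.g. tox -e mypy).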


From beb19cf61a79e4bfb06b4b1fffd51388b64698ca Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 16 Mar 2020 12:16:30 +0000
Subject: Fix buggy condition in account validity handler (#7074)

---
 changelog.d/7074.bugfix              | 1 +
 synapse/handlers/account_validity.py | 6 +++++-
 2 files changed, 6 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7074.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7074.bugfix b/changelog.d/7074.bugfix
new file mode 100644
index 0000000000..38d7455971
--- /dev/null
+++ b/changelog.d/7074.bugfix
@@ -0,0 +1 @@
+Fix a bug causing account validity renewal emails to be sent even if the feature is turned off in some cases.
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 829f52eca1..590135d19c 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -44,7 +44,11 @@ class AccountValidityHandler(object):
 
         self._account_validity = self.hs.config.account_validity
 
-        if self._account_validity.renew_by_email_enabled and load_jinja2_templates:
+        if (
+            self._account_validity.enabled
+            and self._account_validity.renew_by_email_enabled
+            and load_jinja2_templates
+        ):
             # Don't do email-specific configuration if renewal by email is disabled.
             try:
                 app_name = self.hs.config.email_app_name
-- 
cgit 1.4.1
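
The fix strengthens a guard rather than changing behaviour elsewhere: previously the email-renewal machinery was configured whenever renew_by_email_enabled was set, even if account validity as a whole was disabled, which is what let renewal emails go out with the feature off. Reduced to its logic (attribute names taken from the diff; the surrounding class simplified away):

    from types import SimpleNamespace

    def should_configure_renewal_emails(av, templates_available: bool) -> bool:
        # Before the fix: av.renew_by_email_enabled and templates_available.
        # After: the feature as a whole must also be enabled.
        return av.enabled and av.renew_by_email_enabled and templates_available

    # The buggy case: renewal-by-email configured, but the feature globally off.
    av = SimpleNamespace(enabled=False, renew_by_email_enabled=True)
    assert not should_configure_renewal_emails(av, templates_available=True)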


From 7df04ca0e6c4140f4f30720db0b9b5148a865287 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 16 Mar 2020 22:31:47 +0000
Subject: Populate the room version from state events (#7070)

Fixes #7065

This is basically the same as https://github.com/matrix-org/synapse/pull/6847 except it tries to populate events from `state_events` rather than `current_state_events`, since the latter might have been cleared from the state of some rooms too early, leaving them with a `NULL` room version.
---
 changelog.d/7070.bugfix                            |  1 +
 .../delta/57/rooms_version_column_3.sql.postgres   | 39 ++++++++++++++++++++++
 .../delta/57/rooms_version_column_3.sql.sqlite     | 23 +++++++++++++
 3 files changed, 63 insertions(+)
 create mode 100644 changelog.d/7070.bugfix
 create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.postgres
 create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.sqlite

(limited to 'changelog.d')

diff --git a/changelog.d/7070.bugfix b/changelog.d/7070.bugfix
new file mode 100644
index 0000000000..9031927546
--- /dev/null
+++ b/changelog.d/7070.bugfix
@@ -0,0 +1 @@
+Repair a data-corruption issue which was introduced in Synapse 1.10, and fixed in Synapse 1.11, and which could cause `/sync` to return with 404 errors about missing events and unknown rooms.
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.postgres b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.postgres
new file mode 100644
index 0000000000..92aaadde0d
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.postgres
@@ -0,0 +1,39 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- When we first added the room_version column to the rooms table, it was populated from
+-- the current_state_events table. However, there was an issue causing a background
+-- update to clean up the current_state_events table for rooms where the server is no
+-- longer participating, before that column could be populated. Therefore, some rooms had
+-- a NULL room_version.
+
+-- The rooms_version_column_2.sql.* delta files were introduced to make the populating
+-- synchronous instead of running it in a background update, which fixed this issue.
+-- However, all of the instances of Synapse installed or updated in the meantime got
+-- their rooms table corrupted with NULL room_versions.
+
+-- This query fishes out the room versions from the create event using the state_events
+-- table instead of the current_state_events one, as the former still has all of the
+-- create events.
+
+UPDATE rooms SET room_version=(
+    SELECT COALESCE(json::json->'content'->>'room_version','1')
+    FROM state_events se INNER JOIN event_json ej USING (event_id)
+    WHERE se.room_id=rooms.room_id AND se.type='m.room.create' AND se.state_key=''
+    LIMIT 1
+) WHERE rooms.room_version IS NULL;
+
+-- see also rooms_version_column_3.sql.sqlite which has a copy of the above query, using
+-- sqlite syntax for the json extraction.
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.sqlite b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.sqlite
new file mode 100644
index 0000000000..e19dab97cb
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.sqlite
@@ -0,0 +1,23 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- see rooms_version_column_3.sql.postgres for details of what's going on here.
+
+UPDATE rooms SET room_version=(
+    SELECT COALESCE(json_extract(ej.json, '$.content.room_version'), '1')
+    FROM state_events se INNER JOIN event_json ej USING (event_id)
+    WHERE se.room_id=rooms.room_id AND se.type='m.room.create' AND se.state_key=''
+    LIMIT 1
+) WHERE rooms.room_version IS NULL;
-- 
cgit 1.4.1
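
Both delta files perform the same backfill and differ only in JSON-extraction syntax: json::json->'content'->>'room_version' on Postgres versus json_extract(ej.json, '$.content.room_version') on SQLite, each wrapped in COALESCE(..., '1') because create events from before room versioning carry no room_version field and such rooms default to version "1". The per-event extraction, expressed in Python as an illustrative sketch:

    import json

    def room_version_from_create_event(event_json: str) -> str:
        # Mirrors COALESCE(<json extraction>, '1') in the deltas: a missing
        # room_version in the m.room.create content means version "1".
        content = json.loads(event_json).get("content", {})
        return content.get("room_version", "1")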


From 6a35046363a6f5d41199256c80eef4ea7e385986 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Tue, 17 Mar 2020 11:25:01 +0000
Subject: Revert "Add options to disable setting profile info for prevent
 changes. (#7053)"

This reverts commit 54dd28621b070ca67de9f773fe9a89e1f4dc19da, reversing
changes made to 6640460d054e8f4444046a34bdf638921b31c01e.
---
 changelog.d/7053.feature                   |   1 -
 docs/sample_config.yaml                    |  13 --
 synapse/config/registration.py             |  17 --
 synapse/handlers/profile.py                |  16 --
 synapse/rest/client/v2_alpha/account.py    |  16 --
 tests/handlers/test_profile.py             |  33 +---
 tests/rest/client/v2_alpha/test_account.py | 303 -----------------------------
 7 files changed, 1 insertion(+), 398 deletions(-)
 delete mode 100644 changelog.d/7053.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7053.feature b/changelog.d/7053.feature
deleted file mode 100644
index 00f47b2a14..0000000000
--- a/changelog.d/7053.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add options to prevent users from changing their profile or associated 3PIDs.
\ No newline at end of file
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 91eff4c8ad..2ff0dd05a2 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1057,19 +1057,6 @@ account_threepid_delegates:
     #email: https://example.com     # Delegate email sending to example.com
     #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
 
-# If disabled, don't let users set their own display names/avatars
-# (unless they are a server admin) other than for the very first time.
-# Useful when provisioning users based on the contents of a 3rd party
-# directory and to avoid ambiguities.
-#
-#enable_set_displayname: true
-#enable_set_avatar_url: true
-
-# If false, stop users from trying to change the 3PIDs associated with
-# their accounts.
-#
-#enable_3pid_changes: true
-
 # Users who register on this homeserver will automatically be joined
 # to these rooms
 #
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index ee737eb40d..9bb3beedbc 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -129,10 +129,6 @@ class RegistrationConfig(Config):
                 raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,))
         self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
 
-        self.enable_set_displayname = config.get("enable_set_displayname", True)
-        self.enable_set_avatar_url = config.get("enable_set_avatar_url", True)
-        self.enable_3pid_changes = config.get("enable_3pid_changes", True)
-
         self.disable_msisdn_registration = config.get(
             "disable_msisdn_registration", False
         )
@@ -334,19 +330,6 @@ class RegistrationConfig(Config):
             #email: https://example.com     # Delegate email sending to example.com
             #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
 
-        # If disabled, don't let users set their own display names/avatars
-        # (unless they are a server admin) other than for the very first time.
-        # Useful when provisioning users based on the contents of a 3rd party
-        # directory and to avoid ambiguities.
-        #
-        #enable_set_displayname: true
-        #enable_set_avatar_url: true
-
-        # If false, stop users from trying to change the 3PIDs associated with
-        # their accounts.
-        #
-        #enable_3pid_changes: true
-
         # Users who register on this homeserver will automatically be joined
         # to these rooms
         #
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 6aa1c0f5e0..50ce0c585b 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -157,15 +157,6 @@ class BaseProfileHandler(BaseHandler):
         if not by_admin and target_user != requester.user:
             raise AuthError(400, "Cannot set another user's displayname")
 
-        if not by_admin and not self.hs.config.enable_set_displayname:
-            profile = yield self.store.get_profileinfo(target_user.localpart)
-            if profile.display_name:
-                raise SynapseError(
-                    400,
-                    "Changing display name is disabled on this server",
-                    Codes.FORBIDDEN,
-                )
-
         if len(new_displayname) > MAX_DISPLAYNAME_LEN:
             raise SynapseError(
                 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,)
@@ -227,13 +218,6 @@ class BaseProfileHandler(BaseHandler):
         if not by_admin and target_user != requester.user:
             raise AuthError(400, "Cannot set another user's avatar_url")
 
-        if not by_admin and not self.hs.config.enable_set_avatar_url:
-            profile = yield self.store.get_profileinfo(target_user.localpart)
-            if profile.avatar_url:
-                raise SynapseError(
-                    400, "Changing avatar is disabled on this server", Codes.FORBIDDEN
-                )
-
         if len(new_avatar_url) > MAX_AVATAR_URL_LEN:
             raise SynapseError(
                 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,)
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index e40136f2f3..dc837d6c75 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -599,11 +599,6 @@ class ThreepidRestServlet(RestServlet):
         return 200, {"threepids": threepids}
 
     async def on_POST(self, request):
-        if not self.hs.config.enable_3pid_changes:
-            raise SynapseError(
-                400, "3PID changes are disabled on this server", Codes.FORBIDDEN
-            )
-
         requester = await self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
         body = parse_json_object_from_request(request)
@@ -648,11 +643,6 @@ class ThreepidAddRestServlet(RestServlet):
 
     @interactive_auth_handler
     async def on_POST(self, request):
-        if not self.hs.config.enable_3pid_changes:
-            raise SynapseError(
-                400, "3PID changes are disabled on this server", Codes.FORBIDDEN
-            )
-
         requester = await self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
         body = parse_json_object_from_request(request)
@@ -748,16 +738,10 @@ class ThreepidDeleteRestServlet(RestServlet):
 
     def __init__(self, hs):
         super(ThreepidDeleteRestServlet, self).__init__()
-        self.hs = hs
         self.auth = hs.get_auth()
         self.auth_handler = hs.get_auth_handler()
 
     async def on_POST(self, request):
-        if not self.hs.config.enable_3pid_changes:
-            raise SynapseError(
-                400, "3PID changes are disabled on this server", Codes.FORBIDDEN
-            )
-
         body = parse_json_object_from_request(request)
         assert_params_in_dict(body, ["medium", "address"])
 
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index e600b9777b..d60c124eec 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -19,7 +19,7 @@ from mock import Mock, NonCallableMock
 from twisted.internet import defer
 
 import synapse.types
-from synapse.api.errors import AuthError, SynapseError
+from synapse.api.errors import AuthError
 from synapse.handlers.profile import MasterProfileHandler
 from synapse.types import UserID
 
@@ -70,7 +70,6 @@ class ProfileTestCase(unittest.TestCase):
         yield self.store.create_profile(self.frank.localpart)
 
         self.handler = hs.get_profile_handler()
-        self.hs = hs
 
     @defer.inlineCallbacks
     def test_get_my_name(self):
@@ -91,19 +90,6 @@ class ProfileTestCase(unittest.TestCase):
             "Frank Jr.",
         )
 
-    @defer.inlineCallbacks
-    def test_set_my_name_if_disabled(self):
-        self.hs.config.enable_set_displayname = False
-
-        # Set first displayname is allowed, if displayname is null
-        yield self.store.set_profile_displayname(self.frank.localpart, "Frank")
-
-        d = self.handler.set_displayname(
-            self.frank, synapse.types.create_requester(self.frank), "Frank Jr."
-        )
-
-        yield self.assertFailure(d, SynapseError)
-
     @defer.inlineCallbacks
     def test_set_my_name_noauth(self):
         d = self.handler.set_displayname(
@@ -161,20 +147,3 @@ class ProfileTestCase(unittest.TestCase):
             (yield self.store.get_profile_avatar_url(self.frank.localpart)),
             "http://my.server/pic.gif",
         )
-
-    @defer.inlineCallbacks
-    def test_set_my_avatar_if_disabled(self):
-        self.hs.config.enable_set_avatar_url = False
-
-        # Set first time avatar is allowed, if avatar is null
-        yield self.store.set_profile_avatar_url(
-            self.frank.localpart, "http://my.server/me.png"
-        )
-
-        d = self.handler.set_avatar_url(
-            self.frank,
-            synapse.types.create_requester(self.frank),
-            "http://my.server/pic.gif",
-        )
-
-        yield self.assertFailure(d, SynapseError)
diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py
index 99cc9163f3..c3facc00eb 100644
--- a/tests/rest/client/v2_alpha/test_account.py
+++ b/tests/rest/client/v2_alpha/test_account.py
@@ -24,7 +24,6 @@ import pkg_resources
 
 import synapse.rest.admin
 from synapse.api.constants import LoginType, Membership
-from synapse.api.errors import Codes
 from synapse.rest.client.v1 import login, room
 from synapse.rest.client.v2_alpha import account, register
 
@@ -326,305 +325,3 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
         )
         self.render(request)
         self.assertEqual(request.code, 200)
-
-
-class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
-
-    servlets = [
-        account.register_servlets,
-        login.register_servlets,
-        synapse.rest.admin.register_servlets_for_client_rest_resource,
-    ]
-
-    def make_homeserver(self, reactor, clock):
-        config = self.default_config()
-
-        # Email config.
-        self.email_attempts = []
-
-        def sendmail(smtphost, from_addr, to_addrs, msg, **kwargs):
-            self.email_attempts.append(msg)
-            return
-
-        config["email"] = {
-            "enable_notifs": False,
-            "template_dir": os.path.abspath(
-                pkg_resources.resource_filename("synapse", "res/templates")
-            ),
-            "smtp_host": "127.0.0.1",
-            "smtp_port": 20,
-            "require_transport_security": False,
-            "smtp_user": None,
-            "smtp_pass": None,
-            "notif_from": "test@example.com",
-        }
-        config["public_baseurl"] = "https://example.com"
-
-        self.hs = self.setup_test_homeserver(config=config, sendmail=sendmail)
-        return self.hs
-
-    def prepare(self, reactor, clock, hs):
-        self.store = hs.get_datastore()
-
-        self.user_id = self.register_user("kermit", "test")
-        self.user_id_tok = self.login("kermit", "test")
-        self.email = "test@example.com"
-        self.url_3pid = b"account/3pid"
-
-    def test_add_email(self):
-        """Test add mail to profile
-        """
-        client_secret = "foobar"
-        session_id = self._request_token(self.email, client_secret)
-
-        self.assertEquals(len(self.email_attempts), 1)
-        link = self._get_link_from_email()
-
-        self._validate_token(link)
-
-        request, channel = self.make_request(
-            "POST",
-            b"/_matrix/client/unstable/account/3pid/add",
-            {
-                "client_secret": client_secret,
-                "sid": session_id,
-                "auth": {
-                    "type": "m.login.password",
-                    "user": self.user_id,
-                    "password": "test",
-                },
-            },
-            access_token=self.user_id_tok,
-        )
-
-        self.render(request)
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-
-        # Get user
-        request, channel = self.make_request(
-            "GET", self.url_3pid, access_token=self.user_id_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
-        self.assertEqual(self.email, channel.json_body["threepids"][0]["address"])
-
-    def test_add_email_if_disabled(self):
-        """Test add mail to profile if disabled
-        """
-        self.hs.config.enable_3pid_changes = False
-
-        client_secret = "foobar"
-        session_id = self._request_token(self.email, client_secret)
-
-        self.assertEquals(len(self.email_attempts), 1)
-        link = self._get_link_from_email()
-
-        self._validate_token(link)
-
-        request, channel = self.make_request(
-            "POST",
-            b"/_matrix/client/unstable/account/3pid/add",
-            {
-                "client_secret": client_secret,
-                "sid": session_id,
-                "auth": {
-                    "type": "m.login.password",
-                    "user": self.user_id,
-                    "password": "test",
-                },
-            },
-            access_token=self.user_id_tok,
-        )
-        self.render(request)
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
-
-        # Get user
-        request, channel = self.make_request(
-            "GET", self.url_3pid, access_token=self.user_id_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertFalse(channel.json_body["threepids"])
-
-    def test_delete_email(self):
-        """Test delete mail from profile
-        """
-        # Add a threepid
-        self.get_success(
-            self.store.user_add_threepid(
-                user_id=self.user_id,
-                medium="email",
-                address=self.email,
-                validated_at=0,
-                added_at=0,
-            )
-        )
-
-        request, channel = self.make_request(
-            "POST",
-            b"account/3pid/delete",
-            {"medium": "email", "address": self.email},
-            access_token=self.user_id_tok,
-        )
-        self.render(request)
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-
-        # Get user
-        request, channel = self.make_request(
-            "GET", self.url_3pid, access_token=self.user_id_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertFalse(channel.json_body["threepids"])
-
-    def test_delete_email_if_disabled(self):
-        """Test delete mail from profile if disabled
-        """
-        self.hs.config.enable_3pid_changes = False
-
-        # Add a threepid
-        self.get_success(
-            self.store.user_add_threepid(
-                user_id=self.user_id,
-                medium="email",
-                address=self.email,
-                validated_at=0,
-                added_at=0,
-            )
-        )
-
-        request, channel = self.make_request(
-            "POST",
-            b"account/3pid/delete",
-            {"medium": "email", "address": self.email},
-            access_token=self.user_id_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
-
-        # Get user
-        request, channel = self.make_request(
-            "GET", self.url_3pid, access_token=self.user_id_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
-        self.assertEqual(self.email, channel.json_body["threepids"][0]["address"])
-
-    def test_cant_add_email_without_clicking_link(self):
-        """Test that we do actually need to click the link in the email
-        """
-        client_secret = "foobar"
-        session_id = self._request_token(self.email, client_secret)
-
-        self.assertEquals(len(self.email_attempts), 1)
-
-        # Attempt to add email without clicking the link
-        request, channel = self.make_request(
-            "POST",
-            b"/_matrix/client/unstable/account/3pid/add",
-            {
-                "client_secret": client_secret,
-                "sid": session_id,
-                "auth": {
-                    "type": "m.login.password",
-                    "user": self.user_id,
-                    "password": "test",
-                },
-            },
-            access_token=self.user_id_tok,
-        )
-        self.render(request)
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"])
-
-        # Get user
-        request, channel = self.make_request(
-            "GET", self.url_3pid, access_token=self.user_id_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertFalse(channel.json_body["threepids"])
-
-    def test_no_valid_token(self):
-        """Test that we do actually need to request a token and can't just
-        make a session up.
-        """
-        client_secret = "foobar"
-        session_id = "weasle"
-
-        # Attempt to add email without even requesting an email
-        request, channel = self.make_request(
-            "POST",
-            b"/_matrix/client/unstable/account/3pid/add",
-            {
-                "client_secret": client_secret,
-                "sid": session_id,
-                "auth": {
-                    "type": "m.login.password",
-                    "user": self.user_id,
-                    "password": "test",
-                },
-            },
-            access_token=self.user_id_tok,
-        )
-        self.render(request)
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"])
-
-        # Get user
-        request, channel = self.make_request(
-            "GET", self.url_3pid, access_token=self.user_id_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertFalse(channel.json_body["threepids"])
-
-    def _request_token(self, email, client_secret):
-        request, channel = self.make_request(
-            "POST",
-            b"account/3pid/email/requestToken",
-            {"client_secret": client_secret, "email": email, "send_attempt": 1},
-        )
-        self.render(request)
-        self.assertEquals(200, channel.code, channel.result)
-
-        return channel.json_body["sid"]
-
-    def _validate_token(self, link):
-        # Remove the host
-        path = link.replace("https://example.com", "")
-
-        request, channel = self.make_request("GET", path, shorthand=False)
-        self.render(request)
-        self.assertEquals(200, channel.code, channel.result)
-
-    def _get_link_from_email(self):
-        assert self.email_attempts, "No emails have been sent"
-
-        raw_msg = self.email_attempts[-1].decode("UTF-8")
-        mail = Parser().parsestr(raw_msg)
-
-        text = None
-        for part in mail.walk():
-            if part.get_content_type() == "text/plain":
-                text = part.get_payload(decode=True).decode("UTF-8")
-                break
-
-        if not text:
-            self.fail("Could not find text portion of email to parse")
-
-        match = re.search(r"https://example.com\S+", text)
-        assert match, "Could not find link in email"
-
-        return match.group(0)
-- 
cgit 1.4.1


From 60724c46b7dc5300243fd97d5a485564b3e00afe Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Tue, 17 Mar 2020 07:37:04 -0400
Subject: Remove special casing of `m.room.aliases` events (#7034)

---
 changelog.d/7034.removal               |  1 +
 synapse/handlers/room.py               | 16 +------------
 synapse/rest/client/v1/room.py         | 12 ----------
 tests/rest/admin/test_admin.py         |  7 ++++++
 tests/rest/client/v1/test_directory.py | 41 +++++++++++++++++++++-------------
 5 files changed, 35 insertions(+), 42 deletions(-)
 create mode 100644 changelog.d/7034.removal

(limited to 'changelog.d')

diff --git a/changelog.d/7034.removal b/changelog.d/7034.removal
new file mode 100644
index 0000000000..be8d20e14f
--- /dev/null
+++ b/changelog.d/7034.removal
@@ -0,0 +1 @@
+Remove special handling of aliases events from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260) added in v1.10.0rc1.
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 8ee870f0bb..f580ab2e9f 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -292,16 +292,6 @@ class RoomCreationHandler(BaseHandler):
             except AuthError as e:
                 logger.warning("Unable to update PLs in old room: %s", e)
 
-        new_pl_content = copy_power_levels_contents(old_room_pl_state.content)
-
-        # pre-msc2260 rooms may not have the right setting for aliases. If no other
-        # value is set, set it now.
-        events_default = new_pl_content.get("events_default", 0)
-        new_pl_content.setdefault("events", {}).setdefault(
-            EventTypes.Aliases, events_default
-        )
-
-        logger.debug("Setting correct PLs in new room to %s", new_pl_content)
         yield self.event_creation_handler.create_and_send_nonmember_event(
             requester,
             {
@@ -309,7 +299,7 @@ class RoomCreationHandler(BaseHandler):
                 "state_key": "",
                 "room_id": new_room_id,
                 "sender": requester.user.to_string(),
-                "content": new_pl_content,
+                "content": old_room_pl_state.content,
             },
             ratelimit=False,
         )
@@ -814,10 +804,6 @@ class RoomCreationHandler(BaseHandler):
                     EventTypes.RoomHistoryVisibility: 100,
                     EventTypes.CanonicalAlias: 50,
                     EventTypes.RoomAvatar: 50,
-                    # MSC2260: Allow everybody to send alias events by default
-                    # This will be reudundant on pre-MSC2260 rooms, since the
-                    # aliases event is special-cased.
-                    EventTypes.Aliases: 0,
                     EventTypes.Tombstone: 100,
                     EventTypes.ServerACL: 100,
                 },
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 64f51406fb..bffd43de5f 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -189,12 +189,6 @@ class RoomStateEventRestServlet(TransactionRestServlet):
 
         content = parse_json_object_from_request(request)
 
-        if event_type == EventTypes.Aliases:
-            # MSC2260
-            raise SynapseError(
-                400, "Cannot send m.room.aliases events via /rooms/{room_id}/state"
-            )
-
         event_dict = {
             "type": event_type,
             "content": content,
@@ -242,12 +236,6 @@ class RoomSendEventRestServlet(TransactionRestServlet):
         requester = await self.auth.get_user_by_req(request, allow_guest=True)
         content = parse_json_object_from_request(request)
 
-        if event_type == EventTypes.Aliases:
-            # MSC2260
-            raise SynapseError(
-                400, "Cannot send m.room.aliases events via /rooms/{room_id}/send"
-            )
-
         event_dict = {
             "type": event_type,
             "content": content,
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index e5984aaad8..0342aed416 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -868,6 +868,13 @@ class RoomTestCase(unittest.HomeserverTestCase):
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
 
         # Set this new alias as the canonical alias for this room
+        self.helper.send_state(
+            room_id,
+            "m.room.aliases",
+            {"aliases": [test_alias]},
+            tok=self.admin_user_tok,
+            state_key="test",
+        )
         self.helper.send_state(
             room_id,
             "m.room.canonical_alias",
diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py
index 914cf54927..633b7dbda0 100644
--- a/tests/rest/client/v1/test_directory.py
+++ b/tests/rest/client/v1/test_directory.py
@@ -51,30 +51,26 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         self.user = self.register_user("user", "test")
         self.user_tok = self.login("user", "test")
 
-    def test_cannot_set_alias_via_state_event(self):
-        self.ensure_user_joined_room()
-        url = "/_matrix/client/r0/rooms/%s/state/m.room.aliases/%s" % (
-            self.room_id,
-            self.hs.hostname,
-        )
-
-        data = {"aliases": [self.random_alias(5)]}
-        request_data = json.dumps(data)
-
-        request, channel = self.make_request(
-            "PUT", url, request_data, access_token=self.user_tok
-        )
-        self.render(request)
-        self.assertEqual(channel.code, 400, channel.result)
+    def test_state_event_not_in_room(self):
+        self.ensure_user_left_room()
+        self.set_alias_via_state_event(403)
 
     def test_directory_endpoint_not_in_room(self):
         self.ensure_user_left_room()
         self.set_alias_via_directory(403)
 
+    def test_state_event_in_room_too_long(self):
+        self.ensure_user_joined_room()
+        self.set_alias_via_state_event(400, alias_length=256)
+
     def test_directory_in_room_too_long(self):
         self.ensure_user_joined_room()
         self.set_alias_via_directory(400, alias_length=256)
 
+    def test_state_event_in_room(self):
+        self.ensure_user_joined_room()
+        self.set_alias_via_state_event(200)
+
     def test_directory_in_room(self):
         self.ensure_user_joined_room()
         self.set_alias_via_directory(200)
@@ -106,6 +102,21 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         self.render(request)
         self.assertEqual(channel.code, 200, channel.result)
 
+    def set_alias_via_state_event(self, expected_code, alias_length=5):
+        url = "/_matrix/client/r0/rooms/%s/state/m.room.aliases/%s" % (
+            self.room_id,
+            self.hs.hostname,
+        )
+
+        data = {"aliases": [self.random_alias(alias_length)]}
+        request_data = json.dumps(data)
+
+        request, channel = self.make_request(
+            "PUT", url, request_data, access_token=self.user_tok
+        )
+        self.render(request)
+        self.assertEqual(channel.code, expected_code, channel.result)
+
     def set_alias_via_directory(self, expected_code, alias_length=5):
         url = "/_matrix/client/r0/directory/room/%s" % self.random_alias(alias_length)
         data = {"room_id": self.room_id}
-- 
cgit 1.4.1


From 7581d30e9f939263f9ab07644f269b6e7cd2d226 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Tue, 17 Mar 2020 08:04:49 -0400
Subject: Remove unused federation endpoint (`query_auth`) (#7026)

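As part of removing the endpoint, `_check_sigs_and_hash_and_fetch` moves from
`FederationBase` into `FederationClient`, its only remaining caller. Its
per-PDU recovery order is unchanged; an illustrative outline (stand-in names,
not Synapse APIs):

```python
# Sketch of the per-PDU recovery order in _check_sigs_and_hash_and_fetch.
# Everything here is a stand-in for illustration, not a Synapse API.
from typing import Callable, Optional


def recover_pdu(
    pdu_id: str,
    sig_ok: bool,                 # result of the signature/hash check
    local_db: dict,               # event_id -> event we already have
    received_from: str,           # server we got this PDU from
    pdu_origin: str,              # server the PDU claims to originate from
    fetch: Callable[[str], Optional[dict]],  # ask pdu_origin for the event
) -> Optional[dict]:
    if sig_ok:
        return {"event_id": pdu_id}          # 1. checks passed: use as-is
    if pdu_id in local_db:
        return local_db[pdu_id]              # 2. fall back to our local copy
    if pdu_origin != received_from:
        return fetch(pdu_id)                 # 3. re-fetch from the origin
    return None                              # all recovery steps failed
```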
---
 changelog.d/7026.removal                |  1 +
 synapse/federation/federation_base.py   | 82 ---------------------------------
 synapse/federation/federation_client.py | 80 +++++++++++++++++++++++++++++++-
 synapse/federation/federation_server.py | 51 --------------------
 synapse/federation/transport/server.py  | 12 -----
 5 files changed, 80 insertions(+), 146 deletions(-)
 create mode 100644 changelog.d/7026.removal

(limited to 'changelog.d')

diff --git a/changelog.d/7026.removal b/changelog.d/7026.removal
new file mode 100644
index 0000000000..4c8c563bb0
--- /dev/null
+++ b/changelog.d/7026.removal
@@ -0,0 +1 @@
+Remove the unused query_auth federation endpoint per MSC2451.
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 190ea1fba1..5c991e5412 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -39,10 +39,8 @@ from synapse.logging.context import (
     LoggingContext,
     PreserveLoggingContext,
     make_deferred_yieldable,
-    preserve_fn,
 )
 from synapse.types import JsonDict, get_domain_from_id
-from synapse.util import unwrapFirstError
 
 logger = logging.getLogger(__name__)
 
@@ -57,86 +55,6 @@ class FederationBase(object):
         self.store = hs.get_datastore()
         self._clock = hs.get_clock()
 
-    @defer.inlineCallbacks
-    def _check_sigs_and_hash_and_fetch(
-        self,
-        origin: str,
-        pdus: List[EventBase],
-        room_version: str,
-        outlier: bool = False,
-        include_none: bool = False,
-    ):
-        """Takes a list of PDUs and checks the signatures and hashs of each
-        one. If a PDU fails its signature check then we check if we have it in
-        the database and if not then request if from the originating server of
-        that PDU.
-
-        If a PDU fails its content hash check then it is redacted.
-
-        The given list of PDUs are not modified, instead the function returns
-        a new list.
-
-        Args:
-            origin
-            pdu
-            room_version
-            outlier: Whether the events are outliers or not
-            include_none: Whether to include None in the returned list
-                for events that have failed their checks
-
-        Returns:
-            Deferred : A list of PDUs that have valid signatures and hashes.
-        """
-        deferreds = self._check_sigs_and_hashes(room_version, pdus)
-
-        @defer.inlineCallbacks
-        def handle_check_result(pdu: EventBase, deferred: Deferred):
-            try:
-                res = yield make_deferred_yieldable(deferred)
-            except SynapseError:
-                res = None
-
-            if not res:
-                # Check local db.
-                res = yield self.store.get_event(
-                    pdu.event_id, allow_rejected=True, allow_none=True
-                )
-
-            if not res and pdu.origin != origin:
-                try:
-                    # This should not exist in the base implementation, until
-                    # this is fixed, ignore it for typing. See issue #6997.
-                    res = yield defer.ensureDeferred(
-                        self.get_pdu(  # type: ignore
-                            destinations=[pdu.origin],
-                            event_id=pdu.event_id,
-                            room_version=room_version,
-                            outlier=outlier,
-                            timeout=10000,
-                        )
-                    )
-                except SynapseError:
-                    pass
-
-            if not res:
-                logger.warning(
-                    "Failed to find copy of %s with valid signature", pdu.event_id
-                )
-
-            return res
-
-        handle = preserve_fn(handle_check_result)
-        deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
-
-        valid_pdus = yield make_deferred_yieldable(
-            defer.gatherResults(deferreds2, consumeErrors=True)
-        ).addErrback(unwrapFirstError)
-
-        if include_none:
-            return valid_pdus
-        else:
-            return [p for p in valid_pdus if p]
-
     def _check_sigs_and_hash(self, room_version: str, pdu: EventBase) -> Deferred:
         return make_deferred_yieldable(
             self._check_sigs_and_hashes(room_version, [pdu])[0]
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index b5538bc07a..8c6b839478 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -33,6 +33,7 @@ from typing import (
 from prometheus_client import Counter
 
 from twisted.internet import defer
+from twisted.internet.defer import Deferred
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import (
@@ -51,7 +52,7 @@ from synapse.api.room_versions import (
 )
 from synapse.events import EventBase, builder
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
-from synapse.logging.context import make_deferred_yieldable
+from synapse.logging.context import make_deferred_yieldable, preserve_fn
 from synapse.logging.utils import log_function
 from synapse.types import JsonDict
 from synapse.util import unwrapFirstError
@@ -345,6 +346,83 @@ class FederationClient(FederationBase):
 
         return state_event_ids, auth_event_ids
 
+    async def _check_sigs_and_hash_and_fetch(
+        self,
+        origin: str,
+        pdus: List[EventBase],
+        room_version: str,
+        outlier: bool = False,
+        include_none: bool = False,
+    ) -> List[EventBase]:
+        """Takes a list of PDUs and checks the signatures and hashs of each
+        one. If a PDU fails its signature check then we check if we have it in
+        the database and if not then request if from the originating server of
+        that PDU.
+
+        If a PDU fails its content hash check then it is redacted.
+
+        The given list of PDUs is not modified; instead the function returns
+        a new list.
+
+        Args:
+            origin
+            pdu
+            room_version
+            outlier: Whether the events are outliers or not
+            include_none: Whether to include None in the returned list
+                for events that have failed their checks
+
+        Returns:
+            A list of PDUs that have valid signatures and hashes.
+        """
+        deferreds = self._check_sigs_and_hashes(room_version, pdus)
+
+        @defer.inlineCallbacks
+        def handle_check_result(pdu: EventBase, deferred: Deferred):
+            try:
+                res = yield make_deferred_yieldable(deferred)
+            except SynapseError:
+                res = None
+
+            if not res:
+                # Check local db.
+                res = yield self.store.get_event(
+                    pdu.event_id, allow_rejected=True, allow_none=True
+                )
+
+            if not res and pdu.origin != origin:
+                try:
+                    res = yield defer.ensureDeferred(
+                        self.get_pdu(
+                            destinations=[pdu.origin],
+                            event_id=pdu.event_id,
+                            room_version=room_version,  # type: ignore
+                            outlier=outlier,
+                            timeout=10000,
+                        )
+                    )
+                except SynapseError:
+                    pass
+
+            if not res:
+                logger.warning(
+                    "Failed to find copy of %s with valid signature", pdu.event_id
+                )
+
+            return res
+
+        handle = preserve_fn(handle_check_result)
+        deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
+
+        valid_pdus = await make_deferred_yieldable(
+            defer.gatherResults(deferreds2, consumeErrors=True)
+        ).addErrback(unwrapFirstError)
+
+        if include_none:
+            return valid_pdus
+        else:
+            return [p for p in valid_pdus if p]
+
     async def get_event_auth(self, destination, room_id, event_id):
         res = await self.transport_layer.get_event_auth(destination, room_id, event_id)
 
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 7f9da49326..275b9c99d7 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -470,57 +470,6 @@ class FederationServer(FederationBase):
             res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
         return 200, res
 
-    async def on_query_auth_request(self, origin, content, room_id, event_id):
-        """
-        Content is a dict with keys::
-            auth_chain (list): A list of events that give the auth chain.
-            missing (list): A list of event_ids indicating what the other
-              side (`origin`) think we're missing.
-            rejects (dict): A mapping from event_id to a 2-tuple of reason
-              string and a proof (or None) of why the event was rejected.
-              The keys of this dict give the list of events the `origin` has
-              rejected.
-
-        Args:
-            origin (str)
-            content (dict)
-            event_id (str)
-
-        Returns:
-            Deferred: Results in `dict` with the same format as `content`
-        """
-        with (await self._server_linearizer.queue((origin, room_id))):
-            origin_host, _ = parse_server_name(origin)
-            await self.check_server_matches_acl(origin_host, room_id)
-
-            room_version = await self.store.get_room_version(room_id)
-
-            auth_chain = [
-                event_from_pdu_json(e, room_version) for e in content["auth_chain"]
-            ]
-
-            signed_auth = await self._check_sigs_and_hash_and_fetch(
-                origin, auth_chain, outlier=True, room_version=room_version.identifier
-            )
-
-            ret = await self.handler.on_query_auth(
-                origin,
-                event_id,
-                room_id,
-                signed_auth,
-                content.get("rejects", []),
-                content.get("missing", []),
-            )
-
-            time_now = self._clock.time_msec()
-            send_content = {
-                "auth_chain": [e.get_pdu_json(time_now) for e in ret["auth_chain"]],
-                "rejects": ret.get("rejects", []),
-                "missing": ret.get("missing", []),
-            }
-
-        return 200, send_content
-
     @log_function
     def on_query_client_keys(self, origin, content):
         return self.on_query_request("client_keys", content)
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 92a9ae2320..af4595498c 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -643,17 +643,6 @@ class FederationClientKeysClaimServlet(BaseFederationServlet):
         return 200, response
 
 
-class FederationQueryAuthServlet(BaseFederationServlet):
-    PATH = "/query_auth/(?P[^/]*)/(?P[^/]*)"
-
-    async def on_POST(self, origin, content, query, context, event_id):
-        new_content = await self.handler.on_query_auth_request(
-            origin, content, context, event_id
-        )
-
-        return 200, new_content
-
-
 class FederationGetMissingEventsServlet(BaseFederationServlet):
     # TODO(paul): Why does this path alone end with "/?" optional?
     PATH = "/get_missing_events/(?P[^/]*)/?"
@@ -1412,7 +1401,6 @@ FEDERATION_SERVLET_CLASSES = (
     FederationV2SendLeaveServlet,
     FederationV1InviteServlet,
     FederationV2InviteServlet,
-    FederationQueryAuthServlet,
     FederationGetMissingEventsServlet,
     FederationEventAuthServlet,
     FederationClientKeysQueryServlet,
-- 
cgit 1.4.1


From 5e477c1debfd932ced56ec755204d6ead4ce8ec8 Mon Sep 17 00:00:00 2001
From: The Stranjer <791672+TheStranjer@users.noreply.github.com>
Date: Tue, 17 Mar 2020 09:29:09 -0400
Subject: Set charset to utf-8 when adding headers for certain text content
 types (#7044)

Fixes #7043
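The fix is a small amount of header logic; a self-contained sketch of the
behaviour the patch introduces (the function name is illustrative, the list
mirrors the one added below):

```python
# Text-like content types with no explicit charset get UTF-8 appended;
# anything else, including types carrying their own charset, is untouched.
TEXT_CONTENT_TYPES = [
    "text/css", "text/csv", "text/html", "text/calendar", "text/plain",
    "text/javascript", "application/json", "application/ld+json",
    "application/rtf", "image/svg+xml", "text/xml",
]


def content_type_header(media_type: str) -> str:
    # "text/css" matches the list, but "text/css; charset=UTF-16" does not,
    # so a charset supplied by the uploader is always respected.
    if media_type.lower() in TEXT_CONTENT_TYPES:
        return media_type + "; charset=UTF-8"
    return media_type


assert content_type_header("text/plain") == "text/plain; charset=UTF-8"
assert content_type_header("text/css; charset=UTF-16") == "text/css; charset=UTF-16"
assert content_type_header("application/octet-stream") == "application/octet-stream"
```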
---
 changelog.d/7044.bugfix        |  1 +
 synapse/rest/media/v1/_base.py | 25 ++++++++++++++++++++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7044.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7044.bugfix b/changelog.d/7044.bugfix
new file mode 100644
index 0000000000..790088ddb4
--- /dev/null
+++ b/changelog.d/7044.bugfix
@@ -0,0 +1 @@
+Fix a bug that caused UTF-8 text files to be rendered incorrectly when loaded from media. Contributed by @TheStranjer.
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index ba28dd089d..503f2bed98 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -30,6 +30,22 @@ from synapse.util.stringutils import is_ascii
 
 logger = logging.getLogger(__name__)
 
+# list all text content types that will have the charset default to UTF-8 when
+# none is given
+TEXT_CONTENT_TYPES = [
+    "text/css",
+    "text/csv",
+    "text/html",
+    "text/calendar",
+    "text/plain",
+    "text/javascript",
+    "application/json",
+    "application/ld+json",
+    "application/rtf",
+    "image/svg+xml",
+    "text/xml",
+]
+
 
 def parse_media_id(request):
     try:
@@ -96,7 +112,14 @@ def add_file_headers(request, media_type, file_size, upload_name):
     def _quote(x):
         return urllib.parse.quote(x.encode("utf-8"))
 
-    request.setHeader(b"Content-Type", media_type.encode("UTF-8"))
+    # Default to a UTF-8 charset for text content types.
+    # e.g. use UTF-8 for 'text/css' but not for 'text/css; charset=UTF-16'
+    if media_type.lower() in TEXT_CONTENT_TYPES:
+        content_type = media_type + "; charset=UTF-8"
+    else:
+        content_type = media_type
+
+    request.setHeader(b"Content-Type", content_type.encode("UTF-8"))
     if upload_name:
         # RFC6266 section 4.1 [1] defines both `filename` and `filename*`.
         #
-- 
cgit 1.4.1


From c37db0211e36cd298426ff8811e547b0acd10bf4 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 17 Mar 2020 22:32:25 +0100
Subject: Share SSL contexts for non-federation requests (#7094)

Extends #5794 etc. to the SimpleHttpClient so that it also applies to non-federation requests.

Fixes #7092.
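The core idea, condensed into a sketch (assuming Twisted's
`CertificateOptions` and `platformTrust`; the real `RegularPolicyForHTTPS`
below additionally wires up an info callback and implements
`IPolicyForHTTPS`):

```python
# Sketch: build the OpenSSL context once, at construction time, and hand
# the same object to every connection, instead of rebuilding it (and
# reloading the CA trust root) per request.
from twisted.internet.ssl import CertificateOptions, platformTrust


class SharedSSLContext(object):
    def __init__(self):
        # Paid once: parse options and load the platform CA store.
        self._ssl_context = CertificateOptions(
            trustRoot=platformTrust()
        ).getContext()

    def context_for_connection(self):
        # Paid per connection: just return the shared object.
        return self._ssl_context
```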
---
 changelog.d/7094.misc                              |  1 +
 synapse/crypto/context_factory.py                  | 68 ++++++++++++++--------
 synapse/http/client.py                             |  3 -
 synapse/http/federation/matrix_federation_agent.py |  2 +-
 synapse/server.py                                  |  6 +-
 tests/config/test_tls.py                           | 29 +++++----
 .../federation/test_matrix_federation_agent.py     |  6 +-
 7 files changed, 71 insertions(+), 44 deletions(-)
 create mode 100644 changelog.d/7094.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7094.misc b/changelog.d/7094.misc
new file mode 100644
index 0000000000..aa093ee3c0
--- /dev/null
+++ b/changelog.d/7094.misc
@@ -0,0 +1 @@
+Improve performance when making HTTPS requests to sygnal, sydent, etc., by sharing the SSL context object between connections.
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index e93f0b3705..a5a2a7815d 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -75,7 +75,7 @@ class ServerContextFactory(ContextFactory):
 
 
 @implementer(IPolicyForHTTPS)
-class ClientTLSOptionsFactory(object):
+class FederationPolicyForHTTPS(object):
     """Factory for Twisted SSLClientConnectionCreators that are used to make connections
     to remote servers for federation.
 
@@ -103,15 +103,15 @@ class ClientTLSOptionsFactory(object):
         # let us do).
         minTLS = _TLS_VERSION_MAP[config.federation_client_minimum_tls_version]
 
-        self._verify_ssl = CertificateOptions(
+        _verify_ssl = CertificateOptions(
             trustRoot=trust_root, insecurelyLowerMinimumTo=minTLS
         )
-        self._verify_ssl_context = self._verify_ssl.getContext()
-        self._verify_ssl_context.set_info_callback(self._context_info_cb)
+        self._verify_ssl_context = _verify_ssl.getContext()
+        self._verify_ssl_context.set_info_callback(_context_info_cb)
 
-        self._no_verify_ssl = CertificateOptions(insecurelyLowerMinimumTo=minTLS)
-        self._no_verify_ssl_context = self._no_verify_ssl.getContext()
-        self._no_verify_ssl_context.set_info_callback(self._context_info_cb)
+        _no_verify_ssl = CertificateOptions(insecurelyLowerMinimumTo=minTLS)
+        self._no_verify_ssl_context = _no_verify_ssl.getContext()
+        self._no_verify_ssl_context.set_info_callback(_context_info_cb)
 
     def get_options(self, host: bytes):
 
@@ -136,23 +136,6 @@ class ClientTLSOptionsFactory(object):
 
         return SSLClientConnectionCreator(host, ssl_context, should_verify)
 
-    @staticmethod
-    def _context_info_cb(ssl_connection, where, ret):
-        """The 'information callback' for our openssl context object."""
-        # we assume that the app_data on the connection object has been set to
-        # a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator)
-        tls_protocol = ssl_connection.get_app_data()
-        try:
-            # ... we further assume that SSLClientConnectionCreator has set the
-            # '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
-            tls_protocol._synapse_tls_verifier.verify_context_info_cb(
-                ssl_connection, where
-            )
-        except:  # noqa: E722, taken from the twisted implementation
-            logger.exception("Error during info_callback")
-            f = Failure()
-            tls_protocol.failVerification(f)
-
     def creatorForNetloc(self, hostname, port):
         """Implements the IPolicyForHTTPS interace so that this can be passed
         directly to agents.
@@ -160,6 +143,43 @@ class ClientTLSOptionsFactory(object):
         return self.get_options(hostname)
 
 
+@implementer(IPolicyForHTTPS)
+class RegularPolicyForHTTPS(object):
+    """Factory for Twisted SSLClientConnectionCreators that are used to make connections
+    to remote servers, for purposes other than federation.
+
+    Always uses the same OpenSSL context object, which uses the default OpenSSL CA
+    trust root.
+    """
+
+    def __init__(self):
+        trust_root = platformTrust()
+        self._ssl_context = CertificateOptions(trustRoot=trust_root).getContext()
+        self._ssl_context.set_info_callback(_context_info_cb)
+
+    def creatorForNetloc(self, hostname, port):
+        return SSLClientConnectionCreator(hostname, self._ssl_context, True)
+
+
+def _context_info_cb(ssl_connection, where, ret):
+    """The 'information callback' for our openssl context objects.
+
+    Note: Once this is set as the info callback on a Context object, the Context should
+    only be used with the SSLClientConnectionCreator.
+    """
+    # we assume that the app_data on the connection object has been set to
+    # a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator)
+    tls_protocol = ssl_connection.get_app_data()
+    try:
+        # ... we further assume that SSLClientConnectionCreator has set the
+        # '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
+        tls_protocol._synapse_tls_verifier.verify_context_info_cb(ssl_connection, where)
+    except:  # noqa: E722, taken from the twisted implementation
+        logger.exception("Error during info_callback")
+        f = Failure()
+        tls_protocol.failVerification(f)
+
+
 @implementer(IOpenSSLClientConnectionCreator)
 class SSLClientConnectionCreator(object):
     """Creates openssl connection objects for client connections.
diff --git a/synapse/http/client.py b/synapse/http/client.py
index d4c285445e..3797545824 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -244,9 +244,6 @@ class SimpleHttpClient(object):
         pool.maxPersistentPerHost = max((100 * CACHE_SIZE_FACTOR, 5))
         pool.cachedConnectionTimeout = 2 * 60
 
-        # The default context factory in Twisted 14.0.0 (which we require) is
-        # BrowserLikePolicyForHTTPS which will do regular cert validation
-        # 'like a browser'
         self.agent = ProxyAgent(
             self.reactor,
             connectTimeout=15,
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 647d26dc56..f5f917f5ae 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -45,7 +45,7 @@ class MatrixFederationAgent(object):
     Args:
         reactor (IReactor): twisted reactor to use for underlying requests
 
-        tls_client_options_factory (ClientTLSOptionsFactory|None):
+        tls_client_options_factory (FederationPolicyForHTTPS|None):
             factory to use for fetching client tls options, or none to disable TLS.
 
         _srv_resolver (SrvResolver|None):
diff --git a/synapse/server.py b/synapse/server.py
index fd2f69e928..1b980371de 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -26,7 +26,6 @@ import logging
 import os
 
 from twisted.mail.smtp import sendmail
-from twisted.web.client import BrowserLikePolicyForHTTPS
 
 from synapse.api.auth import Auth
 from synapse.api.filtering import Filtering
@@ -35,6 +34,7 @@ from synapse.appservice.api import ApplicationServiceApi
 from synapse.appservice.scheduler import ApplicationServiceScheduler
 from synapse.config.homeserver import HomeServerConfig
 from synapse.crypto import context_factory
+from synapse.crypto.context_factory import RegularPolicyForHTTPS
 from synapse.crypto.keyring import Keyring
 from synapse.events.builder import EventBuilderFactory
 from synapse.events.spamcheck import SpamChecker
@@ -310,7 +310,7 @@ class HomeServer(object):
         return (
             InsecureInterceptableContextFactory()
             if self.config.use_insecure_ssl_client_just_for_testing_do_not_use
-            else BrowserLikePolicyForHTTPS()
+            else RegularPolicyForHTTPS()
         )
 
     def build_simple_http_client(self):
@@ -420,7 +420,7 @@ class HomeServer(object):
         return PusherPool(self)
 
     def build_http_client(self):
-        tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
+        tls_client_options_factory = context_factory.FederationPolicyForHTTPS(
             self.config
         )
         return MatrixFederationHttpClient(self, tls_client_options_factory)
diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py
index 1be6ff563b..ec32d4b1ca 100644
--- a/tests/config/test_tls.py
+++ b/tests/config/test_tls.py
@@ -23,7 +23,7 @@ from OpenSSL import SSL
 
 from synapse.config._base import Config, RootConfig
 from synapse.config.tls import ConfigError, TlsConfig
-from synapse.crypto.context_factory import ClientTLSOptionsFactory
+from synapse.crypto.context_factory import FederationPolicyForHTTPS
 
 from tests.unittest import TestCase
 
@@ -180,12 +180,13 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg=
         t = TestConfig()
         t.read_config(config, config_dir_path="", data_dir_path="")
 
-        cf = ClientTLSOptionsFactory(t)
+        cf = FederationPolicyForHTTPS(t)
+        options = _get_ssl_context_options(cf._verify_ssl_context)
 
         # The context has had NO_TLSv1_1 and NO_TLSv1_0 set, but not NO_TLSv1_2
-        self.assertNotEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1, 0)
-        self.assertNotEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_1, 0)
-        self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_2, 0)
+        self.assertNotEqual(options & SSL.OP_NO_TLSv1, 0)
+        self.assertNotEqual(options & SSL.OP_NO_TLSv1_1, 0)
+        self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0)
 
     def test_tls_client_minimum_set_passed_through_1_0(self):
         """
@@ -195,12 +196,13 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg=
         t = TestConfig()
         t.read_config(config, config_dir_path="", data_dir_path="")
 
-        cf = ClientTLSOptionsFactory(t)
+        cf = FederationPolicyForHTTPS(t)
+        options = _get_ssl_context_options(cf._verify_ssl_context)
 
         # The context has not had any of the NO_TLS set.
-        self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1, 0)
-        self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_1, 0)
-        self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_2, 0)
+        self.assertEqual(options & SSL.OP_NO_TLSv1, 0)
+        self.assertEqual(options & SSL.OP_NO_TLSv1_1, 0)
+        self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0)
 
     def test_acme_disabled_in_generated_config_no_acme_domain_provied(self):
         """
@@ -273,7 +275,7 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg=
         t = TestConfig()
         t.read_config(config, config_dir_path="", data_dir_path="")
 
-        cf = ClientTLSOptionsFactory(t)
+        cf = FederationPolicyForHTTPS(t)
 
         # Not in the whitelist
         opts = cf.get_options(b"notexample.com")
@@ -282,3 +284,10 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg=
         # Caught by the wildcard
         opts = cf.get_options(idna.encode("テスト.ドメイン.テスト"))
         self.assertFalse(opts._verifier._verify_certs)
+
+
+def _get_ssl_context_options(ssl_context: SSL.Context) -> int:
+    """get the options bits from an openssl context object"""
+    # the OpenSSL.SSL.Context wrapper doesn't expose get_options, so we have to
+    # use the low-level interface
+    return SSL._lib.SSL_CTX_get_options(ssl_context._context)
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
index cfcd98ff7d..fdc1d918ff 100644
--- a/tests/http/federation/test_matrix_federation_agent.py
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -31,7 +31,7 @@ from twisted.web.http_headers import Headers
 from twisted.web.iweb import IPolicyForHTTPS
 
 from synapse.config.homeserver import HomeServerConfig
-from synapse.crypto.context_factory import ClientTLSOptionsFactory
+from synapse.crypto.context_factory import FederationPolicyForHTTPS
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
 from synapse.http.federation.srv_resolver import Server
 from synapse.http.federation.well_known_resolver import (
@@ -79,7 +79,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
         self._config = config = HomeServerConfig()
         config.parse_config_dict(config_dict, "", "")
 
-        self.tls_factory = ClientTLSOptionsFactory(config)
+        self.tls_factory = FederationPolicyForHTTPS(config)
 
         self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds)
         self.had_well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds)
@@ -715,7 +715,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
         config = default_config("test", parse=True)
 
         # Build a new agent and WellKnownResolver with a different tls factory
-        tls_factory = ClientTLSOptionsFactory(config)
+        tls_factory = FederationPolicyForHTTPS(config)
         agent = MatrixFederationAgent(
             reactor=self.reactor,
             tls_client_options_factory=tls_factory,
-- 
cgit 1.4.1


From 6d110ddea4b4c300a1d062442da060d021a280cf Mon Sep 17 00:00:00 2001
From: Richard von Kellner 
Date: Tue, 17 Mar 2020 22:48:23 +0100
Subject: Update INSTALL.md updated CentOS8 install instructions (#6925)

---
 INSTALL.md           | 13 +++++++++++--
 changelog.d/6925.doc |  1 +
 2 files changed, 12 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6925.doc

(limited to 'changelog.d')

diff --git a/INSTALL.md b/INSTALL.md
index ffb82bdcc3..c0926ba590 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -124,12 +124,21 @@ sudo pacman -S base-devel python python-pip \
 
 #### CentOS/Fedora
 
-Installing prerequisites on CentOS 7 or Fedora 25:
+Installing prerequisites on CentOS 8 or Fedora>26:
+
+```
+sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
+                 libwebp-devel tk-devel redhat-rpm-config \
+                 python3-virtualenv libffi-devel openssl-devel
+sudo dnf groupinstall "Development Tools"
+```
+
+Installing prerequisites on CentOS 7 or Fedora<=25:
 
 ```
 sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
                  lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
-                 python-virtualenv libffi-devel openssl-devel
+                 python3-virtualenv libffi-devel openssl-devel
 sudo yum groupinstall "Development Tools"
 ```
 
diff --git a/changelog.d/6925.doc b/changelog.d/6925.doc
new file mode 100644
index 0000000000..b8e6c73630
--- /dev/null
+++ b/changelog.d/6925.doc
@@ -0,0 +1 @@
+Updated CentOS8 install instructions. Contributed by Richard Kellner.
-- 
cgit 1.4.1


From 88b41986dbc54e8601ad4d889f4ebff952858b4f Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Wed, 18 Mar 2020 07:50:00 -0400
Subject: Add an option to the set password API to choose whether to logout
 other devices. (#7085)

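For example, an admin can now reset a password without invalidating the
user's other sessions (a sketch using the `requests` library; the endpoint
path, token, and user ID below are illustrative):

```python
# Sketch: reset a user's password but keep their other devices logged in
# by passing the new optional flag, which defaults to True.
import requests

resp = requests.post(
    "http://localhost:8008/_synapse/admin/v1/reset_password/@user:example.com",
    json={
        "new_password": "correct horse battery staple",
        "logout_devices": False,   # new in this change
    },
    headers={"Authorization": "Bearer <admin access token>"},
)
resp.raise_for_status()
```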
---
 changelog.d/7085.feature                |  1 +
 docs/admin_api/user_admin_api.rst       |  6 ++++-
 synapse/handlers/set_password.py        | 41 ++++++++++++++++++++-------------
 synapse/rest/admin/users.py             |  6 +++--
 synapse/rest/client/v2_alpha/account.py |  5 +++-
 5 files changed, 39 insertions(+), 20 deletions(-)
 create mode 100644 changelog.d/7085.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7085.feature b/changelog.d/7085.feature
new file mode 100644
index 0000000000..df6d0f990d
--- /dev/null
+++ b/changelog.d/7085.feature
@@ -0,0 +1 @@
+Add an optional parameter to control whether other sessions are logged out when a user's password is modified.
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 6b02d963e6..9ce10119ff 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -38,6 +38,7 @@ The parameter ``threepids`` is optional.
 The parameter ``avatar_url`` is optional.
 The parameter ``admin`` is optional and defaults to 'false'.
 The parameter ``deactivated`` is optional and defaults to 'false'.
+The parameter ``password`` is optional. If provided, the user's password is updated and all devices are logged out.
 If the user already exists then optional parameters default to the current value.
 
 List Accounts
@@ -168,11 +169,14 @@ with a body of:
 .. code:: json
 
    {
-       "new_password": ""
+       "new_password": "",
+       "logout_devices": true,
    }
 
 including an ``access_token`` of a server admin.
 
+The parameter ``new_password`` is required.
+The parameter ``logout_devices`` is optional and defaults to ``true``.
 
 Get whether a user is a server administrator or not
 ===================================================
diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
index d90c9e0108..12657ca698 100644
--- a/synapse/handlers/set_password.py
+++ b/synapse/handlers/set_password.py
@@ -13,10 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import Optional
 
 from twisted.internet import defer
 
 from synapse.api.errors import Codes, StoreError, SynapseError
+from synapse.types import Requester
 
 from ._base import BaseHandler
 
@@ -32,14 +34,17 @@ class SetPasswordHandler(BaseHandler):
         self._device_handler = hs.get_device_handler()
 
     @defer.inlineCallbacks
-    def set_password(self, user_id, newpassword, requester=None):
+    def set_password(
+        self,
+        user_id: str,
+        new_password: str,
+        logout_devices: bool,
+        requester: Optional[Requester] = None,
+    ):
         if not self.hs.config.password_localdb_enabled:
             raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN)
 
-        password_hash = yield self._auth_handler.hash(newpassword)
-
-        except_device_id = requester.device_id if requester else None
-        except_access_token_id = requester.access_token_id if requester else None
+        password_hash = yield self._auth_handler.hash(new_password)
 
         try:
             yield self.store.user_set_password_hash(user_id, password_hash)
@@ -48,14 +53,18 @@ class SetPasswordHandler(BaseHandler):
                 raise SynapseError(404, "Unknown user", Codes.NOT_FOUND)
             raise e
 
-        # we want to log out all of the user's other sessions. First delete
-        # all his other devices.
-        yield self._device_handler.delete_all_devices_for_user(
-            user_id, except_device_id=except_device_id
-        )
-
-        # and now delete any access tokens which weren't associated with
-        # devices (or were associated with this device).
-        yield self._auth_handler.delete_access_tokens_for_user(
-            user_id, except_token_id=except_access_token_id
-        )
+        # Optionally, log out all of the user's other sessions.
+        if logout_devices:
+            except_device_id = requester.device_id if requester else None
+            except_access_token_id = requester.access_token_id if requester else None
+
+            # First delete all of their other devices.
+            yield self._device_handler.delete_all_devices_for_user(
+                user_id, except_device_id=except_device_id
+            )
+
+            # and now delete any access tokens which weren't associated with
+            # devices (or were associated with this device).
+            yield self._auth_handler.delete_access_tokens_for_user(
+                user_id, except_token_id=except_access_token_id
+            )
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 80f959248d..8551ac19b8 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -221,8 +221,9 @@ class UserRestServletV2(RestServlet):
                     raise SynapseError(400, "Invalid password")
                 else:
                     new_password = body["password"]
+                    logout_devices = True
                     await self.set_password_handler.set_password(
-                        target_user.to_string(), new_password, requester
+                        target_user.to_string(), new_password, logout_devices, requester
                     )
 
             if "deactivated" in body:
@@ -536,9 +537,10 @@ class ResetPasswordRestServlet(RestServlet):
         params = parse_json_object_from_request(request)
         assert_params_in_dict(params, ["new_password"])
         new_password = params["new_password"]
+        logout_devices = params.get("logout_devices", True)
 
         await self._set_password_handler.set_password(
-            target_user_id, new_password, requester
+            target_user_id, new_password, logout_devices, requester
         )
         return 200, {}
 
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index dc837d6c75..631cc74cb4 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -265,8 +265,11 @@ class PasswordRestServlet(RestServlet):
 
         assert_params_in_dict(params, ["new_password"])
         new_password = params["new_password"]
+        logout_devices = params.get("logout_devices", True)
 
-        await self._set_password_handler.set_password(user_id, new_password, requester)
+        await self._set_password_handler.set_password(
+            user_id, new_password, logout_devices, requester
+        )
 
         return 200, {}
 
-- 
cgit 1.4.1


From 4a17a647a9508b70de35130fd82e3e21474270a9 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 18 Mar 2020 16:46:41 +0000
Subject: Improve get auth chain difference algorithm. (#7095)

It was originally implemented by pulling the full auth chain of all
state sets out of the database and doing set comparison. However, that
can take a lot of work if the state and auth chains are large.

Instead, let's try to fetch the auth chains at the same time and
calculate the difference on the fly, allowing us to bail early if all
the auth chains converge. Assuming that the auth chains do converge more
often than not, this should improve performance. Hopefully.
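For reference, the naive definition being optimised (and which the new
`TestStateResolutionStore.get_auth_chain_difference` below still implements
in memory) looks like this sketch, with the auth graph given as a plain dict:

```python
# Naive auth chain difference: union of the full per-state-set auth chains
# minus their intersection. Self-contained sketch over an in-memory graph.
from typing import Dict, List, Set


def full_auth_chain(events: Set[str], auth: Dict[str, List[str]]) -> Set[str]:
    """Transitively walk auth (event_id -> its auth event ids)."""
    result: Set[str] = set()
    stack = list(events)
    while stack:
        eid = stack.pop()
        if eid in result:
            continue
        result.add(eid)
        stack.extend(auth.get(eid, []))
    return result


def auth_chain_difference(
    state_sets: List[Set[str]], auth: Dict[str, List[str]]
) -> Set[str]:
    chains = [full_auth_chain(s, auth) for s in state_sets]
    return set.union(*chains) - set.intersection(*chains)


# The graph from the new storage test: difference of {a} and {b} is {a, b}.
auth_graph = {"a": ["e"], "b": ["e"], "e": ["f"], "f": ["g"],
              "g": ["h", "i"], "h": ["k"], "i": ["j"], "k": [], "j": []}
assert auth_chain_difference([{"a"}, {"b"}], auth_graph) == {"a", "b"}
```

The new storage implementation computes the same set without materialising
the full chains, walking the graphs in depth order and bailing once every
event still being walked is reachable from every state set.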
---
 changelog.d/7095.misc                              |   1 +
 synapse/state/__init__.py                          |  28 ++--
 synapse/state/v2.py                                |  32 +----
 .../storage/data_stores/main/event_federation.py   | 150 +++++++++++++++++++-
 tests/state/test_v2.py                             |  13 +-
 tests/storage/test_event_federation.py             | 157 ++++++++++++++++++---
 6 files changed, 310 insertions(+), 71 deletions(-)
 create mode 100644 changelog.d/7095.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7095.misc b/changelog.d/7095.misc
new file mode 100644
index 0000000000..44fc9f616f
--- /dev/null
+++ b/changelog.d/7095.misc
@@ -0,0 +1 @@
+Attempt to improve performance of state res v2 algorithm.
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index df7a4f6a89..4afefc6b1d 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -662,28 +662,16 @@ class StateResolutionStore(object):
             allow_rejected=allow_rejected,
         )
 
-    def get_auth_chain(self, event_ids: List[str], ignore_events: Set[str]):
-        """Gets the full auth chain for a set of events (including rejected
-        events).
-
-        Includes the given event IDs in the result.
-
-        Note that:
-            1. All events must be state events.
-            2. For v1 rooms this may not have the full auth chain in the
-               presence of rejected events
-
-        Args:
-            event_ids: The event IDs of the events to fetch the auth chain for.
-                Must be state events.
-            ignore_events: Set of events to exclude from the returned auth
-                chain.
+    def get_auth_chain_difference(self, state_sets: List[Set[str]]):
+        """Given sets of state events figure out the auth chain difference (as
+        per state res v2 algorithm).
 
+        This is equivalent to fetching the full auth chain for each set of state
+        and returning the events that don't appear in each and every auth
+        chain.
 
         Returns:
-            Deferred[list[str]]: List of event IDs of the auth chain.
+            Deferred[Set[str]]: Set of event IDs.
         """
 
-        return self.store.get_auth_chain_ids(
-            event_ids, include_given=True, ignore_events=ignore_events,
-        )
+        return self.store.get_auth_chain_difference(state_sets)
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 0ffe6d8c14..18484e2fa6 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -227,36 +227,12 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store):
     Returns:
         Deferred[set[str]]: Set of event IDs
     """
-    common = set(itervalues(state_sets[0])).intersection(
-        *(itervalues(s) for s in state_sets[1:])
-    )
-
-    auth_sets = []
-    for state_set in state_sets:
-        auth_ids = {
-            eid
-            for key, eid in iteritems(state_set)
-            if (
-                key[0] in (EventTypes.Member, EventTypes.ThirdPartyInvite)
-                or key
-                in (
-                    (EventTypes.PowerLevels, ""),
-                    (EventTypes.Create, ""),
-                    (EventTypes.JoinRules, ""),
-                )
-            )
-            and eid not in common
-        }
 
-        auth_chain = yield state_res_store.get_auth_chain(auth_ids, common)
-        auth_ids.update(auth_chain)
-
-        auth_sets.append(auth_ids)
-
-    intersection = set(auth_sets[0]).intersection(*auth_sets[1:])
-    union = set().union(*auth_sets)
+    difference = yield state_res_store.get_auth_chain_difference(
+        [set(state_set.values()) for state_set in state_sets]
+    )
 
-    return union - intersection
+    return difference
 
 
 def _seperate(state_sets):
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index 49a7b8b433..62d4e9f599 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 import itertools
 import logging
-from typing import List, Optional, Set
+from typing import Dict, List, Optional, Set, Tuple
 
 from six.moves.queue import Empty, PriorityQueue
 
@@ -103,6 +103,154 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
 
         return list(results)
 
+    def get_auth_chain_difference(self, state_sets: List[Set[str]]):
+        """Given sets of state events figure out the auth chain difference (as
+        per state res v2 algorithm).
+
+        This is equivalent to fetching the full auth chain for each set of state
+        and returning the events that don't appear in each and every auth
+        chain.
+
+        Returns:
+            Deferred[Set[str]]
+        """
+
+        return self.db.runInteraction(
+            "get_auth_chain_difference",
+            self._get_auth_chain_difference_txn,
+            state_sets,
+        )
+
+    def _get_auth_chain_difference_txn(
+        self, txn, state_sets: List[Set[str]]
+    ) -> Set[str]:
+
+        # Algorithm Description
+        # ~~~~~~~~~~~~~~~~~~~~~
+        #
+        # The idea here is to basically walk the auth graph of each state set in
+        # tandem, keeping track of which auth events are reachable by each state
+        # set. If we reach an auth event we've already visited (via a different
+        # state set) then we mark that auth event and all ancestors as reachable
+        # by the state set. This requires that we keep track of the auth chains
+        # in memory.
+        #
+        # Doing it in such a way means that we can stop early if all auth
+        # events we're currently walking are reachable by all state sets.
+        #
+        # *Note*: We can't stop walking an event's auth chain if it is reachable
+        # by all state sets. This is because other auth chains we're walking
+        # might be reachable only via the original auth chain. For example,
+        # given the following auth chain:
+        #
+        #       A -> C -> D -> E
+        #           /         /
+        #       B -´---------´
+        #
+        # and state sets {A} and {B} then walking the auth chains of A and B
+        # would immediately show that C is reachable by both. However, if we
+        # stopped at C then we'd only reach E via the auth chain of B and so E
+        # would erroneously get included in the returned difference.
+        #
+        # The other thing that we do is limit the number of auth chains we walk
+        # at once, due to practical limits (i.e. we can only query the database
+        # with a limited set of parameters). We pick the auth chains we walk
+        # each iteration based on their depth, in the hope that events with a
+        # lower depth are likely reachable by those with higher depths.
+        #
+        # We could use any ordering that we believe would give a rough
+        # topological ordering, e.g. origin server timestamp. If the ordering
+        # chosen is not topological then the algorithm still produces the right
+        # result, but perhaps a bit more inefficiently. This is why it is safe
+        # to use "depth" here.
+
+        initial_events = set(state_sets[0]).union(*state_sets[1:])
+
+        # Dict from events in auth chains to which sets *cannot* reach them.
+        # I.e. if the set is empty then all sets can reach the event.
+        event_to_missing_sets = {
+            event_id: {i for i, a in enumerate(state_sets) if event_id not in a}
+            for event_id in initial_events
+        }
+
+        # We need to get the depth of the initial events for sorting purposes.
+        sql = """
+            SELECT depth, event_id FROM events
+            WHERE %s
+            ORDER BY depth ASC
+        """
+        clause, args = make_in_list_sql_clause(
+            txn.database_engine, "event_id", initial_events
+        )
+        txn.execute(sql % (clause,), args)
+
+        # The sorted list of events whose auth chains we should walk.
+        search = txn.fetchall()  # type: List[Tuple[int, str]]
+
+        # Map from event to its auth events
+        event_to_auth_events = {}  # type: Dict[str, Set[str]]
+
+        base_sql = """
+            SELECT a.event_id, auth_id, depth
+            FROM event_auth AS a
+            INNER JOIN events AS e ON (e.event_id = a.auth_id)
+            WHERE
+        """
+
+        while search:
+            # Check whether all our current walks are reachable by all state
+            # sets. If so we can bail.
+            if all(not event_to_missing_sets[eid] for _, eid in search):
+                break
+
+            # Fetch the auth events, and their depths, of the last N events
+            # we're currently walking
+            search, chunk = search[:-100], search[-100:]
+            clause, args = make_in_list_sql_clause(
+                txn.database_engine, "a.event_id", [e_id for _, e_id in chunk]
+            )
+            txn.execute(base_sql + clause, args)
+
+            for event_id, auth_event_id, auth_event_depth in txn:
+                event_to_auth_events.setdefault(event_id, set()).add(auth_event_id)
+
+                sets = event_to_missing_sets.get(auth_event_id)
+                if sets is None:
+                    # First time we're seeing this event, so we add it to the
+                    # queue of things to fetch.
+                    search.append((auth_event_depth, auth_event_id))
+
+                    # Assume that this event is unreachable from any of the
+                    # state sets until proven otherwise
+                    sets = event_to_missing_sets[auth_event_id] = set(
+                        range(len(state_sets))
+                    )
+                else:
+                    # We've previously seen this event, so look up its auth
+                    # events and recursively mark all ancestors as reachable
+                    # by the current event's state set.
+                    a_ids = event_to_auth_events.get(auth_event_id)
+                    while a_ids:
+                        new_aids = set()
+                        for a_id in a_ids:
+                            event_to_missing_sets[a_id].intersection_update(
+                                event_to_missing_sets[event_id]
+                            )
+
+                            b = event_to_auth_events.get(a_id)
+                            if b:
+                                new_aids.update(b)
+
+                        a_ids = new_aids
+
+                # Mark that the auth event is reachable by the appropriate sets.
+                sets.intersection_update(event_to_missing_sets[event_id])
+
+            search.sort()
+
+        # Return all events where not all sets can reach them.
+        return {eid for eid, n in event_to_missing_sets.items() if n}
+
     def get_oldest_events_in_room(self, room_id):
         return self.db.runInteraction(
             "get_oldest_events_in_room", self._get_oldest_events_in_room_txn, room_id
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 5059ade850..a44960203e 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -603,7 +603,7 @@ class TestStateResolutionStore(object):
 
         return {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map}
 
-    def get_auth_chain(self, event_ids, ignore_events):
+    def _get_auth_chain(self, event_ids):
         """Gets the full auth chain for a set of events (including rejected
         events).
 
@@ -617,9 +617,6 @@ class TestStateResolutionStore(object):
         Args:
             event_ids (list): The event IDs of the events to fetch the auth
                 chain for. Must be state events.
-            ignore_events: Set of events to exclude from the returned auth
-                chain.
-
         Returns:
             Deferred[list[str]]: List of event IDs of the auth chain.
         """
@@ -629,7 +626,7 @@ class TestStateResolutionStore(object):
         stack = list(event_ids)
         while stack:
             event_id = stack.pop()
-            if event_id in result or event_id in ignore_events:
+            if event_id in result:
                 continue
 
             result.add(event_id)
@@ -639,3 +636,9 @@ class TestStateResolutionStore(object):
                 stack.append(aid)
 
         return list(result)
+
+    def get_auth_chain_difference(self, auth_sets):
+        chains = [frozenset(self._get_auth_chain(a)) for a in auth_sets]
+
+        common = set(chains[0]).intersection(*chains[1:])
+        return set(chains[0]).union(*chains[1:]) - common
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index a331517f4d..3aeec0dc0f 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -13,19 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
-
 import tests.unittest
 import tests.utils
 
 
-class EventFederationWorkerStoreTestCase(tests.unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
+class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
         self.store = hs.get_datastore()
 
-    @defer.inlineCallbacks
     def test_get_prev_events_for_room(self):
         room_id = "@ROOM:local"
 
@@ -61,15 +56,14 @@ class EventFederationWorkerStoreTestCase(tests.unittest.TestCase):
             )
 
         for i in range(0, 20):
-            yield self.store.db.runInteraction("insert", insert_event, i)
+            self.get_success(self.store.db.runInteraction("insert", insert_event, i))
 
         # this should get the last ten
-        r = yield self.store.get_prev_events_for_room(room_id)
+        r = self.get_success(self.store.get_prev_events_for_room(room_id))
         self.assertEqual(10, len(r))
         for i in range(0, 10):
             self.assertEqual("$event_%i:local" % (19 - i), r[i])
 
-    @defer.inlineCallbacks
     def test_get_rooms_with_many_extremities(self):
         room1 = "#room1"
         room2 = "#room2"
@@ -86,25 +80,154 @@ class EventFederationWorkerStoreTestCase(tests.unittest.TestCase):
             )
 
         for i in range(0, 20):
-            yield self.store.db.runInteraction("insert", insert_event, i, room1)
-            yield self.store.db.runInteraction("insert", insert_event, i, room2)
-            yield self.store.db.runInteraction("insert", insert_event, i, room3)
+            self.get_success(
+                self.store.db.runInteraction("insert", insert_event, i, room1)
+            )
+            self.get_success(
+                self.store.db.runInteraction("insert", insert_event, i, room2)
+            )
+            self.get_success(
+                self.store.db.runInteraction("insert", insert_event, i, room3)
+            )
 
         # Test simple case
-        r = yield self.store.get_rooms_with_many_extremities(5, 5, [])
+        r = self.get_success(self.store.get_rooms_with_many_extremities(5, 5, []))
         self.assertEqual(len(r), 3)
 
         # Does filter work?
 
-        r = yield self.store.get_rooms_with_many_extremities(5, 5, [room1])
+        r = self.get_success(self.store.get_rooms_with_many_extremities(5, 5, [room1]))
         self.assertTrue(room2 in r)
         self.assertTrue(room3 in r)
         self.assertEqual(len(r), 2)
 
-        r = yield self.store.get_rooms_with_many_extremities(5, 5, [room1, room2])
+        r = self.get_success(
+            self.store.get_rooms_with_many_extremities(5, 5, [room1, room2])
+        )
         self.assertEqual(r, [room3])
 
         # Does filter and limit work?
 
-        r = yield self.store.get_rooms_with_many_extremities(5, 1, [room1])
+        r = self.get_success(self.store.get_rooms_with_many_extremities(5, 1, [room1]))
         self.assertTrue(r == [room2] or r == [room3])
+
+    def test_auth_difference(self):
+        room_id = "@ROOM:local"
+
+        # The silly auth graph we use to test the auth difference algorithm,
+        # where the events at the top are the most recent.
+        #
+        #   A   B
+        #    \ /
+        #  D  E
+        #  \  |
+        #   ` F   C
+        #     |  /|
+        #     G ´ |
+        #     | \ |
+        #     H   I
+        #     |   |
+        #     K   J
+
+        auth_graph = {
+            "a": ["e"],
+            "b": ["e"],
+            "c": ["g", "i"],
+            "d": ["f"],
+            "e": ["f"],
+            "f": ["g"],
+            "g": ["h", "i"],
+            "h": ["k"],
+            "i": ["j"],
+            "k": [],
+            "j": [],
+        }
+
+        depth_map = {
+            "a": 7,
+            "b": 7,
+            "c": 4,
+            "d": 6,
+            "e": 6,
+            "f": 5,
+            "g": 3,
+            "h": 2,
+            "i": 2,
+            "k": 1,
+            "j": 1,
+        }
+
+        # We rudely fiddle with the appropriate tables directly, as that's much
+        # easier than constructing events properly.
+
+        def insert_event(txn, event_id, stream_ordering):
+
+            depth = depth_map[event_id]
+
+            self.store.db.simple_insert_txn(
+                txn,
+                table="events",
+                values={
+                    "event_id": event_id,
+                    "room_id": room_id,
+                    "depth": depth,
+                    "topological_ordering": depth,
+                    "type": "m.test",
+                    "processed": True,
+                    "outlier": False,
+                    "stream_ordering": stream_ordering,
+                },
+            )
+
+            self.store.db.simple_insert_many_txn(
+                txn,
+                table="event_auth",
+                values=[
+                    {"event_id": event_id, "room_id": room_id, "auth_id": a}
+                    for a in auth_graph[event_id]
+                ],
+            )
+
+        next_stream_ordering = 0
+        for event_id in auth_graph:
+            next_stream_ordering += 1
+            self.get_success(
+                self.store.db.runInteraction(
+                    "insert", insert_event, event_id, next_stream_ordering
+                )
+            )
+
+        # Now actually test that various combinations give the right result:
+
+        difference = self.get_success(
+            self.store.get_auth_chain_difference([{"a"}, {"b"}])
+        )
+        self.assertSetEqual(difference, {"a", "b"})
+
+        difference = self.get_success(
+            self.store.get_auth_chain_difference([{"a"}, {"b"}, {"c"}])
+        )
+        self.assertSetEqual(difference, {"a", "b", "c", "e", "f"})
+
+        difference = self.get_success(
+            self.store.get_auth_chain_difference([{"a", "c"}, {"b"}])
+        )
+        self.assertSetEqual(difference, {"a", "b", "c"})
+
+        difference = self.get_success(
+            self.store.get_auth_chain_difference([{"a"}, {"b"}, {"d"}])
+        )
+        self.assertSetEqual(difference, {"a", "b", "d", "e"})
+
+        difference = self.get_success(
+            self.store.get_auth_chain_difference([{"a"}, {"b"}, {"c"}, {"d"}])
+        )
+        self.assertSetEqual(difference, {"a", "b", "c", "d", "e", "f"})
+
+        difference = self.get_success(
+            self.store.get_auth_chain_difference([{"a"}, {"b"}, {"e"}])
+        )
+        self.assertSetEqual(difference, {"a", "b"})
+
+        difference = self.get_success(self.store.get_auth_chain_difference([{"a"}]))
+        self.assertSetEqual(difference, set())
-- 
cgit 1.4.1
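
For reference, the auth-chain-difference computation the new test exercises can be sketched standalone (a plain-Python sketch over the toy graph from the test above, not Synapse's storage-backed implementation): the difference is the union of each state set's auth chain minus the chains' common intersection.

    auth_graph = {
        "a": ["e"], "b": ["e"], "c": ["g", "i"], "d": ["f"], "e": ["f"],
        "f": ["g"], "g": ["h", "i"], "h": ["k"], "i": ["j"], "k": [], "j": [],
    }

    def auth_chain(event_ids):
        # Walk the auth edges to collect every ancestor of the given events.
        seen = set()
        stack = list(event_ids)
        while stack:
            for parent in auth_graph[stack.pop()]:
                if parent not in seen:
                    seen.add(parent)
                    stack.append(parent)
        return seen

    def auth_chain_difference(state_sets):
        # Each chain includes the state set's own events, mirroring the test helper.
        chains = [auth_chain(s) | s for s in state_sets]
        common = set.intersection(*chains)
        return set.union(*chains) - common

    assert auth_chain_difference([{"a"}, {"b"}]) == {"a", "b"}
    assert auth_chain_difference([{"a"}, {"b"}, {"c"}]) == {"a", "b", "c", "e", "f"}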


From 443162e57724c34099215732eda690ea25cb1e4c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 19 Mar 2020 10:48:45 +0100
Subject: Move pusherpool startup into _base.setup (#7104)

This should be safe to do on all workers/masters because it is guarded by
a config option which will ensure it is only actually done on the worker
assigned as a pusher.
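
Concretely, the guard means `PusherPool.start()` is expected to be a no-op on any process not configured to run pushers, along these lines (an illustrative sketch based on the `_should_start_pushers` flag visible in the pusherpool diffs later in this series, not the verbatim Synapse code):

    class PusherPool:
        def __init__(self, hs):
            # Set from the homeserver config; True only on the pusher worker.
            self._should_start_pushers = hs.config.start_pushers

        def start(self):
            if not self._should_start_pushers:
                # Guarded: safe to call unconditionally from _base.start.
                return
            self._start_pushers()  # in Synapse this runs as a background process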
---
 changelog.d/7104.misc     | 1 +
 synapse/app/_base.py      | 1 +
 synapse/app/homeserver.py | 1 -
 3 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7104.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7104.misc b/changelog.d/7104.misc
new file mode 100644
index 0000000000..ec5c004bbe
--- /dev/null
+++ b/changelog.d/7104.misc
@@ -0,0 +1 @@
+Merge worker apps together.
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 9ffd23c6df..4d84f4595a 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -276,6 +276,7 @@ def start(hs, listeners=None):
         # It is now safe to start your Synapse.
         hs.start_listening(listeners)
         hs.get_datastore().db.start_profiling()
+        hs.get_pusherpool().start()
 
         setup_sentry(hs)
         setup_sdnotify(hs)
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index e0fdddfdc9..f2b56a636f 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -408,7 +408,6 @@ def setup(config_options):
 
             _base.start(hs, config.listeners)
 
-            hs.get_pusherpool().start()
             hs.get_datastore().db.updates.start_doing_background_updates()
         except Exception:
             # Print the exception and bail out.
-- 
cgit 1.4.1


From 8c75667ad7810b4c05e40f7665e724a40aaf4d64 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 19 Mar 2020 11:00:24 +0100
Subject: Add prometheus metrics for the number of active pushers (#7103)
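
The gauge's `caller` returns a dict mapping a `(kind, app_id)` label tuple to a count, evaluated lazily at scrape time. A standalone sketch of that counting pattern (with hypothetical pusher data, not Synapse's real `pushers` structure):

    from collections import defaultdict
    from typing import Dict, Tuple

    # Hypothetical map: user id -> "app_id:pushkey" -> (kind, app_id).
    pushers = {
        "@alice:example.org": {"app1:k1": ("HttpPusher", "app1")},
        "@bob:example.org": {
            "app1:k2": ("HttpPusher", "app1"),
            "app2:k3": ("EmailPusher", "app2"),
        },
    }

    def count_pushers() -> Dict[Tuple[str, str], int]:
        # One counter per (kind, app_id) label tuple, the shape LaterGauge
        # expects when `labels` is set.
        results = defaultdict(int)  # type: Dict[Tuple[str, str], int]
        for per_user in pushers.values():
            for kind, app_id in per_user.values():
                results[(kind, app_id)] += 1
        return results

    print(dict(count_pushers()))  # {('HttpPusher', 'app1'): 2, ('EmailPusher', 'app2'): 1}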

---
 changelog.d/7103.feature                      |  1 +
 synapse/metrics/__init__.py                   | 12 +++++++-----
 synapse/metrics/background_process_metrics.py |  5 +++--
 synapse/push/pusherpool.py                    | 24 +++++++++++++++++++++++-
 tox.ini                                       |  2 ++
 5 files changed, 36 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/7103.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7103.feature b/changelog.d/7103.feature
new file mode 100644
index 0000000000..413e7f29d7
--- /dev/null
+++ b/changelog.d/7103.feature
@@ -0,0 +1 @@
+Add prometheus metrics for the number of active pushers.
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 0dba997a23..d2fd29acb4 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -20,7 +20,7 @@ import os
 import platform
 import threading
 import time
-from typing import Dict, Union
+from typing import Callable, Dict, Iterable, Optional, Tuple, Union
 
 import six
 
@@ -59,10 +59,12 @@ class RegistryProxy(object):
 @attr.s(hash=True)
 class LaterGauge(object):
 
-    name = attr.ib()
-    desc = attr.ib()
-    labels = attr.ib(hash=False)
-    caller = attr.ib()
+    name = attr.ib(type=str)
+    desc = attr.ib(type=str)
+    labels = attr.ib(hash=False, type=Optional[Iterable[str]])
+    # callback: should either return a value (if there are no labels for this metric),
+    # or a dict mapping from a label tuple to a value
+    caller = attr.ib(type=Callable[[], Union[Dict[Tuple[str, ...], float], float]])
 
     def collect(self):
 
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index b65bcd8806..8449ef82f7 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -17,6 +17,7 @@ import logging
 import threading
 from asyncio import iscoroutine
 from functools import wraps
+from typing import Dict, Set
 
 import six
 
@@ -80,13 +81,13 @@ _background_process_db_sched_duration = Counter(
 # map from description to a counter, so that we can name our logcontexts
 # incrementally. (It actually duplicates _background_process_start_count, but
 # it's much simpler to do so than to try to combine them.)
-_background_process_counts = {}  # type: dict[str, int]
+_background_process_counts = {}  # type: Dict[str, int]
 
 # map from description to the currently running background processes.
 #
 # it's kept as a dict of sets rather than a big set so that we can keep track
 # of process descriptions that no longer have any active processes.
-_background_processes = {}  # type: dict[str, set[_BackgroundProcess]]
+_background_processes = {}  # type: Dict[str, Set[_BackgroundProcess]]
 
 # A lock that covers the above dicts
 _bg_metrics_lock = threading.Lock()
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 01789a9fb4..bf721759df 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -15,11 +15,16 @@
 # limitations under the License.
 
 import logging
+from collections import defaultdict
+from typing import Dict, Tuple, Union
 
 from twisted.internet import defer
 
+from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.push import PusherConfigException
+from synapse.push.emailpusher import EmailPusher
+from synapse.push.httppusher import HttpPusher
 from synapse.push.pusher import PusherFactory
 from synapse.util.async_helpers import concurrently_execute
 
@@ -47,7 +52,24 @@ class PusherPool:
         self._should_start_pushers = _hs.config.start_pushers
         self.store = self.hs.get_datastore()
         self.clock = self.hs.get_clock()
-        self.pushers = {}
+
+        # map from user id to app_id:pushkey to pusher
+        self.pushers = {}  # type: Dict[str, Dict[str, Union[HttpPusher, EmailPusher]]]
+
+        def count_pushers():
+            results = defaultdict(int)  # type: Dict[Tuple[str, str], int]
+            for pushers in self.pushers.values():
+                for pusher in pushers.values():
+                    k = (type(pusher).__name__, pusher.app_id)
+                    results[k] += 1
+            return results
+
+        LaterGauge(
+            name="synapse_pushers",
+            desc="the number of active pushers",
+            labels=["kind", "app_id"],
+            caller=count_pushers,
+        )
 
     def start(self):
         """Starts the pushers off in a background process.
diff --git a/tox.ini b/tox.ini
index 8b4c37c2ee..8e3f09e638 100644
--- a/tox.ini
+++ b/tox.ini
@@ -191,7 +191,9 @@ commands = mypy \
             synapse/handlers/sync.py \
             synapse/handlers/ui_auth \
             synapse/logging/ \
+            synapse/metrics \
             synapse/module_api \
+            synapse/push/pusherpool.py \
             synapse/replication \
             synapse/rest \
             synapse/spam_checker_api \
-- 
cgit 1.4.1


From e913823a220b89a205a09efe53116fab435dfdfb Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 19 Mar 2020 11:28:49 +0100
Subject: Fix concurrent modification errors in pusher metrics (#7106)

add a lock to try to make this metric actually work
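
The pattern, sketched standalone (illustrative only, not the actual PusherPool code): every mutation of the shared dict and the metric callback's iteration take the same threading.Lock, so the collector never observes the dict mid-mutation.

    from threading import Lock

    _pushers_lock = Lock()
    pushers = {}  # shared between the reactor thread and the metrics collector

    def add_pusher(user_id, key, pusher):
        with _pushers_lock:  # writers hold the lock while mutating...
            pushers.setdefault(user_id, {})[key] = pusher

    def count_pushers():
        counts = {}
        with _pushers_lock:  # ...and so does the scrape-time callback, avoiding
            for per_user in pushers.values():  # "dictionary changed size during
                for pusher in per_user.values():  # iteration" errors
                    kind = type(pusher).__name__
                    counts[kind] = counts.get(kind, 0) + 1
        return counts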
---
 changelog.d/7106.feature   |  1 +
 synapse/push/pusherpool.py | 27 ++++++++++++++++++---------
 2 files changed, 19 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/7106.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7106.feature b/changelog.d/7106.feature
new file mode 100644
index 0000000000..413e7f29d7
--- /dev/null
+++ b/changelog.d/7106.feature
@@ -0,0 +1 @@
+Add prometheus metrics for the number of active pushers.
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index bf721759df..88d203aa44 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -16,6 +16,7 @@
 
 import logging
 from collections import defaultdict
+from threading import Lock
 from typing import Dict, Tuple, Union
 
 from twisted.internet import defer
@@ -56,12 +57,17 @@ class PusherPool:
         # map from user id to app_id:pushkey to pusher
         self.pushers = {}  # type: Dict[str, Dict[str, Union[HttpPusher, EmailPusher]]]
 
+        # a lock for the pushers dict, since `count_pushers` is called from a different
+        # thread and we otherwise get concurrent modification errors
+        self._pushers_lock = Lock()
+
         def count_pushers():
             results = defaultdict(int)  # type: Dict[Tuple[str, str], int]
-            for pushers in self.pushers.values():
-                for pusher in pushers.values():
-                    k = (type(pusher).__name__, pusher.app_id)
-                    results[k] += 1
+            with self._pushers_lock:
+                for pushers in self.pushers.values():
+                    for pusher in pushers.values():
+                        k = (type(pusher).__name__, pusher.app_id)
+                        results[k] += 1
             return results
 
         LaterGauge(
@@ -293,11 +299,12 @@ class PusherPool:
             return
 
         appid_pushkey = "%s:%s" % (pusherdict["app_id"], pusherdict["pushkey"])
-        byuser = self.pushers.setdefault(pusherdict["user_name"], {})
 
-        if appid_pushkey in byuser:
-            byuser[appid_pushkey].on_stop()
-        byuser[appid_pushkey] = p
+        with self._pushers_lock:
+            byuser = self.pushers.setdefault(pusherdict["user_name"], {})
+            if appid_pushkey in byuser:
+                byuser[appid_pushkey].on_stop()
+            byuser[appid_pushkey] = p
 
         # Check if there *may* be push to process. We do this as this check is a
         # lot cheaper to do than actually fetching the exact rows we need to
@@ -326,7 +333,9 @@ class PusherPool:
         if appid_pushkey in byuser:
             logger.info("Stopping pusher %s / %s", user_id, appid_pushkey)
             byuser[appid_pushkey].on_stop()
-            del byuser[appid_pushkey]
+            with self._pushers_lock:
+                del byuser[appid_pushkey]
+
         yield self.store.delete_pusher_by_app_id_pushkey_user_id(
             app_id, pushkey, user_id
         )
-- 
cgit 1.4.1


From e43e78b985c586133fedd9779eaf19e1a16ad68b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Thu, 19 Mar 2020 11:07:16 +0000
Subject: 1.12.0rc1

---
 CHANGES.md               | 77 ++++++++++++++++++++++++++++++++++++++++++++++++
 changelog.d/6309.misc    |  1 -
 changelog.d/6315.feature |  1 -
 changelog.d/6572.bugfix  |  1 -
 changelog.d/6615.misc    |  1 -
 changelog.d/6874.misc    |  1 -
 changelog.d/6875.misc    |  1 -
 changelog.d/6925.doc     |  1 -
 changelog.d/6941.removal |  1 -
 changelog.d/6952.misc    |  1 -
 changelog.d/6953.misc    |  1 -
 changelog.d/6954.misc    |  1 -
 changelog.d/6956.misc    |  1 -
 changelog.d/6957.misc    |  1 -
 changelog.d/6962.bugfix  |  1 -
 changelog.d/6964.misc    |  1 -
 changelog.d/6965.feature |  1 -
 changelog.d/6966.removal |  1 -
 changelog.d/6967.bugfix  |  1 -
 changelog.d/6968.bugfix  |  1 -
 changelog.d/6970.removal |  1 -
 changelog.d/6971.feature |  1 -
 changelog.d/6979.misc    |  1 -
 changelog.d/6982.feature |  1 -
 changelog.d/6983.misc    |  1 -
 changelog.d/6984.docker  |  1 -
 changelog.d/6985.misc    |  1 -
 changelog.d/6986.feature |  1 -
 changelog.d/6987.misc    |  1 -
 changelog.d/6990.bugfix  |  1 -
 changelog.d/6991.misc    |  1 -
 changelog.d/6995.misc    |  1 -
 changelog.d/7002.misc    |  1 -
 changelog.d/7003.misc    |  1 -
 changelog.d/7015.misc    |  1 -
 changelog.d/7018.bugfix  |  1 -
 changelog.d/7019.misc    |  1 -
 changelog.d/7020.misc    |  1 -
 changelog.d/7026.removal |  1 -
 changelog.d/7030.feature |  1 -
 changelog.d/7034.removal |  1 -
 changelog.d/7035.bugfix  |  1 -
 changelog.d/7037.feature |  1 -
 changelog.d/7044.bugfix  |  1 -
 changelog.d/7045.misc    |  1 -
 changelog.d/7048.doc     |  1 -
 changelog.d/7055.misc    |  1 -
 changelog.d/7058.feature |  1 -
 changelog.d/7063.misc    |  1 -
 changelog.d/7066.bugfix  |  1 -
 changelog.d/7067.feature |  1 -
 changelog.d/7070.bugfix  |  1 -
 changelog.d/7074.bugfix  |  1 -
 changelog.d/7085.feature |  1 -
 changelog.d/7094.misc    |  1 -
 changelog.d/7095.misc    |  1 -
 changelog.d/7103.feature |  1 -
 changelog.d/7104.misc    |  1 -
 changelog.d/7106.feature |  1 -
 synapse/__init__.py      |  2 +-
 60 files changed, 78 insertions(+), 59 deletions(-)
 delete mode 100644 changelog.d/6309.misc
 delete mode 100644 changelog.d/6315.feature
 delete mode 100644 changelog.d/6572.bugfix
 delete mode 100644 changelog.d/6615.misc
 delete mode 100644 changelog.d/6874.misc
 delete mode 100644 changelog.d/6875.misc
 delete mode 100644 changelog.d/6925.doc
 delete mode 100644 changelog.d/6941.removal
 delete mode 100644 changelog.d/6952.misc
 delete mode 100644 changelog.d/6953.misc
 delete mode 100644 changelog.d/6954.misc
 delete mode 100644 changelog.d/6956.misc
 delete mode 100644 changelog.d/6957.misc
 delete mode 100644 changelog.d/6962.bugfix
 delete mode 100644 changelog.d/6964.misc
 delete mode 100644 changelog.d/6965.feature
 delete mode 100644 changelog.d/6966.removal
 delete mode 100644 changelog.d/6967.bugfix
 delete mode 100644 changelog.d/6968.bugfix
 delete mode 100644 changelog.d/6970.removal
 delete mode 100644 changelog.d/6971.feature
 delete mode 100644 changelog.d/6979.misc
 delete mode 100644 changelog.d/6982.feature
 delete mode 100644 changelog.d/6983.misc
 delete mode 100644 changelog.d/6984.docker
 delete mode 100644 changelog.d/6985.misc
 delete mode 100644 changelog.d/6986.feature
 delete mode 100644 changelog.d/6987.misc
 delete mode 100644 changelog.d/6990.bugfix
 delete mode 100644 changelog.d/6991.misc
 delete mode 100644 changelog.d/6995.misc
 delete mode 100644 changelog.d/7002.misc
 delete mode 100644 changelog.d/7003.misc
 delete mode 100644 changelog.d/7015.misc
 delete mode 100644 changelog.d/7018.bugfix
 delete mode 100644 changelog.d/7019.misc
 delete mode 100644 changelog.d/7020.misc
 delete mode 100644 changelog.d/7026.removal
 delete mode 100644 changelog.d/7030.feature
 delete mode 100644 changelog.d/7034.removal
 delete mode 100644 changelog.d/7035.bugfix
 delete mode 100644 changelog.d/7037.feature
 delete mode 100644 changelog.d/7044.bugfix
 delete mode 100644 changelog.d/7045.misc
 delete mode 100644 changelog.d/7048.doc
 delete mode 100644 changelog.d/7055.misc
 delete mode 100644 changelog.d/7058.feature
 delete mode 100644 changelog.d/7063.misc
 delete mode 100644 changelog.d/7066.bugfix
 delete mode 100644 changelog.d/7067.feature
 delete mode 100644 changelog.d/7070.bugfix
 delete mode 100644 changelog.d/7074.bugfix
 delete mode 100644 changelog.d/7085.feature
 delete mode 100644 changelog.d/7094.misc
 delete mode 100644 changelog.d/7095.misc
 delete mode 100644 changelog.d/7103.feature
 delete mode 100644 changelog.d/7104.misc
 delete mode 100644 changelog.d/7106.feature

(limited to 'changelog.d')

diff --git a/CHANGES.md b/CHANGES.md
index dc9ca05ad1..18ffcea4cd 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,80 @@
+Synapse 1.12.0rc1 (2020-03-19)
+==============================
+
+Features
+--------
+
+- Changes related to room alias management ([MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432)):
+  - Publishing/removing a room from the room directory now requires the user to have a power level capable of modifying the canonical alias, instead of the room aliases. ([\#6965](https://github.com/matrix-org/synapse/issues/6965))
+  - Validate the alt_aliases property of canonical alias events. ([\#6971](https://github.com/matrix-org/synapse/issues/6971))
+  - Users with a power level sufficient to modify the canonical alias of a room can now delete room aliases. ([\#6986](https://github.com/matrix-org/synapse/issues/6986))
+  - Implement updated authorization rules and redaction rules for aliases events, from [MSC2261](https://github.com/matrix-org/matrix-doc/pull/2261) and [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#7037](https://github.com/matrix-org/synapse/issues/7037))
+  - Stop sending m.room.aliases events during room creation and upgrade. ([\#6941](https://github.com/matrix-org/synapse/issues/6941))
+  - Synapse no longer uses room alias events to calculate room names for email notifications. ([\#6966](https://github.com/matrix-org/synapse/issues/6966))
+  - The room list endpoint no longer returns a list of aliases. ([\#6970](https://github.com/matrix-org/synapse/issues/6970))
+  - Remove special handling of aliases events from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260) added in v1.10.0rc1. ([\#7034](https://github.com/matrix-org/synapse/issues/7034))
+- Expose the `synctl`, `hash_password` and `generate_config` commands in the snapcraft package. Contributed by @devec0. ([\#6315](https://github.com/matrix-org/synapse/issues/6315))
+- Check that server_name is correctly set before running database updates. ([\#6982](https://github.com/matrix-org/synapse/issues/6982))
+- Break down monthly active users by `appservice_id` and emit via Prometheus. ([\#7030](https://github.com/matrix-org/synapse/issues/7030))
+- Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process. ([\#7058](https://github.com/matrix-org/synapse/issues/7058), [\#7067](https://github.com/matrix-org/synapse/issues/7067))
+- Add an optional parameter to control whether other sessions are logged out when a user's password is modified. ([\#7085](https://github.com/matrix-org/synapse/issues/7085))
+- Add prometheus metrics for the number of active pushers. ([\#7103](https://github.com/matrix-org/synapse/issues/7103), [\#7106](https://github.com/matrix-org/synapse/issues/7106))
+- Improve performance when making HTTPS requests to sygnal, sydent, etc, by sharing the SSL context object between connections. ([\#7094](https://github.com/matrix-org/synapse/issues/7094))
+
+
+Bugfixes
+--------
+
+- When a user's profile is updated via the admin API, also generate a displayname/avatar update for that user in each room. ([\#6572](https://github.com/matrix-org/synapse/issues/6572))
+- Fix a couple of bugs in email configuration handling. ([\#6962](https://github.com/matrix-org/synapse/issues/6962))
+- Fix an issue affecting worker-based deployments where replication would stop working, necessitating a full restart, after joining a large room. ([\#6967](https://github.com/matrix-org/synapse/issues/6967))
+- Fix `duplicate key` error which was logged when rejoining a room over federation. ([\#6968](https://github.com/matrix-org/synapse/issues/6968))
+- Prevent user from setting 'deactivated' to anything other than a bool on the v2 PUT /users Admin API. ([\#6990](https://github.com/matrix-org/synapse/issues/6990))
+- Fix py35-old CI by using native tox package. ([\#7018](https://github.com/matrix-org/synapse/issues/7018))
+- Fix a bug causing `org.matrix.dummy_event` to be included in responses from `/sync`. ([\#7035](https://github.com/matrix-org/synapse/issues/7035))
+- Fix a bug that renders UTF-8 text files incorrectly when loaded from media. Contributed by @TheStranjer. ([\#7044](https://github.com/matrix-org/synapse/issues/7044))
+- Fix a bug that would cause Synapse to respond with an error about event visibility if a client tried to request the state of a room at a given token. ([\#7066](https://github.com/matrix-org/synapse/issues/7066))
+- Repair a data-corruption issue which was introduced in Synapse 1.10, and fixed in Synapse 1.11, and which could cause `/sync` to return with 404 errors about missing events and unknown rooms. ([\#7070](https://github.com/matrix-org/synapse/issues/7070))
+- Fix a bug causing account validity renewal emails to be sent even if the feature is turned off in some cases. ([\#7074](https://github.com/matrix-org/synapse/issues/7074))
+
+
+Improved Documentation
+----------------------
+
+- Updated CentOS8 install instructions. Contributed by Richard Kellner. ([\#6925](https://github.com/matrix-org/synapse/issues/6925))
+- Fix `POSTGRES_INITDB_ARGS` in the `contrib/docker/docker-compose.yml` example docker-compose configuration. ([\#6984](https://github.com/matrix-org/synapse/issues/6984))
+- Document that the fallback auth endpoints must be routed to the same worker node as the register endpoints. ([\#7048](https://github.com/matrix-org/synapse/issues/7048))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the unused query_auth federation endpoint per MSC2451. ([\#7026](https://github.com/matrix-org/synapse/issues/7026))
+
+
+Internal Changes
+----------------
+
+- Add type hints to `logging/context.py`. ([\#6309](https://github.com/matrix-org/synapse/issues/6309))
+- Add some clarifications to `README.md` in the database schema directory. ([\#6615](https://github.com/matrix-org/synapse/issues/6615))
+- Refactoring work in preparation for changing the event redaction algorithm. ([\#6874](https://github.com/matrix-org/synapse/issues/6874), [\#6875](https://github.com/matrix-org/synapse/issues/6875), [\#6983](https://github.com/matrix-org/synapse/issues/6983), [\#7003](https://github.com/matrix-org/synapse/issues/7003))
+- Improve performance of v2 state resolution for large rooms. ([\#6952](https://github.com/matrix-org/synapse/issues/6952), [\#7095](https://github.com/matrix-org/synapse/issues/7095))
+- Reduce time spent doing GC, by freezing objects on startup. ([\#6953](https://github.com/matrix-org/synapse/issues/6953))
+- Minor performance fixes to `get_auth_chain_ids`. ([\#6954](https://github.com/matrix-org/synapse/issues/6954))
+- Don't record remote cross-signing keys in the `devices` table. ([\#6956](https://github.com/matrix-org/synapse/issues/6956))
+- Use flake8-comprehensions to enforce good hygiene of list/set/dict comprehensions. ([\#6957](https://github.com/matrix-org/synapse/issues/6957))
+- Merge worker apps together. ([\#6964](https://github.com/matrix-org/synapse/issues/6964), [\#7002](https://github.com/matrix-org/synapse/issues/7002), [\#7055](https://github.com/matrix-org/synapse/issues/7055), [\#7104](https://github.com/matrix-org/synapse/issues/7104))
+- Remove redundant `store_room` call from `FederationHandler._process_received_pdu`. ([\#6979](https://github.com/matrix-org/synapse/issues/6979))
+- Update warning for incorrect database collation/ctype to include link to documentation. ([\#6985](https://github.com/matrix-org/synapse/issues/6985))
+- Add some type annotations to the database storage classes. ([\#6987](https://github.com/matrix-org/synapse/issues/6987))
+- Port `synapse.handlers.presence` to async/await. ([\#6991](https://github.com/matrix-org/synapse/issues/6991), [\#7019](https://github.com/matrix-org/synapse/issues/7019))
+- Add some type annotations to the federation base & client classes. ([\#6995](https://github.com/matrix-org/synapse/issues/6995))
+- Change date in INSTALL.md#tls-certificates for the last date of getting TLS certificates to November 2019. ([\#7015](https://github.com/matrix-org/synapse/issues/7015))
+- Port `synapse.rest.keys` to async/await. ([\#7020](https://github.com/matrix-org/synapse/issues/7020))
+- Add a type check to `is_verified` when processing room keys. ([\#7045](https://github.com/matrix-org/synapse/issues/7045))
+- Add type annotations and comments to the auth handler. ([\#7063](https://github.com/matrix-org/synapse/issues/7063))
+
+
 Synapse 1.11.1 (2020-03-03)
 ===========================
 
diff --git a/changelog.d/6309.misc b/changelog.d/6309.misc
deleted file mode 100644
index 1aa7294617..0000000000
--- a/changelog.d/6309.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type hints to `logging/context.py`.
diff --git a/changelog.d/6315.feature b/changelog.d/6315.feature
deleted file mode 100644
index c5377dd1e9..0000000000
--- a/changelog.d/6315.feature
+++ /dev/null
@@ -1 +0,0 @@
-Expose the `synctl`, `hash_password` and `generate_config` commands in the snapcraft package. Contributed by @devec0.
diff --git a/changelog.d/6572.bugfix b/changelog.d/6572.bugfix
deleted file mode 100644
index 4f708f409f..0000000000
--- a/changelog.d/6572.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-When a user's profile is updated via the admin API, also generate a displayname/avatar update for that user in each room.
diff --git a/changelog.d/6615.misc b/changelog.d/6615.misc
deleted file mode 100644
index 9f93152565..0000000000
--- a/changelog.d/6615.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some clarifications to `README.md` in the database schema directory.
diff --git a/changelog.d/6874.misc b/changelog.d/6874.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6874.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6875.misc b/changelog.d/6875.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6875.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6925.doc b/changelog.d/6925.doc
deleted file mode 100644
index b8e6c73630..0000000000
--- a/changelog.d/6925.doc
+++ /dev/null
@@ -1 +0,0 @@
-Updated CentOS8 install instructions. Contributed by Richard Kellner.
diff --git a/changelog.d/6941.removal b/changelog.d/6941.removal
deleted file mode 100644
index 8573be84b3..0000000000
--- a/changelog.d/6941.removal
+++ /dev/null
@@ -1 +0,0 @@
-Stop sending m.room.aliases events during room creation and upgrade.
diff --git a/changelog.d/6952.misc b/changelog.d/6952.misc
deleted file mode 100644
index e26dc5cab8..0000000000
--- a/changelog.d/6952.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve perf of v2 state res for large rooms.
diff --git a/changelog.d/6953.misc b/changelog.d/6953.misc
deleted file mode 100644
index 0ab52041cf..0000000000
--- a/changelog.d/6953.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce time spent doing GC by freezing objects on startup.
diff --git a/changelog.d/6954.misc b/changelog.d/6954.misc
deleted file mode 100644
index 8b84ce2f19..0000000000
--- a/changelog.d/6954.misc
+++ /dev/null
@@ -1 +0,0 @@
-Minor perf fixes to `get_auth_chain_ids`.
diff --git a/changelog.d/6956.misc b/changelog.d/6956.misc
deleted file mode 100644
index 5cb0894182..0000000000
--- a/changelog.d/6956.misc
+++ /dev/null
@@ -1 +0,0 @@
-Don't record remote cross-signing keys in the `devices` table.
diff --git a/changelog.d/6957.misc b/changelog.d/6957.misc
deleted file mode 100644
index 4f98030110..0000000000
--- a/changelog.d/6957.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use flake8-comprehensions to enforce good hygiene of list/set/dict comprehensions.
diff --git a/changelog.d/6962.bugfix b/changelog.d/6962.bugfix
deleted file mode 100644
index 9f5229d400..0000000000
--- a/changelog.d/6962.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a couple of bugs in email configuration handling.
diff --git a/changelog.d/6964.misc b/changelog.d/6964.misc
deleted file mode 100644
index ec5c004bbe..0000000000
--- a/changelog.d/6964.misc
+++ /dev/null
@@ -1 +0,0 @@
-Merge worker apps together.
diff --git a/changelog.d/6965.feature b/changelog.d/6965.feature
deleted file mode 100644
index 6ad9956e40..0000000000
--- a/changelog.d/6965.feature
+++ /dev/null
@@ -1 +0,0 @@
-Publishing/removing a room from the room directory now requires the user to have a power level capable of modifying the canonical alias, instead of the room aliases.
diff --git a/changelog.d/6966.removal b/changelog.d/6966.removal
deleted file mode 100644
index 69673d9139..0000000000
--- a/changelog.d/6966.removal
+++ /dev/null
@@ -1 +0,0 @@
-Synapse no longer uses room alias events to calculate room names for email notifications.
diff --git a/changelog.d/6967.bugfix b/changelog.d/6967.bugfix
deleted file mode 100644
index b65f80cf1d..0000000000
--- a/changelog.d/6967.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix an issue affecting worker-based deployments where replication would stop working, necessitating a full restart, after joining a large room.
diff --git a/changelog.d/6968.bugfix b/changelog.d/6968.bugfix
deleted file mode 100644
index 9965bfc0c3..0000000000
--- a/changelog.d/6968.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix `duplicate key` error which was logged when rejoining a room over federation.
diff --git a/changelog.d/6970.removal b/changelog.d/6970.removal
deleted file mode 100644
index 89bd363b95..0000000000
--- a/changelog.d/6970.removal
+++ /dev/null
@@ -1 +0,0 @@
-The room list endpoint no longer returns a list of aliases.
diff --git a/changelog.d/6971.feature b/changelog.d/6971.feature
deleted file mode 100644
index ccf02a61df..0000000000
--- a/changelog.d/6971.feature
+++ /dev/null
@@ -1 +0,0 @@
-Validate the alt_aliases property of canonical alias events.
diff --git a/changelog.d/6979.misc b/changelog.d/6979.misc
deleted file mode 100644
index c57b398c2f..0000000000
--- a/changelog.d/6979.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove redundant `store_room` call from `FederationHandler._process_received_pdu`.
diff --git a/changelog.d/6982.feature b/changelog.d/6982.feature
deleted file mode 100644
index 934cc5141a..0000000000
--- a/changelog.d/6982.feature
+++ /dev/null
@@ -1 +0,0 @@
-Check that server_name is correctly set before running database updates.
diff --git a/changelog.d/6983.misc b/changelog.d/6983.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6983.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6984.docker b/changelog.d/6984.docker
deleted file mode 100644
index 84a55e1267..0000000000
--- a/changelog.d/6984.docker
+++ /dev/null
@@ -1 +0,0 @@
-Fix `POSTGRES_INITDB_ARGS` in the `contrib/docker/docker-compose.yml` example docker-compose configuration.
diff --git a/changelog.d/6985.misc b/changelog.d/6985.misc
deleted file mode 100644
index ba367fa9af..0000000000
--- a/changelog.d/6985.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update warning for incorrect database collation/ctype to include link to documentation.
diff --git a/changelog.d/6986.feature b/changelog.d/6986.feature
deleted file mode 100644
index 16dea8bd7f..0000000000
--- a/changelog.d/6986.feature
+++ /dev/null
@@ -1 +0,0 @@
-Users with a power level sufficient to modify the canonical alias of a room can now delete room aliases.
diff --git a/changelog.d/6987.misc b/changelog.d/6987.misc
deleted file mode 100644
index 7ff74cda55..0000000000
--- a/changelog.d/6987.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some type annotations to the database storage classes.
diff --git a/changelog.d/6990.bugfix b/changelog.d/6990.bugfix
deleted file mode 100644
index 8c1c48f4d4..0000000000
--- a/changelog.d/6990.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent user from setting 'deactivated' to anything other than a bool on the v2 PUT /users Admin API.
\ No newline at end of file
diff --git a/changelog.d/6991.misc b/changelog.d/6991.misc
deleted file mode 100644
index 5130f4e8af..0000000000
--- a/changelog.d/6991.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port `synapse.handlers.presence` to async/await.
diff --git a/changelog.d/6995.misc b/changelog.d/6995.misc
deleted file mode 100644
index 884b4cf4ee..0000000000
--- a/changelog.d/6995.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some type annotations to the federation base & client classes.
diff --git a/changelog.d/7002.misc b/changelog.d/7002.misc
deleted file mode 100644
index ec5c004bbe..0000000000
--- a/changelog.d/7002.misc
+++ /dev/null
@@ -1 +0,0 @@
-Merge worker apps together.
diff --git a/changelog.d/7003.misc b/changelog.d/7003.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/7003.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/7015.misc b/changelog.d/7015.misc
deleted file mode 100644
index 9709dc606e..0000000000
--- a/changelog.d/7015.misc
+++ /dev/null
@@ -1 +0,0 @@
-Change date in INSTALL.md#tls-certificates for last date of getting TLS certificates to November 2019.
\ No newline at end of file
diff --git a/changelog.d/7018.bugfix b/changelog.d/7018.bugfix
deleted file mode 100644
index d1b6c1d464..0000000000
--- a/changelog.d/7018.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix py35-old CI by using native tox package.
diff --git a/changelog.d/7019.misc b/changelog.d/7019.misc
deleted file mode 100644
index 5130f4e8af..0000000000
--- a/changelog.d/7019.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port `synapse.handlers.presence` to async/await.
diff --git a/changelog.d/7020.misc b/changelog.d/7020.misc
deleted file mode 100644
index 188b4378cb..0000000000
--- a/changelog.d/7020.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port `synapse.rest.keys` to async/await.
diff --git a/changelog.d/7026.removal b/changelog.d/7026.removal
deleted file mode 100644
index 4c8c563bb0..0000000000
--- a/changelog.d/7026.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove the unused query_auth federation endpoint per MSC2451.
diff --git a/changelog.d/7030.feature b/changelog.d/7030.feature
deleted file mode 100644
index fcfdb8d8a1..0000000000
--- a/changelog.d/7030.feature
+++ /dev/null
@@ -1 +0,0 @@
-Break down monthly active users by `appservice_id` and emit via Prometheus.
diff --git a/changelog.d/7034.removal b/changelog.d/7034.removal
deleted file mode 100644
index be8d20e14f..0000000000
--- a/changelog.d/7034.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove special handling of aliases events from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260) added in v1.10.0rc1.
diff --git a/changelog.d/7035.bugfix b/changelog.d/7035.bugfix
deleted file mode 100644
index 56292dc8ac..0000000000
--- a/changelog.d/7035.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug causing `org.matrix.dummy_event` to be included in responses from `/sync`.
diff --git a/changelog.d/7037.feature b/changelog.d/7037.feature
deleted file mode 100644
index 4bc1b3b19f..0000000000
--- a/changelog.d/7037.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement updated authorization rules and redaction rules for aliases events, from [MSC2261](https://github.com/matrix-org/matrix-doc/pull/2261) and [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432).
diff --git a/changelog.d/7044.bugfix b/changelog.d/7044.bugfix
deleted file mode 100644
index 790088ddb4..0000000000
--- a/changelog.d/7044.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug that renders UTF-8 text files incorrectly when loaded from media. Contributed by @TheStranjer.
diff --git a/changelog.d/7045.misc b/changelog.d/7045.misc
deleted file mode 100644
index 74c1abea56..0000000000
--- a/changelog.d/7045.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a type check to `is_verified` when processing room keys.
diff --git a/changelog.d/7048.doc b/changelog.d/7048.doc
deleted file mode 100644
index c9666f333e..0000000000
--- a/changelog.d/7048.doc
+++ /dev/null
@@ -1 +0,0 @@
-Document that the fallback auth endpoints must be routed to the same worker node as the register endpoints.
diff --git a/changelog.d/7055.misc b/changelog.d/7055.misc
deleted file mode 100644
index ec5c004bbe..0000000000
--- a/changelog.d/7055.misc
+++ /dev/null
@@ -1 +0,0 @@
-Merge worker apps together.
diff --git a/changelog.d/7058.feature b/changelog.d/7058.feature
deleted file mode 100644
index 53ea485e03..0000000000
--- a/changelog.d/7058.feature
+++ /dev/null
@@ -1 +0,0 @@
-Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process.
diff --git a/changelog.d/7063.misc b/changelog.d/7063.misc
deleted file mode 100644
index e7b1cd3cd8..0000000000
--- a/changelog.d/7063.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type annotations and comments to the auth handler.
diff --git a/changelog.d/7066.bugfix b/changelog.d/7066.bugfix
deleted file mode 100644
index 94bb096287..0000000000
--- a/changelog.d/7066.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug that would cause Synapse to respond with an error about event visibility if a client tried to request the state of a room at a given token.
diff --git a/changelog.d/7067.feature b/changelog.d/7067.feature
deleted file mode 100644
index 53ea485e03..0000000000
--- a/changelog.d/7067.feature
+++ /dev/null
@@ -1 +0,0 @@
-Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process.
diff --git a/changelog.d/7070.bugfix b/changelog.d/7070.bugfix
deleted file mode 100644
index 9031927546..0000000000
--- a/changelog.d/7070.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Repair a data-corruption issue which was introduced in Synapse 1.10, and fixed in Synapse 1.11, and which could cause `/sync` to return with 404 errors about missing events and unknown rooms.
diff --git a/changelog.d/7074.bugfix b/changelog.d/7074.bugfix
deleted file mode 100644
index 38d7455971..0000000000
--- a/changelog.d/7074.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug causing account validity renewal emails to be sent even if the feature is turned off in some cases.
diff --git a/changelog.d/7085.feature b/changelog.d/7085.feature
deleted file mode 100644
index df6d0f990d..0000000000
--- a/changelog.d/7085.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add an optional parameter to control whether other sessions are logged out when a user's password is modified.
diff --git a/changelog.d/7094.misc b/changelog.d/7094.misc
deleted file mode 100644
index aa093ee3c0..0000000000
--- a/changelog.d/7094.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve performance when making HTTPS requests to sygnal, sydent, etc, by sharing the SSL context object between connections.
diff --git a/changelog.d/7095.misc b/changelog.d/7095.misc
deleted file mode 100644
index 44fc9f616f..0000000000
--- a/changelog.d/7095.misc
+++ /dev/null
@@ -1 +0,0 @@
-Attempt to improve performance of state res v2 algorithm.
diff --git a/changelog.d/7103.feature b/changelog.d/7103.feature
deleted file mode 100644
index 413e7f29d7..0000000000
--- a/changelog.d/7103.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add prometheus metrics for the number of active pushers.
diff --git a/changelog.d/7104.misc b/changelog.d/7104.misc
deleted file mode 100644
index ec5c004bbe..0000000000
--- a/changelog.d/7104.misc
+++ /dev/null
@@ -1 +0,0 @@
-Merge worker apps together.
diff --git a/changelog.d/7106.feature b/changelog.d/7106.feature
deleted file mode 100644
index 413e7f29d7..0000000000
--- a/changelog.d/7106.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add prometheus metrics for the number of active pushers.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index e56ba89ff4..020e0536be 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.11.1"
+__version__ = "1.12.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
-- 
cgit 1.4.1


From c2db6599c820d97e3c8a02d782e90af80121c903 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Thu, 19 Mar 2020 08:22:56 -0400
Subject: Fix a bug in the federation API which could cause occasional "Failed
 to get PDU" errors (#7089).

---
 changelog.d/7089.bugfix                 |  1 +
 synapse/federation/federation_base.py   | 24 +++++++++---------------
 synapse/federation/federation_client.py | 19 ++++++++-----------
 synapse/federation/federation_server.py |  8 ++++----
 4 files changed, 22 insertions(+), 30 deletions(-)
 create mode 100644 changelog.d/7089.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7089.bugfix b/changelog.d/7089.bugfix
new file mode 100644
index 0000000000..f1f440f23a
--- /dev/null
+++ b/changelog.d/7089.bugfix
@@ -0,0 +1 @@
+Fix a bug in the federation API which could cause occasional "Failed to get PDU" errors.
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 5c991e5412..b0b0eba41e 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -25,11 +25,7 @@ from twisted.python.failure import Failure
 
 from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
 from synapse.api.errors import Codes, SynapseError
-from synapse.api.room_versions import (
-    KNOWN_ROOM_VERSIONS,
-    EventFormatVersions,
-    RoomVersion,
-)
+from synapse.api.room_versions import EventFormatVersions, RoomVersion
 from synapse.crypto.event_signing import check_event_content_hash
 from synapse.crypto.keyring import Keyring
 from synapse.events import EventBase, make_event_from_dict
@@ -55,13 +51,15 @@ class FederationBase(object):
         self.store = hs.get_datastore()
         self._clock = hs.get_clock()
 
-    def _check_sigs_and_hash(self, room_version: str, pdu: EventBase) -> Deferred:
+    def _check_sigs_and_hash(
+        self, room_version: RoomVersion, pdu: EventBase
+    ) -> Deferred:
         return make_deferred_yieldable(
             self._check_sigs_and_hashes(room_version, [pdu])[0]
         )
 
     def _check_sigs_and_hashes(
-        self, room_version: str, pdus: List[EventBase]
+        self, room_version: RoomVersion, pdus: List[EventBase]
     ) -> List[Deferred]:
         """Checks that each of the received events is correctly signed by the
         sending server.
@@ -146,7 +144,7 @@ class PduToCheckSig(
 
 
 def _check_sigs_on_pdus(
-    keyring: Keyring, room_version: str, pdus: Iterable[EventBase]
+    keyring: Keyring, room_version: RoomVersion, pdus: Iterable[EventBase]
 ) -> List[Deferred]:
     """Check that the given events are correctly signed
 
@@ -191,10 +189,6 @@ def _check_sigs_on_pdus(
         for p in pdus
     ]
 
-    v = KNOWN_ROOM_VERSIONS.get(room_version)
-    if not v:
-        raise RuntimeError("Unrecognized room version %s" % (room_version,))
-
     # First we check that the sender event is signed by the sender's domain
     # (except if its a 3pid invite, in which case it may be sent by any server)
     pdus_to_check_sender = [p for p in pdus_to_check if not _is_invite_via_3pid(p.pdu)]
@@ -204,7 +198,7 @@ def _check_sigs_on_pdus(
             (
                 p.sender_domain,
                 p.redacted_pdu_json,
-                p.pdu.origin_server_ts if v.enforce_key_validity else 0,
+                p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
                 p.pdu.event_id,
             )
             for p in pdus_to_check_sender
@@ -227,7 +221,7 @@ def _check_sigs_on_pdus(
     # event id's domain (normally only the case for joins/leaves), and add additional
     # checks. Only do this if the room version has a concept of event ID domain
     # (ie, the room version uses old-style non-hash event IDs).
-    if v.event_format == EventFormatVersions.V1:
+    if room_version.event_format == EventFormatVersions.V1:
         pdus_to_check_event_id = [
             p
             for p in pdus_to_check
@@ -239,7 +233,7 @@ def _check_sigs_on_pdus(
                 (
                     get_domain_from_id(p.pdu.event_id),
                     p.redacted_pdu_json,
-                    p.pdu.origin_server_ts if v.enforce_key_validity else 0,
+                    p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
                     p.pdu.event_id,
                 )
                 for p in pdus_to_check_event_id
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 8c6b839478..a0071fec94 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -220,8 +220,7 @@ class FederationClient(FederationBase):
         # FIXME: We should handle signature failures more gracefully.
         pdus[:] = await make_deferred_yieldable(
             defer.gatherResults(
-                self._check_sigs_and_hashes(room_version.identifier, pdus),
-                consumeErrors=True,
+                self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True,
             ).addErrback(unwrapFirstError)
         )
 
@@ -291,9 +290,7 @@ class FederationClient(FederationBase):
                     pdu = pdu_list[0]
 
                     # Check signatures are correct.
-                    signed_pdu = await self._check_sigs_and_hash(
-                        room_version.identifier, pdu
-                    )
+                    signed_pdu = await self._check_sigs_and_hash(room_version, pdu)
 
                     break
 
@@ -350,7 +347,7 @@ class FederationClient(FederationBase):
         self,
         origin: str,
         pdus: List[EventBase],
-        room_version: str,
+        room_version: RoomVersion,
         outlier: bool = False,
         include_none: bool = False,
     ) -> List[EventBase]:
@@ -396,7 +393,7 @@ class FederationClient(FederationBase):
                         self.get_pdu(
                             destinations=[pdu.origin],
                             event_id=pdu.event_id,
-                            room_version=room_version,  # type: ignore
+                            room_version=room_version,
                             outlier=outlier,
                             timeout=10000,
                         )
@@ -434,7 +431,7 @@ class FederationClient(FederationBase):
         ]
 
         signed_auth = await self._check_sigs_and_hash_and_fetch(
-            destination, auth_chain, outlier=True, room_version=room_version.identifier
+            destination, auth_chain, outlier=True, room_version=room_version
         )
 
         signed_auth.sort(key=lambda e: e.depth)
@@ -661,7 +658,7 @@ class FederationClient(FederationBase):
                 destination,
                 list(pdus.values()),
                 outlier=True,
-                room_version=room_version.identifier,
+                room_version=room_version,
             )
 
             valid_pdus_map = {p.event_id: p for p in valid_pdus}
@@ -756,7 +753,7 @@ class FederationClient(FederationBase):
         pdu = event_from_pdu_json(pdu_dict, room_version)
 
         # Check signatures are correct.
-        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
+        pdu = await self._check_sigs_and_hash(room_version, pdu)
 
         # FIXME: We should handle signature failures more gracefully.
 
@@ -948,7 +945,7 @@ class FederationClient(FederationBase):
             ]
 
             signed_events = await self._check_sigs_and_hash_and_fetch(
-                destination, events, outlier=False, room_version=room_version.identifier
+                destination, events, outlier=False, room_version=room_version
             )
         except HttpResponseException as e:
             if not e.code == 400:
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 275b9c99d7..89d521bc31 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -409,7 +409,7 @@ class FederationServer(FederationBase):
         pdu = event_from_pdu_json(content, room_version)
         origin_host, _ = parse_server_name(origin)
         await self.check_server_matches_acl(origin_host, pdu.room_id)
-        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
+        pdu = await self._check_sigs_and_hash(room_version, pdu)
         ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
         time_now = self._clock.time_msec()
         return {"event": ret_pdu.get_pdu_json(time_now)}
@@ -425,7 +425,7 @@ class FederationServer(FederationBase):
 
         logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
 
-        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
+        pdu = await self._check_sigs_and_hash(room_version, pdu)
 
         res_pdus = await self.handler.on_send_join_request(origin, pdu)
         time_now = self._clock.time_msec()
@@ -455,7 +455,7 @@ class FederationServer(FederationBase):
 
         logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
 
-        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
+        pdu = await self._check_sigs_and_hash(room_version, pdu)
 
         await self.handler.on_send_leave_request(origin, pdu)
         return {}
@@ -611,7 +611,7 @@ class FederationServer(FederationBase):
                 logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)
 
         # We've already checked that we know the room version by this point
-        room_version = await self.store.get_room_version_id(pdu.room_id)
+        room_version = await self.store.get_room_version(pdu.room_id)
 
         # Check signature.
         try:
-- 
cgit 1.4.1


From caec7d4fa0041697b7714e638477772f0a827ff6 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Fri, 20 Mar 2020 07:20:02 -0400
Subject: Convert some of the media REST code to async/await (#7110)
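
The mechanical conversion applied throughout: drop `@defer.inlineCallbacks`, declare the function `async`, and replace each `yield` with `await` (Twisted Deferreds are directly awaitable). In sketch form, with a hypothetical `store`:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def get_media_old(store, media_id):
        info = yield store.get_local_media(media_id)  # Deferred resolved by the decorator
        return info

    async def get_media_new(store, media_id):
        info = await store.get_local_media(media_id)  # await the Deferred directly
        return info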

---
 changelog.d/7110.misc                         |   1 +
 synapse/rest/media/v1/media_repository.py     | 110 ++++++++++++--------------
 synapse/rest/media/v1/preview_url_resource.py |  37 ++++-----
 synapse/rest/media/v1/thumbnail_resource.py   |  54 ++++++-------
 4 files changed, 91 insertions(+), 111 deletions(-)
 create mode 100644 changelog.d/7110.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7110.misc b/changelog.d/7110.misc
new file mode 100644
index 0000000000..fac5bc0403
--- /dev/null
+++ b/changelog.d/7110.misc
@@ -0,0 +1 @@
+Convert some of synapse.rest.media to async/await.
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 490b1b45a8..fd10d42f2f 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -24,7 +24,6 @@ from six import iteritems
 
 import twisted.internet.error
 import twisted.web.http
-from twisted.internet import defer
 from twisted.web.resource import Resource
 
 from synapse.api.errors import (
@@ -114,15 +113,14 @@ class MediaRepository(object):
             "update_recently_accessed_media", self._update_recently_accessed
         )
 
-    @defer.inlineCallbacks
-    def _update_recently_accessed(self):
+    async def _update_recently_accessed(self):
         remote_media = self.recently_accessed_remotes
         self.recently_accessed_remotes = set()
 
         local_media = self.recently_accessed_locals
         self.recently_accessed_locals = set()
 
-        yield self.store.update_cached_last_access_time(
+        await self.store.update_cached_last_access_time(
             local_media, remote_media, self.clock.time_msec()
         )
 
@@ -138,8 +136,7 @@ class MediaRepository(object):
         else:
             self.recently_accessed_locals.add(media_id)
 
-    @defer.inlineCallbacks
-    def create_content(
+    async def create_content(
         self, media_type, upload_name, content, content_length, auth_user
     ):
         """Store uploaded content for a local user and return the mxc URL
@@ -158,11 +155,11 @@ class MediaRepository(object):
 
         file_info = FileInfo(server_name=None, file_id=media_id)
 
-        fname = yield self.media_storage.store_file(content, file_info)
+        fname = await self.media_storage.store_file(content, file_info)
 
         logger.info("Stored local media in file %r", fname)
 
-        yield self.store.store_local_media(
+        await self.store.store_local_media(
             media_id=media_id,
             media_type=media_type,
             time_now_ms=self.clock.time_msec(),
@@ -171,12 +168,11 @@ class MediaRepository(object):
             user_id=auth_user,
         )
 
-        yield self._generate_thumbnails(None, media_id, media_id, media_type)
+        await self._generate_thumbnails(None, media_id, media_id, media_type)
 
         return "mxc://%s/%s" % (self.server_name, media_id)
 
-    @defer.inlineCallbacks
-    def get_local_media(self, request, media_id, name):
+    async def get_local_media(self, request, media_id, name):
         """Responds to reqests for local media, if exists, or returns 404.
 
         Args:
@@ -190,7 +186,7 @@ class MediaRepository(object):
             Deferred: Resolves once a response has successfully been written
                 to request
         """
-        media_info = yield self.store.get_local_media(media_id)
+        media_info = await self.store.get_local_media(media_id)
         if not media_info or media_info["quarantined_by"]:
             respond_404(request)
             return
@@ -204,13 +200,12 @@ class MediaRepository(object):
 
         file_info = FileInfo(None, media_id, url_cache=url_cache)
 
-        responder = yield self.media_storage.fetch_media(file_info)
-        yield respond_with_responder(
+        responder = await self.media_storage.fetch_media(file_info)
+        await respond_with_responder(
             request, responder, media_type, media_length, upload_name
         )
 
-    @defer.inlineCallbacks
-    def get_remote_media(self, request, server_name, media_id, name):
+    async def get_remote_media(self, request, server_name, media_id, name):
         """Respond to requests for remote media.
 
         Args:
@@ -236,8 +231,8 @@ class MediaRepository(object):
         # We linearize here to ensure that we don't try and download remote
         # media multiple times concurrently
         key = (server_name, media_id)
-        with (yield self.remote_media_linearizer.queue(key)):
-            responder, media_info = yield self._get_remote_media_impl(
+        with (await self.remote_media_linearizer.queue(key)):
+            responder, media_info = await self._get_remote_media_impl(
                 server_name, media_id
             )
 
@@ -246,14 +241,13 @@ class MediaRepository(object):
             media_type = media_info["media_type"]
             media_length = media_info["media_length"]
             upload_name = name if name else media_info["upload_name"]
-            yield respond_with_responder(
+            await respond_with_responder(
                 request, responder, media_type, media_length, upload_name
             )
         else:
             respond_404(request)
 
-    @defer.inlineCallbacks
-    def get_remote_media_info(self, server_name, media_id):
+    async def get_remote_media_info(self, server_name, media_id):
         """Gets the media info associated with the remote file, downloading
         if necessary.
 
@@ -274,8 +268,8 @@ class MediaRepository(object):
         # We linearize here to ensure that we don't try and download remote
         # media multiple times concurrently
         key = (server_name, media_id)
-        with (yield self.remote_media_linearizer.queue(key)):
-            responder, media_info = yield self._get_remote_media_impl(
+        with (await self.remote_media_linearizer.queue(key)):
+            responder, media_info = await self._get_remote_media_impl(
                 server_name, media_id
             )
 
@@ -286,8 +280,7 @@ class MediaRepository(object):
 
         return media_info
 
-    @defer.inlineCallbacks
-    def _get_remote_media_impl(self, server_name, media_id):
+    async def _get_remote_media_impl(self, server_name, media_id):
         """Looks for media in local cache, if not there then attempt to
         download from remote server.
 
@@ -299,7 +292,7 @@ class MediaRepository(object):
         Returns:
             Deferred[(Responder, media_info)]
         """
-        media_info = yield self.store.get_cached_remote_media(server_name, media_id)
+        media_info = await self.store.get_cached_remote_media(server_name, media_id)
 
         # file_id is the ID we use to track the file locally. If we've already
         # seen the file then reuse the existing ID, otherwise generate a new
@@ -317,19 +310,18 @@ class MediaRepository(object):
                 logger.info("Media is quarantined")
                 raise NotFoundError()
 
-            responder = yield self.media_storage.fetch_media(file_info)
+            responder = await self.media_storage.fetch_media(file_info)
             if responder:
                 return responder, media_info
 
         # Failed to find the file anywhere, lets download it.
 
-        media_info = yield self._download_remote_file(server_name, media_id, file_id)
+        media_info = await self._download_remote_file(server_name, media_id, file_id)
 
-        responder = yield self.media_storage.fetch_media(file_info)
+        responder = await self.media_storage.fetch_media(file_info)
         return responder, media_info
 
-    @defer.inlineCallbacks
-    def _download_remote_file(self, server_name, media_id, file_id):
+    async def _download_remote_file(self, server_name, media_id, file_id):
         """Attempt to download the remote file from the given server name,
         using the given file_id as the local id.
 
@@ -351,7 +343,7 @@ class MediaRepository(object):
                 ("/_matrix/media/v1/download", server_name, media_id)
             )
             try:
-                length, headers = yield self.client.get_file(
+                length, headers = await self.client.get_file(
                     server_name,
                     request_path,
                     output_stream=f,
@@ -397,7 +389,7 @@ class MediaRepository(object):
                 )
                 raise SynapseError(502, "Failed to fetch remote media")
 
-            yield finish()
+            await finish()
 
         media_type = headers[b"Content-Type"][0].decode("ascii")
         upload_name = get_filename_from_headers(headers)
@@ -405,7 +397,7 @@ class MediaRepository(object):
 
         logger.info("Stored remote media in file %r", fname)
 
-        yield self.store.store_cached_remote_media(
+        await self.store.store_cached_remote_media(
             origin=server_name,
             media_id=media_id,
             media_type=media_type,
@@ -423,7 +415,7 @@ class MediaRepository(object):
             "filesystem_id": file_id,
         }
 
-        yield self._generate_thumbnails(server_name, media_id, file_id, media_type)
+        await self._generate_thumbnails(server_name, media_id, file_id, media_type)
 
         return media_info
 
@@ -458,16 +450,15 @@ class MediaRepository(object):
 
         return t_byte_source
 
-    @defer.inlineCallbacks
-    def generate_local_exact_thumbnail(
+    async def generate_local_exact_thumbnail(
         self, media_id, t_width, t_height, t_method, t_type, url_cache
     ):
-        input_path = yield self.media_storage.ensure_media_is_in_local_cache(
+        input_path = await self.media_storage.ensure_media_is_in_local_cache(
             FileInfo(None, media_id, url_cache=url_cache)
         )
 
         thumbnailer = Thumbnailer(input_path)
-        t_byte_source = yield defer_to_thread(
+        t_byte_source = await defer_to_thread(
             self.hs.get_reactor(),
             self._generate_thumbnail,
             thumbnailer,
@@ -490,7 +481,7 @@ class MediaRepository(object):
                     thumbnail_type=t_type,
                 )
 
-                output_path = yield self.media_storage.store_file(
+                output_path = await self.media_storage.store_file(
                     t_byte_source, file_info
                 )
             finally:
@@ -500,22 +491,21 @@ class MediaRepository(object):
 
             t_len = os.path.getsize(output_path)
 
-            yield self.store.store_local_thumbnail(
+            await self.store.store_local_thumbnail(
                 media_id, t_width, t_height, t_type, t_method, t_len
             )
 
             return output_path
 
-    @defer.inlineCallbacks
-    def generate_remote_exact_thumbnail(
+    async def generate_remote_exact_thumbnail(
         self, server_name, file_id, media_id, t_width, t_height, t_method, t_type
     ):
-        input_path = yield self.media_storage.ensure_media_is_in_local_cache(
+        input_path = await self.media_storage.ensure_media_is_in_local_cache(
             FileInfo(server_name, file_id, url_cache=False)
         )
 
         thumbnailer = Thumbnailer(input_path)
-        t_byte_source = yield defer_to_thread(
+        t_byte_source = await defer_to_thread(
             self.hs.get_reactor(),
             self._generate_thumbnail,
             thumbnailer,
@@ -537,7 +527,7 @@ class MediaRepository(object):
                     thumbnail_type=t_type,
                 )
 
-                output_path = yield self.media_storage.store_file(
+                output_path = await self.media_storage.store_file(
                     t_byte_source, file_info
                 )
             finally:
@@ -547,7 +537,7 @@ class MediaRepository(object):
 
             t_len = os.path.getsize(output_path)
 
-            yield self.store.store_remote_media_thumbnail(
+            await self.store.store_remote_media_thumbnail(
                 server_name,
                 media_id,
                 file_id,
@@ -560,8 +550,7 @@ class MediaRepository(object):
 
             return output_path
 
-    @defer.inlineCallbacks
-    def _generate_thumbnails(
+    async def _generate_thumbnails(
         self, server_name, media_id, file_id, media_type, url_cache=False
     ):
         """Generate and store thumbnails for an image.
@@ -582,7 +571,7 @@ class MediaRepository(object):
         if not requirements:
             return
 
-        input_path = yield self.media_storage.ensure_media_is_in_local_cache(
+        input_path = await self.media_storage.ensure_media_is_in_local_cache(
             FileInfo(server_name, file_id, url_cache=url_cache)
         )
 
@@ -600,7 +589,7 @@ class MediaRepository(object):
             return
 
         if thumbnailer.transpose_method is not None:
-            m_width, m_height = yield defer_to_thread(
+            m_width, m_height = await defer_to_thread(
                 self.hs.get_reactor(), thumbnailer.transpose
             )
 
@@ -620,11 +609,11 @@ class MediaRepository(object):
         for (t_width, t_height, t_type), t_method in iteritems(thumbnails):
             # Generate the thumbnail
             if t_method == "crop":
-                t_byte_source = yield defer_to_thread(
+                t_byte_source = await defer_to_thread(
                     self.hs.get_reactor(), thumbnailer.crop, t_width, t_height, t_type
                 )
             elif t_method == "scale":
-                t_byte_source = yield defer_to_thread(
+                t_byte_source = await defer_to_thread(
                     self.hs.get_reactor(), thumbnailer.scale, t_width, t_height, t_type
                 )
             else:
@@ -646,7 +635,7 @@ class MediaRepository(object):
                     url_cache=url_cache,
                 )
 
-                output_path = yield self.media_storage.store_file(
+                output_path = await self.media_storage.store_file(
                     t_byte_source, file_info
                 )
             finally:
@@ -656,7 +645,7 @@ class MediaRepository(object):
 
             # Write to database
             if server_name:
-                yield self.store.store_remote_media_thumbnail(
+                await self.store.store_remote_media_thumbnail(
                     server_name,
                     media_id,
                     file_id,
@@ -667,15 +656,14 @@ class MediaRepository(object):
                     t_len,
                 )
             else:
-                yield self.store.store_local_thumbnail(
+                await self.store.store_local_thumbnail(
                     media_id, t_width, t_height, t_type, t_method, t_len
                 )
 
         return {"width": m_width, "height": m_height}
 
-    @defer.inlineCallbacks
-    def delete_old_remote_media(self, before_ts):
-        old_media = yield self.store.get_remote_media_before(before_ts)
+    async def delete_old_remote_media(self, before_ts):
+        old_media = await self.store.get_remote_media_before(before_ts)
 
         deleted = 0
 
@@ -689,7 +677,7 @@ class MediaRepository(object):
 
             # TODO: Should we delete from the backup store
 
-            with (yield self.remote_media_linearizer.queue(key)):
+            with (await self.remote_media_linearizer.queue(key)):
                 full_path = self.filepaths.remote_media_filepath(origin, file_id)
                 try:
                     os.remove(full_path)
@@ -705,7 +693,7 @@ class MediaRepository(object):
                 )
                 shutil.rmtree(thumbnail_dir, ignore_errors=True)
 
-                yield self.store.delete_remote_media(origin, media_id)
+                await self.store.delete_remote_media(origin, media_id)
                 deleted += 1
 
         return {"deleted": deleted}
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 07e395cfd1..c46676f8fc 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -165,8 +165,7 @@ class PreviewUrlResource(DirectServeResource):
         og = await make_deferred_yieldable(defer.maybeDeferred(observable.observe))
         respond_with_json_bytes(request, 200, og, send_cors=True)
 
-    @defer.inlineCallbacks
-    def _do_preview(self, url, user, ts):
+    async def _do_preview(self, url, user, ts):
         """Check the db, and download the URL and build a preview
 
         Args:
@@ -179,7 +178,7 @@ class PreviewUrlResource(DirectServeResource):
         """
         # check the URL cache in the DB (which will also provide us with
         # historical previews, if we have any)
-        cache_result = yield self.store.get_url_cache(url, ts)
+        cache_result = await self.store.get_url_cache(url, ts)
         if (
             cache_result
             and cache_result["expires_ts"] > ts
@@ -192,13 +191,13 @@ class PreviewUrlResource(DirectServeResource):
                 og = og.encode("utf8")
             return og
 
-        media_info = yield self._download_url(url, user)
+        media_info = await self._download_url(url, user)
 
         logger.debug("got media_info of '%s'", media_info)
 
         if _is_media(media_info["media_type"]):
             file_id = media_info["filesystem_id"]
-            dims = yield self.media_repo._generate_thumbnails(
+            dims = await self.media_repo._generate_thumbnails(
                 None, file_id, file_id, media_info["media_type"], url_cache=True
             )
 
@@ -248,14 +247,14 @@ class PreviewUrlResource(DirectServeResource):
             # request itself and benefit from the same caching etc.  But for now we
             # just rely on the caching on the master request to speed things up.
             if "og:image" in og and og["og:image"]:
-                image_info = yield self._download_url(
+                image_info = await self._download_url(
                     _rebase_url(og["og:image"], media_info["uri"]), user
                 )
 
                 if _is_media(image_info["media_type"]):
                     # TODO: make sure we don't choke on white-on-transparent images
                     file_id = image_info["filesystem_id"]
-                    dims = yield self.media_repo._generate_thumbnails(
+                    dims = await self.media_repo._generate_thumbnails(
                         None, file_id, file_id, image_info["media_type"], url_cache=True
                     )
                     if dims:
@@ -293,7 +292,7 @@ class PreviewUrlResource(DirectServeResource):
         jsonog = json.dumps(og)
 
         # store OG in history-aware DB cache
-        yield self.store.store_url_cache(
+        await self.store.store_url_cache(
             url,
             media_info["response_code"],
             media_info["etag"],
@@ -305,8 +304,7 @@ class PreviewUrlResource(DirectServeResource):
 
         return jsonog.encode("utf8")
 
-    @defer.inlineCallbacks
-    def _download_url(self, url, user):
+    async def _download_url(self, url, user):
         # TODO: we should probably honour robots.txt... except in practice
         # we're most likely being explicitly triggered by a human rather than a
         # bot, so are we really a robot?
@@ -318,7 +316,7 @@ class PreviewUrlResource(DirectServeResource):
         with self.media_storage.store_into_file(file_info) as (f, fname, finish):
             try:
                 logger.debug("Trying to get url '%s'", url)
-                length, headers, uri, code = yield self.client.get_file(
+                length, headers, uri, code = await self.client.get_file(
                     url, output_stream=f, max_size=self.max_spider_size
                 )
             except SynapseError:
@@ -345,7 +343,7 @@ class PreviewUrlResource(DirectServeResource):
                     % (traceback.format_exception_only(sys.exc_info()[0], e),),
                     Codes.UNKNOWN,
                 )
-            yield finish()
+            await finish()
 
         try:
             if b"Content-Type" in headers:
@@ -356,7 +354,7 @@ class PreviewUrlResource(DirectServeResource):
 
             download_name = get_filename_from_headers(headers)
 
-            yield self.store.store_local_media(
+            await self.store.store_local_media(
                 media_id=file_id,
                 media_type=media_type,
                 time_now_ms=self.clock.time_msec(),
@@ -393,8 +391,7 @@ class PreviewUrlResource(DirectServeResource):
             "expire_url_cache_data", self._expire_url_cache_data
         )
 
-    @defer.inlineCallbacks
-    def _expire_url_cache_data(self):
+    async def _expire_url_cache_data(self):
         """Clean up expired url cache content, media and thumbnails.
         """
         # TODO: Delete from backup media store
@@ -403,12 +400,12 @@ class PreviewUrlResource(DirectServeResource):
 
         logger.info("Running url preview cache expiry")
 
-        if not (yield self.store.db.updates.has_completed_background_updates()):
+        if not (await self.store.db.updates.has_completed_background_updates()):
             logger.info("Still running DB updates; skipping expiry")
             return
 
         # First we delete expired url cache entries
-        media_ids = yield self.store.get_expired_url_cache(now)
+        media_ids = await self.store.get_expired_url_cache(now)
 
         removed_media = []
         for media_id in media_ids:
@@ -430,7 +427,7 @@ class PreviewUrlResource(DirectServeResource):
             except Exception:
                 pass
 
-        yield self.store.delete_url_cache(removed_media)
+        await self.store.delete_url_cache(removed_media)
 
         if removed_media:
             logger.info("Deleted %d entries from url cache", len(removed_media))
@@ -440,7 +437,7 @@ class PreviewUrlResource(DirectServeResource):
         # may have a room open with a preview url thing open).
         # So we wait a couple of days before deleting, just in case.
         expire_before = now - 2 * 24 * 60 * 60 * 1000
-        media_ids = yield self.store.get_url_cache_media_before(expire_before)
+        media_ids = await self.store.get_url_cache_media_before(expire_before)
 
         removed_media = []
         for media_id in media_ids:
@@ -478,7 +475,7 @@ class PreviewUrlResource(DirectServeResource):
             except Exception:
                 pass
 
-        yield self.store.delete_url_cache_media(removed_media)
+        await self.store.delete_url_cache_media(removed_media)
 
         logger.info("Deleted %d media from url cache", len(removed_media))
 
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index d57480f761..0b87220234 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -16,8 +16,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.http.server import (
     DirectServeResource,
     set_cors_headers,
@@ -79,11 +77,10 @@ class ThumbnailResource(DirectServeResource):
                 )
             self.media_repo.mark_recently_accessed(server_name, media_id)
 
-    @defer.inlineCallbacks
-    def _respond_local_thumbnail(
+    async def _respond_local_thumbnail(
         self, request, media_id, width, height, method, m_type
     ):
-        media_info = yield self.store.get_local_media(media_id)
+        media_info = await self.store.get_local_media(media_id)
 
         if not media_info:
             respond_404(request)
@@ -93,7 +90,7 @@ class ThumbnailResource(DirectServeResource):
             respond_404(request)
             return
 
-        thumbnail_infos = yield self.store.get_local_media_thumbnails(media_id)
+        thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
 
         if thumbnail_infos:
             thumbnail_info = self._select_thumbnail(
@@ -114,14 +111,13 @@ class ThumbnailResource(DirectServeResource):
             t_type = file_info.thumbnail_type
             t_length = thumbnail_info["thumbnail_length"]
 
-            responder = yield self.media_storage.fetch_media(file_info)
-            yield respond_with_responder(request, responder, t_type, t_length)
+            responder = await self.media_storage.fetch_media(file_info)
+            await respond_with_responder(request, responder, t_type, t_length)
         else:
             logger.info("Couldn't find any generated thumbnails")
             respond_404(request)
 
-    @defer.inlineCallbacks
-    def _select_or_generate_local_thumbnail(
+    async def _select_or_generate_local_thumbnail(
         self,
         request,
         media_id,
@@ -130,7 +126,7 @@ class ThumbnailResource(DirectServeResource):
         desired_method,
         desired_type,
     ):
-        media_info = yield self.store.get_local_media(media_id)
+        media_info = await self.store.get_local_media(media_id)
 
         if not media_info:
             respond_404(request)
@@ -140,7 +136,7 @@ class ThumbnailResource(DirectServeResource):
             respond_404(request)
             return
 
-        thumbnail_infos = yield self.store.get_local_media_thumbnails(media_id)
+        thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
         for info in thumbnail_infos:
             t_w = info["thumbnail_width"] == desired_width
             t_h = info["thumbnail_height"] == desired_height
@@ -162,15 +158,15 @@ class ThumbnailResource(DirectServeResource):
                 t_type = file_info.thumbnail_type
                 t_length = info["thumbnail_length"]
 
-                responder = yield self.media_storage.fetch_media(file_info)
+                responder = await self.media_storage.fetch_media(file_info)
                 if responder:
-                    yield respond_with_responder(request, responder, t_type, t_length)
+                    await respond_with_responder(request, responder, t_type, t_length)
                     return
 
         logger.debug("We don't have a thumbnail of that size. Generating")
 
         # Okay, so we generate one.
-        file_path = yield self.media_repo.generate_local_exact_thumbnail(
+        file_path = await self.media_repo.generate_local_exact_thumbnail(
             media_id,
             desired_width,
             desired_height,
@@ -180,13 +176,12 @@ class ThumbnailResource(DirectServeResource):
         )
 
         if file_path:
-            yield respond_with_file(request, desired_type, file_path)
+            await respond_with_file(request, desired_type, file_path)
         else:
             logger.warning("Failed to generate thumbnail")
             respond_404(request)
 
-    @defer.inlineCallbacks
-    def _select_or_generate_remote_thumbnail(
+    async def _select_or_generate_remote_thumbnail(
         self,
         request,
         server_name,
@@ -196,9 +191,9 @@ class ThumbnailResource(DirectServeResource):
         desired_method,
         desired_type,
     ):
-        media_info = yield self.media_repo.get_remote_media_info(server_name, media_id)
+        media_info = await self.media_repo.get_remote_media_info(server_name, media_id)
 
-        thumbnail_infos = yield self.store.get_remote_media_thumbnails(
+        thumbnail_infos = await self.store.get_remote_media_thumbnails(
             server_name, media_id
         )
 
@@ -224,15 +219,15 @@ class ThumbnailResource(DirectServeResource):
                 t_type = file_info.thumbnail_type
                 t_length = info["thumbnail_length"]
 
-                responder = yield self.media_storage.fetch_media(file_info)
+                responder = await self.media_storage.fetch_media(file_info)
                 if responder:
-                    yield respond_with_responder(request, responder, t_type, t_length)
+                    await respond_with_responder(request, responder, t_type, t_length)
                     return
 
         logger.debug("We don't have a thumbnail of that size. Generating")
 
         # Okay, so we generate one.
-        file_path = yield self.media_repo.generate_remote_exact_thumbnail(
+        file_path = await self.media_repo.generate_remote_exact_thumbnail(
             server_name,
             file_id,
             media_id,
@@ -243,21 +238,20 @@ class ThumbnailResource(DirectServeResource):
         )
 
         if file_path:
-            yield respond_with_file(request, desired_type, file_path)
+            await respond_with_file(request, desired_type, file_path)
         else:
             logger.warning("Failed to generate thumbnail")
             respond_404(request)
 
-    @defer.inlineCallbacks
-    def _respond_remote_thumbnail(
+    async def _respond_remote_thumbnail(
         self, request, server_name, media_id, width, height, method, m_type
     ):
         # TODO: Don't download the whole remote file
         # We should proxy the thumbnail from the remote server instead of
         # downloading the remote file and generating our own thumbnails.
-        media_info = yield self.media_repo.get_remote_media_info(server_name, media_id)
+        media_info = await self.media_repo.get_remote_media_info(server_name, media_id)
 
-        thumbnail_infos = yield self.store.get_remote_media_thumbnails(
+        thumbnail_infos = await self.store.get_remote_media_thumbnails(
             server_name, media_id
         )
 
@@ -278,8 +272,8 @@ class ThumbnailResource(DirectServeResource):
             t_type = file_info.thumbnail_type
             t_length = thumbnail_info["thumbnail_length"]
 
-            responder = yield self.media_storage.fetch_media(file_info)
-            yield respond_with_responder(request, responder, t_type, t_length)
+            responder = await self.media_storage.fetch_media(file_info)
+            await respond_with_responder(request, responder, t_type, t_length)
         else:
             logger.info("Failed to find any generated thumbnails")
             respond_404(request)
-- 
cgit 1.4.1
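
The patch above is a mechanical migration from Twisted's `@defer.inlineCallbacks`
generators to native coroutines: each decorated method becomes `async def`, and
each `yield` on a Deferred becomes an `await`. A minimal sketch of the
before/after shape (illustrative names, not Synapse code):

```python
from twisted.internet import defer


class Before:
    @defer.inlineCallbacks
    def get_media(self, store, media_id):
        # inlineCallbacks drives the generator, resuming it when the
        # Deferred yielded here fires.
        info = yield store.get_local_media(media_id)
        return info


class After:
    async def get_media(self, store, media_id):
        # Deferreds are awaitable, so `await` replaces `yield` and the
        # decorator goes away; call sites are otherwise unchanged.
        info = await store.get_local_media(media_id)
        return info
```

Callers that still need a Deferred can wrap the coroutine with
`defer.ensureDeferred(...)`; Twisted Deferreds are themselves awaitable, which
is what makes the line-for-line substitution above possible.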


From fdb13447167da0670dd6ad95fdf4a99cde450eb9 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 20 Mar 2020 14:40:47 +0000
Subject: Remove concept of a non-limited stream. (#7011)

---
 changelog.d/7011.misc                              |  1 +
 synapse/handlers/presence.py                       |  4 +-
 synapse/handlers/typing.py                         | 11 +++-
 synapse/replication/tcp/resource.py                |  9 +--
 synapse/replication/tcp/streams/_base.py           | 66 +++++++++-------------
 synapse/storage/data_stores/main/devices.py        | 10 +++-
 .../storage/data_stores/main/end_to_end_keys.py    | 14 +++--
 synapse/storage/data_stores/main/presence.py       | 23 ++++----
 8 files changed, 71 insertions(+), 67 deletions(-)
 create mode 100644 changelog.d/7011.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7011.misc b/changelog.d/7011.misc
new file mode 100644
index 0000000000..41c3b37574
--- /dev/null
+++ b/changelog.d/7011.misc
@@ -0,0 +1 @@
+Remove concept of a non-limited stream.
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 5526015ddb..6912165622 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -747,7 +747,7 @@ class PresenceHandler(object):
 
         return False
 
-    async def get_all_presence_updates(self, last_id, current_id):
+    async def get_all_presence_updates(self, last_id, current_id, limit):
         """
         Gets a list of presence update rows from between the given stream ids.
         Each row has:
@@ -762,7 +762,7 @@ class PresenceHandler(object):
         """
         # TODO(markjh): replicate the unpersisted changes.
         # This could use the in-memory stores for recent changes.
-        rows = await self.store.get_all_presence_updates(last_id, current_id)
+        rows = await self.store.get_all_presence_updates(last_id, current_id, limit)
         return rows
 
     def notify_new_event(self):
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 391bceb0c4..c7bc14c623 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -15,6 +15,7 @@
 
 import logging
 from collections import namedtuple
+from typing import List
 
 from twisted.internet import defer
 
@@ -257,7 +258,13 @@ class TypingHandler(object):
             "typing_key", self._latest_room_serial, rooms=[member.room_id]
         )
 
-    async def get_all_typing_updates(self, last_id, current_id):
+    async def get_all_typing_updates(
+        self, last_id: int, current_id: int, limit: int
+    ) -> List[dict]:
+        """Get up to `limit` typing updates between the given tokens, earliest
+        updates first.
+        """
+
         if last_id == current_id:
             return []
 
@@ -275,7 +282,7 @@ class TypingHandler(object):
                 typing = self._room_typing[room_id]
                 rows.append((serial, room_id, list(typing)))
         rows.sort()
-        return rows
+        return rows[:limit]
 
     def get_current_token(self):
         return self._latest_room_serial
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index ce9d1fae12..6e2ebaf614 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -166,11 +166,6 @@ class ReplicationStreamer(object):
                 self.pending_updates = False
 
                 with Measure(self.clock, "repl.stream.get_updates"):
-                    # First we tell the streams that they should update their
-                    # current tokens.
-                    for stream in self.streams:
-                        stream.advance_current_token()
-
                     all_streams = self.streams
 
                     if self._replication_torture_level is not None:
@@ -180,7 +175,7 @@ class ReplicationStreamer(object):
                         random.shuffle(all_streams)
 
                     for stream in all_streams:
-                        if stream.last_token == stream.upto_token:
+                        if stream.last_token == stream.current_token():
                             continue
 
                         if self._replication_torture_level:
@@ -192,7 +187,7 @@ class ReplicationStreamer(object):
                             "Getting stream: %s: %s -> %s",
                             stream.NAME,
                             stream.last_token,
-                            stream.upto_token,
+                            stream.current_token(),
                         )
                         try:
                             updates, current_token = await stream.get_updates()
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 7a8b6e9df1..abf5c6c6a8 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -17,10 +17,12 @@
 import itertools
 import logging
 from collections import namedtuple
-from typing import Any, List, Optional
+from typing import Any, List, Optional, Tuple
 
 import attr
 
+from synapse.types import JsonDict
+
 logger = logging.getLogger(__name__)
 
 
@@ -119,13 +121,12 @@ class Stream(object):
     """Base class for the streams.
 
     Provides a `get_updates()` function that returns new updates since the last
-    time it was called up until the point `advance_current_token` was called.
+    time it was called.
     """
 
     NAME = None  # type: str  # The name of the stream
     # The type of the row. Used by the default impl of parse_row.
     ROW_TYPE = None  # type: Any
-    _LIMITED = True  # Whether the update function takes a limit
 
     @classmethod
     def parse_row(cls, row):
@@ -146,26 +147,15 @@ class Stream(object):
         # The token from which we last asked for updates
         self.last_token = self.current_token()
 
-        # The token that we will get updates up to
-        self.upto_token = self.current_token()
-
-    def advance_current_token(self):
-        """Updates `upto_token` to "now", which updates up until which point
-        get_updates[_since] will fetch rows till.
-        """
-        self.upto_token = self.current_token()
-
     def discard_updates_and_advance(self):
         """Called when the stream should advance but the updates would be discarded,
         e.g. when there are no currently connected workers.
         """
-        self.upto_token = self.current_token()
-        self.last_token = self.upto_token
+        self.last_token = self.current_token()
 
     async def get_updates(self):
         """Gets all updates since the last time this function was called (or
-        since the stream was constructed if it hadn't been called before),
-        until the `upto_token`
+        since the stream was constructed if it hadn't been called before).
 
         Returns:
             Deferred[Tuple[List[Tuple[int, Any]], int]]:
@@ -178,44 +168,45 @@ class Stream(object):
 
         return updates, current_token
 
-    async def get_updates_since(self, from_token):
+    async def get_updates_since(
+        self, from_token: int
+    ) -> Tuple[List[Tuple[int, JsonDict]], int]:
         """Like get_updates except allows specifying from when we should
         stream updates
 
         Returns:
-            Deferred[Tuple[List[Tuple[int, Any]], int]:
-                Resolves to a pair ``(updates, current_token)``, where ``updates`` is a
-                list of ``(token, row)`` entries. ``row`` will be json-serialised and
-                sent over the replication steam.
+            Resolves to a pair `(updates, new_last_token)`, where `updates` is
+            a list of `(token, row)` entries and `new_last_token` is the new
+            position in the stream.
         """
+
         if from_token in ("NOW", "now"):
-            return [], self.upto_token
+            return [], self.current_token()
 
-        current_token = self.upto_token
+        current_token = self.current_token()
 
         from_token = int(from_token)
 
         if from_token == current_token:
             return [], current_token
 
-        logger.info("get_updates_since: %s", self.__class__)
-        if self._LIMITED:
-            rows = await self.update_function(
-                from_token, current_token, limit=MAX_EVENTS_BEHIND + 1
-            )
+        rows = await self.update_function(
+            from_token, current_token, limit=MAX_EVENTS_BEHIND + 1
+        )
 
-            # never turn more than MAX_EVENTS_BEHIND + 1 into updates.
-            rows = itertools.islice(rows, MAX_EVENTS_BEHIND + 1)
-        else:
-            rows = await self.update_function(from_token, current_token)
+        # never turn more than MAX_EVENTS_BEHIND + 1 into updates.
+        rows = itertools.islice(rows, MAX_EVENTS_BEHIND + 1)
 
         updates = [(row[0], row[1:]) for row in rows]
 
         # check we didn't get more rows than the limit.
         # doing it like this allows the update_function to be a generator.
-        if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND:
+        if len(updates) >= MAX_EVENTS_BEHIND:
             raise Exception("stream %s has fallen behind" % (self.NAME))
 
+        # The update function didn't hit the limit, so we must have got all
+        # the updates to `current_token`, and can return that as our new
+        # stream position.
         return updates, current_token
 
     def current_token(self):
@@ -227,9 +218,8 @@ class Stream(object):
         """
         raise NotImplementedError()
 
-    def update_function(self, from_token, current_token, limit=None):
-        """Get updates between from_token and to_token. If Stream._LIMITED is
-        True then limit is provided, otherwise it's not.
+    def update_function(self, from_token, current_token, limit):
+        """Get updates between from_token and to_token.
 
         Returns:
             Deferred(list(tuple)): the first entry in the tuple is the token for
@@ -257,7 +247,6 @@ class BackfillStream(Stream):
 
 class PresenceStream(Stream):
     NAME = "presence"
-    _LIMITED = False
     ROW_TYPE = PresenceStreamRow
 
     def __init__(self, hs):
@@ -272,7 +261,6 @@ class PresenceStream(Stream):
 
 class TypingStream(Stream):
     NAME = "typing"
-    _LIMITED = False
     ROW_TYPE = TypingStreamRow
 
     def __init__(self, hs):
@@ -372,7 +360,6 @@ class DeviceListsStream(Stream):
     """
 
     NAME = "device_lists"
-    _LIMITED = False
     ROW_TYPE = DeviceListsStreamRow
 
     def __init__(self, hs):
@@ -462,7 +449,6 @@ class UserSignatureStream(Stream):
     """
 
     NAME = "user_signature"
-    _LIMITED = False
     ROW_TYPE = UserSignatureStreamRow
 
     def __init__(self, hs):
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py
index 4c19c02bbc..2d47cfd131 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/data_stores/main/devices.py
@@ -576,7 +576,7 @@ class DeviceWorkerStore(SQLBaseStore):
             return set()
 
     async def get_all_device_list_changes_for_remotes(
-        self, from_key: int, to_key: int
+        self, from_key: int, to_key: int, limit: int,
     ) -> List[Tuple[int, str]]:
         """Return a list of `(stream_id, entity)` which is the combined list of
         changes to devices and which destinations need to be poked. Entity is
@@ -592,10 +592,16 @@ class DeviceWorkerStore(SQLBaseStore):
                 SELECT stream_id, destination AS entity FROM device_lists_outbound_pokes
             ) AS e
             WHERE ? < stream_id AND stream_id <= ?
+            LIMIT ?
         """
 
         return await self.db.execute(
-            "get_all_device_list_changes_for_remotes", None, sql, from_key, to_key
+            "get_all_device_list_changes_for_remotes",
+            None,
+            sql,
+            from_key,
+            to_key,
+            limit,
         )
 
     @cached(max_entries=10000)
diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/data_stores/main/end_to_end_keys.py
index 001a53f9b4..bcf746b7ef 100644
--- a/synapse/storage/data_stores/main/end_to_end_keys.py
+++ b/synapse/storage/data_stores/main/end_to_end_keys.py
@@ -537,7 +537,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
 
         return result
 
-    def get_all_user_signature_changes_for_remotes(self, from_key, to_key):
+    def get_all_user_signature_changes_for_remotes(self, from_key, to_key, limit):
         """Return a list of changes from the user signature stream to notify remotes.
         Note that the user signature stream represents when a user signs their
         device with their user-signing key, which is not published to other
@@ -552,13 +552,19 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
             Deferred[list[(int,str)]] a list of `(stream_id, user_id)`
         """
         sql = """
-            SELECT MAX(stream_id) AS stream_id, from_user_id AS user_id
+            SELECT stream_id, from_user_id AS user_id
             FROM user_signature_stream
             WHERE ? < stream_id AND stream_id <= ?
-            GROUP BY user_id
+            ORDER BY stream_id ASC
+            LIMIT ?
         """
         return self.db.execute(
-            "get_all_user_signature_changes_for_remotes", None, sql, from_key, to_key
+            "get_all_user_signature_changes_for_remotes",
+            None,
+            sql,
+            from_key,
+            to_key,
+            limit,
         )
 
 
diff --git a/synapse/storage/data_stores/main/presence.py b/synapse/storage/data_stores/main/presence.py
index 604c8b7ddd..dab31e0c2d 100644
--- a/synapse/storage/data_stores/main/presence.py
+++ b/synapse/storage/data_stores/main/presence.py
@@ -60,7 +60,7 @@ class PresenceStore(SQLBaseStore):
                     "status_msg": state.status_msg,
                     "currently_active": state.currently_active,
                 }
-                for state in presence_states
+                for stream_id, state in zip(stream_orderings, presence_states)
             ],
         )
 
@@ -73,19 +73,22 @@ class PresenceStore(SQLBaseStore):
             )
             txn.execute(sql + clause, [stream_id] + list(args))
 
-    def get_all_presence_updates(self, last_id, current_id):
+    def get_all_presence_updates(self, last_id, current_id, limit):
         if last_id == current_id:
             return defer.succeed([])
 
         def get_all_presence_updates_txn(txn):
-            sql = (
-                "SELECT stream_id, user_id, state, last_active_ts,"
-                " last_federation_update_ts, last_user_sync_ts, status_msg,"
-                " currently_active"
-                " FROM presence_stream"
-                " WHERE ? < stream_id AND stream_id <= ?"
-            )
-            txn.execute(sql, (last_id, current_id))
+            sql = """
+                SELECT stream_id, user_id, state, last_active_ts,
+                    last_federation_update_ts, last_user_sync_ts,
+                    status_msg,
+                    currently_active
+                FROM presence_stream
+                WHERE ? < stream_id AND stream_id <= ?
+                ORDER BY stream_id ASC
+                LIMIT ?
+            """
+            txn.execute(sql, (last_id, current_id, limit))
             return txn.fetchall()
 
         return self.db.runInteraction(
-- 
cgit 1.4.1
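
With the `_LIMITED` flag gone, every stream's `update_function` now accepts a
`limit`, and the caller treats a full batch as the stream having fallen behind.
A hedged sketch of the resulting contract, condensed from the diff above (the
`MAX_EVENTS_BEHIND` value is illustrative):

```python
import itertools
from typing import Any, List, Tuple

MAX_EVENTS_BEHIND = 500000  # illustrative; the real constant lives in Synapse


async def get_updates_since(stream, from_token: int) -> Tuple[List[Tuple[int, Any]], int]:
    current_token = stream.current_token()
    if from_token == current_token:
        return [], current_token

    # Ask for one more row than we are prepared to handle...
    rows = await stream.update_function(
        from_token, current_token, limit=MAX_EVENTS_BEHIND + 1
    )
    # ...and slice lazily, so update_function may be a generator.
    rows = list(itertools.islice(rows, MAX_EVENTS_BEHIND + 1))

    if len(rows) >= MAX_EVENTS_BEHIND:
        # A full batch means we cannot tell whether more rows exist.
        raise Exception("stream %s has fallen behind" % (stream.NAME,))

    # Under the limit, so these updates are complete up to current_token,
    # which becomes the new stream position.
    return [(row[0], row[1:]) for row in rows], current_token
```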


From c165c1233b8ef244fadca97c7d465fdcf473d077 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 20 Mar 2020 16:24:22 +0100
Subject: Improve database configuration docs (#6988)

Attempts to clarify the sample config for databases, and adds documentation
about TCP keepalives to `postgres.md`.
---
 changelog.d/6988.doc          |  1 +
 docs/postgres.md              | 42 ++++++++++++++-----
 docs/sample_config.yaml       | 43 +++++++++++++++++---
 synapse/config/_base.py       |  2 -
 synapse/config/database.py    | 93 +++++++++++++++++++++++++++----------------
 tests/config/test_database.py | 22 +---------
 6 files changed, 132 insertions(+), 71 deletions(-)
 create mode 100644 changelog.d/6988.doc

(limited to 'changelog.d')

diff --git a/changelog.d/6988.doc b/changelog.d/6988.doc
new file mode 100644
index 0000000000..b6f71bb966
--- /dev/null
+++ b/changelog.d/6988.doc
@@ -0,0 +1 @@
+Improve the documentation for database configuration.
diff --git a/docs/postgres.md b/docs/postgres.md
index e0793ecee8..16a630c3d1 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -105,19 +105,41 @@ of free memory the database host has available.
 When you are ready to start using PostgreSQL, edit the `database`
 section in your config file to match the following lines:
 
-    database:
-        name: psycopg2
-        args:
-            user: <user>
-            password: <password>
-            database: <database>
-            host: <host>
-            cp_min: 5
-            cp_max: 10
+```yaml
+database:
+  name: psycopg2
+  args:
+    user: <user>
+    password: <password>
+    database: <database>
+    host: <host>
+    cp_min: 5
+    cp_max: 10
+```
 
 All key-value pairs in `args` are passed to the `psycopg2.connect(..)`
 function, except keys beginning with `cp_`, which are consumed by the
-twisted adbapi connection pool.
+twisted adbapi connection pool. See the [libpq
+documentation](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS)
+for a list of options which can be passed.
+
+You should consider tuning the `args.keepalives_*` options if there is any danger of
+the connection between your homeserver and database dropping, otherwise Synapse
+may block for an extended period while it waits for a response from the
+database server. Example values might be:
+
+```yaml
+# seconds of inactivity after which TCP should send a keepalive message to the server
+keepalives_idle: 10
+
+# the number of seconds after which a TCP keepalive message that is not
+# acknowledged by the server should be retransmitted
+keepalives_interval: 10
+
+# the number of TCP keepalives that can be lost before the client's connection
+# to the server is considered dead
+keepalives_count: 3
+```
 
 ## Porting from SQLite
 
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 2ff0dd05a2..276e43b732 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -578,13 +578,46 @@ acme:
 
 ## Database ##
 
+# The 'database' setting defines the database that synapse uses to store all of
+# its data.
+#
+# 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or
+# 'psycopg2' (for PostgreSQL).
+#
+# 'args' gives options which are passed through to the database engine,
+# except for options starting 'cp_', which are used to configure the Twisted
+# connection pool. For a reference to valid arguments, see:
+#   * for sqlite: https://docs.python.org/3/library/sqlite3.html#sqlite3.connect
+#   * for postgres: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+#   * for the connection pool: https://twistedmatrix.com/documents/current/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__
+#
+#
+# Example SQLite configuration:
+#
+#database:
+#  name: sqlite3
+#  args:
+#    database: /path/to/homeserver.db
+#
+#
+# Example Postgres configuration:
+#
+#database:
+#  name: psycopg2
+#  args:
+#    user: synapse
+#    password: secretpassword
+#    database: synapse
+#    host: localhost
+#    cp_min: 5
+#    cp_max: 10
+#
+# For more information on using Synapse with Postgres, see `docs/postgres.md`.
+#
 database:
-  # The database engine name
-  name: "sqlite3"
-  # Arguments to pass to the engine
+  name: sqlite3
   args:
-    # Path to the database
-    database: "DATADIR/homeserver.db"
+    database: DATADIR/homeserver.db
 
 # Number of events to cache in memory.
 #
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index ba846042c4..efe2af5504 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -294,7 +294,6 @@ class RootConfig(object):
         report_stats=None,
         open_private_ports=False,
         listeners=None,
-        database_conf=None,
         tls_certificate_path=None,
         tls_private_key_path=None,
         acme_domain=None,
@@ -367,7 +366,6 @@ class RootConfig(object):
                 report_stats=report_stats,
                 open_private_ports=open_private_ports,
                 listeners=listeners,
-                database_conf=database_conf,
                 tls_certificate_path=tls_certificate_path,
                 tls_private_key_path=tls_private_key_path,
                 acme_domain=acme_domain,
diff --git a/synapse/config/database.py b/synapse/config/database.py
index 219b32f670..b8ab2f86ac 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,14 +15,60 @@
 # limitations under the License.
 import logging
 import os
-from textwrap import indent
-
-import yaml
 
 from synapse.config._base import Config, ConfigError
 
 logger = logging.getLogger(__name__)
 
+DEFAULT_CONFIG = """\
+## Database ##
+
+# The 'database' setting defines the database that synapse uses to store all of
+# its data.
+#
+# 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or
+# 'psycopg2' (for PostgreSQL).
+#
+# 'args' gives options which are passed through to the database engine,
+# except for options starting 'cp_', which are used to configure the Twisted
+# connection pool. For a reference to valid arguments, see:
+#   * for sqlite: https://docs.python.org/3/library/sqlite3.html#sqlite3.connect
+#   * for postgres: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+#   * for the connection pool: https://twistedmatrix.com/documents/current/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__
+#
+#
+# Example SQLite configuration:
+#
+#database:
+#  name: sqlite3
+#  args:
+#    database: /path/to/homeserver.db
+#
+#
+# Example Postgres configuration:
+#
+#database:
+#  name: psycopg2
+#  args:
+#    user: synapse
+#    password: secretpassword
+#    database: synapse
+#    host: localhost
+#    cp_min: 5
+#    cp_max: 10
+#
+# For more information on using Synapse with Postgres, see `docs/postgres.md`.
+#
+database:
+  name: sqlite3
+  args:
+    database: %(database_path)s
+
+# Number of events to cache in memory.
+#
+#event_cache_size: 10K
+"""
+
 
 class DatabaseConnectionConfig:
     """Contains the connection config for a particular database.
@@ -36,10 +83,12 @@ class DatabaseConnectionConfig:
     """
 
     def __init__(self, name: str, db_config: dict):
-        if db_config["name"] not in ("sqlite3", "psycopg2"):
-            raise ConfigError("Unsupported database type %r" % (db_config["name"],))
+        db_engine = db_config.get("name", "sqlite3")
 
-        if db_config["name"] == "sqlite3":
+        if db_engine not in ("sqlite3", "psycopg2"):
+            raise ConfigError("Unsupported database type %r" % (db_engine,))
+
+        if db_engine == "sqlite3":
             db_config.setdefault("args", {}).update(
                 {"cp_min": 1, "cp_max": 1, "check_same_thread": False}
             )
@@ -97,34 +146,10 @@ class DatabaseConfig(Config):
 
             self.set_databasepath(config.get("database_path"))
 
-    def generate_config_section(self, data_dir_path, database_conf, **kwargs):
-        if not database_conf:
-            database_path = os.path.join(data_dir_path, "homeserver.db")
-            database_conf = (
-                """# The database engine name
-          name: "sqlite3"
-          # Arguments to pass to the engine
-          args:
-            # Path to the database
-            database: "%(database_path)s"
-            """
-                % locals()
-            )
-        else:
-            database_conf = indent(yaml.dump(database_conf), " " * 10).lstrip()
-
-        return (
-            """\
-        ## Database ##
-
-        database:
-          %(database_conf)s
-        # Number of events to cache in memory.
-        #
-        #event_cache_size: 10K
-        """
-            % locals()
-        )
+    def generate_config_section(self, data_dir_path, **kwargs):
+        return DEFAULT_CONFIG % {
+            "database_path": os.path.join(data_dir_path, "homeserver.db")
+        }
 
     def read_arguments(self, args):
         self.set_databasepath(args.database_path)
diff --git a/tests/config/test_database.py b/tests/config/test_database.py
index 151d3006ac..f675bde68e 100644
--- a/tests/config/test_database.py
+++ b/tests/config/test_database.py
@@ -21,9 +21,9 @@ from tests import unittest
 
 
 class DatabaseConfigTestCase(unittest.TestCase):
-    def test_database_configured_correctly_no_database_conf_param(self):
+    def test_database_configured_correctly(self):
         conf = yaml.safe_load(
-            DatabaseConfig().generate_config_section("/data_dir_path", None)
+            DatabaseConfig().generate_config_section(data_dir_path="/data_dir_path")
         )
 
         expected_database_conf = {
@@ -32,21 +32,3 @@ class DatabaseConfigTestCase(unittest.TestCase):
         }
 
         self.assertEqual(conf["database"], expected_database_conf)
-
-    def test_database_configured_correctly_database_conf_param(self):
-
-        database_conf = {
-            "name": "my super fast datastore",
-            "args": {
-                "user": "matrix",
-                "password": "synapse_database_password",
-                "host": "synapse_database_host",
-                "database": "matrix",
-            },
-        }
-
-        conf = yaml.safe_load(
-            DatabaseConfig().generate_config_section("/data_dir_path", database_conf)
-        )
-
-        self.assertEqual(conf["database"], database_conf)
-- 
cgit 1.4.1
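
Since everything in `args` other than the `cp_*` keys is passed straight
through to `psycopg2.connect(..)`, the keepalive settings documented above sit
alongside the connection details. A combined example, with illustrative
credentials:

```yaml
database:
  name: psycopg2
  args:
    user: synapse
    password: secretpassword
    database: synapse
    host: localhost
    cp_min: 5
    cp_max: 10
    # Passed through to psycopg2/libpq, per the docs above:
    keepalives_idle: 10
    keepalives_interval: 10
    keepalives_count: 3
```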


From 477c4f5b1c2c7733d4b2cf578dc9aa8e048011b0 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Fri, 20 Mar 2020 16:22:47 -0400
Subject: Clean-up some auth/login REST code (#7115)

---
 changelog.d/7115.misc                |  1 +
 synapse/rest/client/v1/login.py      |  8 ------
 synapse/rest/client/v2_alpha/auth.py | 53 ++++++++++++++----------------------
 3 files changed, 21 insertions(+), 41 deletions(-)
 create mode 100644 changelog.d/7115.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7115.misc b/changelog.d/7115.misc
new file mode 100644
index 0000000000..7d4a011e3e
--- /dev/null
+++ b/changelog.d/7115.misc
@@ -0,0 +1 @@
+De-duplicate / remove unused REST code for login and auth.
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index d0d4999795..31551524f8 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -28,7 +28,6 @@ from synapse.http.servlet import (
     parse_json_object_from_request,
     parse_string,
 )
-from synapse.push.mailer import load_jinja2_templates
 from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.rest.well_known import WellKnownBuilder
 from synapse.types import UserID, map_username_to_mxid_localpart
@@ -548,13 +547,6 @@ class SSOAuthHandler(object):
         self._registration_handler = hs.get_registration_handler()
         self._macaroon_gen = hs.get_macaroon_generator()
 
-        # Load the redirect page HTML template
-        self._template = load_jinja2_templates(
-            hs.config.sso_redirect_confirm_template_dir, ["sso_redirect_confirm.html"],
-        )[0]
-
-        self._server_name = hs.config.server_name
-
         # cast to tuple for use with str.startswith
         self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist)
 
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index 50e080673b..85cf5a14c6 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -142,14 +142,6 @@ class AuthRestServlet(RestServlet):
                 % (CLIENT_API_PREFIX, LoginType.RECAPTCHA),
                 "sitekey": self.hs.config.recaptcha_public_key,
             }
-            html_bytes = html.encode("utf8")
-            request.setResponseCode(200)
-            request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
-            request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
-
-            request.write(html_bytes)
-            finish_request(request)
-            return None
         elif stagetype == LoginType.TERMS:
             html = TERMS_TEMPLATE % {
                 "session": session,
@@ -158,17 +150,19 @@ class AuthRestServlet(RestServlet):
                 "myurl": "%s/r0/auth/%s/fallback/web"
                 % (CLIENT_API_PREFIX, LoginType.TERMS),
             }
-            html_bytes = html.encode("utf8")
-            request.setResponseCode(200)
-            request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
-            request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
-
-            request.write(html_bytes)
-            finish_request(request)
-            return None
         else:
             raise SynapseError(404, "Unknown auth stage type")
 
+        # Render the HTML and return.
+        html_bytes = html.encode("utf8")
+        request.setResponseCode(200)
+        request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+        request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
+
+        request.write(html_bytes)
+        finish_request(request)
+        return None
+
     async def on_POST(self, request, stagetype):
 
         session = parse_string(request, "session")
@@ -196,15 +190,6 @@ class AuthRestServlet(RestServlet):
                     % (CLIENT_API_PREFIX, LoginType.RECAPTCHA),
                     "sitekey": self.hs.config.recaptcha_public_key,
                 }
-            html_bytes = html.encode("utf8")
-            request.setResponseCode(200)
-            request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
-            request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
-
-            request.write(html_bytes)
-            finish_request(request)
-
-            return None
         elif stagetype == LoginType.TERMS:
             authdict = {"session": session}
 
@@ -225,17 +210,19 @@ class AuthRestServlet(RestServlet):
                     "myurl": "%s/r0/auth/%s/fallback/web"
                     % (CLIENT_API_PREFIX, LoginType.TERMS),
                 }
-            html_bytes = html.encode("utf8")
-            request.setResponseCode(200)
-            request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
-            request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
-
-            request.write(html_bytes)
-            finish_request(request)
-            return None
         else:
             raise SynapseError(404, "Unknown auth stage type")
 
+        # Render the HTML and return.
+        html_bytes = html.encode("utf8")
+        request.setResponseCode(200)
+        request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+        request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
+
+        request.write(html_bytes)
+        finish_request(request)
+        return None
+
     def on_OPTIONS(self, _):
         return 200, {}
 
-- 
cgit 1.4.1
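
The refactor above is a straightforward de-duplication: each branch of the
if/elif chain now only builds the `html` string, and one shared tail encodes
it, sets the response headers, and finishes the request. A minimal,
self-contained sketch of the pattern (plain Python; the names and templates
here are illustrative stand-ins, not Synapse's API):

    def render_stage(stagetype: str) -> bytes:
        # Branches compute a value; they no longer perform side effects.
        if stagetype == "recaptcha":
            html = "<html>recaptcha fallback</html>"
        elif stagetype == "terms":
            html = "<html>terms fallback</html>"
        else:
            raise ValueError("Unknown auth stage type")

        # Render the HTML and return -- one copy of the tail per function,
        # not one per branch.
        return html.encode("utf8")

    if __name__ == "__main__":
        print(render_stage("terms"))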


From 96071eea8f5e18282c07da3a61e4b3431f694cc5 Mon Sep 17 00:00:00 2001
From: Dionysis Grigoropoulos 
Date: Mon, 23 Mar 2020 11:48:28 +0200
Subject: Set Referrer-Policy to no-referrer for media (#7009)

---
 changelog.d/7009.feature                   | 1 +
 synapse/rest/media/v1/download_resource.py | 3 +++
 2 files changed, 4 insertions(+)
 create mode 100644 changelog.d/7009.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7009.feature b/changelog.d/7009.feature
new file mode 100644
index 0000000000..cd2705d5ba
--- /dev/null
+++ b/changelog.d/7009.feature
@@ -0,0 +1 @@
+Set `Referrer-Policy` header to `no-referrer` on media downloads.
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py
index 66a01559e1..24d3ae5bbc 100644
--- a/synapse/rest/media/v1/download_resource.py
+++ b/synapse/rest/media/v1/download_resource.py
@@ -50,6 +50,9 @@ class DownloadResource(DirectServeResource):
             b" media-src 'self';"
             b" object-src 'self';",
         )
+        request.setHeader(
+            b"Referrer-Policy", b"no-referrer",
+        )
         server_name, media_id, name = parse_media_id(request)
         if server_name == self.server_name:
             await self.media_repo.get_local_media(request, media_id, name)
-- 
cgit 1.4.1
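
Since the change is a single response header, it can be sanity-checked from
the client side with the standard library alone. A hedged sketch (the
homeserver URL and media ID below are placeholders for a real deployment):

    import urllib.request

    # Hypothetical media download URL; substitute a real server and media ID.
    url = "https://example.com/_matrix/media/r0/download/example.com/abc123"
    with urllib.request.urlopen(url) as resp:
        # After this patch the response should carry "no-referrer".
        print(resp.headers.get("Referrer-Policy"))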


From b3cee0ce670ada582b2a4b36c377f160c7ee1d09 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 23 Mar 2020 11:39:36 +0000
Subject: Fix processing of `groups` stream, and use symbolic names for streams
 (#7117)

`groups` != `receipts`

Introduced in #6964
---
 changelog.d/7117.bugfix                     |  1 +
 synapse/app/generic_worker.py               | 35 ++++++++++-----
 synapse/replication/tcp/streams/__init__.py | 70 +++++++++++++++++++++--------
 3 files changed, 76 insertions(+), 30 deletions(-)
 create mode 100644 changelog.d/7117.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7117.bugfix b/changelog.d/7117.bugfix
new file mode 100644
index 0000000000..1896d7ad49
--- /dev/null
+++ b/changelog.d/7117.bugfix
@@ -0,0 +1 @@
+Fix a bug which meant that groups updates were not correctly replicated between workers.
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index cdc078cf11..136babe6ce 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -65,12 +65,23 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
 from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.replication.tcp.streams._base import (
+from synapse.replication.tcp.streams import (
+    AccountDataStream,
     DeviceListsStream,
+    GroupServerStream,
+    PresenceStream,
+    PushersStream,
+    PushRulesStream,
     ReceiptsStream,
+    TagAccountDataStream,
     ToDeviceStream,
+    TypingStream,
+)
+from synapse.replication.tcp.streams.events import (
+    EventsStream,
+    EventsStreamEventRow,
+    EventsStreamRow,
 )
-from synapse.replication.tcp.streams.events import EventsStreamEventRow, EventsStreamRow
 from synapse.rest.admin import register_servlets_for_media_repo
 from synapse.rest.client.v1 import events
 from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
@@ -626,7 +637,7 @@ class GenericWorkerReplicationHandler(ReplicationClientHandler):
             if self.send_handler:
                 self.send_handler.process_replication_rows(stream_name, token, rows)
 
-            if stream_name == "events":
+            if stream_name == EventsStream.NAME:
                 # We shouldn't get multiple rows per token for events stream, so
                 # we don't need to optimise this for multiple rows.
                 for row in rows:
@@ -649,44 +660,44 @@ class GenericWorkerReplicationHandler(ReplicationClientHandler):
                     )
 
                 await self.pusher_pool.on_new_notifications(token, token)
-            elif stream_name == "push_rules":
+            elif stream_name == PushRulesStream.NAME:
                 self.notifier.on_new_event(
                     "push_rules_key", token, users=[row.user_id for row in rows]
                 )
-            elif stream_name in ("account_data", "tag_account_data"):
+            elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME):
                 self.notifier.on_new_event(
                     "account_data_key", token, users=[row.user_id for row in rows]
                 )
-            elif stream_name == "receipts":
+            elif stream_name == ReceiptsStream.NAME:
                 self.notifier.on_new_event(
                     "receipt_key", token, rooms=[row.room_id for row in rows]
                 )
                 await self.pusher_pool.on_new_receipts(
                     token, token, {row.room_id for row in rows}
                 )
-            elif stream_name == "typing":
+            elif stream_name == TypingStream.NAME:
                 self.typing_handler.process_replication_rows(token, rows)
                 self.notifier.on_new_event(
                     "typing_key", token, rooms=[row.room_id for row in rows]
                 )
-            elif stream_name == "to_device":
+            elif stream_name == ToDeviceStream.NAME:
                 entities = [row.entity for row in rows if row.entity.startswith("@")]
                 if entities:
                     self.notifier.on_new_event("to_device_key", token, users=entities)
-            elif stream_name == "device_lists":
+            elif stream_name == DeviceListsStream.NAME:
                 all_room_ids = set()
                 for row in rows:
                     if row.entity.startswith("@"):
                         room_ids = await self.store.get_rooms_for_user(row.entity)
                         all_room_ids.update(room_ids)
                 self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids)
-            elif stream_name == "presence":
+            elif stream_name == PresenceStream.NAME:
                 await self.presence_handler.process_replication_rows(token, rows)
-            elif stream_name == "receipts":
+            elif stream_name == GroupServerStream.NAME:
                 self.notifier.on_new_event(
                     "groups_key", token, users=[row.user_id for row in rows]
                 )
-            elif stream_name == "pushers":
+            elif stream_name == PushersStream.NAME:
                 for row in rows:
                     if row.deleted:
                         self.stop_pusher(row.user_id, row.app_id, row.pushkey)
diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py
index 5f52264e84..29199f5b46 100644
--- a/synapse/replication/tcp/streams/__init__.py
+++ b/synapse/replication/tcp/streams/__init__.py
@@ -24,27 +24,61 @@ Each stream is defined by the following information:
     current_token:      The function that returns the current token for the stream
     update_function:    The function that returns a list of updates between two tokens
 """
-
-from . import _base, events, federation
+from synapse.replication.tcp.streams._base import (
+    AccountDataStream,
+    BackfillStream,
+    CachesStream,
+    DeviceListsStream,
+    GroupServerStream,
+    PresenceStream,
+    PublicRoomsStream,
+    PushersStream,
+    PushRulesStream,
+    ReceiptsStream,
+    TagAccountDataStream,
+    ToDeviceStream,
+    TypingStream,
+    UserSignatureStream,
+)
+from synapse.replication.tcp.streams.events import EventsStream
+from synapse.replication.tcp.streams.federation import FederationStream
 
 STREAMS_MAP = {
     stream.NAME: stream
     for stream in (
-        events.EventsStream,
-        _base.BackfillStream,
-        _base.PresenceStream,
-        _base.TypingStream,
-        _base.ReceiptsStream,
-        _base.PushRulesStream,
-        _base.PushersStream,
-        _base.CachesStream,
-        _base.PublicRoomsStream,
-        _base.DeviceListsStream,
-        _base.ToDeviceStream,
-        federation.FederationStream,
-        _base.TagAccountDataStream,
-        _base.AccountDataStream,
-        _base.GroupServerStream,
-        _base.UserSignatureStream,
+        EventsStream,
+        BackfillStream,
+        PresenceStream,
+        TypingStream,
+        ReceiptsStream,
+        PushRulesStream,
+        PushersStream,
+        CachesStream,
+        PublicRoomsStream,
+        DeviceListsStream,
+        ToDeviceStream,
+        FederationStream,
+        TagAccountDataStream,
+        AccountDataStream,
+        GroupServerStream,
+        UserSignatureStream,
     )
 }
+
+__all__ = [
+    "STREAMS_MAP",
+    "BackfillStream",
+    "PresenceStream",
+    "TypingStream",
+    "ReceiptsStream",
+    "PushRulesStream",
+    "PushersStream",
+    "CachesStream",
+    "PublicRoomsStream",
+    "DeviceListsStream",
+    "ToDeviceStream",
+    "TagAccountDataStream",
+    "AccountDataStream",
+    "GroupServerStream",
+    "UserSignatureStream",
+]
-- 
cgit 1.4.1
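
The underlying bug was a copy-pasted string literal: the `groups` branch
compared against "receipts", so it could never match. Dispatching on the
streams' NAME constants makes that class of mistake fail loudly, since a
misspelled attribute raises as soon as the check runs, unlike a
silently-wrong string. A small sketch of the idea (simplified classes, not
Synapse's):

    class ReceiptsStream:
        NAME = "receipts"

    class GroupServerStream:
        NAME = "groups"

    def on_rdata(stream_name: str) -> str:
        # A typo such as GroupServerStream.NAMES would raise AttributeError
        # the first time this runs; "recieps" in a literal would not.
        if stream_name == ReceiptsStream.NAME:
            return "notify receipts"
        elif stream_name == GroupServerStream.NAME:
            return "notify groups"
        return "ignored"

    print(on_rdata("groups"))  # -> notify groups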


From a564b92d37625855940fe599c730a9958c33f973 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 23 Mar 2020 13:59:11 +0000
Subject: Convert `*StreamRow` classes to inner classes (#7116)

This just helps keep the rows closer to their streams, so that it's easier to
see what the format of each stream is.
---
 changelog.d/7116.misc                          |   1 +
 synapse/app/generic_worker.py                  |   2 +-
 synapse/federation/send_queue.py               |   2 +-
 synapse/replication/tcp/streams/_base.py       | 181 +++++++++++++------------
 synapse/replication/tcp/streams/federation.py  |  16 +--
 tests/replication/tcp/streams/test_receipts.py |   4 +-
 6 files changed, 106 insertions(+), 100 deletions(-)
 create mode 100644 changelog.d/7116.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7116.misc b/changelog.d/7116.misc
new file mode 100644
index 0000000000..89d90bd49e
--- /dev/null
+++ b/changelog.d/7116.misc
@@ -0,0 +1 @@
+Convert `*StreamRow` classes to inner classes.
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 136babe6ce..c8fd8909a4 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -804,7 +804,7 @@ class FederationSenderHandler(object):
     async def _on_new_receipts(self, rows):
         """
         Args:
-            rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]):
+            rows (Iterable[synapse.replication.tcp.streams.ReceiptsStream.ReceiptsStreamRow]):
                 new receipts to be processed
         """
         for receipt in rows:
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 876fb0e245..e1700ca8aa 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -477,7 +477,7 @@ def process_rows_for_federation(transaction_queue, rows):
 
     Args:
         transaction_queue (FederationSender)
-        rows (list(synapse.replication.tcp.streams.FederationStreamRow))
+        rows (list(synapse.replication.tcp.streams.federation.FederationStream.FederationStreamRow))
     """
 
     # The federation stream contains a bunch of different types of
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index abf5c6c6a8..32d9514883 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -28,94 +28,6 @@ logger = logging.getLogger(__name__)
 
 MAX_EVENTS_BEHIND = 500000
 
-BackfillStreamRow = namedtuple(
-    "BackfillStreamRow",
-    (
-        "event_id",  # str
-        "room_id",  # str
-        "type",  # str
-        "state_key",  # str, optional
-        "redacts",  # str, optional
-        "relates_to",  # str, optional
-    ),
-)
-PresenceStreamRow = namedtuple(
-    "PresenceStreamRow",
-    (
-        "user_id",  # str
-        "state",  # str
-        "last_active_ts",  # int
-        "last_federation_update_ts",  # int
-        "last_user_sync_ts",  # int
-        "status_msg",  # str
-        "currently_active",  # bool
-    ),
-)
-TypingStreamRow = namedtuple(
-    "TypingStreamRow", ("room_id", "user_ids")  # str  # list(str)
-)
-ReceiptsStreamRow = namedtuple(
-    "ReceiptsStreamRow",
-    (
-        "room_id",  # str
-        "receipt_type",  # str
-        "user_id",  # str
-        "event_id",  # str
-        "data",  # dict
-    ),
-)
-PushRulesStreamRow = namedtuple("PushRulesStreamRow", ("user_id",))  # str
-PushersStreamRow = namedtuple(
-    "PushersStreamRow",
-    ("user_id", "app_id", "pushkey", "deleted"),  # str  # str  # str  # bool
-)
-
-
-@attr.s
-class CachesStreamRow:
-    """Stream to inform workers they should invalidate their cache.
-
-    Attributes:
-        cache_func: Name of the cached function.
-        keys: The entry in the cache to invalidate. If None then will
-            invalidate all.
-        invalidation_ts: Timestamp of when the invalidation took place.
-    """
-
-    cache_func = attr.ib(type=str)
-    keys = attr.ib(type=Optional[List[Any]])
-    invalidation_ts = attr.ib(type=int)
-
-
-PublicRoomsStreamRow = namedtuple(
-    "PublicRoomsStreamRow",
-    (
-        "room_id",  # str
-        "visibility",  # str
-        "appservice_id",  # str, optional
-        "network_id",  # str, optional
-    ),
-)
-
-
-@attr.s
-class DeviceListsStreamRow:
-    entity = attr.ib(type=str)
-
-
-ToDeviceStreamRow = namedtuple("ToDeviceStreamRow", ("entity",))  # str
-TagAccountDataStreamRow = namedtuple(
-    "TagAccountDataStreamRow", ("user_id", "room_id", "data")  # str  # str  # dict
-)
-AccountDataStreamRow = namedtuple(
-    "AccountDataStream", ("user_id", "room_id", "data_type")  # str  # str  # str
-)
-GroupsStreamRow = namedtuple(
-    "GroupsStreamRow",
-    ("group_id", "user_id", "type", "content"),  # str  # str  # str  # dict
-)
-UserSignatureStreamRow = namedtuple("UserSignatureStreamRow", ("user_id"))  # str
-
 
 class Stream(object):
     """Base class for the streams.
@@ -234,6 +146,18 @@ class BackfillStream(Stream):
     or it went from being an outlier to not.
     """
 
+    BackfillStreamRow = namedtuple(
+        "BackfillStreamRow",
+        (
+            "event_id",  # str
+            "room_id",  # str
+            "type",  # str
+            "state_key",  # str, optional
+            "redacts",  # str, optional
+            "relates_to",  # str, optional
+        ),
+    )
+
     NAME = "backfill"
     ROW_TYPE = BackfillStreamRow
 
@@ -246,6 +170,19 @@ class BackfillStream(Stream):
 
 
 class PresenceStream(Stream):
+    PresenceStreamRow = namedtuple(
+        "PresenceStreamRow",
+        (
+            "user_id",  # str
+            "state",  # str
+            "last_active_ts",  # int
+            "last_federation_update_ts",  # int
+            "last_user_sync_ts",  # int
+            "status_msg",  # str
+            "currently_active",  # bool
+        ),
+    )
+
     NAME = "presence"
     ROW_TYPE = PresenceStreamRow
 
@@ -260,6 +197,10 @@ class PresenceStream(Stream):
 
 
 class TypingStream(Stream):
+    TypingStreamRow = namedtuple(
+        "TypingStreamRow", ("room_id", "user_ids")  # str  # list(str)
+    )
+
     NAME = "typing"
     ROW_TYPE = TypingStreamRow
 
@@ -273,6 +214,17 @@ class TypingStream(Stream):
 
 
 class ReceiptsStream(Stream):
+    ReceiptsStreamRow = namedtuple(
+        "ReceiptsStreamRow",
+        (
+            "room_id",  # str
+            "receipt_type",  # str
+            "user_id",  # str
+            "event_id",  # str
+            "data",  # dict
+        ),
+    )
+
     NAME = "receipts"
     ROW_TYPE = ReceiptsStreamRow
 
@@ -289,6 +241,8 @@ class PushRulesStream(Stream):
     """A user has changed their push rules
     """
 
+    PushRulesStreamRow = namedtuple("PushRulesStreamRow", ("user_id",))  # str
+
     NAME = "push_rules"
     ROW_TYPE = PushRulesStreamRow
 
@@ -309,6 +263,11 @@ class PushersStream(Stream):
     """A user has added/changed/removed a pusher
     """
 
+    PushersStreamRow = namedtuple(
+        "PushersStreamRow",
+        ("user_id", "app_id", "pushkey", "deleted"),  # str  # str  # str  # bool
+    )
+
     NAME = "pushers"
     ROW_TYPE = PushersStreamRow
 
@@ -326,6 +285,21 @@ class CachesStream(Stream):
     the cache on the workers
     """
 
+    @attr.s
+    class CachesStreamRow:
+        """Stream to inform workers they should invalidate their cache.
+
+        Attributes:
+            cache_func: Name of the cached function.
+            keys: The entry in the cache to invalidate. If None then will
+                invalidate all.
+            invalidation_ts: Timestamp of when the invalidation took place.
+        """
+
+        cache_func = attr.ib(type=str)
+        keys = attr.ib(type=Optional[List[Any]])
+        invalidation_ts = attr.ib(type=int)
+
     NAME = "caches"
     ROW_TYPE = CachesStreamRow
 
@@ -342,6 +316,16 @@ class PublicRoomsStream(Stream):
     """The public rooms list changed
     """
 
+    PublicRoomsStreamRow = namedtuple(
+        "PublicRoomsStreamRow",
+        (
+            "room_id",  # str
+            "visibility",  # str
+            "appservice_id",  # str, optional
+            "network_id",  # str, optional
+        ),
+    )
+
     NAME = "public_rooms"
     ROW_TYPE = PublicRoomsStreamRow
 
@@ -359,6 +343,10 @@ class DeviceListsStream(Stream):
     told about a device update.
     """
 
+    @attr.s
+    class DeviceListsStreamRow:
+        entity = attr.ib(type=str)
+
     NAME = "device_lists"
     ROW_TYPE = DeviceListsStreamRow
 
@@ -375,6 +363,8 @@ class ToDeviceStream(Stream):
     """New to_device messages for a client
     """
 
+    ToDeviceStreamRow = namedtuple("ToDeviceStreamRow", ("entity",))  # str
+
     NAME = "to_device"
     ROW_TYPE = ToDeviceStreamRow
 
@@ -391,6 +381,10 @@ class TagAccountDataStream(Stream):
     """Someone added/removed a tag for a room
     """
 
+    TagAccountDataStreamRow = namedtuple(
+        "TagAccountDataStreamRow", ("user_id", "room_id", "data")  # str  # str  # dict
+    )
+
     NAME = "tag_account_data"
     ROW_TYPE = TagAccountDataStreamRow
 
@@ -407,6 +401,10 @@ class AccountDataStream(Stream):
     """Global or per room account data was changed
     """
 
+    AccountDataStreamRow = namedtuple(
+        "AccountDataStream", ("user_id", "room_id", "data_type")  # str  # str  # str
+    )
+
     NAME = "account_data"
     ROW_TYPE = AccountDataStreamRow
 
@@ -432,6 +430,11 @@ class AccountDataStream(Stream):
 
 
 class GroupServerStream(Stream):
+    GroupsStreamRow = namedtuple(
+        "GroupsStreamRow",
+        ("group_id", "user_id", "type", "content"),  # str  # str  # str  # dict
+    )
+
     NAME = "groups"
     ROW_TYPE = GroupsStreamRow
 
@@ -448,6 +451,8 @@ class UserSignatureStream(Stream):
     """A user has signed their own device with their user-signing key
     """
 
+    UserSignatureStreamRow = namedtuple("UserSignatureStreamRow", ("user_id"))  # str
+
     NAME = "user_signature"
     ROW_TYPE = UserSignatureStreamRow
 
diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py
index 615f3dc9ac..f5f9336430 100644
--- a/synapse/replication/tcp/streams/federation.py
+++ b/synapse/replication/tcp/streams/federation.py
@@ -17,20 +17,20 @@ from collections import namedtuple
 
 from ._base import Stream
 
-FederationStreamRow = namedtuple(
-    "FederationStreamRow",
-    (
-        "type",  # str, the type of data as defined in the BaseFederationRows
-        "data",  # dict, serialization of a federation.send_queue.BaseFederationRow
-    ),
-)
-
 
 class FederationStream(Stream):
     """Data to be sent over federation. Only available when master has federation
     sending disabled.
     """
 
+    FederationStreamRow = namedtuple(
+        "FederationStreamRow",
+        (
+            "type",  # str, the type of data as defined in the BaseFederationRows
+            "data",  # dict, serialization of a federation.send_queue.BaseFederationRow
+        ),
+    )
+
     NAME = "federation"
     ROW_TYPE = FederationStreamRow
 
diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py
index d5a99f6caa..fa2493cad6 100644
--- a/tests/replication/tcp/streams/test_receipts.py
+++ b/tests/replication/tcp/streams/test_receipts.py
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.replication.tcp.streams._base import ReceiptsStreamRow
+from synapse.replication.tcp.streams._base import ReceiptsStream
 
 from tests.replication.tcp.streams._base import BaseStreamTestCase
 
@@ -38,7 +38,7 @@ class ReceiptsStreamTestCase(BaseStreamTestCase):
         rdata_rows = self.test_handler.received_rdata_rows
         self.assertEqual(1, len(rdata_rows))
         self.assertEqual(rdata_rows[0][0], "receipts")
-        row = rdata_rows[0][2]  # type: ReceiptsStreamRow
+        row = rdata_rows[0][2]  # type: ReceiptsStream.ReceiptsStreamRow
         self.assertEqual(ROOM_ID, row.room_id)
         self.assertEqual("m.read", row.receipt_type)
         self.assertEqual(USER_ID, row.user_id)
-- 
cgit 1.4.1
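
The layout after this patch keeps each stream's wire format next to its
definition. A minimal sketch of the shape, using the ReceiptsStream fields
from the diff (the Stream base class here is a stand-in, not Synapse's):

    from collections import namedtuple

    class Stream:
        NAME = None
        ROW_TYPE = None

    class ReceiptsStream(Stream):
        ReceiptsStreamRow = namedtuple(
            "ReceiptsStreamRow",
            ("room_id", "receipt_type", "user_id", "event_id", "data"),
        )

        NAME = "receipts"
        ROW_TYPE = ReceiptsStreamRow

    # Consumers now name the row type via its stream, as the updated test
    # above does: ReceiptsStream.ReceiptsStreamRow.
    row = ReceiptsStream.ROW_TYPE("!room:a", "m.read", "@u:a", "$ev", {})
    print(row.receipt_type)  # -> m.read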


From e341518f92132ad0b71a826857146b0bd2e56d6b Mon Sep 17 00:00:00 2001
From: "Kartikaya Gupta (kats)" 
Date: Mon, 23 Mar 2020 11:31:02 -0400
Subject: Update pre-built package name for FreeBSD (#7107)

Signed-off-by: Kartikaya Gupta 
---
 INSTALL.md           | 2 +-
 changelog.d/7107.doc | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7107.doc

(limited to 'changelog.d')

diff --git a/INSTALL.md b/INSTALL.md
index c0926ba590..f9e13b4cf6 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -383,7 +383,7 @@ Synapse can be found in the void repositories as 'synapse':
 Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
 
  - Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- - Packages: `pkg install py27-matrix-synapse`
+ - Packages: `pkg install py37-matrix-synapse`
 
 
 ### NixOS
diff --git a/changelog.d/7107.doc b/changelog.d/7107.doc
new file mode 100644
index 0000000000..f6da32d406
--- /dev/null
+++ b/changelog.d/7107.doc
@@ -0,0 +1 @@
+Update pre-built package name for FreeBSD.
-- 
cgit 1.4.1


From 190ab593b7a2c0d79569758c0faa4d2442bc2c5f Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Mon, 23 Mar 2020 15:21:54 -0400
Subject: Use the proper error code when a canonical alias that does not exist
 is used. (#7109)

---
 changelog.d/7109.bugfix     |  1 +
 synapse/handlers/message.py | 57 ++++++++++++++++++++++++++++++---------------
 2 files changed, 39 insertions(+), 19 deletions(-)
 create mode 100644 changelog.d/7109.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7109.bugfix b/changelog.d/7109.bugfix
new file mode 100644
index 0000000000..268de9978e
--- /dev/null
+++ b/changelog.d/7109.bugfix
@@ -0,0 +1 @@
+Return the proper error (M_BAD_ALIAS) when a non-existent canonical alias is provided.
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index b743fc2dcc..522271eed1 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -851,6 +851,38 @@ class EventCreationHandler(object):
                     self.store.remove_push_actions_from_staging, event.event_id
                 )
 
+    @defer.inlineCallbacks
+    def _validate_canonical_alias(
+        self, directory_handler, room_alias_str, expected_room_id
+    ):
+        """
+        Ensure that the given room alias points to the expected room ID.
+
+        Args:
+            directory_handler: The directory handler object.
+            room_alias_str: The room alias to check.
+            expected_room_id: The room ID that the alias should point to.
+        """
+        room_alias = RoomAlias.from_string(room_alias_str)
+        try:
+            mapping = yield directory_handler.get_association(room_alias)
+        except SynapseError as e:
+            # Turn M_NOT_FOUND errors into M_BAD_ALIAS errors.
+            if e.errcode == Codes.NOT_FOUND:
+                raise SynapseError(
+                    400,
+                    "Room alias %s does not point to the room" % (room_alias_str,),
+                    Codes.BAD_ALIAS,
+                )
+            raise
+
+        if mapping["room_id"] != expected_room_id:
+            raise SynapseError(
+                400,
+                "Room alias %s does not point to the room" % (room_alias_str,),
+                Codes.BAD_ALIAS,
+            )
+
     @defer.inlineCallbacks
     def persist_and_notify_client_event(
         self, requester, event, context, ratelimit=True, extra_users=[]
@@ -905,15 +937,9 @@ class EventCreationHandler(object):
             room_alias_str = event.content.get("alias", None)
             directory_handler = self.hs.get_handlers().directory_handler
             if room_alias_str and room_alias_str != original_alias:
-                room_alias = RoomAlias.from_string(room_alias_str)
-                mapping = yield directory_handler.get_association(room_alias)
-
-                if mapping["room_id"] != event.room_id:
-                    raise SynapseError(
-                        400,
-                        "Room alias %s does not point to the room" % (room_alias_str,),
-                        Codes.BAD_ALIAS,
-                    )
+                yield self._validate_canonical_alias(
+                    directory_handler, room_alias_str, event.room_id
+                )
 
             # Check that alt_aliases is the proper form.
             alt_aliases = event.content.get("alt_aliases", [])
@@ -931,16 +957,9 @@ class EventCreationHandler(object):
             new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
             if new_alt_aliases:
                 for alias_str in new_alt_aliases:
-                    room_alias = RoomAlias.from_string(alias_str)
-                    mapping = yield directory_handler.get_association(room_alias)
-
-                    if mapping["room_id"] != event.room_id:
-                        raise SynapseError(
-                            400,
-                            "Room alias %s does not point to the room"
-                            % (room_alias_str,),
-                            Codes.BAD_ALIAS,
-                        )
+                    yield self._validate_canonical_alias(
+                        directory_handler, alias_str, event.room_id
+                    )
 
         federation_handler = self.hs.get_handlers().federation_handler
 
-- 
cgit 1.4.1
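
The key change is translating the directory lookup failure into the more
accurate error code: an alias that does not resolve is now reported as
M_BAD_ALIAS rather than M_NOT_FOUND. A self-contained sketch of that
translation pattern (the exception class below is a simplified stand-in for
SynapseError):

    class ApiError(Exception):
        def __init__(self, code: int, msg: str, errcode: str):
            super().__init__(msg)
            self.code = code
            self.errcode = errcode

    def get_association(alias: str) -> dict:
        # Stand-in for the directory handler: the alias is unknown.
        raise ApiError(404, "Unknown alias", "M_NOT_FOUND")

    def validate_alias(alias: str, expected_room_id: str) -> None:
        try:
            mapping = get_association(alias)
        except ApiError as e:
            # Turn M_NOT_FOUND errors into M_BAD_ALIAS errors.
            if e.errcode == "M_NOT_FOUND":
                raise ApiError(400, "Alias does not point to the room", "M_BAD_ALIAS")
            raise
        if mapping["room_id"] != expected_room_id:
            raise ApiError(400, "Alias does not point to the room", "M_BAD_ALIAS")

    try:
        validate_alias("#nope:example.com", "!room:example.com")
    except ApiError as e:
        print(e.errcode)  # -> M_BAD_ALIAS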


From d6828c129ffa5bbdd8bd0ed620772f77be45c006 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 24 Mar 2020 10:36:44 +0000
Subject: Newsfile

---
 changelog.d/7133.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/7133.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7133.bugfix b/changelog.d/7133.bugfix
new file mode 100644
index 0000000000..61a86fd34e
--- /dev/null
+++ b/changelog.d/7133.bugfix
@@ -0,0 +1 @@
+Fix starting workers when federation sending is not split out.
-- 
cgit 1.4.1


From 1fcf9c6f95fcfcacab95bb78849d79b8c7fa22e9 Mon Sep 17 00:00:00 2001
From: Naugrimm 
Date: Tue, 24 Mar 2020 12:59:04 +0100
Subject: Fix CAS redirect url (#6634)

Build the same service URL when requesting the CAS ticket and when calling the proxyValidate URL.
---
 changelog.d/6634.bugfix         |  1 +
 synapse/rest/client/v1/login.py | 27 ++++++++++++++++-----------
 2 files changed, 17 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/6634.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6634.bugfix b/changelog.d/6634.bugfix
new file mode 100644
index 0000000000..ec48fdc0a0
--- /dev/null
+++ b/changelog.d/6634.bugfix
@@ -0,0 +1 @@
+Fix single sign-on with CAS systems: pass the same service URL when requesting the CAS ticket and when calling the `proxyValidate` URL. Contributed by @Naugrimm.
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 31551524f8..56d713462a 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -72,6 +72,14 @@ def login_id_thirdparty_from_phone(identifier):
     return {"type": "m.id.thirdparty", "medium": "msisdn", "address": msisdn}
 
 
+def build_service_param(cas_service_url, client_redirect_url):
+    return "%s%s?redirectUrl=%s" % (
+        cas_service_url,
+        "/_matrix/client/r0/login/cas/ticket",
+        urllib.parse.quote(client_redirect_url, safe=""),
+    )
+
+
 class LoginRestServlet(RestServlet):
     PATTERNS = client_patterns("/login$", v1=True)
     CAS_TYPE = "m.login.cas"
@@ -427,18 +435,15 @@ class BaseSSORedirectServlet(RestServlet):
 class CasRedirectServlet(BaseSSORedirectServlet):
     def __init__(self, hs):
         super(CasRedirectServlet, self).__init__()
-        self.cas_server_url = hs.config.cas_server_url.encode("ascii")
-        self.cas_service_url = hs.config.cas_service_url.encode("ascii")
+        self.cas_server_url = hs.config.cas_server_url
+        self.cas_service_url = hs.config.cas_service_url
 
     def get_sso_url(self, client_redirect_url):
-        client_redirect_url_param = urllib.parse.urlencode(
-            {b"redirectUrl": client_redirect_url}
-        ).encode("ascii")
-        hs_redirect_url = self.cas_service_url + b"/_matrix/client/r0/login/cas/ticket"
-        service_param = urllib.parse.urlencode(
-            {b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param)}
-        ).encode("ascii")
-        return b"%s/login?%s" % (self.cas_server_url, service_param)
+        args = urllib.parse.urlencode(
+            {"service": build_service_param(self.cas_service_url, client_redirect_url)}
+        )
+
+        return "%s/login?%s" % (self.cas_server_url, args)
 
 
 class CasTicketServlet(RestServlet):
@@ -458,7 +463,7 @@ class CasTicketServlet(RestServlet):
         uri = self.cas_server_url + "/proxyValidate"
         args = {
             "ticket": parse_string(request, "ticket", required=True),
-            "service": self.cas_service_url,
+            "service": build_service_param(self.cas_service_url, client_redirect_url),
         }
         try:
             body = await self._http_client.get_raw(uri, args)
-- 
cgit 1.4.1
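
The fix works because CAS binds a ticket to the `service` value it was
issued for: validation fails unless the same value is presented again, and
the new helper is now the single source of both. A worked example using the
helper exactly as defined in the patch (URLs below are illustrative):

    import urllib.parse

    def build_service_param(cas_service_url, client_redirect_url):
        return "%s%s?redirectUrl=%s" % (
            cas_service_url,
            "/_matrix/client/r0/login/cas/ticket",
            urllib.parse.quote(client_redirect_url, safe=""),
        )

    print(build_service_param("https://hs.example.com", "https://app.example.com/?x=1"))
    # -> https://hs.example.com/_matrix/client/r0/login/cas/ticket?redirectUrl=
    #    https%3A%2F%2Fapp.example.com%2F%3Fx%3D1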


From 39230d217104f3cd7aba9065dc478f935ce1e614 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 24 Mar 2020 14:45:33 +0000
Subject: Clean up some LoggingContext stuff (#7120)

* Pull Sentinel out of LoggingContext

... and drop a few unnecessary references to it

* Factor out LoggingContext.current_context

move `current_context` and `set_context` out to top-level functions.

Mostly this means that I can more easily trace what's actually referring to
LoggingContext, but I think it's generally neater.

* move copy-to-parent into `stop`

this really just makes `start` and `stop` more symmetric. It also means that it
behaves correctly if you manually `set_log_context` rather than using the
context manager.

* Replace `LoggingContext.alive` with `finished`

Turn `alive` into `finished` and make it a bit better defined.
---
 changelog.d/7120.misc                              |   1 +
 docs/log_contexts.md                               |   5 +-
 synapse/crypto/keyring.py                          |   4 +-
 synapse/federation/federation_base.py              |   4 +-
 synapse/handlers/sync.py                           |   4 +-
 synapse/http/request_metrics.py                    |   6 +-
 synapse/logging/_structured.py                     |   4 +-
 synapse/logging/context.py                         | 234 +++++++++++----------
 synapse/logging/scopecontextmanager.py             |  13 +-
 synapse/storage/data_stores/main/events_worker.py  |   4 +-
 synapse/storage/database.py                        |  11 +-
 synapse/util/metrics.py                            |   4 +-
 synapse/util/patch_inline_callbacks.py             |  36 ++--
 tests/crypto/test_keyring.py                       |   7 +-
 .../federation/test_matrix_federation_agent.py     |   6 +-
 tests/http/federation/test_srv_resolver.py         |   6 +-
 tests/http/test_fedclient.py                       |   6 +-
 tests/rest/client/test_transactions.py             |  16 +-
 tests/unittest.py                                  |  12 +-
 tests/util/caches/test_descriptors.py              |  22 +-
 tests/util/test_async_utils.py                     |  15 +-
 tests/util/test_linearizer.py                      |   6 +-
 tests/util/test_logcontext.py                      |  22 +-
 tests/utils.py                                     |   6 +-
 24 files changed, 232 insertions(+), 222 deletions(-)
 create mode 100644 changelog.d/7120.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7120.misc b/changelog.d/7120.misc
new file mode 100644
index 0000000000..731f4dcb52
--- /dev/null
+++ b/changelog.d/7120.misc
@@ -0,0 +1 @@
+Clean up some LoggingContext code.
diff --git a/docs/log_contexts.md b/docs/log_contexts.md
index 5331e8c88b..fe30ca2791 100644
--- a/docs/log_contexts.md
+++ b/docs/log_contexts.md
@@ -29,14 +29,13 @@ from synapse.logging import context         # omitted from future snippets
 def handle_request(request_id):
     request_context = context.LoggingContext()
 
-    calling_context = context.LoggingContext.current_context()
-    context.LoggingContext.set_current_context(request_context)
+    calling_context = context.set_current_context(request_context)
     try:
         request_context.request = request_id
         do_request_handling()
         logger.debug("finished")
     finally:
-        context.LoggingContext.set_current_context(calling_context)
+        context.set_current_context(calling_context)
 
 def do_request_handling():
     logger.debug("phew")  # this will be logged against request_id
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 983f0ead8c..a9f4025bfe 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -43,8 +43,8 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.logging.context import (
-    LoggingContext,
     PreserveLoggingContext,
+    current_context,
     make_deferred_yieldable,
     preserve_fn,
     run_in_background,
@@ -236,7 +236,7 @@ class Keyring(object):
         """
 
         try:
-            ctx = LoggingContext.current_context()
+            ctx = current_context()
 
             # map from server name to a set of outstanding request ids
             server_to_request_ids = {}
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index b0b0eba41e..4b115aac04 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -32,8 +32,8 @@ from synapse.events import EventBase, make_event_from_dict
 from synapse.events.utils import prune_event
 from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import (
-    LoggingContext,
     PreserveLoggingContext,
+    current_context,
     make_deferred_yieldable,
 )
 from synapse.types import JsonDict, get_domain_from_id
@@ -78,7 +78,7 @@ class FederationBase(object):
         """
         deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)
 
-        ctx = LoggingContext.current_context()
+        ctx = current_context()
 
         def callback(_, pdu: EventBase):
             with PreserveLoggingContext(ctx):
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 669dbc8a48..5746fdea14 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -26,7 +26,7 @@ from prometheus_client import Counter
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.filtering import FilterCollection
 from synapse.events import EventBase
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import current_context
 from synapse.push.clientformat import format_push_rules_for_user
 from synapse.storage.roommember import MemberSummary
 from synapse.storage.state import StateFilter
@@ -301,7 +301,7 @@ class SyncHandler(object):
         else:
             sync_type = "incremental_sync"
 
-        context = LoggingContext.current_context()
+        context = current_context()
         if context:
             context.tag = sync_type
 
diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py
index 58f9cc61c8..b58ae3d9db 100644
--- a/synapse/http/request_metrics.py
+++ b/synapse/http/request_metrics.py
@@ -19,7 +19,7 @@ import threading
 
 from prometheus_client.core import Counter, Histogram
 
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import current_context
 from synapse.metrics import LaterGauge
 
 logger = logging.getLogger(__name__)
@@ -148,7 +148,7 @@ LaterGauge(
 class RequestMetrics(object):
     def start(self, time_sec, name, method):
         self.start = time_sec
-        self.start_context = LoggingContext.current_context()
+        self.start_context = current_context()
         self.name = name
         self.method = method
 
@@ -163,7 +163,7 @@ class RequestMetrics(object):
         with _in_flight_requests_lock:
             _in_flight_requests.discard(self)
 
-        context = LoggingContext.current_context()
+        context = current_context()
 
         tag = ""
         if context:
diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py
index ffa7b20ca8..7372450b45 100644
--- a/synapse/logging/_structured.py
+++ b/synapse/logging/_structured.py
@@ -42,7 +42,7 @@ from synapse.logging._terse_json import (
     TerseJSONToConsoleLogObserver,
     TerseJSONToTCPLogObserver,
 )
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import current_context
 
 
 def stdlib_log_level_to_twisted(level: str) -> LogLevel:
@@ -86,7 +86,7 @@ class LogContextObserver(object):
             ].startswith("Timing out client"):
                 return
 
-        context = LoggingContext.current_context()
+        context = current_context()
 
         # Copy the context information to the log event.
         if context is not None:
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 860b99a4c6..a8eafb1c7c 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -175,7 +175,54 @@ class ContextResourceUsage(object):
         return res
 
 
-LoggingContextOrSentinel = Union["LoggingContext", "LoggingContext.Sentinel"]
+LoggingContextOrSentinel = Union["LoggingContext", "_Sentinel"]
+
+
+class _Sentinel(object):
+    """Sentinel to represent the root context"""
+
+    __slots__ = ["previous_context", "finished", "request", "scope", "tag"]
+
+    def __init__(self) -> None:
+        # Minimal set for compatibility with LoggingContext
+        self.previous_context = None
+        self.finished = False
+        self.request = None
+        self.scope = None
+        self.tag = None
+
+    def __str__(self):
+        return "sentinel"
+
+    def copy_to(self, record):
+        pass
+
+    def copy_to_twisted_log_entry(self, record):
+        record["request"] = None
+        record["scope"] = None
+
+    def start(self):
+        pass
+
+    def stop(self):
+        pass
+
+    def add_database_transaction(self, duration_sec):
+        pass
+
+    def add_database_scheduled(self, sched_sec):
+        pass
+
+    def record_event_fetch(self, event_count):
+        pass
+
+    def __nonzero__(self):
+        return False
+
+    __bool__ = __nonzero__  # python3
+
+
+SENTINEL_CONTEXT = _Sentinel()
 
 
 class LoggingContext(object):
@@ -199,76 +246,33 @@ class LoggingContext(object):
         "_resource_usage",
         "usage_start",
         "main_thread",
-        "alive",
+        "finished",
         "request",
         "tag",
         "scope",
     ]
 
-    thread_local = threading.local()
-
-    class Sentinel(object):
-        """Sentinel to represent the root context"""
-
-        __slots__ = ["previous_context", "alive", "request", "scope", "tag"]
-
-        def __init__(self) -> None:
-            # Minimal set for compatibility with LoggingContext
-            self.previous_context = None
-            self.alive = None
-            self.request = None
-            self.scope = None
-            self.tag = None
-
-        def __str__(self):
-            return "sentinel"
-
-        def copy_to(self, record):
-            pass
-
-        def copy_to_twisted_log_entry(self, record):
-            record["request"] = None
-            record["scope"] = None
-
-        def start(self):
-            pass
-
-        def stop(self):
-            pass
-
-        def add_database_transaction(self, duration_sec):
-            pass
-
-        def add_database_scheduled(self, sched_sec):
-            pass
-
-        def record_event_fetch(self, event_count):
-            pass
-
-        def __nonzero__(self):
-            return False
-
-        __bool__ = __nonzero__  # python3
-
-    sentinel = Sentinel()
-
     def __init__(self, name=None, parent_context=None, request=None) -> None:
-        self.previous_context = LoggingContext.current_context()
+        self.previous_context = current_context()
         self.name = name
 
         # track the resources used by this context so far
         self._resource_usage = ContextResourceUsage()
 
-        # If alive has the thread resource usage when the logcontext last
-        # became active.
+        # The thread resource usage when the logcontext became active. None
+        # if the context is not currently active.
         self.usage_start = None
 
         self.main_thread = get_thread_id()
         self.request = None
         self.tag = ""
-        self.alive = True
         self.scope = None  # type: Optional[_LogContextScope]
 
+        # keep track of whether we have hit the __exit__ block for this context
+        # (suggesting that the thing that created the context thinks it should
+        # be finished, and that re-activating it would suggest an error).
+        self.finished = False
+
         self.parent_context = parent_context
 
         if self.parent_context is not None:
@@ -283,44 +287,15 @@ class LoggingContext(object):
             return str(self.request)
         return "%s@%x" % (self.name, id(self))
 
-    @classmethod
-    def current_context(cls) -> LoggingContextOrSentinel:
-        """Get the current logging context from thread local storage
-
-        Returns:
-            LoggingContext: the current logging context
-        """
-        return getattr(cls.thread_local, "current_context", cls.sentinel)
-
-    @classmethod
-    def set_current_context(
-        cls, context: LoggingContextOrSentinel
-    ) -> LoggingContextOrSentinel:
-        """Set the current logging context in thread local storage
-        Args:
-            context(LoggingContext): The context to activate.
-        Returns:
-            The context that was previously active
-        """
-        current = cls.current_context()
-
-        if current is not context:
-            current.stop()
-            cls.thread_local.current_context = context
-            context.start()
-        return current
-
     def __enter__(self) -> "LoggingContext":
         """Enters this logging context into thread local storage"""
-        old_context = self.set_current_context(self)
+        old_context = set_current_context(self)
         if self.previous_context != old_context:
             logger.warning(
                 "Expected previous context %r, found %r",
                 self.previous_context,
                 old_context,
             )
-        self.alive = True
-
         return self
 
     def __exit__(self, type, value, traceback) -> None:
@@ -329,24 +304,19 @@ class LoggingContext(object):
         Returns:
             None to avoid suppressing any exceptions that were thrown.
         """
-        current = self.set_current_context(self.previous_context)
+        current = set_current_context(self.previous_context)
         if current is not self:
-            if current is self.sentinel:
+            if current is SENTINEL_CONTEXT:
                 logger.warning("Expected logging context %s was lost", self)
             else:
                 logger.warning(
                     "Expected logging context %s but found %s", self, current
                 )
-        self.alive = False
-
-        # if we have a parent, pass our CPU usage stats on
-        if self.parent_context is not None and hasattr(
-            self.parent_context, "_resource_usage"
-        ):
-            self.parent_context._resource_usage += self._resource_usage
 
-            # reset them in case we get entered again
-            self._resource_usage.reset()
+        # the fact that we are here suggests that the caller thinks that everything
+        # is done and dusted for this logcontext, and further activity will not get
+        # recorded against the correct metrics.
+        self.finished = True
 
     def copy_to(self, record) -> None:
         """Copy logging fields from this context to a log record or
@@ -371,9 +341,14 @@ class LoggingContext(object):
             logger.warning("Started logcontext %s on different thread", self)
             return
 
+        if self.finished:
+            logger.warning("Re-starting finished log context %s", self)
+
         # If we haven't already started record the thread resource usage so
         # far
-        if not self.usage_start:
+        if self.usage_start:
+            logger.warning("Re-starting already-active log context %s", self)
+        else:
             self.usage_start = get_thread_resource_usage()
 
     def stop(self) -> None:
@@ -396,6 +371,15 @@ class LoggingContext(object):
 
         self.usage_start = None
 
+        # if we have a parent, pass our CPU usage stats on
+        if self.parent_context is not None and hasattr(
+            self.parent_context, "_resource_usage"
+        ):
+            self.parent_context._resource_usage += self._resource_usage
+
+            # reset them in case we get entered again
+            self._resource_usage.reset()
+
     def get_resource_usage(self) -> ContextResourceUsage:
         """Get resources used by this logcontext so far.
 
@@ -409,7 +393,7 @@ class LoggingContext(object):
         # If we are on the correct thread and we're currently running then we
         # can include resource usage so far.
         is_main_thread = get_thread_id() == self.main_thread
-        if self.alive and self.usage_start and is_main_thread:
+        if self.usage_start and is_main_thread:
             utime_delta, stime_delta = self._get_cputime()
             res.ru_utime += utime_delta
             res.ru_stime += stime_delta
@@ -492,7 +476,7 @@ class LoggingContextFilter(logging.Filter):
         Returns:
             True to include the record in the log output.
         """
-        context = LoggingContext.current_context()
+        context = current_context()
         for key, value in self.defaults.items():
             setattr(record, key, value)
 
@@ -512,27 +496,24 @@ class PreserveLoggingContext(object):
 
     __slots__ = ["current_context", "new_context", "has_parent"]
 
-    def __init__(self, new_context: Optional[LoggingContextOrSentinel] = None) -> None:
-        if new_context is None:
-            self.new_context = LoggingContext.sentinel  # type: LoggingContextOrSentinel
-        else:
-            self.new_context = new_context
+    def __init__(
+        self, new_context: LoggingContextOrSentinel = SENTINEL_CONTEXT
+    ) -> None:
+        self.new_context = new_context
 
     def __enter__(self) -> None:
         """Captures the current logging context"""
-        self.current_context = LoggingContext.set_current_context(self.new_context)
+        self.current_context = set_current_context(self.new_context)
 
         if self.current_context:
             self.has_parent = self.current_context.previous_context is not None
-            if not self.current_context.alive:
-                logger.debug("Entering dead context: %s", self.current_context)
 
     def __exit__(self, type, value, traceback) -> None:
         """Restores the current logging context"""
-        context = LoggingContext.set_current_context(self.current_context)
+        context = set_current_context(self.current_context)
 
         if context != self.new_context:
-            if context is LoggingContext.sentinel:
+            if not context:
                 logger.warning("Expected logging context %s was lost", self.new_context)
             else:
                 logger.warning(
@@ -541,9 +522,30 @@ class PreserveLoggingContext(object):
                     context,
                 )
 
-        if self.current_context is not LoggingContext.sentinel:
-            if not self.current_context.alive:
-                logger.debug("Restoring dead context: %s", self.current_context)
+
+_thread_local = threading.local()
+_thread_local.current_context = SENTINEL_CONTEXT
+
+
+def current_context() -> LoggingContextOrSentinel:
+    """Get the current logging context from thread local storage"""
+    return getattr(_thread_local, "current_context", SENTINEL_CONTEXT)
+
+
+def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
+    """Set the current logging context in thread local storage
+    Args:
+        context(LoggingContext): The context to activate.
+    Returns:
+        The context that was previously active
+    """
+    current = current_context()
+
+    if current is not context:
+        current.stop()
+        _thread_local.current_context = context
+        context.start()
+    return current
 
 
 def nested_logging_context(
@@ -572,7 +574,7 @@ def nested_logging_context(
     if parent_context is not None:
         context = parent_context  # type: LoggingContextOrSentinel
     else:
-        context = LoggingContext.current_context()
+        context = current_context()
     return LoggingContext(
         parent_context=context, request=str(context.request) + "-" + suffix
     )
@@ -604,7 +606,7 @@ def run_in_background(f, *args, **kwargs):
     CRITICAL error about an unhandled error will be logged without much
     indication about where it came from.
     """
-    current = LoggingContext.current_context()
+    current = current_context()
     try:
         res = f(*args, **kwargs)
     except:  # noqa: E722
@@ -625,7 +627,7 @@ def run_in_background(f, *args, **kwargs):
 
     # The function may have reset the context before returning, so
     # we need to restore it now.
-    ctx = LoggingContext.set_current_context(current)
+    ctx = set_current_context(current)
 
     # The original context will be restored when the deferred
     # completes, but there is nothing waiting for it, so it will
@@ -674,7 +676,7 @@ def make_deferred_yieldable(deferred):
 
     # ok, we can't be sure that a yield won't block, so let's reset the
     # logcontext, and add a callback to the deferred to restore it.
-    prev_context = LoggingContext.set_current_context(LoggingContext.sentinel)
+    prev_context = set_current_context(SENTINEL_CONTEXT)
     deferred.addBoth(_set_context_cb, prev_context)
     return deferred
 
@@ -684,7 +686,7 @@ ResultT = TypeVar("ResultT")
 
 def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT:
     """A callback function which just sets the logging context"""
-    LoggingContext.set_current_context(context)
+    set_current_context(context)
     return result
 
 
@@ -752,7 +754,7 @@ def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs):
         Deferred: A Deferred which fires a callback with the result of `f`, or an
             errback if `f` throws an exception.
     """
-    logcontext = LoggingContext.current_context()
+    logcontext = current_context()
 
     def g():
         with LoggingContext(parent_context=logcontext):
diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py
index 4eed4f2338..dc3ab00cbb 100644
--- a/synapse/logging/scopecontextmanager.py
+++ b/synapse/logging/scopecontextmanager.py
@@ -19,7 +19,7 @@ from opentracing import Scope, ScopeManager
 
 import twisted
 
-from synapse.logging.context import LoggingContext, nested_logging_context
+from synapse.logging.context import current_context, nested_logging_context
 
 logger = logging.getLogger(__name__)
 
@@ -49,11 +49,8 @@ class LogContextScopeManager(ScopeManager):
             (Scope) : the Scope that is active, or None if not
             available.
         """
-        ctx = LoggingContext.current_context()
-        if ctx is LoggingContext.sentinel:
-            return None
-        else:
-            return ctx.scope
+        ctx = current_context()
+        return ctx.scope
 
     def activate(self, span, finish_on_close):
         """
@@ -70,9 +67,9 @@ class LogContextScopeManager(ScopeManager):
         """
 
         enter_logcontext = False
-        ctx = LoggingContext.current_context()
+        ctx = current_context()
 
-        if ctx is LoggingContext.sentinel:
+        if not ctx:
+            # We don't want this scope to affect the sentinel context.
             logger.error("Tried to activate scope outside of loggingcontext")
             return Scope(None, span)
diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py
index ca237c6f12..3013f49d32 100644
--- a/synapse/storage/data_stores/main/events_worker.py
+++ b/synapse/storage/data_stores/main/events_worker.py
@@ -35,7 +35,7 @@ from synapse.api.room_versions import (
 )
 from synapse.events import make_event_from_dict
 from synapse.events.utils import prune_event
-from synapse.logging.context import LoggingContext, PreserveLoggingContext
+from synapse.logging.context import PreserveLoggingContext, current_context
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
 from synapse.storage.database import Database
@@ -409,7 +409,7 @@ class EventsWorkerStore(SQLBaseStore):
         missing_events_ids = [e for e in event_ids if e not in event_entry_map]
 
         if missing_events_ids:
-            log_ctx = LoggingContext.current_context()
+            log_ctx = current_context()
             log_ctx.record_event_fetch(len(missing_events_ids))
 
             # Note that _get_events_from_db is also responsible for turning db rows
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index e61595336c..715c0346dd 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -32,6 +32,7 @@ from synapse.config.database import DatabaseConnectionConfig
 from synapse.logging.context import (
     LoggingContext,
     LoggingContextOrSentinel,
+    current_context,
     make_deferred_yieldable,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -483,7 +484,7 @@ class Database(object):
             end = monotonic_time()
             duration = end - start
 
-            LoggingContext.current_context().add_database_transaction(duration)
+            current_context().add_database_transaction(duration)
 
             transaction_logger.debug("[TXN END] {%s} %f sec", name, duration)
 
@@ -510,7 +511,7 @@ class Database(object):
         after_callbacks = []  # type: List[_CallbackListEntry]
         exception_callbacks = []  # type: List[_CallbackListEntry]
 
-        if LoggingContext.current_context() == LoggingContext.sentinel:
+        if not current_context():
             logger.warning("Starting db txn '%s' from sentinel context", desc)
 
         try:
@@ -547,10 +548,8 @@ class Database(object):
         Returns:
             Deferred: The result of func
         """
-        parent_context = (
-            LoggingContext.current_context()
-        )  # type: Optional[LoggingContextOrSentinel]
-        if parent_context == LoggingContext.sentinel:
+        parent_context = current_context()  # type: Optional[LoggingContextOrSentinel]
+        if not parent_context:
             logger.warning(
                 "Starting db connection from sentinel context: metrics will be lost"
             )
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 7b18455469..ec61e14423 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -21,7 +21,7 @@ from prometheus_client import Counter
 
 from twisted.internet import defer
 
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import LoggingContext, current_context
 from synapse.metrics import InFlightGauge
 
 logger = logging.getLogger(__name__)
@@ -106,7 +106,7 @@ class Measure(object):
             raise RuntimeError("Measure() objects cannot be re-used")
 
         self.start = self.clock.time()
-        parent_context = LoggingContext.current_context()
+        parent_context = current_context()
         self._logging_context = LoggingContext(
             "Measure[%s]" % (self.name,), parent_context
         )
diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py
index 3925927f9f..fdff195771 100644
--- a/synapse/util/patch_inline_callbacks.py
+++ b/synapse/util/patch_inline_callbacks.py
@@ -32,7 +32,7 @@ def do_patch():
     Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit
     """
 
-    from synapse.logging.context import LoggingContext
+    from synapse.logging.context import current_context
 
     global _already_patched
 
@@ -43,35 +43,35 @@ def do_patch():
     def new_inline_callbacks(f):
         @functools.wraps(f)
         def wrapped(*args, **kwargs):
-            start_context = LoggingContext.current_context()
+            start_context = current_context()
             changes = []  # type: List[str]
             orig = orig_inline_callbacks(_check_yield_points(f, changes))
 
             try:
                 res = orig(*args, **kwargs)
             except Exception:
-                if LoggingContext.current_context() != start_context:
+                if current_context() != start_context:
                     for err in changes:
                         print(err, file=sys.stderr)
 
                     err = "%s changed context from %s to %s on exception" % (
                         f,
                         start_context,
-                        LoggingContext.current_context(),
+                        current_context(),
                     )
                     print(err, file=sys.stderr)
                     raise Exception(err)
                 raise
 
             if not isinstance(res, Deferred) or res.called:
-                if LoggingContext.current_context() != start_context:
+                if current_context() != start_context:
                     for err in changes:
                         print(err, file=sys.stderr)
 
                     err = "Completed %s changed context from %s to %s" % (
                         f,
                         start_context,
-                        LoggingContext.current_context(),
+                        current_context(),
                     )
                     # print the error to stderr because otherwise all we
                     # see in travis-ci is the 500 error
@@ -79,23 +79,23 @@ def do_patch():
                     raise Exception(err)
                 return res
 
-            if LoggingContext.current_context() != LoggingContext.sentinel:
+            if current_context():
                 err = (
                     "%s returned incomplete deferred in non-sentinel context "
                     "%s (start was %s)"
-                ) % (f, LoggingContext.current_context(), start_context)
+                ) % (f, current_context(), start_context)
                 print(err, file=sys.stderr)
                 raise Exception(err)
 
             def check_ctx(r):
-                if LoggingContext.current_context() != start_context:
+                if current_context() != start_context:
                     for err in changes:
                         print(err, file=sys.stderr)
                     err = "%s completion of %s changed context from %s to %s" % (
                         "Failure" if isinstance(r, Failure) else "Success",
                         f,
                         start_context,
-                        LoggingContext.current_context(),
+                        current_context(),
                     )
                     print(err, file=sys.stderr)
                     raise Exception(err)
@@ -127,7 +127,7 @@ def _check_yield_points(f: Callable, changes: List[str]):
         function
     """
 
-    from synapse.logging.context import LoggingContext
+    from synapse.logging.context import current_context
 
     @functools.wraps(f)
     def check_yield_points_inner(*args, **kwargs):
@@ -136,7 +136,7 @@ def _check_yield_points(f: Callable, changes: List[str]):
         last_yield_line_no = gen.gi_frame.f_lineno
         result = None  # type: Any
         while True:
-            expected_context = LoggingContext.current_context()
+            expected_context = current_context()
 
             try:
                 isFailure = isinstance(result, Failure)
@@ -145,7 +145,7 @@ def _check_yield_points(f: Callable, changes: List[str]):
                 else:
                     d = gen.send(result)
             except (StopIteration, defer._DefGen_Return) as e:
-                if LoggingContext.current_context() != expected_context:
+                if current_context() != expected_context:
                     # This happens when the context is lost sometime *after* the
                     # final yield and returning. E.g. we forgot to yield on a
                     # function that returns a deferred.
@@ -159,7 +159,7 @@ def _check_yield_points(f: Callable, changes: List[str]):
                         % (
                             f.__qualname__,
                             expected_context,
-                            LoggingContext.current_context(),
+                            current_context(),
                             f.__code__.co_filename,
                             last_yield_line_no,
                         )
@@ -173,13 +173,13 @@ def _check_yield_points(f: Callable, changes: List[str]):
                 # This happens if we yield on a deferred that doesn't follow
                 # the log context rules without wrapping in a `make_deferred_yieldable`.
                 # We raise here as this should never happen.
-                if LoggingContext.current_context() is not LoggingContext.sentinel:
+                if current_context():
                     err = (
                         "%s yielded with context %s rather than sentinel,"
                         " yielded on line %d in %s"
                         % (
                             frame.f_code.co_name,
-                            LoggingContext.current_context(),
+                            current_context(),
                             frame.f_lineno,
                             frame.f_code.co_filename,
                         )
@@ -191,7 +191,7 @@ def _check_yield_points(f: Callable, changes: List[str]):
             except Exception as e:
                 result = Failure(e)
 
-            if LoggingContext.current_context() != expected_context:
+            if current_context() != expected_context:
 
                 # This happens because the context is lost sometime *after* the
                 # previous yield and *after* the current yield. E.g. the
@@ -206,7 +206,7 @@ def _check_yield_points(f: Callable, changes: List[str]):
                     % (
                         frame.f_code.co_name,
                         expected_context,
-                        LoggingContext.current_context(),
+                        current_context(),
                         last_yield_line_no,
                         frame.f_lineno,
                         frame.f_code.co_filename,
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index 34d5895f18..70c8e72303 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -34,6 +34,7 @@ from synapse.crypto.keyring import (
 from synapse.logging.context import (
     LoggingContext,
     PreserveLoggingContext,
+    current_context,
     make_deferred_yieldable,
 )
 from synapse.storage.keys import FetchKeyResult
@@ -83,9 +84,7 @@ class KeyringTestCase(unittest.HomeserverTestCase):
         )
 
     def check_context(self, _, expected):
-        self.assertEquals(
-            getattr(LoggingContext.current_context(), "request", None), expected
-        )
+        self.assertEquals(getattr(current_context(), "request", None), expected)
 
     def test_verify_json_objects_for_server_awaits_previous_requests(self):
         key1 = signedjson.key.generate_signing_key(1)
@@ -105,7 +104,7 @@ class KeyringTestCase(unittest.HomeserverTestCase):
 
         @defer.inlineCallbacks
         def get_perspectives(**kwargs):
-            self.assertEquals(LoggingContext.current_context().request, "11")
+            self.assertEquals(current_context().request, "11")
             with PreserveLoggingContext():
                 yield persp_deferred
             return persp_resp
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
index fdc1d918ff..562397cdda 100644
--- a/tests/http/federation/test_matrix_federation_agent.py
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -38,7 +38,7 @@ from synapse.http.federation.well_known_resolver import (
     WellKnownResolver,
     _cache_period_from_headers,
 )
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context
 from synapse.util.caches.ttlcache import TTLCache
 
 from tests import unittest
@@ -155,7 +155,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
             self.assertNoResult(fetch_d)
 
             # should have reset logcontext to the sentinel
-            _check_logcontext(LoggingContext.sentinel)
+            _check_logcontext(SENTINEL_CONTEXT)
 
             try:
                 fetch_res = yield fetch_d
@@ -1197,7 +1197,7 @@ class TestCachePeriodFromHeaders(unittest.TestCase):
 
 
 def _check_logcontext(context):
-    current = LoggingContext.current_context()
+    current = current_context()
     if current is not context:
         raise AssertionError("Expected logcontext %s but was %s" % (context, current))
 
diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py
index df034ab237..babc201643 100644
--- a/tests/http/federation/test_srv_resolver.py
+++ b/tests/http/federation/test_srv_resolver.py
@@ -22,7 +22,7 @@ from twisted.internet.error import ConnectError
 from twisted.names import dns, error
 
 from synapse.http.federation.srv_resolver import SrvResolver
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context
 
 from tests import unittest
 from tests.utils import MockClock
@@ -54,12 +54,12 @@ class SrvResolverTestCase(unittest.TestCase):
                 self.assertNoResult(resolve_d)
 
                 # should have reset to the sentinel context
-                self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
+                self.assertIs(current_context(), SENTINEL_CONTEXT)
 
                 result = yield resolve_d
 
                 # should have restored our context
-                self.assertIs(LoggingContext.current_context(), ctx)
+                self.assertIs(current_context(), ctx)
 
                 return result
 
diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py
index 2b01f40a42..fff4f0cbf4 100644
--- a/tests/http/test_fedclient.py
+++ b/tests/http/test_fedclient.py
@@ -29,14 +29,14 @@ from synapse.http.matrixfederationclient import (
     MatrixFederationHttpClient,
     MatrixFederationRequest,
 )
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context
 
 from tests.server import FakeTransport
 from tests.unittest import HomeserverTestCase
 
 
 def check_logcontext(context):
-    current = LoggingContext.current_context()
+    current = current_context()
     if current is not context:
         raise AssertionError("Expected logcontext %s but was %s" % (context, current))
 
@@ -64,7 +64,7 @@ class FederationClientTests(HomeserverTestCase):
                 self.assertNoResult(fetch_d)
 
                 # should have reset logcontext to the sentinel
-                check_logcontext(LoggingContext.sentinel)
+                check_logcontext(SENTINEL_CONTEXT)
 
                 try:
                     fetch_res = yield fetch_d
diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py
index a3d7e3c046..171632e195 100644
--- a/tests/rest/client/test_transactions.py
+++ b/tests/rest/client/test_transactions.py
@@ -2,7 +2,7 @@ from mock import Mock, call
 
 from twisted.internet import defer, reactor
 
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context
 from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache
 from synapse.util import Clock
 
@@ -52,14 +52,14 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
         def test():
             with LoggingContext("c") as c1:
                 res = yield self.cache.fetch_or_execute(self.mock_key, cb)
-                self.assertIs(LoggingContext.current_context(), c1)
+                self.assertIs(current_context(), c1)
                 self.assertEqual(res, "yay")
 
         # run the test twice in parallel
         d = defer.gatherResults([test(), test()])
-        self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
+        self.assertIs(current_context(), SENTINEL_CONTEXT)
         yield d
-        self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
+        self.assertIs(current_context(), SENTINEL_CONTEXT)
 
     @defer.inlineCallbacks
     def test_does_not_cache_exceptions(self):
@@ -81,11 +81,11 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
                 yield self.cache.fetch_or_execute(self.mock_key, cb)
             except Exception as e:
                 self.assertEqual(e.args[0], "boo")
-            self.assertIs(LoggingContext.current_context(), test_context)
+            self.assertIs(current_context(), test_context)
 
             res = yield self.cache.fetch_or_execute(self.mock_key, cb)
             self.assertEqual(res, self.mock_http_response)
-            self.assertIs(LoggingContext.current_context(), test_context)
+            self.assertIs(current_context(), test_context)
 
     @defer.inlineCallbacks
     def test_does_not_cache_failures(self):
@@ -107,11 +107,11 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
                 yield self.cache.fetch_or_execute(self.mock_key, cb)
             except Exception as e:
                 self.assertEqual(e.args[0], "boo")
-            self.assertIs(LoggingContext.current_context(), test_context)
+            self.assertIs(current_context(), test_context)
 
             res = yield self.cache.fetch_or_execute(self.mock_key, cb)
             self.assertEqual(res, self.mock_http_response)
-            self.assertIs(LoggingContext.current_context(), test_context)
+            self.assertIs(current_context(), test_context)
 
     @defer.inlineCallbacks
     def test_cleans_up(self):
diff --git a/tests/unittest.py b/tests/unittest.py
index 8816a4d152..439174dbfc 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -38,7 +38,11 @@ from synapse.config.ratelimiting import FederationRateLimitConfig
 from synapse.federation.transport import server as federation_server
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseRequest, SynapseSite
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import (
+    SENTINEL_CONTEXT,
+    current_context,
+    set_current_context,
+)
 from synapse.server import HomeServer
 from synapse.types import Requester, UserID, create_requester
 from synapse.util.ratelimitutils import FederationRateLimiter
@@ -97,10 +101,10 @@ class TestCase(unittest.TestCase):
         def setUp(orig):
             # if we're not starting in the sentinel logcontext, then to be honest
             # all future bets are off.
-            if LoggingContext.current_context() is not LoggingContext.sentinel:
+            if current_context():
                 self.fail(
                     "Test starting with non-sentinel logging context %s"
-                    % (LoggingContext.current_context(),)
+                    % (current_context(),)
                 )
 
             old_level = logging.getLogger().level
@@ -122,7 +126,7 @@ class TestCase(unittest.TestCase):
             # force a GC to workaround problems with deferreds leaking logcontexts when
             # they are GCed (see the logcontext docs)
             gc.collect()
-            LoggingContext.set_current_context(LoggingContext.sentinel)
+            set_current_context(SENTINEL_CONTEXT)
 
             return ret
 
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 39e360fe24..4d2b9e0d64 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -22,8 +22,10 @@ from twisted.internet import defer, reactor
 
 from synapse.api.errors import SynapseError
 from synapse.logging.context import (
+    SENTINEL_CONTEXT,
     LoggingContext,
     PreserveLoggingContext,
+    current_context,
     make_deferred_yieldable,
 )
 from synapse.util.caches import descriptors
@@ -194,7 +196,7 @@ class DescriptorTestCase(unittest.TestCase):
             with LoggingContext() as c1:
                 c1.name = "c1"
                 r = yield obj.fn(1)
-                self.assertEqual(LoggingContext.current_context(), c1)
+                self.assertEqual(current_context(), c1)
             return r
 
         def check_result(r):
@@ -204,12 +206,12 @@ class DescriptorTestCase(unittest.TestCase):
 
         # set off a deferred which will do a cache lookup
         d1 = do_lookup()
-        self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
+        self.assertEqual(current_context(), SENTINEL_CONTEXT)
         d1.addCallback(check_result)
 
         # and another
         d2 = do_lookup()
-        self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
+        self.assertEqual(current_context(), SENTINEL_CONTEXT)
         d2.addCallback(check_result)
 
         # let the lookup complete
@@ -239,14 +241,14 @@ class DescriptorTestCase(unittest.TestCase):
                 try:
                     d = obj.fn(1)
                     self.assertEqual(
-                        LoggingContext.current_context(), LoggingContext.sentinel
+                        current_context(), SENTINEL_CONTEXT,
                     )
                     yield d
                     self.fail("No exception thrown")
                 except SynapseError:
                     pass
 
-                self.assertEqual(LoggingContext.current_context(), c1)
+                self.assertEqual(current_context(), c1)
 
             # the cache should now be empty
             self.assertEqual(len(obj.fn.cache.cache), 0)
@@ -255,7 +257,7 @@ class DescriptorTestCase(unittest.TestCase):
 
         # set off a deferred which will do a cache lookup
         d1 = do_lookup()
-        self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
+        self.assertEqual(current_context(), SENTINEL_CONTEXT)
 
         return d1
 
@@ -366,10 +368,10 @@ class CachedListDescriptorTestCase(unittest.TestCase):
 
             @descriptors.cachedList("fn", "args1", inlineCallbacks=True)
             def list_fn(self, args1, arg2):
-                assert LoggingContext.current_context().request == "c1"
+                assert current_context().request == "c1"
                 # we want this to behave like an asynchronous function
                 yield run_on_reactor()
-                assert LoggingContext.current_context().request == "c1"
+                assert current_context().request == "c1"
                 return self.mock(args1, arg2)
 
         with LoggingContext() as c1:
@@ -377,9 +379,9 @@ class CachedListDescriptorTestCase(unittest.TestCase):
             obj = Cls()
             obj.mock.return_value = {10: "fish", 20: "chips"}
             d1 = obj.list_fn([10, 20], 2)
-            self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
+            self.assertEqual(current_context(), SENTINEL_CONTEXT)
             r = yield d1
-            self.assertEqual(LoggingContext.current_context(), c1)
+            self.assertEqual(current_context(), c1)
             obj.mock.assert_called_once_with([10, 20], 2)
             self.assertEqual(r, {10: "fish", 20: "chips"})
             obj.mock.reset_mock()
diff --git a/tests/util/test_async_utils.py b/tests/util/test_async_utils.py
index f60918069a..17fd86d02d 100644
--- a/tests/util/test_async_utils.py
+++ b/tests/util/test_async_utils.py
@@ -16,7 +16,12 @@ from twisted.internet import defer
 from twisted.internet.defer import CancelledError, Deferred
 from twisted.internet.task import Clock
 
-from synapse.logging.context import LoggingContext, PreserveLoggingContext
+from synapse.logging.context import (
+    SENTINEL_CONTEXT,
+    LoggingContext,
+    PreserveLoggingContext,
+    current_context,
+)
 from synapse.util.async_helpers import timeout_deferred
 
 from tests.unittest import TestCase
@@ -79,10 +84,10 @@ class TimeoutDeferredTest(TestCase):
             # the errbacks should be run in the test logcontext
             def errback(res, deferred_name):
                 self.assertIs(
-                    LoggingContext.current_context(),
+                    current_context(),
                     context_one,
                     "errback %s run in unexpected logcontext %s"
-                    % (deferred_name, LoggingContext.current_context()),
+                    % (deferred_name, current_context()),
                 )
                 return res
 
@@ -90,7 +95,7 @@ class TimeoutDeferredTest(TestCase):
             original_deferred.addErrback(errback, "orig")
             timing_out_d = timeout_deferred(original_deferred, 1.0, self.clock)
             self.assertNoResult(timing_out_d)
-            self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
+            self.assertIs(current_context(), SENTINEL_CONTEXT)
             timing_out_d.addErrback(errback, "timingout")
 
             self.clock.pump((1.0,))
@@ -99,4 +104,4 @@ class TimeoutDeferredTest(TestCase):
                 blocking_was_cancelled[0], "non-completing deferred was not cancelled"
             )
             self.failureResultOf(timing_out_d, defer.TimeoutError)
-            self.assertIs(LoggingContext.current_context(), context_one)
+            self.assertIs(current_context(), context_one)
diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py
index 0ec8ef90ce..852ef23185 100644
--- a/tests/util/test_linearizer.py
+++ b/tests/util/test_linearizer.py
@@ -19,7 +19,7 @@ from six.moves import range
 from twisted.internet import defer, reactor
 from twisted.internet.defer import CancelledError
 
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import LoggingContext, current_context
 from synapse.util import Clock
 from synapse.util.async_helpers import Linearizer
 
@@ -54,11 +54,11 @@ class LinearizerTestCase(unittest.TestCase):
         def func(i, sleep=False):
             with LoggingContext("func(%s)" % i) as lc:
                 with (yield linearizer.queue("")):
-                    self.assertEqual(LoggingContext.current_context(), lc)
+                    self.assertEqual(current_context(), lc)
                     if sleep:
                         yield Clock(reactor).sleep(0)
 
-                self.assertEqual(LoggingContext.current_context(), lc)
+                self.assertEqual(current_context(), lc)
 
         func(0, sleep=True)
         for i in range(1, 100):
diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py
index 281b32c4b8..95301c013c 100644
--- a/tests/util/test_logcontext.py
+++ b/tests/util/test_logcontext.py
@@ -2,8 +2,10 @@ import twisted.python.failure
 from twisted.internet import defer, reactor
 
 from synapse.logging.context import (
+    SENTINEL_CONTEXT,
     LoggingContext,
     PreserveLoggingContext,
+    current_context,
     make_deferred_yieldable,
     nested_logging_context,
     run_in_background,
@@ -15,7 +17,7 @@ from .. import unittest
 
 class LoggingContextTestCase(unittest.TestCase):
     def _check_test_key(self, value):
-        self.assertEquals(LoggingContext.current_context().request, value)
+        self.assertEquals(current_context().request, value)
 
     def test_with_context(self):
         with LoggingContext() as context_one:
@@ -41,7 +43,7 @@ class LoggingContextTestCase(unittest.TestCase):
             self._check_test_key("one")
 
     def _test_run_in_background(self, function):
-        sentinel_context = LoggingContext.current_context()
+        sentinel_context = current_context()
 
         callback_completed = [False]
 
@@ -71,7 +73,7 @@ class LoggingContextTestCase(unittest.TestCase):
             # make sure that the context was reset before it got thrown back
             # into the reactor
             try:
-                self.assertIs(LoggingContext.current_context(), sentinel_context)
+                self.assertIs(current_context(), sentinel_context)
                 d2.callback(None)
             except BaseException:
                 d2.errback(twisted.python.failure.Failure())
@@ -108,7 +110,7 @@ class LoggingContextTestCase(unittest.TestCase):
         async def testfunc():
             self._check_test_key("one")
             d = Clock(reactor).sleep(0)
-            self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
+            self.assertIs(current_context(), SENTINEL_CONTEXT)
             await d
             self._check_test_key("one")
 
@@ -129,14 +131,14 @@ class LoggingContextTestCase(unittest.TestCase):
             reactor.callLater(0, d.callback, None)
             return d
 
-        sentinel_context = LoggingContext.current_context()
+        sentinel_context = current_context()
 
         with LoggingContext() as context_one:
             context_one.request = "one"
 
             d1 = make_deferred_yieldable(blocking_function())
             # make sure that the context was reset by make_deferred_yieldable
-            self.assertIs(LoggingContext.current_context(), sentinel_context)
+            self.assertIs(current_context(), sentinel_context)
 
             yield d1
 
@@ -145,14 +147,14 @@ class LoggingContextTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_make_deferred_yieldable_with_chained_deferreds(self):
-        sentinel_context = LoggingContext.current_context()
+        sentinel_context = current_context()
 
         with LoggingContext() as context_one:
             context_one.request = "one"
 
             d1 = make_deferred_yieldable(_chained_deferred_function())
             # make sure that the context was reset by make_deferred_yieldable
-            self.assertIs(LoggingContext.current_context(), sentinel_context)
+            self.assertIs(current_context(), sentinel_context)
 
             yield d1
 
@@ -189,14 +191,14 @@ class LoggingContextTestCase(unittest.TestCase):
             reactor.callLater(0, d.callback, None)
             await d
 
-        sentinel_context = LoggingContext.current_context()
+        sentinel_context = current_context()
 
         with LoggingContext() as context_one:
             context_one.request = "one"
 
             d1 = make_deferred_yieldable(blocking_function())
             # make sure that the context was reset by make_deferred_yieldable
-            self.assertIs(LoggingContext.current_context(), sentinel_context)
+            self.assertIs(current_context(), sentinel_context)
 
             yield d1
 
diff --git a/tests/utils.py b/tests/utils.py
index 513f358f4f..968d109f77 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -35,7 +35,7 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.server import DEFAULT_ROOM_VERSION
 from synapse.federation.transport import server as federation_server
 from synapse.http.server import HttpServer
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import current_context, set_current_context
 from synapse.server import HomeServer
 from synapse.storage import DataStore
 from synapse.storage.engines import PostgresEngine, create_engine
@@ -493,10 +493,10 @@ class MockClock(object):
         return self.time() * 1000
 
     def call_later(self, delay, callback, *args, **kwargs):
-        current_context = LoggingContext.current_context()
+        ctx = current_context()
 
         def wrapped_callback():
-            LoggingContext.thread_local.current_context = current_context
+            set_current_context(ctx)
             callback(*args, **kwargs)
 
         t = [self.now + delay, wrapped_callback, False]
-- 
cgit 1.4.1


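The commit above is a mechanical rename: the class-level API (`LoggingContext.current_context()`, `LoggingContext.sentinel`, `LoggingContext.set_current_context(...)`) becomes a set of module-level helpers (`current_context()`, `SENTINEL_CONTEXT`, `set_current_context()`), and the explicit sentinel comparisons collapse to plain truth tests because the sentinel context is falsey. A minimal sketch of the new calling convention follows; `record_fetch_in_current_context` and `call_later_preserving_context` are hypothetical wrappers written for illustration, not functions from the patch.

```python
from synapse.logging.context import current_context, set_current_context


def record_fetch_in_current_context(n_events: int) -> None:
    # New style: a module-level call, where the old code used
    # `LoggingContext.current_context()`.
    ctx = current_context()

    # The sentinel context is falsey, so the old
    # `ctx is LoggingContext.sentinel` checks become plain truth tests.
    if not ctx:
        return  # sentinel context: nothing to account this fetch against

    ctx.record_event_fetch(n_events)


def call_later_preserving_context(reactor, delay, callback, *args, **kwargs):
    # Capture the caller's logcontext and reinstate it inside the reactor
    # callback, mirroring what MockClock.call_later does after this change.
    ctx = current_context()

    def wrapped_callback():
        set_current_context(ctx)
        callback(*args, **kwargs)

    return reactor.callLater(delay, wrapped_callback)
```
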
From 7bab642707ecd985ebd736af890f4bfe2c3232fe Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 25 Mar 2020 13:56:40 +0000
Subject: Various cleanups to INSTALL.md (#7141)

---
 INSTALL.md           | 98 +++++++++++++++++++++-------------------------------
 changelog.d/7141.doc |  1 +
 2 files changed, 40 insertions(+), 59 deletions(-)
 create mode 100644 changelog.d/7141.doc

(limited to 'changelog.d')

diff --git a/INSTALL.md b/INSTALL.md
index f9e13b4cf6..af9a5ef439 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -2,7 +2,6 @@
 - [Installing Synapse](#installing-synapse)
   - [Installing from source](#installing-from-source)
     - [Platform-Specific Instructions](#platform-specific-instructions)
-    - [Troubleshooting Installation](#troubleshooting-installation)
   - [Prebuilt packages](#prebuilt-packages)
 - [Setting up Synapse](#setting-up-synapse)
   - [TLS certificates](#tls-certificates)
@@ -10,6 +9,7 @@
   - [Registering a user](#registering-a-user)
   - [Setting up a TURN server](#setting-up-a-turn-server)
   - [URL previews](#url-previews)
+- [Troubleshooting Installation](#troubleshooting-installation)
 
 # Choosing your server name
 
@@ -70,7 +70,7 @@ pip install -U matrix-synapse
 ```
 
 Before you can start Synapse, you will need to generate a configuration
-file. To do this, run (in your virtualenv, as before)::
+file. To do this, run (in your virtualenv, as before):
 
 ```
 cd ~/synapse
@@ -84,22 +84,24 @@ python -m synapse.app.homeserver \
 ... substituting an appropriate value for `--server-name`.
 
 This command will generate you a config file that you can then customise, but it will
-also generate a set of keys for you. These keys will allow your Home Server to
-identify itself to other Home Servers, so don't lose or delete them. It would be
+also generate a set of keys for you. These keys will allow your homeserver to
+identify itself to other homeservers, so don't lose or delete them. It would be
 wise to back them up somewhere safe. (If, for whatever reason, you do need to
-change your Home Server's keys, you may find that other Home Servers have the
+change your homeserver's keys, you may find that other homeservers have the
 old key cached. If you update the signing key, you should change the name of the
 key in the `.signing.key` file (the second word) to something
 different. See the
 [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys)
-for more information on key management.)
+for more information on key management).
 
 To actually run your new homeserver, pick a working directory for Synapse to
-run (e.g. `~/synapse`), and::
+run (e.g. `~/synapse`), and:
 
-    cd ~/synapse
-    source env/bin/activate
-    synctl start
+```
+cd ~/synapse
+source env/bin/activate
+synctl start
+```
 
 ### Platform-Specific Instructions
 
@@ -188,7 +190,7 @@ doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
 There is currently no port for OpenBSD. Additionally, OpenBSD's security
 settings require a slightly more difficult installation process.
 
-XXX: I suspect this is out of date.
+(XXX: I suspect this is out of date)
 
 1. Create a new directory in `/usr/local` called `_synapse`. Also, create a
    new user called `_synapse` and set that directory as the new user's home.
@@ -196,7 +198,7 @@ XXX: I suspect this is out of date.
    write and execute permissions on the same memory space to be run from
    `/usr/local`.
 2. `su` to the new `_synapse` user and change to their home directory.
-3. Create a new virtualenv: `virtualenv -p python2.7 ~/.synapse`
+3. Create a new virtualenv: `virtualenv -p python3 ~/.synapse`
 4. Source the virtualenv configuration located at
    `/usr/local/_synapse/.synapse/bin/activate`. This is done in `ksh` by
    using the `.` command, rather than `bash`'s `source`.
@@ -217,45 +219,6 @@ be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
 Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
 for Windows Server.
 
-### Troubleshooting Installation
-
-XXX a bunch of this is no longer relevant.
-
-Synapse requires pip 8 or later, so if your OS provides too old a version you
-may need to manually upgrade it::
-
-    sudo pip install --upgrade pip
-
-Installing may fail with `Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)`.
-You can fix this by manually upgrading pip and virtualenv::
-
-    sudo pip install --upgrade virtualenv
-
-You can next rerun `virtualenv -p python3 synapse` to update the virtual env.
-
-Installing may fail during installing virtualenv with `InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.`
-You can fix this  by manually installing ndg-httpsclient::
-
-    pip install --upgrade ndg-httpsclient
-
-Installing may fail with `mock requires setuptools>=17.1. Aborting installation`.
-You can fix this by upgrading setuptools::
-
-    pip install --upgrade setuptools
-
-If pip crashes mid-installation for reason (e.g. lost terminal), pip may
-refuse to run until you remove the temporary installation directory it
-created. To reset the installation::
-
-    rm -rf /tmp/pip_install_matrix
-
-pip seems to leak *lots* of memory during installation.  For instance, a Linux
-host with 512MB of RAM may run out of memory whilst installing Twisted.  If this
-happens, you will have to individually install the dependencies which are
-failing, e.g.::
-
-    pip install twisted
-
 ## Prebuilt packages
 
 As an alternative to installing from source, prebuilt packages are available
@@ -314,7 +277,7 @@ For `buster` and `sid`, Synapse is available in the Debian repositories and
 it should be possible to install it with simply:
 
 ```
-    sudo apt install matrix-synapse
+sudo apt install matrix-synapse
 ```
 
 There is also a version of `matrix-synapse` in `stretch-backports`. Please see
@@ -375,8 +338,10 @@ sudo pip install py-bcrypt
 
 Synapse can be found in the void repositories as 'synapse':
 
-    xbps-install -Su
-    xbps-install -S synapse
+```
+xbps-install -Su
+xbps-install -S synapse
+```
 
 ### FreeBSD
 
@@ -420,6 +385,7 @@ so, you will need to edit `homeserver.yaml`, as follows:
       resources:
         - names: [client, federation]
   ```
+
 * You will also need to uncomment the `tls_certificate_path` and
   `tls_private_key_path` lines under the `TLS` section. You can either
   point these settings at an existing certificate and key, or you can
@@ -435,7 +401,7 @@ so, you will need to edit `homeserver.yaml`, as follows:
   `cert.pem`).
 
 For a more detailed guide to configuring your server for federation, see
-[federate.md](docs/federate.md)
+[federate.md](docs/federate.md).
 
 
 ## Email
@@ -482,7 +448,7 @@ on your server even if `enable_registration` is `false`.
 ## Setting up a TURN server
 
 For reliable VoIP calls to be routed via this homeserver, you MUST configure
-a TURN server.  See [docs/turn-howto.md](docs/turn-howto.md) for details.
+a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.
 
 ## URL previews
 
@@ -491,10 +457,24 @@ turn it on you must enable the `url_preview_enabled: True` config parameter
 and explicitly specify the IP ranges that Synapse is not allowed to spider for
 previewing in the `url_preview_ip_range_blacklist` configuration parameter.
 This is critical from a security perspective to stop arbitrary Matrix users
-spidering 'internal' URLs on your network.  At the very least we recommend that
+spidering 'internal' URLs on your network. At the very least we recommend that
 your loopback and RFC1918 IP addresses are blacklisted.
 
-This also requires the optional lxml and netaddr python dependencies to be
-installed.  This in turn requires the libxml2 library to be available - on
+This also requires the optional `lxml` and `netaddr` python dependencies to be
+installed. This in turn requires the `libxml2` library to be available - on
 Debian/Ubuntu this means `apt-get install libxml2-dev`, or equivalent for
 your OS.
+
+# Troubleshooting Installation
+
+`pip` seems to leak *lots* of memory during installation. For instance, a Linux
+host with 512MB of RAM may run out of memory whilst installing Twisted. If this
+happens, you will have to individually install the dependencies which are
+failing, e.g.:
+
+```
+pip install twisted
+```
+
+If you have any other problems, feel free to ask in
+[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org).
diff --git a/changelog.d/7141.doc b/changelog.d/7141.doc
new file mode 100644
index 0000000000..2fcbd666c2
--- /dev/null
+++ b/changelog.d/7141.doc
@@ -0,0 +1 @@
+Clean up INSTALL.md a bit.
\ No newline at end of file
-- 
cgit 1.4.1


From 4cff617df1ba6f241fee6957cc44859f57edcc0e Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Wed, 25 Mar 2020 14:54:01 +0000
Subject: Move catchup of replication streams to worker. (#7024)

This changes the replication protocol so that the server does not send down `RDATA` for rows that happened before the client connected. Instead, the server will send a `POSITION` and clients then query the database (or master out of band) to get up to date.
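
For readers new to the protocol, a minimal sketch of the client-side flow this commit introduces is below. The wire commands (`REPLICATE`, `POSITION`, `RDATA`) match `docs/tcp_replication.md` as updated in this patch; `send_command`, `fetch_updates` and `process_rows` are hypothetical stand-ins for the transport and storage layers, not code from the patch itself.

```python
current_tokens = {}  # stream name -> last token this client has processed


def on_connection_established(send_command):
    # A single bare REPLICATE replaces the old per-stream
    # "REPLICATE <stream_name> <token>" subscriptions.
    send_command("REPLICATE")


async def on_position(stream_name, server_token, fetch_updates, process_rows):
    # The server answers REPLICATE with a POSITION per stream. If our local
    # token lags the server's, we have missed rows and must fetch them out
    # of band (e.g. via the new get_repl_stream_updates HTTP endpoint)
    # rather than expect the server to replay them as RDATA.
    local_token = current_tokens.get(stream_name, server_token)
    while local_token < server_token:
        updates, upto_token, limited = await fetch_updates(
            stream_name, from_token=local_token, upto_token=server_token
        )
        process_rows(stream_name, updates)
        local_token = upto_token
        if not limited:
            break
    current_tokens[stream_name] = local_token
```
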
---
 changelog.d/7024.misc                             |   1 +
 docs/tcp_replication.md                           |  46 ++---
 synapse/app/generic_worker.py                     |   3 +
 synapse/federation/sender/__init__.py             |   9 +
 synapse/replication/http/__init__.py              |   2 +
 synapse/replication/http/streams.py               |  78 ++++++++
 synapse/replication/slave/storage/_base.py        |  14 +-
 synapse/replication/slave/storage/pushers.py      |   3 +
 synapse/replication/tcp/client.py                 |   3 +-
 synapse/replication/tcp/commands.py               |  34 +---
 synapse/replication/tcp/protocol.py               | 206 ++++++++--------------
 synapse/replication/tcp/resource.py               |  19 +-
 synapse/replication/tcp/streams/__init__.py       |   8 +-
 synapse/replication/tcp/streams/_base.py          | 160 +++++++++++------
 synapse/replication/tcp/streams/events.py         |   5 +-
 synapse/replication/tcp/streams/federation.py     |  19 +-
 synapse/server.py                                 |   5 +
 synapse/storage/data_stores/main/cache.py         |  44 ++---
 synapse/storage/data_stores/main/deviceinbox.py   |  88 ++++-----
 synapse/storage/data_stores/main/events.py        | 114 ------------
 synapse/storage/data_stores/main/events_worker.py | 114 ++++++++++++
 synapse/storage/data_stores/main/room.py          |  40 ++---
 tests/replication/tcp/streams/_base.py            |  55 ++++--
 tests/replication/tcp/streams/test_receipts.py    |  52 +++++-
 24 files changed, 635 insertions(+), 487 deletions(-)
 create mode 100644 changelog.d/7024.misc
 create mode 100644 synapse/replication/http/streams.py

(limited to 'changelog.d')

diff --git a/changelog.d/7024.misc b/changelog.d/7024.misc
new file mode 100644
index 0000000000..676f285377
--- /dev/null
+++ b/changelog.d/7024.misc
@@ -0,0 +1 @@
+Move catchup of replication streams logic to worker.
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
index e3a4634b14..d4f7d9ec18 100644
--- a/docs/tcp_replication.md
+++ b/docs/tcp_replication.md
@@ -14,16 +14,16 @@ example flow would be (where '>' indicates master to worker and
 '<' worker to master flows):
 
     > SERVER example.com
-    < REPLICATE events 53
+    < REPLICATE
+    > POSITION events 53
     > RDATA events 54 ["$foo1:bar.com", ...]
     > RDATA events 55 ["$foo4:bar.com", ...]
 
-The example shows the server accepting a new connection and sending its
-identity with the `SERVER` command, followed by the client asking to
-subscribe to the `events` stream from the token `53`. The server then
-periodically sends `RDATA` commands which have the format
-`RDATA <stream_name> <token> <row>`, where the format of `<row>` is
-defined by the individual streams.
+The example shows the server accepting a new connection and sending its identity
+with the `SERVER` command, followed by the client asking the server to respond
+with the position of all streams. The server then periodically sends `RDATA`
+commands which have the format `RDATA <stream_name> <token> <row>`, where the
+format of `<row>` is defined by the individual streams.
 
 Error reporting happens by either the client or server sending an ERROR
 command, and usually the connection will be closed.
@@ -32,9 +32,6 @@ Since the protocol is a simple line based, its possible to manually
 connect to the server using a tool like netcat. A few things should be
 noted when manually using the protocol:
 
--   When subscribing to a stream using `REPLICATE`, the special token
-    `NOW` can be used to get all future updates. The special stream name
-    `ALL` can be used with `NOW` to subscribe to all available streams.
 -   The federation stream is only available if federation sending has
     been disabled on the main process.
 -   The server will only time connections out that have sent a `PING`
@@ -91,9 +88,7 @@ The client:
 -   Sends a `NAME` command, allowing the server to associate a human
     friendly name with the connection. This is optional.
 -   Sends a `PING` as above
--   For each stream the client wishes to subscribe to it sends a
-    `REPLICATE` with the `stream_name` and token it wants to subscribe
-    from.
+-   Sends a `REPLICATE` to get the current position of all streams.
 -   On receipt of a `SERVER` command, checks that the server name
     matches the expected server name.
 
@@ -140,9 +135,7 @@ the wire:
     > PING 1490197665618
     < NAME synapse.app.appservice
     < PING 1490197665618
-    < REPLICATE events 1
-    < REPLICATE backfill 1
-    < REPLICATE caches 1
+    < REPLICATE
     > POSITION events 1
     > POSITION backfill 1
     > POSITION caches 1
@@ -181,9 +174,9 @@ client (C):
 
 #### POSITION (S)
 
-   The position of the stream has been updated. Sent to the client
-    after all missing updates for a stream have been sent to the client
-    and they're now up to date.
+   On receipt of a POSITION command clients should check if they have missed any
+   updates, and if so then fetch them out of band. Sent in response to a
+   REPLICATE command (but can happen at any time).
 
 #### ERROR (S, C)
 
@@ -199,20 +192,7 @@ client (C):
 
 #### REPLICATE (C)
 
-Asks the server to replicate a given stream. The syntax is:
-
-```
-    REPLICATE <stream_name> <token>
-```
-
-Where `<token>` may be either:
- * a numeric stream_id to stream updates since (exclusive)
- * `NOW` to stream all subsequent updates.
-
-The `<stream_name>` is the name of a replication stream to subscribe
-to (see [here](../synapse/replication/tcp/streams/_base.py) for a list
-of streams). It can also be `ALL` to subscribe to all known streams,
-in which case the `<token>` must be set to `NOW`.
+Asks the server for the current position of all streams.
 
 #### USER_SYNC (C)
 
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index bd1733573b..fba7ad9551 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -401,6 +401,9 @@ class GenericWorkerTyping(object):
             self._room_serials[row.room_id] = token
             self._room_typing[row.room_id] = row.user_ids
 
+    def get_current_token(self) -> int:
+        return self._latest_room_serial
+
 
 class GenericWorkerSlavedStore(
     # FIXME(#3714): We need to add UserDirectoryStore as we write directly
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 233cb33daf..a477578e44 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -499,4 +499,13 @@ class FederationSender(object):
         self._get_per_destination_queue(destination).attempt_new_transaction()
 
     def get_current_token(self) -> int:
+        # Dummy implementation for the case where federation sender isn't offloaded
+        # to a worker.
         return 0
+
+    async def get_replication_rows(
+        self, from_token, to_token, limit, federation_ack=None
+    ):
+        # Dummy implementation for the case where federation sender isn't offloaded
+        # to a worker.
+        return []
diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py
index 28dbc6fcba..4613b2538c 100644
--- a/synapse/replication/http/__init__.py
+++ b/synapse/replication/http/__init__.py
@@ -21,6 +21,7 @@ from synapse.replication.http import (
     membership,
     register,
     send_event,
+    streams,
 )
 
 REPLICATION_PREFIX = "/_synapse/replication"
@@ -38,3 +39,4 @@ class ReplicationRestResource(JsonResource):
         login.register_servlets(hs, self)
         register.register_servlets(hs, self)
         devices.register_servlets(hs, self)
+        streams.register_servlets(hs, self)
diff --git a/synapse/replication/http/streams.py b/synapse/replication/http/streams.py
new file mode 100644
index 0000000000..ffd4c61993
--- /dev/null
+++ b/synapse/replication/http/streams.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import parse_integer
+from synapse.replication.http._base import ReplicationEndpoint
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationGetStreamUpdates(ReplicationEndpoint):
+    """Fetches stream updates from a server. Used for streams not persisted to
+    the database, e.g. typing notifications.
+
+    The API looks like:
+
+        GET /_synapse/replication/get_repl_stream_updates/events?from_token=0&upto_token=10&limit=100
+
+        200 OK
+
+        {
+            updates: [ ... ],
+            upto_token: 10,
+            limited: False,
+        }
+
+    """
+
+    NAME = "get_repl_stream_updates"
+    PATH_ARGS = ("stream_name",)
+    METHOD = "GET"
+
+    def __init__(self, hs):
+        super().__init__(hs)
+
+        # We pull the streams from the replication streamer (if we try to make
+        # them ourselves we end up in an import loop).
+        self.streams = hs.get_replication_streamer().get_streams()
+
+    @staticmethod
+    def _serialize_payload(stream_name, from_token, upto_token, limit):
+        return {"from_token": from_token, "upto_token": upto_token, "limit": limit}
+
+    async def _handle_request(self, request, stream_name):
+        stream = self.streams.get(stream_name)
+        if stream is None:
+            raise SynapseError(400, "Unknown stream")
+
+        from_token = parse_integer(request, "from_token", required=True)
+        upto_token = parse_integer(request, "upto_token", required=True)
+        limit = parse_integer(request, "limit", required=True)
+
+        updates, upto_token, limited = await stream.get_updates_since(
+            from_token, upto_token, limit
+        )
+
+        return (
+            200,
+            {"updates": updates, "upto_token": upto_token, "limited": limited},
+        )
+
+
+def register_servlets(hs, http_server):
+    ReplicationGetStreamUpdates(hs).register(http_server)
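
A hedged sketch of driving this endpoint from a worker follows. `ReplicationEndpoint.make_client` is the standard client-side helper for these endpoints; `catch_up` and its `apply_updates` callback are illustrative inventions, not part of this patch.

```python
client = ReplicationGetStreamUpdates.make_client(hs)


async def catch_up(stream_name, from_token, upto_token, apply_updates, limit=100):
    """Page through missed updates until the server reports `limited: False`."""
    while True:
        result = await client(
            stream_name=stream_name,
            from_token=from_token,
            upto_token=upto_token,
            limit=limit,
        )
        apply_updates(result["updates"])
        from_token = result["upto_token"]
        if not result["limited"]:
            return from_token
```
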
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index f45cbd37a0..751c799d94 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -18,8 +18,10 @@ from typing import Dict, Optional
 
 import six
 
-from synapse.storage._base import SQLBaseStore
-from synapse.storage.data_stores.main.cache import CURRENT_STATE_CACHE_NAME
+from synapse.storage.data_stores.main.cache import (
+    CURRENT_STATE_CACHE_NAME,
+    CacheInvalidationWorkerStore,
+)
 from synapse.storage.database import Database
 from synapse.storage.engines import PostgresEngine
 
@@ -35,7 +37,7 @@ def __func__(inp):
         return inp.__func__
 
 
-class BaseSlavedStore(SQLBaseStore):
+class BaseSlavedStore(CacheInvalidationWorkerStore):
     def __init__(self, database: Database, db_conn, hs):
         super(BaseSlavedStore, self).__init__(database, db_conn, hs)
         if isinstance(self.database_engine, PostgresEngine):
@@ -60,6 +62,12 @@ class BaseSlavedStore(SQLBaseStore):
             pos["caches"] = self._cache_id_gen.get_current_token()
         return pos
 
+    def get_cache_stream_token(self):
+        if self._cache_id_gen:
+            return self._cache_id_gen.get_current_token()
+        else:
+            return 0
+
     def process_replication_rows(self, stream_name, token, rows):
         if stream_name == "caches":
             if self._cache_id_gen:
diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py
index f22c2d44a3..bce8a3d115 100644
--- a/synapse/replication/slave/storage/pushers.py
+++ b/synapse/replication/slave/storage/pushers.py
@@ -33,6 +33,9 @@ class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore):
         result["pushers"] = self._pushers_id_gen.get_current_token()
         return result
 
+    def get_pushers_stream_token(self):
+        return self._pushers_id_gen.get_current_token()
+
     def process_replication_rows(self, stream_name, token, rows):
         if stream_name == "pushers":
             self._pushers_id_gen.advance(token)
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 02ab5b66ea..7e7ad0f798 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -55,6 +55,7 @@ class ReplicationClientFactory(ReconnectingClientFactory):
         self.client_name = client_name
         self.handler = handler
         self.server_name = hs.config.server_name
+        self.hs = hs
         self._clock = hs.get_clock()  # As self.clock is defined in super class
 
         hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.stopTrying)
@@ -65,7 +66,7 @@ class ReplicationClientFactory(ReconnectingClientFactory):
     def buildProtocol(self, addr):
         logger.info("Connected to replication: %r", addr)
         return ClientReplicationStreamProtocol(
-            self.client_name, self.server_name, self._clock, self.handler
+            self.hs, self.client_name, self.server_name, self._clock, self.handler,
         )
 
     def clientConnectionLost(self, connector, reason):
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 451671412d..5a6b734094 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -136,8 +136,8 @@ class PositionCommand(Command):
     """Sent by the server to tell the client the stream postition without
     needing to send an RDATA.
 
-    Sent to the client after all missing updates for a stream have been sent
-    to the client and they're now up to date.
+    On receipt of a POSITION command clients should check if they have missed
+    any updates, and if so then fetch them out of band.
     """
 
     NAME = "POSITION"
@@ -179,42 +179,24 @@ class NameCommand(Command):
 
 
 class ReplicateCommand(Command):
-    """Sent by the client to subscribe to the stream.
+    """Sent by the client to subscribe to streams.
 
     Format::
 
-        REPLICATE <stream_name> <token>
-
-    Where <token> may be either:
-        * a numeric stream_id to stream updates from
-        * "NOW" to stream all subsequent updates.
-
-    The <stream_name> can be "ALL" to subscribe to all known streams, in which
-    case the <token> must be set to "NOW", i.e.::
-
-        REPLICATE ALL NOW
+        REPLICATE
     """
 
     NAME = "REPLICATE"
 
-    def __init__(self, stream_name, token):
-        self.stream_name = stream_name
-        self.token = token
+    def __init__(self):
+        pass
 
     @classmethod
     def from_line(cls, line):
-        stream_name, token = line.split(" ", 1)
-        if token in ("NOW", "now"):
-            token = "NOW"
-        else:
-            token = int(token)
-        return cls(stream_name, token)
+        return cls()
 
     def to_line(self):
-        return " ".join((self.stream_name, str(self.token)))
-
-    def get_logcontext_id(self):
-        return "REPLICATE-" + self.stream_name
+        return ""
 
 
 class UserSyncCommand(Command):
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index bc1482a9bb..f81d2e2442 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -35,9 +35,7 @@ indicate which side is sending, these are *not* included on the wire::
     > PING 1490197665618
     < NAME synapse.app.appservice
     < PING 1490197665618
-    < REPLICATE events 1
-    < REPLICATE backfill 1
-    < REPLICATE caches 1
+    < REPLICATE
     > POSITION events 1
     > POSITION backfill 1
     > POSITION caches 1
@@ -53,17 +51,15 @@ import fcntl
 import logging
 import struct
 from collections import defaultdict
-from typing import Any, DefaultDict, Dict, List, Set, Tuple
+from typing import Any, DefaultDict, Dict, List, Set
 
-from six import iteritems, iterkeys
+from six import iteritems
 
 from prometheus_client import Counter
 
-from twisted.internet import defer
 from twisted.protocols.basic import LineOnlyReceiver
 from twisted.python.failure import Failure
 
-from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.tcp.commands import (
@@ -82,11 +78,16 @@ from synapse.replication.tcp.commands import (
     SyncCommand,
     UserSyncCommand,
 )
-from synapse.replication.tcp.streams import STREAMS_MAP
+from synapse.replication.tcp.streams import STREAMS_MAP, Stream
 from synapse.types import Collection
 from synapse.util import Clock
 from synapse.util.stringutils import random_string
 
+MYPY = False
+if MYPY:
+    from synapse.server import HomeServer
+
+
 connection_close_counter = Counter(
     "synapse_replication_tcp_protocol_close_reason", "", ["reason_type"]
 )
@@ -411,16 +412,6 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
         self.server_name = server_name
         self.streamer = streamer
 
-        # The streams the client has subscribed to and is up to date with
-        self.replication_streams = set()  # type: Set[str]
-
-        # The streams the client is currently subscribing to.
-        self.connecting_streams = set()  # type:  Set[str]
-
-        # Map from stream name to list of updates to send once we've finished
-        # subscribing the client to the stream.
-        self.pending_rdata = {}  # type: Dict[str, List[Tuple[int, Any]]]
-
     def connectionMade(self):
         self.send_command(ServerCommand(self.server_name))
         BaseReplicationStreamProtocol.connectionMade(self)
@@ -436,21 +427,10 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
         )
 
     async def on_REPLICATE(self, cmd):
-        stream_name = cmd.stream_name
-        token = cmd.token
-
-        if stream_name == "ALL":
-            # Subscribe to all streams we're publishing to.
-            deferreds = [
-                run_in_background(self.subscribe_to_stream, stream, token)
-                for stream in iterkeys(self.streamer.streams_by_name)
-            ]
-
-            await make_deferred_yieldable(
-                defer.gatherResults(deferreds, consumeErrors=True)
-            )
-        else:
-            await self.subscribe_to_stream(stream_name, token)
+        # Subscribe to all streams we're publishing to.
+        for stream_name in self.streamer.streams_by_name:
+            current_token = self.streamer.get_stream_token(stream_name)
+            self.send_command(PositionCommand(stream_name, current_token))
 
     async def on_FEDERATION_ACK(self, cmd):
         self.streamer.federation_ack(cmd.token)
@@ -474,87 +454,12 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
             cmd.last_seen,
         )
 
-    async def subscribe_to_stream(self, stream_name, token):
-        """Subscribe the remote to a stream.
-
-        This involves checking if they've missed anything and sending those
-        updates down if they have. During that time new updates for the stream
-        are queued and sent once we've sent down any missed updates.
-        """
-        self.replication_streams.discard(stream_name)
-        self.connecting_streams.add(stream_name)
-
-        try:
-            # Get missing updates
-            updates, current_token = await self.streamer.get_stream_updates(
-                stream_name, token
-            )
-
-            # Send all the missing updates
-            for update in updates:
-                token, row = update[0], update[1]
-                self.send_command(RdataCommand(stream_name, token, row))
-
-            # We send a POSITION command to ensure that they have an up to
-            # date token (especially useful if we didn't send any updates
-            # above)
-            self.send_command(PositionCommand(stream_name, current_token))
-
-            # Now we can send any updates that came in while we were subscribing
-            pending_rdata = self.pending_rdata.pop(stream_name, [])
-            updates = []
-            for token, update in pending_rdata:
-                # If the token is null, it is part of a batch update. Batches
-                # are multiple updates that share a single token. To denote
-                # this, the token is set to None for all tokens in the batch
-                # except for the last. If we find a None token, we keep looking
-                # through tokens until we find one that is not None and then
-                # process all previous updates in the batch as if they had the
-                # final token.
-                if token is None:
-                    # Store this update as part of a batch
-                    updates.append(update)
-                    continue
-
-                if token <= current_token:
-                    # This update or batch of updates is older than
-                    # current_token, dismiss it
-                    updates = []
-                    continue
-
-                updates.append(update)
-
-                # Send all updates that are part of this batch with the
-                # found token
-                for update in updates:
-                    self.send_command(RdataCommand(stream_name, token, update))
-
-                # Clear stored updates
-                updates = []
-
-            # They're now fully subscribed
-            self.replication_streams.add(stream_name)
-        except Exception as e:
-            logger.exception("[%s] Failed to handle REPLICATE command", self.id())
-            self.send_error("failed to handle replicate: %r", e)
-        finally:
-            self.connecting_streams.discard(stream_name)
-
     def stream_update(self, stream_name, token, data):
         """Called when a new update is available to stream to clients.
 
         Now that clients subscribe to all streams, this just sends the
         update straight down the connection.
         """
-        if stream_name in self.replication_streams:
-            # The client is subscribed to the stream
-            self.send_command(RdataCommand(stream_name, token, data))
-        elif stream_name in self.connecting_streams:
-            # The client is being subscribed to the stream
-            logger.debug("[%s] Queuing RDATA %r %r", self.id(), stream_name, token)
-            self.pending_rdata.setdefault(stream_name, []).append((token, data))
-        else:
-            # The client isn't subscribed
-            logger.debug("[%s] Dropping RDATA %r %r", self.id(), stream_name, token)
+        self.send_command(RdataCommand(stream_name, token, data))
 
     def send_sync(self, data):
         self.send_command(SyncCommand(data))
@@ -638,6 +543,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
 
     def __init__(
         self,
+        hs: "HomeServer",
         client_name: str,
         server_name: str,
         clock: Clock,
@@ -649,22 +555,25 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
         self.server_name = server_name
         self.handler = handler
 
+        self.streams = {
+            stream.NAME: stream(hs) for stream in STREAMS_MAP.values()
+        }  # type: Dict[str, Stream]
+
         # Set of stream names that have been subscribed to, but haven't yet
         # caught up with. This is used to track when the client has been fully
         # connected to the remote.
-        self.streams_connecting = set()  # type: Set[str]
+        self.streams_connecting = set(STREAMS_MAP)  # type: Set[str]
 
         # Map of stream to batched updates. See RdataCommand for info on how
         # batching works.
-        self.pending_batches = {}  # type: Dict[str, Any]
+        self.pending_batches = {}  # type: Dict[str, List[Any]]
 
     def connectionMade(self):
         self.send_command(NameCommand(self.client_name))
         BaseReplicationStreamProtocol.connectionMade(self)
 
         # Once we've connected subscribe to the necessary streams
-        for stream_name, token in iteritems(self.handler.get_streams_to_replicate()):
-            self.replicate(stream_name, token)
+        self.replicate()
 
         # Tell the server if we have any users currently syncing (should only
         # happen on synchrotrons)
@@ -676,10 +585,6 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
         # We've now finished connecting, so inform the client handler
         self.handler.update_connection(self)
 
-        # This will happen if we don't actually subscribe to any streams
-        if not self.streams_connecting:
-            self.handler.finished_connecting()
-
     async def on_SERVER(self, cmd):
         if cmd.data != self.server_name:
             logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data)
@@ -697,7 +602,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
             )
             raise
 
-        if cmd.token is None:
+        if cmd.token is None or stream_name in self.streams_connecting:
             # I.e. this is part of a batch of updates for this stream. Batch
             # until we get an update for the stream with a non None token
             self.pending_batches.setdefault(stream_name, []).append(row)
@@ -707,14 +612,55 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
             rows.append(row)
             await self.handler.on_rdata(stream_name, cmd.token, rows)
 
-    async def on_POSITION(self, cmd):
-        # When we get a `POSITION` command it means we've finished getting
-        # missing updates for the given stream, and are now up to date.
+    async def on_POSITION(self, cmd: PositionCommand):
+        stream = self.streams.get(cmd.stream_name)
+        if not stream:
+            logger.error("Got POSITION for unknown stream: %s", cmd.stream_name)
+            return
+
+        # Find where we previously streamed up to.
+        current_token = self.handler.get_streams_to_replicate().get(cmd.stream_name)
+        if current_token is None:
+            logger.warning(
+                "Got POSITION for stream we're not subscribed to: %s", cmd.stream_name
+            )
+            return
+
+        # Fetch all updates between then and now.
+        limited = True
+        while limited:
+            updates, current_token, limited = await stream.get_updates_since(
+                current_token, cmd.token
+            )
+
+            # Check if the connection was closed underneath us; if so we bail
+            # rather than risk having concurrent catch-ups going on.
+            if self.state == ConnectionStates.CLOSED:
+                return
+
+            if updates:
+                await self.handler.on_rdata(
+                    cmd.stream_name,
+                    current_token,
+                    [stream.parse_row(update[1]) for update in updates],
+                )
+
+        # We've now caught up to position sent to us, notify handler.
+        await self.handler.on_position(cmd.stream_name, cmd.token)
+
         self.streams_connecting.discard(cmd.stream_name)
         if not self.streams_connecting:
             self.handler.finished_connecting()
 
-        await self.handler.on_position(cmd.stream_name, cmd.token)
+        # Check if the connection was closed underneath us; if so we bail
+        # rather than risk having concurrent catch-ups going on.
+        if self.state == ConnectionStates.CLOSED:
+            return
+
+        # Handle any RDATA that came in while we were catching up.
+        rows = self.pending_batches.pop(cmd.stream_name, [])
+        if rows:
+            await self.handler.on_rdata(cmd.stream_name, rows[-1].token, rows)
 
     async def on_SYNC(self, cmd):
         self.handler.on_sync(cmd.data)
@@ -722,22 +668,12 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
     async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand):
         self.handler.on_remote_server_up(cmd.data)
 
-    def replicate(self, stream_name, token):
+    def replicate(self):
         """Send the subscription request to the server
         """
-        if stream_name not in STREAMS_MAP:
-            raise Exception("Invalid stream name %r" % (stream_name,))
-
-        logger.info(
-            "[%s] Subscribing to replication stream: %r from %r",
-            self.id(),
-            stream_name,
-            token,
-        )
-
-        self.streams_connecting.add(stream_name)
+        logger.info("[%s] Subscribing to replication streams", self.id())
 
-        self.send_command(ReplicateCommand(stream_name, token))
+        self.send_command(ReplicateCommand())
 
     def on_connection_closed(self):
         BaseReplicationStreamProtocol.on_connection_closed(self)
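
The heart of the client-side change is the paging loop in `on_POSITION`. A
simplified, runnable sketch of that pattern follows; `FakeStream` and
`FakeHandler` are hypothetical stand-ins, but the triple return value of
`get_updates_since` and the `on_rdata`/`on_position` calls mirror the diff
above.

    import asyncio

    async def catch_up(stream, stream_name, handler, last_token, target_token):
        # Page through missed updates until the stream reports it is no
        # longer limited, i.e. we have reached target_token.
        limited = True
        while limited:
            updates, last_token, limited = await stream.get_updates_since(
                last_token, target_token
            )
            if updates:
                await handler.on_rdata(
                    stream_name,
                    last_token,
                    [stream.parse_row(update[1]) for update in updates],
                )
        # Only once fully caught up do we advance the handler's position.
        await handler.on_position(stream_name, target_token)

    class FakeStream(object):
        rows = {i: ("row%d" % i,) for i in range(1, 6)}

        @staticmethod
        def parse_row(row):
            return row

        async def get_updates_since(self, from_token, upto_token, limit=2):
            found = [
                (t, r)
                for t, r in sorted(self.rows.items())
                if from_token < t <= upto_token
            ][:limit]
            if len(found) == limit and found[-1][0] < upto_token:
                return found, found[-1][0], True
            return found, upto_token, False

    class FakeHandler(object):
        async def on_rdata(self, name, token, rows):
            print("RDATA", name, token, rows)

        async def on_position(self, name, token):
            print("POSITION", name, token)

    asyncio.run(catch_up(FakeStream(), "widgets", FakeHandler(), 0, 5))
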
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index 6e2ebaf614..4374e99e32 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -17,7 +17,7 @@
 
 import logging
 import random
-from typing import Any, List
+from typing import Any, Dict, List
 
 from six import itervalues
 
@@ -30,7 +30,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util.metrics import Measure, measure_func
 
 from .protocol import ServerReplicationStreamProtocol
-from .streams import STREAMS_MAP
+from .streams import STREAMS_MAP, Stream
 from .streams.federation import FederationStream
 
 stream_updates_counter = Counter(
@@ -52,7 +52,7 @@ class ReplicationStreamProtocolFactory(Factory):
     """
 
     def __init__(self, hs):
-        self.streamer = ReplicationStreamer(hs)
+        self.streamer = hs.get_replication_streamer()
         self.clock = hs.get_clock()
         self.server_name = hs.config.server_name
 
@@ -133,6 +133,11 @@ class ReplicationStreamer(object):
         for conn in self.connections:
             conn.send_error("server shutting down")
 
+    def get_streams(self) -> Dict[str, Stream]:
+        """Get a mapp from stream name to stream instance.
+        """
+        return self.streams_by_name
+
     def on_notifier_poke(self):
         """Checks if there is actually any new data and sends it to the
         connections if there are.
@@ -190,7 +195,8 @@ class ReplicationStreamer(object):
                             stream.current_token(),
                         )
                         try:
-                            updates, current_token = await stream.get_updates()
+                            updates, current_token, limited = await stream.get_updates()
+                            self.pending_updates |= limited
                         except Exception:
                             logger.info("Failed to handle stream %s", stream.NAME)
                             raise
@@ -226,8 +232,7 @@ class ReplicationStreamer(object):
             self.pending_updates = False
             self.is_looping = False
 
-    @measure_func("repl.get_stream_updates")
-    async def get_stream_updates(self, stream_name, token):
+    def get_stream_token(self, stream_name):
         """For a given stream get all updates since token. This is called when
         a client first subscribes to a stream.
         """
@@ -235,7 +240,7 @@ class ReplicationStreamer(object):
         if not stream:
             raise Exception("unknown stream %s", stream_name)
 
-        return await stream.get_updates_since(token)
+        return stream.current_token()
 
     @measure_func("repl.federation_ack")
     def federation_ack(self, token):
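
On the server side, `get_updates()` can now also come back `limited`, and the
streamer folds that into its `pending_updates` flag so the notification loop
goes round again rather than assuming a single pass drained the stream. A
condensed sketch of that loop, simplified to a single stream (the real
streamer iterates over all of them):

    async def notifier_loop(stream, send_updates):
        pending = True
        while pending:
            pending = False
            updates, token, limited = await stream.get_updates()
            # A limited fetch means more rows are waiting behind the cap,
            # so schedule another pass instead of stopping here.
            pending |= limited
            if updates:
                send_updates(updates, token)
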
diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py
index 29199f5b46..37bcd3de66 100644
--- a/synapse/replication/tcp/streams/__init__.py
+++ b/synapse/replication/tcp/streams/__init__.py
@@ -24,6 +24,9 @@ Each stream is defined by the following information:
     current_token:      The function that returns the current token for the stream
     update_function:    The function that returns a list of updates between two tokens
 """
+
+from typing import Dict, Type
+
 from synapse.replication.tcp.streams._base import (
     AccountDataStream,
     BackfillStream,
@@ -35,6 +38,7 @@ from synapse.replication.tcp.streams._base import (
     PushersStream,
     PushRulesStream,
     ReceiptsStream,
+    Stream,
     TagAccountDataStream,
     ToDeviceStream,
     TypingStream,
@@ -63,10 +67,12 @@ STREAMS_MAP = {
         GroupServerStream,
         UserSignatureStream,
     )
-}
+}  # type: Dict[str, Type[Stream]]
+
 
 __all__ = [
     "STREAMS_MAP",
+    "Stream",
     "BackfillStream",
     "PresenceStream",
     "TypingStream",
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 32d9514883..c14dff6c64 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -14,13 +14,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import itertools
 import logging
 from collections import namedtuple
-from typing import Any, List, Optional, Tuple
+from typing import Any, Awaitable, Callable, List, Optional, Tuple
 
 import attr
 
+from synapse.replication.http.streams import ReplicationGetStreamUpdates
 from synapse.types import JsonDict
 
 logger = logging.getLogger(__name__)
@@ -29,6 +29,15 @@ logger = logging.getLogger(__name__)
 MAX_EVENTS_BEHIND = 500000
 
 
+# Some type aliases to make things a bit easier.
+
+# A stream position token
+Token = int
+
+# A pair of position in stream and args used to create an instance of `ROW_TYPE`.
+StreamRow = Tuple[Token, tuple]
+
+
 class Stream(object):
     """Base class for the streams.
 
@@ -56,6 +65,7 @@ class Stream(object):
         return cls.ROW_TYPE(*row)
 
     def __init__(self, hs):
+
         # The token from which we last asked for updates
         self.last_token = self.current_token()
 
@@ -65,61 +75,46 @@ class Stream(object):
         """
         self.last_token = self.current_token()
 
-    async def get_updates(self):
+    async def get_updates(self) -> Tuple[List[Tuple[Token, JsonDict]], Token, bool]:
         """Gets all updates since the last time this function was called (or
         since the stream was constructed if it hadn't been called before).
 
         Returns:
-            Deferred[Tuple[List[Tuple[int, Any]], int]:
-                Resolves to a pair ``(updates, current_token)``, where ``updates`` is a
-                list of ``(token, row)`` entries. ``row`` will be json-serialised and
-                sent over the replication steam.
+            A triplet `(updates, new_last_token, limited)`, where `updates` is
+            a list of `(token, row)` entries, `new_last_token` is the new
+            position in stream, and `limited` is whether there are more updates
+            to fetch.
         """
-        updates, current_token = await self.get_updates_since(self.last_token)
+        current_token = self.current_token()
+        updates, current_token, limited = await self.get_updates_since(
+            self.last_token, current_token
+        )
         self.last_token = current_token
 
-        return updates, current_token
+        return updates, current_token, limited
 
     async def get_updates_since(
-        self, from_token: int
-    ) -> Tuple[List[Tuple[int, JsonDict]], int]:
+        self, from_token: Token, upto_token: Token, limit: int = 100
+    ) -> Tuple[List[Tuple[Token, JsonDict]], Token, bool]:
         """Like get_updates except allows specifying from when we should
         stream updates
 
         Returns:
-            Resolves to a pair `(updates, new_last_token)`, where `updates` is
-            a list of `(token, row)` entries and `new_last_token` is the new
-            position in stream.
+            A triplet `(updates, new_last_token, limited)`, where `updates` is
+            a list of `(token, row)` entries, `new_last_token` is the new
+            position in stream, and `limited` is whether there are more updates
+            to fetch.
         """
 
-        if from_token in ("NOW", "now"):
-            return [], self.current_token()
-
-        current_token = self.current_token()
-
         from_token = int(from_token)
 
-        if from_token == current_token:
-            return [], current_token
+        if from_token == upto_token:
+            return [], upto_token, False
 
-        rows = await self.update_function(
-            from_token, current_token, limit=MAX_EVENTS_BEHIND + 1
+        updates, upto_token, limited = await self.update_function(
+            from_token, upto_token, limit=limit,
         )
-
-        # never turn more than MAX_EVENTS_BEHIND + 1 into updates.
-        rows = itertools.islice(rows, MAX_EVENTS_BEHIND + 1)
-
-        updates = [(row[0], row[1:]) for row in rows]
-
-        # check we didn't get more rows than the limit.
-        # doing it like this allows the update_function to be a generator.
-        if len(updates) >= MAX_EVENTS_BEHIND:
-            raise Exception("stream %s has fallen behind" % (self.NAME))
-
-        # The update function didn't hit the limit, so we must have got all
-        # the updates to `current_token`, and can return that as our new
-        # stream position.
-        return updates, current_token
+        return updates, upto_token, limited
 
     def current_token(self):
         """Gets the current token of the underlying streams. Should be provided
@@ -141,6 +136,48 @@ class Stream(object):
         raise NotImplementedError()
 
 
+def db_query_to_update_function(
+    query_function: Callable[[Token, Token, int], Awaitable[List[tuple]]]
+) -> Callable[[Token, Token, int], Awaitable[Tuple[List[StreamRow], Token, bool]]]:
+    """Wraps a db query function which returns a list of rows to make it
+    suitable for use as an `update_function` for the Stream class
+    """
+
+    async def update_function(from_token, upto_token, limit):
+        rows = await query_function(from_token, upto_token, limit)
+        updates = [(row[0], row[1:]) for row in rows]
+        limited = False
+        if len(updates) == limit:
+            upto_token = rows[-1][0]
+            limited = True
+
+        return updates, upto_token, limited
+
+    return update_function
+
+
+def make_http_update_function(
+    hs, stream_name: str
+) -> Callable[[Token, Token, Token], Awaitable[Tuple[List[StreamRow], Token, bool]]]:
+    """Makes a suitable function for use as an `update_function` that queries
+    the master process for updates.
+    """
+
+    client = ReplicationGetStreamUpdates.make_client(hs)
+
+    async def update_function(
+        from_token: int, upto_token: int, limit: int
+    ) -> Tuple[List[Tuple[int, tuple]], int, bool]:
+        return await client(
+            stream_name=stream_name,
+            from_token=from_token,
+            upto_token=upto_token,
+            limit=limit,
+        )
+
+    return update_function
+
+
 class BackfillStream(Stream):
     """We fetched some old events and either we had never seen that event before
     or it went from being an outlier to not.
@@ -164,7 +201,7 @@ class BackfillStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
         self.current_token = store.get_current_backfill_token  # type: ignore
-        self.update_function = store.get_all_new_backfill_event_rows  # type: ignore
+        self.update_function = db_query_to_update_function(store.get_all_new_backfill_event_rows)  # type: ignore
 
         super(BackfillStream, self).__init__(hs)
 
@@ -190,8 +227,15 @@ class PresenceStream(Stream):
         store = hs.get_datastore()
         presence_handler = hs.get_presence_handler()
 
+        self._is_worker = hs.config.worker_app is not None
+
         self.current_token = store.get_current_presence_token  # type: ignore
-        self.update_function = presence_handler.get_all_presence_updates  # type: ignore
+
+        if hs.config.worker_app is None:
+            self.update_function = db_query_to_update_function(presence_handler.get_all_presence_updates)  # type: ignore
+        else:
+            # Query master process
+            self.update_function = make_http_update_function(hs, self.NAME)  # type: ignore
 
         super(PresenceStream, self).__init__(hs)
 
@@ -208,7 +252,12 @@ class TypingStream(Stream):
         typing_handler = hs.get_typing_handler()
 
         self.current_token = typing_handler.get_current_token  # type: ignore
-        self.update_function = typing_handler.get_all_typing_updates  # type: ignore
+
+        if hs.config.worker_app is None:
+            self.update_function = db_query_to_update_function(typing_handler.get_all_typing_updates)  # type: ignore
+        else:
+            # Query master process
+            self.update_function = make_http_update_function(hs, self.NAME)  # type: ignore
 
         super(TypingStream, self).__init__(hs)
 
@@ -232,7 +281,7 @@ class ReceiptsStream(Stream):
         store = hs.get_datastore()
 
         self.current_token = store.get_max_receipt_stream_id  # type: ignore
-        self.update_function = store.get_all_updated_receipts  # type: ignore
+        self.update_function = db_query_to_update_function(store.get_all_updated_receipts)  # type: ignore
 
         super(ReceiptsStream, self).__init__(hs)
 
@@ -256,7 +305,13 @@ class PushRulesStream(Stream):
 
     async def update_function(self, from_token, to_token, limit):
         rows = await self.store.get_all_push_rule_updates(from_token, to_token, limit)
-        return [(row[0], row[2]) for row in rows]
+
+        limited = False
+        if len(rows) == limit:
+            to_token = rows[-1][0]
+            limited = True
+
+        return [(row[0], (row[2],)) for row in rows], to_token, limited
 
 
 class PushersStream(Stream):
@@ -275,7 +330,7 @@ class PushersStream(Stream):
         store = hs.get_datastore()
 
         self.current_token = store.get_pushers_stream_token  # type: ignore
-        self.update_function = store.get_all_updated_pushers_rows  # type: ignore
+        self.update_function = db_query_to_update_function(store.get_all_updated_pushers_rows)  # type: ignore
 
         super(PushersStream, self).__init__(hs)
 
@@ -307,7 +362,7 @@ class CachesStream(Stream):
         store = hs.get_datastore()
 
         self.current_token = store.get_cache_stream_token  # type: ignore
-        self.update_function = store.get_all_updated_caches  # type: ignore
+        self.update_function = db_query_to_update_function(store.get_all_updated_caches)  # type: ignore
 
         super(CachesStream, self).__init__(hs)
 
@@ -333,7 +388,7 @@ class PublicRoomsStream(Stream):
         store = hs.get_datastore()
 
         self.current_token = store.get_current_public_room_stream_id  # type: ignore
-        self.update_function = store.get_all_new_public_rooms  # type: ignore
+        self.update_function = db_query_to_update_function(store.get_all_new_public_rooms)  # type: ignore
 
         super(PublicRoomsStream, self).__init__(hs)
 
@@ -354,7 +409,7 @@ class DeviceListsStream(Stream):
         store = hs.get_datastore()
 
         self.current_token = store.get_device_stream_token  # type: ignore
-        self.update_function = store.get_all_device_list_changes_for_remotes  # type: ignore
+        self.update_function = db_query_to_update_function(store.get_all_device_list_changes_for_remotes)  # type: ignore
 
         super(DeviceListsStream, self).__init__(hs)
 
@@ -372,7 +427,7 @@ class ToDeviceStream(Stream):
         store = hs.get_datastore()
 
         self.current_token = store.get_to_device_stream_token  # type: ignore
-        self.update_function = store.get_all_new_device_messages  # type: ignore
+        self.update_function = db_query_to_update_function(store.get_all_new_device_messages)  # type: ignore
 
         super(ToDeviceStream, self).__init__(hs)
 
@@ -392,7 +447,7 @@ class TagAccountDataStream(Stream):
         store = hs.get_datastore()
 
         self.current_token = store.get_max_account_data_stream_id  # type: ignore
-        self.update_function = store.get_all_updated_tags  # type: ignore
+        self.update_function = db_query_to_update_function(store.get_all_updated_tags)  # type: ignore
 
         super(TagAccountDataStream, self).__init__(hs)
 
@@ -412,10 +467,11 @@ class AccountDataStream(Stream):
         self.store = hs.get_datastore()
 
         self.current_token = self.store.get_max_account_data_stream_id  # type: ignore
+        self.update_function = db_query_to_update_function(self._update_function)  # type: ignore
 
         super(AccountDataStream, self).__init__(hs)
 
-    async def update_function(self, from_token, to_token, limit):
+    async def _update_function(self, from_token, to_token, limit):
         global_results, room_results = await self.store.get_all_updated_account_data(
             from_token, from_token, to_token, limit
         )
@@ -442,7 +498,7 @@ class GroupServerStream(Stream):
         store = hs.get_datastore()
 
         self.current_token = store.get_group_stream_token  # type: ignore
-        self.update_function = store.get_all_groups_changes  # type: ignore
+        self.update_function = db_query_to_update_function(store.get_all_groups_changes)  # type: ignore
 
         super(GroupServerStream, self).__init__(hs)
 
@@ -460,6 +516,6 @@ class UserSignatureStream(Stream):
         store = hs.get_datastore()
 
         self.current_token = store.get_device_stream_token  # type: ignore
-        self.update_function = store.get_all_user_signature_changes_for_remotes  # type: ignore
+        self.update_function = db_query_to_update_function(store.get_all_user_signature_changes_for_remotes)  # type: ignore
 
         super(UserSignatureStream, self).__init__(hs)
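
A small worked example of `db_query_to_update_function` in isolation, using a
hypothetical in-memory table in place of a database query, shows how the
wrapper clamps the returned token and sets `limited` when the query fills the
limit (the import assumes the module layout in this diff):

    import asyncio

    from synapse.replication.tcp.streams._base import db_query_to_update_function

    # Hypothetical rows: (stream_token, payload)
    ROWS = [(1, "a"), (2, "b"), (3, "c")]

    async def get_all_widget_updates(from_token, upto_token, limit):
        return [r for r in ROWS if from_token < r[0] <= upto_token][:limit]

    update_function = db_query_to_update_function(get_all_widget_updates)

    updates, token, limited = asyncio.run(update_function(0, 3, 2))

    # Only two of the three rows fit, so the new token is clamped to the last
    # returned row and limited is True; a follow-up call from token 2 would
    # drain the remaining row.
    assert updates == [(1, ("a",)), (2, ("b",))]
    assert (token, limited) == (2, True)
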
diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py
index b3afabb8cd..c6a595629f 100644
--- a/synapse/replication/tcp/streams/events.py
+++ b/synapse/replication/tcp/streams/events.py
@@ -19,7 +19,7 @@ from typing import Tuple, Type
 
 import attr
 
-from ._base import Stream
+from ._base import Stream, db_query_to_update_function
 
 
 """Handling of the 'events' replication stream
@@ -117,10 +117,11 @@ class EventsStream(Stream):
     def __init__(self, hs):
         self._store = hs.get_datastore()
         self.current_token = self._store.get_current_events_token  # type: ignore
+        self.update_function = db_query_to_update_function(self._update_function)  # type: ignore
 
         super(EventsStream, self).__init__(hs)
 
-    async def update_function(self, from_token, current_token, limit=None):
+    async def _update_function(self, from_token, current_token, limit=None):
         event_rows = await self._store.get_all_new_forward_event_rows(
             from_token, current_token, limit
         )
diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py
index f5f9336430..48c1d45718 100644
--- a/synapse/replication/tcp/streams/federation.py
+++ b/synapse/replication/tcp/streams/federation.py
@@ -15,7 +15,9 @@
 # limitations under the License.
 from collections import namedtuple
 
-from ._base import Stream
+from twisted.internet import defer
+
+from synapse.replication.tcp.streams._base import Stream, db_query_to_update_function
 
 
 class FederationStream(Stream):
@@ -33,11 +35,18 @@ class FederationStream(Stream):
 
     NAME = "federation"
     ROW_TYPE = FederationStreamRow
+    _QUERY_MASTER = True
 
     def __init__(self, hs):
-        federation_sender = hs.get_federation_sender()
-
-        self.current_token = federation_sender.get_current_token  # type: ignore
-        self.update_function = federation_sender.get_replication_rows  # type: ignore
+        # Not all synapse instances will have a federation sender instance,
+        # whether that's a `FederationSender` or a `FederationRemoteSendQueue`,
+        # so we stub the stream out when that is the case.
+        if hs.config.worker_app is None or hs.should_send_federation():
+            federation_sender = hs.get_federation_sender()
+            self.current_token = federation_sender.get_current_token  # type: ignore
+            self.update_function = db_query_to_update_function(federation_sender.get_replication_rows)  # type: ignore
+        else:
+            self.current_token = lambda: 0  # type: ignore
+            self.update_function = lambda from_token, upto_token, limit: defer.succeed(([], upto_token, False))  # type: ignore
 
         super(FederationStream, self).__init__(hs)
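
The presence, typing and federation changes above all follow one pattern:
streams whose data lives only on the master keep a direct database-backed
`update_function` there, while workers get an HTTP-backed one. A hypothetical
stream showing the shape of that choice (`WidgetStream` and the store methods
are illustrative, not real Synapse APIs):

    from synapse.replication.tcp.streams._base import (
        Stream,
        db_query_to_update_function,
        make_http_update_function,
    )

    class WidgetStream(Stream):
        NAME = "widgets"

        def __init__(self, hs):
            store = hs.get_datastore()
            self.current_token = store.get_widget_stream_token  # type: ignore

            if hs.config.worker_app is None:
                # Master: read straight from the database.
                self.update_function = db_query_to_update_function(
                    store.get_all_widget_updates
                )  # type: ignore
            else:
                # Worker: proxy the fetch to the master over HTTP.
                self.update_function = make_http_update_function(hs, self.NAME)  # type: ignore

            super(WidgetStream, self).__init__(hs)
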
diff --git a/synapse/server.py b/synapse/server.py
index 1b980371de..9426eb1672 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -85,6 +85,7 @@ from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.notifier import Notifier
 from synapse.push.action_generator import ActionGenerator
 from synapse.push.pusherpool import PusherPool
+from synapse.replication.tcp.resource import ReplicationStreamer
 from synapse.rest.media.v1.media_repository import (
     MediaRepository,
     MediaRepositoryResource,
@@ -199,6 +200,7 @@ class HomeServer(object):
         "saml_handler",
         "event_client_serializer",
         "storage",
+        "replication_streamer",
     ]
 
     REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"]
@@ -536,6 +538,9 @@ class HomeServer(object):
     def build_storage(self) -> Storage:
         return Storage(self, self.datastores)
 
+    def build_replication_streamer(self) -> ReplicationStreamer:
+        return ReplicationStreamer(self)
+
     def remove_pusher(self, app_id, push_key, user_id):
         return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
 
diff --git a/synapse/storage/data_stores/main/cache.py b/synapse/storage/data_stores/main/cache.py
index d4c44dcc75..4dc5da3fe8 100644
--- a/synapse/storage/data_stores/main/cache.py
+++ b/synapse/storage/data_stores/main/cache.py
@@ -32,7 +32,29 @@ logger = logging.getLogger(__name__)
 CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
 
 
-class CacheInvalidationStore(SQLBaseStore):
+class CacheInvalidationWorkerStore(SQLBaseStore):
+    def get_all_updated_caches(self, last_id, current_id, limit):
+        if last_id == current_id:
+            return defer.succeed([])
+
+        def get_all_updated_caches_txn(txn):
+            # We purposefully don't bound by the current token, as we want to
+            # send across cache invalidations as quickly as possible. Cache
+            # invalidations are idempotent, so duplicates are fine.
+            sql = (
+                "SELECT stream_id, cache_func, keys, invalidation_ts"
+                " FROM cache_invalidation_stream"
+                " WHERE stream_id > ? ORDER BY stream_id ASC LIMIT ?"
+            )
+            txn.execute(sql, (last_id, limit))
+            return txn.fetchall()
+
+        return self.db.runInteraction(
+            "get_all_updated_caches", get_all_updated_caches_txn
+        )
+
+
+class CacheInvalidationStore(CacheInvalidationWorkerStore):
     async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]):
         """Invalidates the cache and adds it to the cache stream so slaves
         will know to invalidate their caches.
@@ -145,26 +167,6 @@ class CacheInvalidationStore(SQLBaseStore):
                 },
             )
 
-    def get_all_updated_caches(self, last_id, current_id, limit):
-        if last_id == current_id:
-            return defer.succeed([])
-
-        def get_all_updated_caches_txn(txn):
-            # We purposefully don't bound by the current token, as we want to
-            # send across cache invalidations as quickly as possible. Cache
-            # invalidations are idempotent, so duplicates are fine.
-            sql = (
-                "SELECT stream_id, cache_func, keys, invalidation_ts"
-                " FROM cache_invalidation_stream"
-                " WHERE stream_id > ? ORDER BY stream_id ASC LIMIT ?"
-            )
-            txn.execute(sql, (last_id, limit))
-            return txn.fetchall()
-
-        return self.db.runInteraction(
-            "get_all_updated_caches", get_all_updated_caches_txn
-        )
-
     def get_cache_stream_token(self):
         if self._cache_id_gen:
             return self._cache_id_gen.get_current_token()
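
Moving `get_all_updated_caches` onto a new `CacheInvalidationWorkerStore`
base class (and similarly for the device inbox, events and room stores below)
follows the usual Synapse split: read-side queries that replication needs go
on a worker-safe base class, while write paths stay on the master-only
subclass. Schematically, with hypothetical names:

    class WidgetWorkerStore(object):
        """Read-only queries, safe to run on any worker."""

        def get_all_updated_widgets(self, last_id, current_id, limit):
            ...  # a LIMITed SELECT over the stream table, as above

    class WidgetStore(WidgetWorkerStore):
        """Write paths, which only the master may execute."""

        def insert_widget_and_stream(self, widget):
            ...  # an INSERT plus a row on the invalidation stream
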
diff --git a/synapse/storage/data_stores/main/deviceinbox.py b/synapse/storage/data_stores/main/deviceinbox.py
index 0613b49f4a..9a1178fb39 100644
--- a/synapse/storage/data_stores/main/deviceinbox.py
+++ b/synapse/storage/data_stores/main/deviceinbox.py
@@ -207,6 +207,50 @@ class DeviceInboxWorkerStore(SQLBaseStore):
             "delete_device_msgs_for_remote", delete_messages_for_remote_destination_txn
         )
 
+    def get_all_new_device_messages(self, last_pos, current_pos, limit):
+        """
+        Args:
+            last_pos(int):
+            current_pos(int):
+            limit(int):
+        Returns:
+            A deferred list of rows from the device inbox
+        """
+        if last_pos == current_pos:
+            return defer.succeed([])
+
+        def get_all_new_device_messages_txn(txn):
+            # We limit like this as we might have multiple rows per stream_id, and
+            # we want to make sure we always get all entries for any stream_id
+            # we return.
+            upper_pos = min(current_pos, last_pos + limit)
+            sql = (
+                "SELECT max(stream_id), user_id"
+                " FROM device_inbox"
+                " WHERE ? < stream_id AND stream_id <= ?"
+                " GROUP BY user_id"
+            )
+            txn.execute(sql, (last_pos, upper_pos))
+            rows = txn.fetchall()
+
+            sql = (
+                "SELECT max(stream_id), destination"
+                " FROM device_federation_outbox"
+                " WHERE ? < stream_id AND stream_id <= ?"
+                " GROUP BY destination"
+            )
+            txn.execute(sql, (last_pos, upper_pos))
+            rows.extend(txn)
+
+            # Order by ascending stream ordering
+            rows.sort()
+
+            return rows
+
+        return self.db.runInteraction(
+            "get_all_new_device_messages", get_all_new_device_messages_txn
+        )
+
 
 class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
     DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop"
@@ -411,47 +455,3 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
                 rows.append((user_id, device_id, stream_id, message_json))
 
         txn.executemany(sql, rows)
-
-    def get_all_new_device_messages(self, last_pos, current_pos, limit):
-        """
-        Args:
-            last_pos(int):
-            current_pos(int):
-            limit(int):
-        Returns:
-            A deferred list of rows from the device inbox
-        """
-        if last_pos == current_pos:
-            return defer.succeed([])
-
-        def get_all_new_device_messages_txn(txn):
-            # We limit like this as we might have multiple rows per stream_id, and
-            # we want to make sure we always get all entries for any stream_id
-            # we return.
-            upper_pos = min(current_pos, last_pos + limit)
-            sql = (
-                "SELECT max(stream_id), user_id"
-                " FROM device_inbox"
-                " WHERE ? < stream_id AND stream_id <= ?"
-                " GROUP BY user_id"
-            )
-            txn.execute(sql, (last_pos, upper_pos))
-            rows = txn.fetchall()
-
-            sql = (
-                "SELECT max(stream_id), destination"
-                " FROM device_federation_outbox"
-                " WHERE ? < stream_id AND stream_id <= ?"
-                " GROUP BY destination"
-            )
-            txn.execute(sql, (last_pos, upper_pos))
-            rows.extend(txn)
-
-            # Order by ascending stream ordering
-            rows.sort()
-
-            return rows
-
-        return self.db.runInteraction(
-            "get_all_new_device_messages", get_all_new_device_messages_txn
-        )
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index d593ef47b8..e71c23541d 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -1267,104 +1267,6 @@ class EventsStore(
         ret = yield self.db.runInteraction("count_daily_active_rooms", _count)
         return ret
 
-    def get_current_backfill_token(self):
-        """The current minimum token that backfilled events have reached"""
-        return -self._backfill_id_gen.get_current_token()
-
-    def get_current_events_token(self):
-        """The current maximum token that events have reached"""
-        return self._stream_id_gen.get_current_token()
-
-    def get_all_new_forward_event_rows(self, last_id, current_id, limit):
-        if last_id == current_id:
-            return defer.succeed([])
-
-        def get_all_new_forward_event_rows(txn):
-            sql = (
-                "SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
-                " state_key, redacts, relates_to_id"
-                " FROM events AS e"
-                " LEFT JOIN redactions USING (event_id)"
-                " LEFT JOIN state_events USING (event_id)"
-                " LEFT JOIN event_relations USING (event_id)"
-                " WHERE ? < stream_ordering AND stream_ordering <= ?"
-                " ORDER BY stream_ordering ASC"
-                " LIMIT ?"
-            )
-            txn.execute(sql, (last_id, current_id, limit))
-            new_event_updates = txn.fetchall()
-
-            if len(new_event_updates) == limit:
-                upper_bound = new_event_updates[-1][0]
-            else:
-                upper_bound = current_id
-
-            sql = (
-                "SELECT event_stream_ordering, e.event_id, e.room_id, e.type,"
-                " state_key, redacts, relates_to_id"
-                " FROM events AS e"
-                " INNER JOIN ex_outlier_stream USING (event_id)"
-                " LEFT JOIN redactions USING (event_id)"
-                " LEFT JOIN state_events USING (event_id)"
-                " LEFT JOIN event_relations USING (event_id)"
-                " WHERE ? < event_stream_ordering"
-                " AND event_stream_ordering <= ?"
-                " ORDER BY event_stream_ordering DESC"
-            )
-            txn.execute(sql, (last_id, upper_bound))
-            new_event_updates.extend(txn)
-
-            return new_event_updates
-
-        return self.db.runInteraction(
-            "get_all_new_forward_event_rows", get_all_new_forward_event_rows
-        )
-
-    def get_all_new_backfill_event_rows(self, last_id, current_id, limit):
-        if last_id == current_id:
-            return defer.succeed([])
-
-        def get_all_new_backfill_event_rows(txn):
-            sql = (
-                "SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
-                " state_key, redacts, relates_to_id"
-                " FROM events AS e"
-                " LEFT JOIN redactions USING (event_id)"
-                " LEFT JOIN state_events USING (event_id)"
-                " LEFT JOIN event_relations USING (event_id)"
-                " WHERE ? > stream_ordering AND stream_ordering >= ?"
-                " ORDER BY stream_ordering ASC"
-                " LIMIT ?"
-            )
-            txn.execute(sql, (-last_id, -current_id, limit))
-            new_event_updates = txn.fetchall()
-
-            if len(new_event_updates) == limit:
-                upper_bound = new_event_updates[-1][0]
-            else:
-                upper_bound = current_id
-
-            sql = (
-                "SELECT -event_stream_ordering, e.event_id, e.room_id, e.type,"
-                " state_key, redacts, relates_to_id"
-                " FROM events AS e"
-                " INNER JOIN ex_outlier_stream USING (event_id)"
-                " LEFT JOIN redactions USING (event_id)"
-                " LEFT JOIN state_events USING (event_id)"
-                " LEFT JOIN event_relations USING (event_id)"
-                " WHERE ? > event_stream_ordering"
-                " AND event_stream_ordering >= ?"
-                " ORDER BY event_stream_ordering DESC"
-            )
-            txn.execute(sql, (-last_id, -upper_bound))
-            new_event_updates.extend(txn.fetchall())
-
-            return new_event_updates
-
-        return self.db.runInteraction(
-            "get_all_new_backfill_event_rows", get_all_new_backfill_event_rows
-        )
-
     @cached(num_args=5, max_entries=10)
     def get_all_new_events(
         self,
@@ -1850,22 +1752,6 @@ class EventsStore(
 
         return (int(res["topological_ordering"]), int(res["stream_ordering"]))
 
-    def get_all_updated_current_state_deltas(self, from_token, to_token, limit):
-        def get_all_updated_current_state_deltas_txn(txn):
-            sql = """
-                SELECT stream_id, room_id, type, state_key, event_id
-                FROM current_state_delta_stream
-                WHERE ? < stream_id AND stream_id <= ?
-                ORDER BY stream_id ASC LIMIT ?
-            """
-            txn.execute(sql, (from_token, to_token, limit))
-            return txn.fetchall()
-
-        return self.db.runInteraction(
-            "get_all_updated_current_state_deltas",
-            get_all_updated_current_state_deltas_txn,
-        )
-
     def insert_labels_for_event_txn(
         self, txn, event_id, labels, room_id, topological_ordering
     ):
diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py
index 3013f49d32..16ea8948b1 100644
--- a/synapse/storage/data_stores/main/events_worker.py
+++ b/synapse/storage/data_stores/main/events_worker.py
@@ -963,3 +963,117 @@ class EventsWorkerStore(SQLBaseStore):
         complexity_v1 = round(state_events / 500, 2)
 
         return {"v1": complexity_v1}
+
+    def get_current_backfill_token(self):
+        """The current minimum token that backfilled events have reached"""
+        return -self._backfill_id_gen.get_current_token()
+
+    def get_current_events_token(self):
+        """The current maximum token that events have reached"""
+        return self._stream_id_gen.get_current_token()
+
+    def get_all_new_forward_event_rows(self, last_id, current_id, limit):
+        if last_id == current_id:
+            return defer.succeed([])
+
+        def get_all_new_forward_event_rows(txn):
+            sql = (
+                "SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
+                " state_key, redacts, relates_to_id"
+                " FROM events AS e"
+                " LEFT JOIN redactions USING (event_id)"
+                " LEFT JOIN state_events USING (event_id)"
+                " LEFT JOIN event_relations USING (event_id)"
+                " WHERE ? < stream_ordering AND stream_ordering <= ?"
+                " ORDER BY stream_ordering ASC"
+                " LIMIT ?"
+            )
+            txn.execute(sql, (last_id, current_id, limit))
+            new_event_updates = txn.fetchall()
+
+            if len(new_event_updates) == limit:
+                upper_bound = new_event_updates[-1][0]
+            else:
+                upper_bound = current_id
+
+            sql = (
+                "SELECT event_stream_ordering, e.event_id, e.room_id, e.type,"
+                " state_key, redacts, relates_to_id"
+                " FROM events AS e"
+                " INNER JOIN ex_outlier_stream USING (event_id)"
+                " LEFT JOIN redactions USING (event_id)"
+                " LEFT JOIN state_events USING (event_id)"
+                " LEFT JOIN event_relations USING (event_id)"
+                " WHERE ? < event_stream_ordering"
+                " AND event_stream_ordering <= ?"
+                " ORDER BY event_stream_ordering DESC"
+            )
+            txn.execute(sql, (last_id, upper_bound))
+            new_event_updates.extend(txn)
+
+            return new_event_updates
+
+        return self.db.runInteraction(
+            "get_all_new_forward_event_rows", get_all_new_forward_event_rows
+        )
+
+    def get_all_new_backfill_event_rows(self, last_id, current_id, limit):
+        if last_id == current_id:
+            return defer.succeed([])
+
+        def get_all_new_backfill_event_rows(txn):
+            sql = (
+                "SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
+                " state_key, redacts, relates_to_id"
+                " FROM events AS e"
+                " LEFT JOIN redactions USING (event_id)"
+                " LEFT JOIN state_events USING (event_id)"
+                " LEFT JOIN event_relations USING (event_id)"
+                " WHERE ? > stream_ordering AND stream_ordering >= ?"
+                " ORDER BY stream_ordering ASC"
+                " LIMIT ?"
+            )
+            txn.execute(sql, (-last_id, -current_id, limit))
+            new_event_updates = txn.fetchall()
+
+            if len(new_event_updates) == limit:
+                upper_bound = new_event_updates[-1][0]
+            else:
+                upper_bound = current_id
+
+            sql = (
+                "SELECT -event_stream_ordering, e.event_id, e.room_id, e.type,"
+                " state_key, redacts, relates_to_id"
+                " FROM events AS e"
+                " INNER JOIN ex_outlier_stream USING (event_id)"
+                " LEFT JOIN redactions USING (event_id)"
+                " LEFT JOIN state_events USING (event_id)"
+                " LEFT JOIN event_relations USING (event_id)"
+                " WHERE ? > event_stream_ordering"
+                " AND event_stream_ordering >= ?"
+                " ORDER BY event_stream_ordering DESC"
+            )
+            txn.execute(sql, (-last_id, -upper_bound))
+            new_event_updates.extend(txn.fetchall())
+
+            return new_event_updates
+
+        return self.db.runInteraction(
+            "get_all_new_backfill_event_rows", get_all_new_backfill_event_rows
+        )
+
+    def get_all_updated_current_state_deltas(self, from_token, to_token, limit):
+        def get_all_updated_current_state_deltas_txn(txn):
+            sql = """
+                SELECT stream_id, room_id, type, state_key, event_id
+                FROM current_state_delta_stream
+                WHERE ? < stream_id AND stream_id <= ?
+                ORDER BY stream_id ASC LIMIT ?
+            """
+            txn.execute(sql, (from_token, to_token, limit))
+            return txn.fetchall()
+
+        return self.db.runInteraction(
+            "get_all_updated_current_state_deltas",
+            get_all_updated_current_state_deltas_txn,
+        )
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index e6c10c6316..aaebe427d3 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -732,6 +732,26 @@ class RoomWorkerStore(SQLBaseStore):
 
         return total_media_quarantined
 
+    def get_all_new_public_rooms(self, prev_id, current_id, limit):
+        def get_all_new_public_rooms(txn):
+            sql = """
+                SELECT stream_id, room_id, visibility, appservice_id, network_id
+                FROM public_room_list_stream
+                WHERE stream_id > ? AND stream_id <= ?
+                ORDER BY stream_id ASC
+                LIMIT ?
+            """
+
+            txn.execute(sql, (prev_id, current_id, limit))
+            return txn.fetchall()
+
+        if prev_id == current_id:
+            return defer.succeed([])
+
+        return self.db.runInteraction(
+            "get_all_new_public_rooms", get_all_new_public_rooms
+        )
+
 
 class RoomBackgroundUpdateStore(SQLBaseStore):
     REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
@@ -1249,26 +1269,6 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
     def get_current_public_room_stream_id(self):
         return self._public_room_id_gen.get_current_token()
 
-    def get_all_new_public_rooms(self, prev_id, current_id, limit):
-        def get_all_new_public_rooms(txn):
-            sql = """
-                SELECT stream_id, room_id, visibility, appservice_id, network_id
-                FROM public_room_list_stream
-                WHERE stream_id > ? AND stream_id <= ?
-                ORDER BY stream_id ASC
-                LIMIT ?
-            """
-
-            txn.execute(sql, (prev_id, current_id, limit))
-            return txn.fetchall()
-
-        if prev_id == current_id:
-            return defer.succeed([])
-
-        return self.db.runInteraction(
-            "get_all_new_public_rooms", get_all_new_public_rooms
-        )
-
     @defer.inlineCallbacks
     def block_room(self, room_id, user_id):
         """Marks the room as blocked. Can be called multiple times.
diff --git a/tests/replication/tcp/streams/_base.py b/tests/replication/tcp/streams/_base.py
index e96ad4ca4e..a755fe2879 100644
--- a/tests/replication/tcp/streams/_base.py
+++ b/tests/replication/tcp/streams/_base.py
@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 from mock import Mock
 
 from synapse.replication.tcp.commands import ReplicateCommand
@@ -29,19 +30,37 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
         # build a replication server
         server_factory = ReplicationStreamProtocolFactory(self.hs)
         self.streamer = server_factory.streamer
-        server = server_factory.buildProtocol(None)
+        self.server = server_factory.buildProtocol(None)
 
-        # build a replication client, with a dummy handler
-        handler_factory = Mock()
-        self.test_handler = TestReplicationClientHandler()
-        self.test_handler.factory = handler_factory
+        self.test_handler = Mock(wraps=TestReplicationClientHandler())
         self.client = ClientReplicationStreamProtocol(
-            "client", "test", clock, self.test_handler
+            hs, "client", "test", clock, self.test_handler,
         )
 
-        # wire them together
-        self.client.makeConnection(FakeTransport(server, reactor))
-        server.makeConnection(FakeTransport(self.client, reactor))
+        self._client_transport = None
+        self._server_transport = None
+
+    def reconnect(self):
+        if self._client_transport:
+            self.client.close()
+
+        if self._server_transport:
+            self.server.close()
+
+        self._client_transport = FakeTransport(self.server, self.reactor)
+        self.client.makeConnection(self._client_transport)
+
+        self._server_transport = FakeTransport(self.client, self.reactor)
+        self.server.makeConnection(self._server_transport)
+
+    def disconnect(self):
+        if self._client_transport:
+            self._client_transport = None
+            self.client.close()
+
+        if self._server_transport:
+            self._server_transport = None
+            self.server.close()
 
     def replicate(self):
         """Tell the master side of replication that something has happened, and then
@@ -50,19 +69,24 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
         self.streamer.on_notifier_poke()
         self.pump(0.1)
 
-    def replicate_stream(self, stream, token="NOW"):
+    def replicate_stream(self):
         """Make the client end a REPLICATE command to set up a subscription to a stream"""
-        self.client.send_command(ReplicateCommand(stream, token))
+        self.client.send_command(ReplicateCommand())
 
 
 class TestReplicationClientHandler(object):
     """Drop-in for ReplicationClientHandler which just collects RDATA rows"""
 
     def __init__(self):
-        self.received_rdata_rows = []
+        self.streams = set()
+        self._received_rdata_rows = []
 
     def get_streams_to_replicate(self):
-        return {}
+        positions = {s: 0 for s in self.streams}
+        for stream, token, _ in self._received_rdata_rows:
+            if stream in self.streams:
+                positions[stream] = max(token, positions.get(stream, 0))
+        return positions
 
     def get_currently_syncing_users(self):
         return []
@@ -73,6 +97,9 @@ class TestReplicationClientHandler(object):
     def finished_connecting(self):
         pass
 
+    async def on_position(self, stream_name, token):
+        """Called when we get new position data."""
+
     async def on_rdata(self, stream_name, token, rows):
         for r in rows:
-            self.received_rdata_rows.append((stream_name, token, r))
+            self._received_rdata_rows.append((stream_name, token, r))
diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py
index fa2493cad6..0ec0825a0e 100644
--- a/tests/replication/tcp/streams/test_receipts.py
+++ b/tests/replication/tcp/streams/test_receipts.py
@@ -17,30 +17,64 @@ from synapse.replication.tcp.streams._base import ReceiptsStream
 from tests.replication.tcp.streams._base import BaseStreamTestCase
 
 USER_ID = "@feeling:blue"
-ROOM_ID = "!room:blue"
-EVENT_ID = "$event:blue"
 
 
 class ReceiptsStreamTestCase(BaseStreamTestCase):
     def test_receipt(self):
+        self.reconnect()
+
         # make the client subscribe to the receipts stream
-        self.replicate_stream("receipts", "NOW")
+        self.replicate_stream()
+        self.test_handler.streams.add("receipts")
 
         # tell the master to send a new receipt
         self.get_success(
             self.hs.get_datastore().insert_receipt(
-                ROOM_ID, "m.read", USER_ID, [EVENT_ID], {"a": 1}
+                "!room:blue", "m.read", USER_ID, ["$event:blue"], {"a": 1}
             )
         )
         self.replicate()
 
         # there should be one RDATA command
-        rdata_rows = self.test_handler.received_rdata_rows
+        self.test_handler.on_rdata.assert_called_once()
+        stream_name, token, rdata_rows = self.test_handler.on_rdata.call_args[0]
+        self.assertEqual(stream_name, "receipts")
         self.assertEqual(1, len(rdata_rows))
-        self.assertEqual(rdata_rows[0][0], "receipts")
-        row = rdata_rows[0][2]  # type: ReceiptsStream.ReceiptsStreamRow
-        self.assertEqual(ROOM_ID, row.room_id)
+        row = rdata_rows[0]  # type: ReceiptsStream.ReceiptsStreamRow
+        self.assertEqual("!room:blue", row.room_id)
         self.assertEqual("m.read", row.receipt_type)
         self.assertEqual(USER_ID, row.user_id)
-        self.assertEqual(EVENT_ID, row.event_id)
+        self.assertEqual("$event:blue", row.event_id)
         self.assertEqual({"a": 1}, row.data)
+
+        # Now let's disconnect and insert some data.
+        self.disconnect()
+
+        self.test_handler.on_rdata.reset_mock()
+
+        self.get_success(
+            self.hs.get_datastore().insert_receipt(
+                "!room2:blue", "m.read", USER_ID, ["$event2:foo"], {"a": 2}
+            )
+        )
+        self.replicate()
+
+        # Nothing should have happened as we are disconnected
+        self.test_handler.on_rdata.assert_not_called()
+
+        self.reconnect()
+        self.pump(0.1)
+
+        # We should now have caught up and get the missing data
+        self.test_handler.on_rdata.assert_called_once()
+        stream_name, token, rdata_rows = self.test_handler.on_rdata.call_args[0]
+        self.assertEqual(stream_name, "receipts")
+        self.assertEqual(token, 3)
+        self.assertEqual(1, len(rdata_rows))
+
+        row = rdata_rows[0]  # type: ReceiptsStream.ReceiptsStreamRow
+        self.assertEqual("!room2:blue", row.room_id)
+        self.assertEqual("m.read", row.receipt_type)
+        self.assertEqual(USER_ID, row.user_id)
+        self.assertEqual("$event2:foo", row.event_id)
+        self.assertEqual({"a": 2}, row.data)
-- 
cgit 1.4.1


From 6ca5e56fd12bbccb6b3ab43ed7c0281e4822274a Mon Sep 17 00:00:00 2001
From: Aaron Raimist 
Date: Wed, 25 Mar 2020 12:49:34 -0500
Subject: Remove unused captcha_bypass_secret option (#7137)

Signed-off-by: Aaron Raimist 
---
 changelog.d/7137.removal  | 1 +
 docs/sample_config.yaml   | 4 ----
 synapse/config/captcha.py | 5 -----
 3 files changed, 1 insertion(+), 9 deletions(-)
 create mode 100644 changelog.d/7137.removal

(limited to 'changelog.d')

diff --git a/changelog.d/7137.removal b/changelog.d/7137.removal
new file mode 100644
index 0000000000..75266a06bb
--- /dev/null
+++ b/changelog.d/7137.removal
@@ -0,0 +1 @@
+Remove nonfunctional `captcha_bypass_secret` option from `homeserver.yaml`.
\ No newline at end of file
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 276e43b732..2ef83646b3 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -872,10 +872,6 @@ media_store_path: "DATADIR/media_store"
 #
 #enable_registration_captcha: false
 
-# A secret key used to bypass the captcha test entirely.
-#
-#captcha_bypass_secret: "YOUR_SECRET_HERE"
-
 # The API endpoint to use for verifying m.login.recaptcha responses.
 #
 #recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py
index f0171bb5b2..56c87fa296 100644
--- a/synapse/config/captcha.py
+++ b/synapse/config/captcha.py
@@ -24,7 +24,6 @@ class CaptchaConfig(Config):
         self.enable_registration_captcha = config.get(
             "enable_registration_captcha", False
         )
-        self.captcha_bypass_secret = config.get("captcha_bypass_secret")
         self.recaptcha_siteverify_api = config.get(
             "recaptcha_siteverify_api",
             "https://www.recaptcha.net/recaptcha/api/siteverify",
@@ -49,10 +48,6 @@ class CaptchaConfig(Config):
         #
         #enable_registration_captcha: false
 
-        # A secret key used to bypass the captcha test entirely.
-        #
-        #captcha_bypass_secret: "YOUR_SECRET_HERE"
-
         # The API endpoint to use for verifying m.login.recaptcha responses.
         #
         #recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
-- 
cgit 1.4.1


From 1c1242acba9694a3a4b1eb3b14ec0bac11ee4ff8 Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Thu, 26 Mar 2020 07:39:34 -0400
Subject: Validate that the session is not modified during UI-Auth (#7068)

---
 changelog.d/7068.bugfix                  |  1 +
 synapse/handlers/auth.py                 | 37 +++++++++++++++--
 synapse/rest/client/v2_alpha/account.py  | 11 ++++--
 synapse/rest/client/v2_alpha/devices.py  |  4 +-
 synapse/rest/client/v2_alpha/keys.py     |  2 +-
 synapse/rest/client/v2_alpha/register.py |  5 ++-
 tests/rest/client/v2_alpha/test_auth.py  | 68 +++++++++++++++++++++++++++++++-
 tests/test_terms_auth.py                 |  3 +-
 8 files changed, 117 insertions(+), 14 deletions(-)
 create mode 100644 changelog.d/7068.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7068.bugfix b/changelog.d/7068.bugfix
new file mode 100644
index 0000000000..d1693a7f22
--- /dev/null
+++ b/changelog.d/7068.bugfix
@@ -0,0 +1 @@
+Ensure that a user interactive authentication session is tied to a single request.
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 7860f9625e..2ce1425dfa 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -125,7 +125,11 @@ class AuthHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def validate_user_via_ui_auth(
-        self, requester: Requester, request_body: Dict[str, Any], clientip: str
+        self,
+        requester: Requester,
+        request: SynapseRequest,
+        request_body: Dict[str, Any],
+        clientip: str,
     ):
         """
         Checks that the user is who they claim to be, via a UI auth.
@@ -137,6 +141,8 @@ class AuthHandler(BaseHandler):
         Args:
             requester: The user, as given by the access token
 
+            request: The request sent by the client.
+
             request_body: The body of the request sent by the client
 
             clientip: The IP address of the client.
@@ -172,7 +178,9 @@ class AuthHandler(BaseHandler):
         flows = [[login_type] for login_type in self._supported_login_types]
 
         try:
-            result, params, _ = yield self.check_auth(flows, request_body, clientip)
+            result, params, _ = yield self.check_auth(
+                flows, request, request_body, clientip
+            )
         except LoginError:
             # Update the ratelimite to say we failed (`can_do_action` doesn't raise).
             self._failed_uia_attempts_ratelimiter.can_do_action(
@@ -211,7 +219,11 @@ class AuthHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def check_auth(
-        self, flows: List[List[str]], clientdict: Dict[str, Any], clientip: str
+        self,
+        flows: List[List[str]],
+        request: SynapseRequest,
+        clientdict: Dict[str, Any],
+        clientip: str,
     ):
         """
         Takes a dictionary sent by the client in the login / registration
@@ -231,6 +243,8 @@ class AuthHandler(BaseHandler):
                    strings representing auth-types. At least one full
                    flow must be completed in order for auth to be successful.
 
+            request: The request sent by the client.
+
             clientdict: The dictionary from the client root level, not the
                         'auth' key: this method prompts for auth if none is sent.
 
@@ -270,13 +284,27 @@ class AuthHandler(BaseHandler):
             # email auth link on there). It's probably too open to abuse
             # because it lets unauthenticated clients store arbitrary objects
             # on a homeserver.
-            # Revisit: Assumimg the REST APIs do sensible validation, the data
+            # Revisit: Assuming the REST APIs do sensible validation, the data
             # isn't arbitrary.
             session["clientdict"] = clientdict
             self._save_session(session)
         elif "clientdict" in session:
             clientdict = session["clientdict"]
 
+        # Ensure that the queried operation does not vary between stages of
+        # the UI authentication session. This is done by generating a stable
+        # comparator based on the URI, method, and body (minus the auth dict)
+        # and storing it during the initial query. Subsequent queries ensure
+        # that this comparator has not changed.
+        comparator = (request.uri, request.method, clientdict)
+        if "ui_auth" not in session:
+            session["ui_auth"] = comparator
+        elif session["ui_auth"] != comparator:
+            raise SynapseError(
+                403,
+                "Requested operation has changed during the UI authentication session.",
+            )
+
         if not authdict:
             raise InteractiveAuthIncompleteError(
                 self._auth_dict_for_flows(flows, session)
@@ -322,6 +350,7 @@ class AuthHandler(BaseHandler):
                     creds,
                     list(clientdict),
                 )
+
                 return creds, clientdict, session["id"]
 
         ret = self._auth_dict_for_flows(flows, session)
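
Read in isolation, the comparator check added above behaves roughly as follows; this is a simplified sketch in which a plain dict stands in for the stored UI-auth session and `ValueError` stands in for `SynapseError(403, ...)`:

    def check_operation_unchanged(session, uri, method, clientdict):
        """Sketch of the comparator check added in this patch."""
        comparator = (uri, method, clientdict)
        if "ui_auth" not in session:
            session["ui_auth"] = comparator  # first request: remember the operation
        elif session["ui_auth"] != comparator:
            raise ValueError("operation changed during the UI auth session")

    session = {}
    check_operation_unchanged(session, b"/register", b"POST", {"username": "user"})
    try:
        # Same session, different body: must be rejected.
        check_operation_unchanged(session, b"/register", b"POST", {"username": "evil"})
    except ValueError:
        pass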
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 631cc74cb4..b1249b664c 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -234,13 +234,16 @@ class PasswordRestServlet(RestServlet):
         if self.auth.has_access_token(request):
             requester = await self.auth.get_user_by_req(request)
             params = await self.auth_handler.validate_user_via_ui_auth(
-                requester, body, self.hs.get_ip_from_request(request)
+                requester, request, body, self.hs.get_ip_from_request(request),
             )
             user_id = requester.user.to_string()
         else:
             requester = None
             result, params, _ = await self.auth_handler.check_auth(
-                [[LoginType.EMAIL_IDENTITY]], body, self.hs.get_ip_from_request(request)
+                [[LoginType.EMAIL_IDENTITY]],
+                request,
+                body,
+                self.hs.get_ip_from_request(request),
             )
 
             if LoginType.EMAIL_IDENTITY in result:
@@ -308,7 +311,7 @@ class DeactivateAccountRestServlet(RestServlet):
             return 200, {}
 
         await self.auth_handler.validate_user_via_ui_auth(
-            requester, body, self.hs.get_ip_from_request(request)
+            requester, request, body, self.hs.get_ip_from_request(request),
         )
         result = await self._deactivate_account_handler.deactivate_account(
             requester.user.to_string(), erase, id_server=body.get("id_server")
@@ -656,7 +659,7 @@ class ThreepidAddRestServlet(RestServlet):
         assert_valid_client_secret(client_secret)
 
         await self.auth_handler.validate_user_via_ui_auth(
-            requester, body, self.hs.get_ip_from_request(request)
+            requester, request, body, self.hs.get_ip_from_request(request),
         )
 
         validation_session = await self.identity_handler.validate_threepid_session(
diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py
index 94ff73f384..119d979052 100644
--- a/synapse/rest/client/v2_alpha/devices.py
+++ b/synapse/rest/client/v2_alpha/devices.py
@@ -81,7 +81,7 @@ class DeleteDevicesRestServlet(RestServlet):
         assert_params_in_dict(body, ["devices"])
 
         await self.auth_handler.validate_user_via_ui_auth(
-            requester, body, self.hs.get_ip_from_request(request)
+            requester, request, body, self.hs.get_ip_from_request(request),
         )
 
         await self.device_handler.delete_devices(
@@ -127,7 +127,7 @@ class DeviceRestServlet(RestServlet):
                 raise
 
         await self.auth_handler.validate_user_via_ui_auth(
-            requester, body, self.hs.get_ip_from_request(request)
+            requester, request, body, self.hs.get_ip_from_request(request),
         )
 
         await self.device_handler.delete_device(requester.user.to_string(), device_id)
diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py
index f7ed4daf90..5eb7ef35a4 100644
--- a/synapse/rest/client/v2_alpha/keys.py
+++ b/synapse/rest/client/v2_alpha/keys.py
@@ -263,7 +263,7 @@ class SigningKeyUploadServlet(RestServlet):
         body = parse_json_object_from_request(request)
 
         await self.auth_handler.validate_user_via_ui_auth(
-            requester, body, self.hs.get_ip_from_request(request)
+            requester, request, body, self.hs.get_ip_from_request(request),
         )
 
         result = await self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body)
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index a09189b1b4..6963d79310 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -499,7 +499,10 @@ class RegisterRestServlet(RestServlet):
             )
 
         auth_result, params, session_id = await self.auth_handler.check_auth(
-            self._registration_flows, body, self.hs.get_ip_from_request(request)
+            self._registration_flows,
+            request,
+            body,
+            self.hs.get_ip_from_request(request),
         )
 
         # Check that we're not trying to register a denied 3pid.
diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py
index b6df1396ad..624bf5ada2 100644
--- a/tests/rest/client/v2_alpha/test_auth.py
+++ b/tests/rest/client/v2_alpha/test_auth.py
@@ -104,7 +104,7 @@ class FallbackAuthTests(unittest.HomeserverTestCase):
         )
         self.render(request)
 
-        # Now we should have fufilled a complete auth flow, including
+        # Now we should have fulfilled a complete auth flow, including
         # the recaptcha fallback step, we can then send a
         # request to the register API with the session in the authdict.
         request, channel = self.make_request(
@@ -115,3 +115,69 @@ class FallbackAuthTests(unittest.HomeserverTestCase):
 
         # We're given a registered user.
         self.assertEqual(channel.json_body["user_id"], "@user:test")
+
+    def test_cannot_change_operation(self):
+        """
+        The initially requested operation cannot be modified during the user interactive authentication session.
+        """
+
+        # Make the initial request to register. (Later on a different password
+        # will be used.)
+        request, channel = self.make_request(
+            "POST",
+            "register",
+            {"username": "user", "type": "m.login.password", "password": "bar"},
+        )
+        self.render(request)
+
+        # Returns a 401 as per the spec
+        self.assertEqual(request.code, 401)
+        # Grab the session
+        session = channel.json_body["session"]
+        # Assert our configured public key is being given
+        self.assertEqual(
+            channel.json_body["params"]["m.login.recaptcha"]["public_key"], "brokencake"
+        )
+
+        request, channel = self.make_request(
+            "GET", "auth/m.login.recaptcha/fallback/web?session=" + session
+        )
+        self.render(request)
+        self.assertEqual(request.code, 200)
+
+        request, channel = self.make_request(
+            "POST",
+            "auth/m.login.recaptcha/fallback/web?session="
+            + session
+            + "&g-recaptcha-response=a",
+        )
+        self.render(request)
+        self.assertEqual(request.code, 200)
+
+        # The recaptcha handler is called with the response given
+        attempts = self.recaptcha_checker.recaptcha_attempts
+        self.assertEqual(len(attempts), 1)
+        self.assertEqual(attempts[0][0]["response"], "a")
+
+        # also complete the dummy auth
+        request, channel = self.make_request(
+            "POST", "register", {"auth": {"session": session, "type": "m.login.dummy"}}
+        )
+        self.render(request)
+
+        # Now we should have fulfilled a complete auth flow, including
+        # the recaptcha fallback step. Make the initial request again, but
+        # with a different password. This causes the request to fail since the
+        # operation was modified during the UI auth session.
+        request, channel = self.make_request(
+            "POST",
+            "register",
+            {
+                "username": "user",
+                "type": "m.login.password",
+                "password": "foo",  # Note this doesn't match the original request.
+                "auth": {"session": session},
+            },
+        )
+        self.render(request)
+        self.assertEqual(channel.code, 403)
diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py
index 5ec5d2b358..a3f98a1412 100644
--- a/tests/test_terms_auth.py
+++ b/tests/test_terms_auth.py
@@ -53,7 +53,8 @@ class TermsTestCase(unittest.HomeserverTestCase):
 
     def test_ui_auth(self):
         # Do a UI auth request
-        request, channel = self.make_request(b"POST", self.url, b"{}")
+        request_data = json.dumps({"username": "kermit", "password": "monkey"})
+        request, channel = self.make_request(b"POST", self.url, request_data)
         self.render(request)
 
         self.assertEquals(channel.result["code"], b"401", channel.result)
-- 
cgit 1.4.1


From e8e2ddb60ae11db488f159901d918cb159695912 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Thu, 26 Mar 2020 17:51:13 +0100
Subject: Allow server admins to define and enforce a password policy
 (MSC2000). (#7118)

---
 changelog.d/7118.feature                           |   1 +
 docs/sample_config.yaml                            |  35 ++++
 synapse/api/errors.py                              |  21 +++
 synapse/config/password.py                         |  39 +++++
 synapse/handlers/password_policy.py                |  93 +++++++++++
 synapse/handlers/set_password.py                   |   2 +
 synapse/rest/__init__.py                           |   2 +
 synapse/rest/client/v2_alpha/password_policy.py    |  58 +++++++
 synapse/rest/client/v2_alpha/register.py           |   2 +
 synapse/server.py                                  |   5 +
 tests/rest/client/v2_alpha/test_password_policy.py | 179 +++++++++++++++++++++
 11 files changed, 437 insertions(+)
 create mode 100644 changelog.d/7118.feature
 create mode 100644 synapse/handlers/password_policy.py
 create mode 100644 synapse/rest/client/v2_alpha/password_policy.py
 create mode 100644 tests/rest/client/v2_alpha/test_password_policy.py

(limited to 'changelog.d')

diff --git a/changelog.d/7118.feature b/changelog.d/7118.feature
new file mode 100644
index 0000000000..5cbfd98160
--- /dev/null
+++ b/changelog.d/7118.feature
@@ -0,0 +1 @@
+Allow server admins to define and enforce a password policy (MSC2000).
\ No newline at end of file
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 2ef83646b3..1a1d061759 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1482,6 +1482,41 @@ password_config:
    #
    #pepper: "EVEN_MORE_SECRET"
 
+   # Define and enforce a password policy. Each parameter is optional.
+   # This is an implementation of MSC2000.
+   #
+   policy:
+      # Whether to enforce the password policy.
+      # Defaults to 'false'.
+      #
+      #enabled: true
+
+      # Minimum accepted length for a password.
+      # Defaults to 0.
+      #
+      #minimum_length: 15
+
+      # Whether a password must contain at least one digit.
+      # Defaults to 'false'.
+      #
+      #require_digit: true
+
+      # Whether a password must contain at least one symbol.
+      # A symbol is any character that's not a number or a letter.
+      # Defaults to 'false'.
+      #
+      #require_symbol: true
+
+      # Whether a password must contain at least one lowercase letter.
+      # Defaults to 'false'.
+      #
+      #require_lowercase: true
+
+      # Whether a password must contain at least one uppercase letter.
+      # Defaults to 'false'.
+      #
+      #require_uppercase: true
+
 
 # Configuration for sending emails from Synapse.
 #
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 616942b057..11da016ac5 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -64,6 +64,13 @@ class Codes(object):
     INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
     WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
     EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
+    PASSWORD_TOO_SHORT = "M_PASSWORD_TOO_SHORT"
+    PASSWORD_NO_DIGIT = "M_PASSWORD_NO_DIGIT"
+    PASSWORD_NO_UPPERCASE = "M_PASSWORD_NO_UPPERCASE"
+    PASSWORD_NO_LOWERCASE = "M_PASSWORD_NO_LOWERCASE"
+    PASSWORD_NO_SYMBOL = "M_PASSWORD_NO_SYMBOL"
+    PASSWORD_IN_DICTIONARY = "M_PASSWORD_IN_DICTIONARY"
+    WEAK_PASSWORD = "M_WEAK_PASSWORD"
     INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
     USER_DEACTIVATED = "M_USER_DEACTIVATED"
     BAD_ALIAS = "M_BAD_ALIAS"
@@ -439,6 +446,20 @@ class IncompatibleRoomVersionError(SynapseError):
         return cs_error(self.msg, self.errcode, room_version=self._room_version)
 
 
+class PasswordRefusedError(SynapseError):
+    """A password has been refused, either during password reset/change or registration.
+    """
+
+    def __init__(
+        self,
+        msg="This password doesn't comply with the server's policy",
+        errcode=Codes.WEAK_PASSWORD,
+    ):
+        super(PasswordRefusedError, self).__init__(
+            code=400, msg=msg, errcode=errcode,
+        )
+
+
 class RequestSendFailed(RuntimeError):
     """Sending a HTTP request over federation failed due to not being able to
     talk to the remote server for some reason.
diff --git a/synapse/config/password.py b/synapse/config/password.py
index 2a634ac751..9c0ea8c30a 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -31,6 +31,10 @@ class PasswordConfig(Config):
         self.password_localdb_enabled = password_config.get("localdb_enabled", True)
         self.password_pepper = password_config.get("pepper", "")
 
+        # Password policy
+        self.password_policy = password_config.get("policy") or {}
+        self.password_policy_enabled = self.password_policy.get("enabled", False)
+
     def generate_config_section(self, config_dir_path, server_name, **kwargs):
         return """\
         password_config:
@@ -48,4 +52,39 @@ class PasswordConfig(Config):
            # DO NOT CHANGE THIS AFTER INITIAL SETUP!
            #
            #pepper: "EVEN_MORE_SECRET"
+
+           # Define and enforce a password policy. Each parameter is optional.
+           # This is an implementation of MSC2000.
+           #
+           policy:
+              # Whether to enforce the password policy.
+              # Defaults to 'false'.
+              #
+              #enabled: true
+
+              # Minimum accepted length for a password.
+              # Defaults to 0.
+              #
+              #minimum_length: 15
+
+              # Whether a password must contain at least one digit.
+              # Defaults to 'false'.
+              #
+              #require_digit: true
+
+              # Whether a password must contain at least one symbol.
+              # A symbol is any character that's not a number or a letter.
+              # Defaults to 'false'.
+              #
+              #require_symbol: true
+
+              # Whether a password must contain at least one lowercase letter.
+              # Defaults to 'false'.
+              #
+              #require_lowercase: true
+
+              # Whether a password must contain at least one uppercase letter.
+              # Defaults to 'false'.
+              #
+              #require_uppercase: true
         """
diff --git a/synapse/handlers/password_policy.py b/synapse/handlers/password_policy.py
new file mode 100644
index 0000000000..d06b110269
--- /dev/null
+++ b/synapse/handlers/password_policy.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import re
+
+from synapse.api.errors import Codes, PasswordRefusedError
+
+logger = logging.getLogger(__name__)
+
+
+class PasswordPolicyHandler(object):
+    def __init__(self, hs):
+        self.policy = hs.config.password_policy
+        self.enabled = hs.config.password_policy_enabled
+
+        # Regexps for the spec'd policy parameters.
+        self.regexp_digit = re.compile("[0-9]")
+        self.regexp_symbol = re.compile("[^a-zA-Z0-9]")
+        self.regexp_uppercase = re.compile("[A-Z]")
+        self.regexp_lowercase = re.compile("[a-z]")
+
+    def validate_password(self, password):
+        """Checks whether a given password complies with the server's policy.
+
+        Args:
+            password (str): The password to check against the server's policy.
+
+        Raises:
+            PasswordRefusedError: The password doesn't comply with the server's policy.
+        """
+
+        if not self.enabled:
+            return
+
+        minimum_accepted_length = self.policy.get("minimum_length", 0)
+        if len(password) < minimum_accepted_length:
+            raise PasswordRefusedError(
+                msg=(
+                    "The password must be at least %d characters long"
+                    % minimum_accepted_length
+                ),
+                errcode=Codes.PASSWORD_TOO_SHORT,
+            )
+
+        if (
+            self.policy.get("require_digit", False)
+            and self.regexp_digit.search(password) is None
+        ):
+            raise PasswordRefusedError(
+                msg="The password must include at least one digit",
+                errcode=Codes.PASSWORD_NO_DIGIT,
+            )
+
+        if (
+            self.policy.get("require_symbol", False)
+            and self.regexp_symbol.search(password) is None
+        ):
+            raise PasswordRefusedError(
+                msg="The password must include at least one symbol",
+                errcode=Codes.PASSWORD_NO_SYMBOL,
+            )
+
+        if (
+            self.policy.get("require_uppercase", False)
+            and self.regexp_uppercase.search(password) is None
+        ):
+            raise PasswordRefusedError(
+                msg="The password must include at least one uppercase letter",
+                errcode=Codes.PASSWORD_NO_UPPERCASE,
+            )
+
+        if (
+            self.policy.get("require_lowercase", False)
+            and self.regexp_lowercase.search(password) is None
+        ):
+            raise PasswordRefusedError(
+                msg="The password must include at least one lowercase letter",
+                errcode=Codes.PASSWORD_NO_LOWERCASE,
+            )
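
The handler applies its rules in a fixed order (length, then digit, symbol, uppercase, lowercase), which the tests for this change rely on. The same ordering can be exercised standalone with plain `re`; the helper below is illustrative and not the handler class itself:

    import re

    def first_policy_failure(password, policy):
        """Return the first rule the password fails, or None (sketch only)."""
        if len(password) < policy.get("minimum_length", 0):
            return "too_short"
        if policy.get("require_digit") and not re.search("[0-9]", password):
            return "no_digit"
        if policy.get("require_symbol") and not re.search("[^a-zA-Z0-9]", password):
            return "no_symbol"
        if policy.get("require_uppercase") and not re.search("[A-Z]", password):
            return "no_uppercase"
        if policy.get("require_lowercase") and not re.search("[a-z]", password):
            return "no_lowercase"
        return None

    policy = {"minimum_length": 10, "require_digit": True, "require_symbol": True}
    assert first_policy_failure("shorty", policy) == "too_short"
    assert first_policy_failure("longerpassword", policy) == "no_digit"
    assert first_policy_failure("l0ngerpassword!", policy) is None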
diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
index 12657ca698..7d1263caf2 100644
--- a/synapse/handlers/set_password.py
+++ b/synapse/handlers/set_password.py
@@ -32,6 +32,7 @@ class SetPasswordHandler(BaseHandler):
         super(SetPasswordHandler, self).__init__(hs)
         self._auth_handler = hs.get_auth_handler()
         self._device_handler = hs.get_device_handler()
+        self._password_policy_handler = hs.get_password_policy_handler()
 
     @defer.inlineCallbacks
     def set_password(
@@ -44,6 +45,7 @@ class SetPasswordHandler(BaseHandler):
         if not self.hs.config.password_localdb_enabled:
             raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN)
 
+        self._password_policy_handler.validate_password(new_password)
         password_hash = yield self._auth_handler.hash(new_password)
 
         try:
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 4a1fc2ec2b..46e458e95b 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -41,6 +41,7 @@ from synapse.rest.client.v2_alpha import (
     keys,
     notifications,
     openid,
+    password_policy,
     read_marker,
     receipts,
     register,
@@ -118,6 +119,7 @@ class ClientRestResource(JsonResource):
         capabilities.register_servlets(hs, client_resource)
         account_validity.register_servlets(hs, client_resource)
         relations.register_servlets(hs, client_resource)
+        password_policy.register_servlets(hs, client_resource)
 
         # moving to /_synapse/admin
         synapse.rest.admin.register_servlets_for_client_rest_resource(
diff --git a/synapse/rest/client/v2_alpha/password_policy.py b/synapse/rest/client/v2_alpha/password_policy.py
new file mode 100644
index 0000000000..968403cca4
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/password_policy.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.http.servlet import RestServlet
+
+from ._base import client_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class PasswordPolicyServlet(RestServlet):
+    PATTERNS = client_patterns("/password_policy$")
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(PasswordPolicyServlet, self).__init__()
+
+        self.policy = hs.config.password_policy
+        self.enabled = hs.config.password_policy_enabled
+
+    def on_GET(self, request):
+        if not self.enabled or not self.policy:
+            return (200, {})
+
+        policy = {}
+
+        for param in [
+            "minimum_length",
+            "require_digit",
+            "require_symbol",
+            "require_lowercase",
+            "require_uppercase",
+        ]:
+            if param in self.policy:
+                policy["m.%s" % param] = self.policy[param]
+
+        return (200, policy)
+
+
+def register_servlets(hs, http_server):
+    PasswordPolicyServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 6963d79310..66fc8ec179 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -373,6 +373,7 @@ class RegisterRestServlet(RestServlet):
         self.room_member_handler = hs.get_room_member_handler()
         self.macaroon_gen = hs.get_macaroon_generator()
         self.ratelimiter = hs.get_registration_ratelimiter()
+        self.password_policy_handler = hs.get_password_policy_handler()
         self.clock = hs.get_clock()
 
         self._registration_flows = _calculate_registration_flows(
@@ -420,6 +421,7 @@ class RegisterRestServlet(RestServlet):
                 or len(body["password"]) > 512
             ):
                 raise SynapseError(400, "Invalid password")
+            self.password_policy_handler.validate_password(body["password"])
 
         desired_username = None
         if "username" in body:
diff --git a/synapse/server.py b/synapse/server.py
index 9426eb1672..d0d80e8ac5 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -66,6 +66,7 @@ from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerH
 from synapse.handlers.initial_sync import InitialSyncHandler
 from synapse.handlers.message import EventCreationHandler, MessageHandler
 from synapse.handlers.pagination import PaginationHandler
+from synapse.handlers.password_policy import PasswordPolicyHandler
 from synapse.handlers.presence import PresenceHandler
 from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler
 from synapse.handlers.read_marker import ReadMarkerHandler
@@ -199,6 +200,7 @@ class HomeServer(object):
         "account_validity_handler",
         "saml_handler",
         "event_client_serializer",
+        "password_policy_handler",
         "storage",
         "replication_streamer",
     ]
@@ -535,6 +537,9 @@ class HomeServer(object):
     def build_event_client_serializer(self):
         return EventClientSerializer(self)
 
+    def build_password_policy_handler(self):
+        return PasswordPolicyHandler(self)
+
     def build_storage(self) -> Storage:
         return Storage(self, self.datastores)
 
diff --git a/tests/rest/client/v2_alpha/test_password_policy.py b/tests/rest/client/v2_alpha/test_password_policy.py
new file mode 100644
index 0000000000..c57072f50c
--- /dev/null
+++ b/tests/rest/client/v2_alpha/test_password_policy.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+from synapse.api.constants import LoginType
+from synapse.api.errors import Codes
+from synapse.rest import admin
+from synapse.rest.client.v1 import login
+from synapse.rest.client.v2_alpha import account, password_policy, register
+
+from tests import unittest
+
+
+class PasswordPolicyTestCase(unittest.HomeserverTestCase):
+    """Tests the password policy feature and its compliance with MSC2000.
+
+    When validating a password, Synapse does the necessary checks in this order:
+
+        1. Password is long enough
+        2. Password contains digit(s)
+        3. Password contains symbol(s)
+        4. Password contains uppercase letter(s)
+        5. Password contains lowercase letter(s)
+
+    For each test below that checks whether a password triggers the right error code,
+    that test provides a password good enough to pass the previous tests, but not the
+    one it is currently testing (nor any test that comes afterward).
+    """
+
+    servlets = [
+        admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+        register.register_servlets,
+        password_policy.register_servlets,
+        account.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        self.register_url = "/_matrix/client/r0/register"
+        self.policy = {
+            "enabled": True,
+            "minimum_length": 10,
+            "require_digit": True,
+            "require_symbol": True,
+            "require_lowercase": True,
+            "require_uppercase": True,
+        }
+
+        config = self.default_config()
+        config["password_config"] = {
+            "policy": self.policy,
+        }
+
+        hs = self.setup_test_homeserver(config=config)
+        return hs
+
+    def test_get_policy(self):
+        """Tests if the /password_policy endpoint returns the configured policy."""
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/password_policy"
+        )
+        self.render(request)
+
+        self.assertEqual(channel.code, 200, channel.result)
+        self.assertEqual(
+            channel.json_body,
+            {
+                "m.minimum_length": 10,
+                "m.require_digit": True,
+                "m.require_symbol": True,
+                "m.require_lowercase": True,
+                "m.require_uppercase": True,
+            },
+            channel.result,
+        )
+
+    def test_password_too_short(self):
+        request_data = json.dumps({"username": "kermit", "password": "shorty"})
+        request, channel = self.make_request("POST", self.register_url, request_data)
+        self.render(request)
+
+        self.assertEqual(channel.code, 400, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"], Codes.PASSWORD_TOO_SHORT, channel.result,
+        )
+
+    def test_password_no_digit(self):
+        request_data = json.dumps({"username": "kermit", "password": "longerpassword"})
+        request, channel = self.make_request("POST", self.register_url, request_data)
+        self.render(request)
+
+        self.assertEqual(channel.code, 400, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT, channel.result,
+        )
+
+    def test_password_no_symbol(self):
+        request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword"})
+        request, channel = self.make_request("POST", self.register_url, request_data)
+        self.render(request)
+
+        self.assertEqual(channel.code, 400, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"], Codes.PASSWORD_NO_SYMBOL, channel.result,
+        )
+
+    def test_password_no_uppercase(self):
+        request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword!"})
+        request, channel = self.make_request("POST", self.register_url, request_data)
+        self.render(request)
+
+        self.assertEqual(channel.code, 400, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"], Codes.PASSWORD_NO_UPPERCASE, channel.result,
+        )
+
+    def test_password_no_lowercase(self):
+        request_data = json.dumps({"username": "kermit", "password": "L0NGERPASSWORD!"})
+        request, channel = self.make_request("POST", self.register_url, request_data)
+        self.render(request)
+
+        self.assertEqual(channel.code, 400, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"], Codes.PASSWORD_NO_LOWERCASE, channel.result,
+        )
+
+    def test_password_compliant(self):
+        request_data = json.dumps({"username": "kermit", "password": "L0ngerpassword!"})
+        request, channel = self.make_request("POST", self.register_url, request_data)
+        self.render(request)
+
+        # Getting a 401 here means the password has passed validation and the server has
+        # responded with a list of registration flows.
+        self.assertEqual(channel.code, 401, channel.result)
+
+    def test_password_change(self):
+        """This doesn't test every possible use case, only that hitting /account/password
+        triggers the password validation code.
+        """
+        compliant_password = "C0mpl!antpassword"
+        not_compliant_password = "notcompliantpassword"
+
+        user_id = self.register_user("kermit", compliant_password)
+        tok = self.login("kermit", compliant_password)
+
+        request_data = json.dumps(
+            {
+                "new_password": not_compliant_password,
+                "auth": {
+                    "password": compliant_password,
+                    "type": LoginType.PASSWORD,
+                    "user": user_id,
+                },
+            }
+        )
+        request, channel = self.make_request(
+            "POST",
+            "/_matrix/client/r0/account/password",
+            request_data,
+            access_token=tok,
+        )
+        self.render(request)
+
+        self.assertEqual(channel.code, 400, channel.result)
+        self.assertEqual(channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT)
-- 
cgit 1.4.1


From 060e7dce09ae2197f29811769b13db30ed340211 Mon Sep 17 00:00:00 2001
From: Jason Robinson 
Date: Thu, 26 Mar 2020 19:02:35 +0200
Subject: Allow RedirectResponse in SAML response handler

Allow custom SAML handlers to redirect after processing an auth response.

Fixes #7149

Signed-off-by: Jason Robinson 
---
 changelog.d/7151.bugfix          | 1 +
 synapse/handlers/saml_handler.py | 4 ++++
 2 files changed, 5 insertions(+)
 create mode 100644 changelog.d/7151.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7151.bugfix b/changelog.d/7151.bugfix
new file mode 100644
index 0000000000..69cde9351d
--- /dev/null
+++ b/changelog.d/7151.bugfix
@@ -0,0 +1 @@
+Allow custom SAML handlers to redirect after processing an auth response.
diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py
index 72c109981b..dc04b53f43 100644
--- a/synapse/handlers/saml_handler.py
+++ b/synapse/handlers/saml_handler.py
@@ -26,6 +26,7 @@ from synapse.config import ConfigError
 from synapse.http.server import finish_request
 from synapse.http.servlet import parse_string
 from synapse.module_api import ModuleApi
+from synapse.module_api.errors import RedirectException
 from synapse.types import (
     UserID,
     map_username_to_mxid_localpart,
@@ -119,6 +120,9 @@ class SamlHandler:
 
         try:
             user_id = await self._map_saml_response_to_user(resp_bytes, relay_state)
+        except RedirectException:
+            # Re-raise so the redirect requested by the SAML module is honoured
+            raise
         except Exception as e:
             # If decoding the response or mapping it to a user failed, then log the
             # error and tell the user that something went wrong.
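
A custom mapping provider that benefits from this change might look roughly like the sketch below. The provider class, its method signature, and the `RedirectException` argument are illustrative assumptions, not APIs confirmed by this patch:

    from synapse.module_api.errors import RedirectException

    class ConsentGatingSamlMapper(object):
        """Hypothetical provider: redirect users who have not yet consented."""

        def saml_response_to_user_attributes(self, saml_response, failures):
            if "urn:consent" not in saml_response.ava:
                # With this patch the handler re-raises this redirect instead
                # of turning it into a generic error page.
                raise RedirectException(b"https://idp.example.com/consent")
            return {
                "mxid_localpart": saml_response.ava["uid"][0],
                "displayname": None,
            }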
-- 
cgit 1.4.1


From 825fb5d0a5699fb5b5eef9a8c2170d0c76158001 Mon Sep 17 00:00:00 2001
From: Nektarios Katakis 
Date: Thu, 26 Mar 2020 17:13:14 +0000
Subject: Don't default to an invalid sqlite config if no database
 configuration is provided (#6573)

---
 changelog.d/6573.bugfix    |  1 +
 synapse/config/database.py | 69 +++++++++++++++++++++++++++++++---------------
 2 files changed, 48 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/6573.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/6573.bugfix b/changelog.d/6573.bugfix
new file mode 100644
index 0000000000..1bb8014db7
--- /dev/null
+++ b/changelog.d/6573.bugfix
@@ -0,0 +1 @@
+Don't attempt to use an invalid sqlite config if no database configuration is provided. Contributed by @nekatak.
diff --git a/synapse/config/database.py b/synapse/config/database.py
index b8ab2f86ac..c27fef157b 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -20,6 +20,11 @@ from synapse.config._base import Config, ConfigError
 
 logger = logging.getLogger(__name__)
 
+NON_SQLITE_DATABASE_PATH_WARNING = """\
+Ignoring 'database_path' setting: not using a sqlite3 database.
+--------------------------------------------------------------------------------
+"""
+
 DEFAULT_CONFIG = """\
 ## Database ##
 
@@ -105,6 +110,11 @@ class DatabaseConnectionConfig:
 class DatabaseConfig(Config):
     section = "database"
 
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        self.databases = []
+
     def read_config(self, config, **kwargs):
         self.event_cache_size = self.parse_size(config.get("event_cache_size", "10K"))
 
@@ -125,12 +135,13 @@ class DatabaseConfig(Config):
 
         multi_database_config = config.get("databases")
         database_config = config.get("database")
+        database_path = config.get("database_path")
 
         if multi_database_config and database_config:
             raise ConfigError("Can't specify both 'database' and 'datbases' in config")
 
         if multi_database_config:
-            if config.get("database_path"):
+            if database_path:
                 raise ConfigError("Can't specify 'database_path' with 'databases'")
 
             self.databases = [
@@ -138,13 +149,17 @@ class DatabaseConfig(Config):
                 for name, db_conf in multi_database_config.items()
             ]
 
-        else:
-            if database_config is None:
-                database_config = {"name": "sqlite3", "args": {}}
-
+        if database_config:
             self.databases = [DatabaseConnectionConfig("master", database_config)]
 
-            self.set_databasepath(config.get("database_path"))
+        if database_path:
+            if self.databases and self.databases[0].name != "sqlite3":
+                logger.warning(NON_SQLITE_DATABASE_PATH_WARNING)
+                return
+
+            database_config = {"name": "sqlite3", "args": {}}
+            self.databases = [DatabaseConnectionConfig("master", database_config)]
+            self.set_databasepath(database_path)
 
     def generate_config_section(self, data_dir_path, **kwargs):
         return DEFAULT_CONFIG % {
@@ -152,27 +167,37 @@ class DatabaseConfig(Config):
         }
 
     def read_arguments(self, args):
-        self.set_databasepath(args.database_path)
+        """
+        Cases for the CLI input:
+          - If no databases are configured and no database_path is set, raise.
+          - No databases and only database_path available ==> sqlite3 db.
+          - If there are multiple databases and a database_path, raise an error.
+          - If the database set in the config file is sqlite, then
+            overwrite it with the command line argument.
+        """
 
-    def set_databasepath(self, database_path):
-        if database_path is None:
+        if args.database_path is None:
+            if not self.databases:
+                raise ConfigError("No database config provided")
             return
 
-        if database_path != ":memory:":
-            database_path = self.abspath(database_path)
+        if len(self.databases) == 0:
+            database_config = {"name": "sqlite3", "args": {}}
+            self.databases = [DatabaseConnectionConfig("master", database_config)]
+            self.set_databasepath(args.database_path)
+            return
+
+        if self.get_single_database().name == "sqlite3":
+            self.set_databasepath(args.database_path)
+        else:
+            logger.warning(NON_SQLITE_DATABASE_PATH_WARNING)
 
-        # We only support setting a database path if we have a single sqlite3
-        # database.
-        if len(self.databases) != 1:
-            raise ConfigError("Cannot specify 'database_path' with multiple databases")
+    def set_databasepath(self, database_path):
 
-        database = self.get_single_database()
-        if database.config["name"] != "sqlite3":
-            # We don't raise here as we haven't done so before for this case.
-            logger.warn("Ignoring 'database_path' for non-sqlite3 database")
-            return
+        if database_path != ":memory:":
+            database_path = self.abspath(database_path)
 
-        database.config["args"]["database"] = database_path
+        self.databases[0].config["args"]["database"] = database_path
 
     @staticmethod
     def add_arguments(parser):
@@ -187,7 +212,7 @@ class DatabaseConfig(Config):
     def get_single_database(self) -> DatabaseConnectionConfig:
         """Returns the database if there is only one, useful for e.g. tests
         """
-        if len(self.databases) != 1:
+        if not self.databases:
             raise Exception("More than one database exists")
 
         return self.databases[0]
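
The CLI cases described in the `read_arguments` docstring reduce to a small decision function, sketched here with illustrative names rather than the Config class itself:

    def resolve_databases(cli_database_path, configured_dbs):
        """Illustrative reduction of the read_arguments cases above."""
        if cli_database_path is None:
            if not configured_dbs:
                raise ValueError("No database config provided")
            return configured_dbs        # leave the config untouched
        if not configured_dbs:
            return ["sqlite3"]           # a CLI path alone implies sqlite3
        if configured_dbs[0] == "sqlite3":
            return ["sqlite3"]           # the path is applied to the sqlite config
        return configured_dbs            # non-sqlite: path ignored, with a warning

    assert resolve_databases("/data/homeserver.db", []) == ["sqlite3"]
    assert resolve_databases(None, ["psycopg2"]) == ["psycopg2"]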
-- 
cgit 1.4.1


From 55ca6cf88cee15519cd094f60c92ab959973e4c6 Mon Sep 17 00:00:00 2001
From: Jason Robinson 
Date: Thu, 26 Mar 2020 20:35:50 +0200
Subject: Update changelog.d/7151.bugfix

Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/7151.bugfix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/7151.bugfix b/changelog.d/7151.bugfix
index 69cde9351d..8aaa2dc659 100644
--- a/changelog.d/7151.bugfix
+++ b/changelog.d/7151.bugfix
@@ -1 +1 @@
-Allow custom SAML handlers to redirect after processing an auth response.
+Fix error page being shown when a custom SAML handler attempted to redirect when processing an auth response.
-- 
cgit 1.4.1


From fa4f12102d52b75d252d9209b45251d2b1591fdf Mon Sep 17 00:00:00 2001
From: Patrick Cloke 
Date: Thu, 26 Mar 2020 15:05:26 -0400
Subject: Refactor the CAS code (move the logic out of the REST layer to a
 handler) (#7136)

---
 changelog.d/7136.misc           |   1 +
 synapse/handlers/cas_handler.py | 204 ++++++++++++++++++++++++++++++++++++++++
 synapse/rest/client/v1/login.py | 171 ++++-----------------------------
 synapse/server.py               |   5 +
 tox.ini                         |   1 +
 5 files changed, 227 insertions(+), 155 deletions(-)
 create mode 100644 changelog.d/7136.misc
 create mode 100644 synapse/handlers/cas_handler.py

(limited to 'changelog.d')

diff --git a/changelog.d/7136.misc b/changelog.d/7136.misc
new file mode 100644
index 0000000000..3f666d25fd
--- /dev/null
+++ b/changelog.d/7136.misc
@@ -0,0 +1 @@
+Refactored the CAS authentication logic to a separate class.
diff --git a/synapse/handlers/cas_handler.py b/synapse/handlers/cas_handler.py
new file mode 100644
index 0000000000..f8dc274b78
--- /dev/null
+++ b/synapse/handlers/cas_handler.py
@@ -0,0 +1,204 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import xml.etree.ElementTree as ET
+from typing import AnyStr, Dict, Optional, Tuple
+
+from six.moves import urllib
+
+from twisted.web.client import PartialDownloadError
+
+from synapse.api.errors import Codes, LoginError
+from synapse.http.site import SynapseRequest
+from synapse.types import UserID, map_username_to_mxid_localpart
+
+logger = logging.getLogger(__name__)
+
+
+class CasHandler:
+    """
+    Utility class to handle the response from a CAS SSO service.
+
+    Args:
+        hs (synapse.server.HomeServer)
+    """
+
+    def __init__(self, hs):
+        self._hostname = hs.hostname
+        self._auth_handler = hs.get_auth_handler()
+        self._registration_handler = hs.get_registration_handler()
+
+        self._cas_server_url = hs.config.cas_server_url
+        self._cas_service_url = hs.config.cas_service_url
+        self._cas_displayname_attribute = hs.config.cas_displayname_attribute
+        self._cas_required_attributes = hs.config.cas_required_attributes
+
+        self._http_client = hs.get_proxied_http_client()
+
+    def _build_service_param(self, client_redirect_url: AnyStr) -> str:
+        return "%s%s?%s" % (
+            self._cas_service_url,
+            "/_matrix/client/r0/login/cas/ticket",
+            urllib.parse.urlencode({"redirectUrl": client_redirect_url}),
+        )
+
+    async def _handle_cas_response(
+        self, request: SynapseRequest, cas_response_body: str, client_redirect_url: str
+    ) -> None:
+        """
+        Retrieves the user and display name from the CAS response and continues with the authentication.
+
+        Args:
+            request: The original client request.
+            cas_response_body: The response from the CAS server.
+            client_redirect_url: The URL to redirect the client to when
+                everything is done.
+        """
+        user, attributes = self._parse_cas_response(cas_response_body)
+        displayname = attributes.pop(self._cas_displayname_attribute, None)
+
+        for required_attribute, required_value in self._cas_required_attributes.items():
+            # If required attribute was not in CAS Response - Forbidden
+            if required_attribute not in attributes:
+                raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
+
+            # Also need to check value
+            if required_value is not None:
+                actual_value = attributes[required_attribute]
+                # If required attribute value does not match expected - Forbidden
+                if required_value != actual_value:
+                    raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
+
+        await self._on_successful_auth(user, request, client_redirect_url, displayname)
+
+    def _parse_cas_response(
+        self, cas_response_body: str
+    ) -> Tuple[str, Dict[str, Optional[str]]]:
+        """
+        Retrieve the user and other parameters from the CAS response.
+
+        Args:
+            cas_response_body: The response from the CAS query.
+
+        Returns:
+            A tuple of the user and a mapping of other attributes.
+        """
+        user = None
+        attributes = {}
+        try:
+            root = ET.fromstring(cas_response_body)
+            if not root.tag.endswith("serviceResponse"):
+                raise Exception("root of CAS response is not serviceResponse")
+            success = root[0].tag.endswith("authenticationSuccess")
+            for child in root[0]:
+                if child.tag.endswith("user"):
+                    user = child.text
+                if child.tag.endswith("attributes"):
+                    for attribute in child:
+                        # ElementTree library expands the namespace in
+                        # attribute tags to the full URL of the namespace.
+                        # We don't care about namespace here and it will always
+                        # be encased in curly braces, so we remove them.
+                        tag = attribute.tag
+                        if "}" in tag:
+                            tag = tag.split("}")[1]
+                        attributes[tag] = attribute.text
+            if user is None:
+                raise Exception("CAS response does not contain user")
+        except Exception:
+            logger.exception("Error parsing CAS response")
+            raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
+        if not success:
+            raise LoginError(
+                401, "Unsuccessful CAS response", errcode=Codes.UNAUTHORIZED
+            )
+        return user, attributes
+
+    async def _on_successful_auth(
+        self,
+        username: str,
+        request: SynapseRequest,
+        client_redirect_url: str,
+        user_display_name: Optional[str] = None,
+    ) -> None:
+        """Called once the user has successfully authenticated with the SSO.
+
+        Registers the user if necessary, and then returns a redirect (with
+        a login token) to the client.
+
+        Args:
+            username: the remote user id. We'll map this onto
+                something sane for a MXID localpart.
+
+            request: the incoming request from the browser. We'll
+                respond to it with a redirect.
+
+            client_redirect_url: the redirect_url the client gave us when
+                it first started the process.
+
+            user_display_name: if set, and we have to register a new user,
+                we will set their displayname to this.
+        """
+        localpart = map_username_to_mxid_localpart(username)
+        user_id = UserID(localpart, self._hostname).to_string()
+        registered_user_id = await self._auth_handler.check_user_exists(user_id)
+        if not registered_user_id:
+            registered_user_id = await self._registration_handler.register_user(
+                localpart=localpart, default_display_name=user_display_name
+            )
+
+        self._auth_handler.complete_sso_login(
+            registered_user_id, request, client_redirect_url
+        )
+
+    def handle_redirect_request(self, client_redirect_url: bytes) -> bytes:
+        """
+        Generates a URL to the CAS server where the client should be redirected.
+
+        Args:
+            client_redirect_url: The final URL the client should go to after the
+                user has negotiated SSO.
+
+        Returns:
+            The URL to redirect to.
+        """
+        args = urllib.parse.urlencode(
+            {"service": self._build_service_param(client_redirect_url)}
+        )
+
+        return ("%s/login?%s" % (self._cas_server_url, args)).encode("ascii")
+
+    async def handle_ticket_request(
+        self, request: SynapseRequest, client_redirect_url: str, ticket: str
+    ) -> None:
+        """
+        Validates a CAS ticket sent by the client for login/registration.
+
+        On a successful request, writes a redirect to the request.
+        """
+        uri = self._cas_server_url + "/proxyValidate"
+        args = {
+            "ticket": ticket,
+            "service": self._build_service_param(client_redirect_url),
+        }
+        try:
+            body = await self._http_client.get_raw(uri, args)
+        except PartialDownloadError as pde:
+            # Twisted raises this error if the connection is closed, even
+            # when that is done in the old-HTTP style to signal end-of-data.
+            body = pde.response
+
+        await self._handle_cas_response(request, body, client_redirect_url)
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 56d713462a..59593cbf6e 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -14,11 +14,6 @@
 # limitations under the License.
 
 import logging
-import xml.etree.ElementTree as ET
-
-from six.moves import urllib
-
-from twisted.web.client import PartialDownloadError
 
 from synapse.api.errors import Codes, LoginError, SynapseError
 from synapse.api.ratelimiting import Ratelimiter
@@ -28,9 +23,10 @@ from synapse.http.servlet import (
     parse_json_object_from_request,
     parse_string,
 )
+from synapse.http.site import SynapseRequest
 from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.rest.well_known import WellKnownBuilder
-from synapse.types import UserID, map_username_to_mxid_localpart
+from synapse.types import UserID
 from synapse.util.msisdn import phone_number_to_msisdn
 
 logger = logging.getLogger(__name__)
@@ -72,14 +68,6 @@ def login_id_thirdparty_from_phone(identifier):
     return {"type": "m.id.thirdparty", "medium": "msisdn", "address": msisdn}
 
 
-def build_service_param(cas_service_url, client_redirect_url):
-    return "%s%s?redirectUrl=%s" % (
-        cas_service_url,
-        "/_matrix/client/r0/login/cas/ticket",
-        urllib.parse.quote(client_redirect_url, safe=""),
-    )
-
-
 class LoginRestServlet(RestServlet):
     PATTERNS = client_patterns("/login$", v1=True)
     CAS_TYPE = "m.login.cas"
@@ -409,7 +397,7 @@ class BaseSSORedirectServlet(RestServlet):
 
     PATTERNS = client_patterns("/login/(cas|sso)/redirect", v1=True)
 
-    def on_GET(self, request):
+    def on_GET(self, request: SynapseRequest):
         args = request.args
         if b"redirectUrl" not in args:
             return 400, "Redirect URL not specified for SSO auth"
@@ -418,15 +406,15 @@ class BaseSSORedirectServlet(RestServlet):
         request.redirect(sso_url)
         finish_request(request)
 
-    def get_sso_url(self, client_redirect_url):
+    def get_sso_url(self, client_redirect_url: bytes) -> bytes:
         """Get the URL to redirect to, to perform SSO auth
 
         Args:
-            client_redirect_url (bytes): the URL that we should redirect the
+            client_redirect_url: the URL that we should redirect the
                 client to when everything is done
 
         Returns:
-            bytes: URL to redirect to
+            URL to redirect to
         """
         # to be implemented by subclasses
         raise NotImplementedError()
@@ -434,16 +422,10 @@ class BaseSSORedirectServlet(RestServlet):
 
 class CasRedirectServlet(BaseSSORedirectServlet):
     def __init__(self, hs):
-        super(CasRedirectServlet, self).__init__()
-        self.cas_server_url = hs.config.cas_server_url
-        self.cas_service_url = hs.config.cas_service_url
+        self._cas_handler = hs.get_cas_handler()
 
-    def get_sso_url(self, client_redirect_url):
-        args = urllib.parse.urlencode(
-            {"service": build_service_param(self.cas_service_url, client_redirect_url)}
-        )
-
-        return "%s/login?%s" % (self.cas_server_url, args)
+    def get_sso_url(self, client_redirect_url: bytes) -> bytes:
+        return self._cas_handler.handle_redirect_request(client_redirect_url)
 
 
 class CasTicketServlet(RestServlet):
@@ -451,81 +433,15 @@ class CasTicketServlet(RestServlet):
 
     def __init__(self, hs):
         super(CasTicketServlet, self).__init__()
-        self.cas_server_url = hs.config.cas_server_url
-        self.cas_service_url = hs.config.cas_service_url
-        self.cas_displayname_attribute = hs.config.cas_displayname_attribute
-        self.cas_required_attributes = hs.config.cas_required_attributes
-        self._sso_auth_handler = SSOAuthHandler(hs)
-        self._http_client = hs.get_proxied_http_client()
-
-    async def on_GET(self, request):
-        client_redirect_url = parse_string(request, "redirectUrl", required=True)
-        uri = self.cas_server_url + "/proxyValidate"
-        args = {
-            "ticket": parse_string(request, "ticket", required=True),
-            "service": build_service_param(self.cas_service_url, client_redirect_url),
-        }
-        try:
-            body = await self._http_client.get_raw(uri, args)
-        except PartialDownloadError as pde:
-            # Twisted raises this error if the connection is closed,
-            # even if that's being used old-http style to signal end-of-data
-            body = pde.response
-        result = await self.handle_cas_response(request, body, client_redirect_url)
-        return result
+        self._cas_handler = hs.get_cas_handler()
 
-    def handle_cas_response(self, request, cas_response_body, client_redirect_url):
-        user, attributes = self.parse_cas_response(cas_response_body)
-        displayname = attributes.pop(self.cas_displayname_attribute, None)
-
-        for required_attribute, required_value in self.cas_required_attributes.items():
-            # If required attribute was not in CAS Response - Forbidden
-            if required_attribute not in attributes:
-                raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
-
-            # Also need to check value
-            if required_value is not None:
-                actual_value = attributes[required_attribute]
-                # If required attribute value does not match expected - Forbidden
-                if required_value != actual_value:
-                    raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
-
-        return self._sso_auth_handler.on_successful_auth(
-            user, request, client_redirect_url, displayname
+    async def on_GET(self, request: SynapseRequest) -> None:
+        client_redirect_url = parse_string(request, "redirectUrl", required=True)
+        ticket = parse_string(request, "ticket", required=True)
+        await self._cas_handler.handle_ticket_request(
+            request, client_redirect_url, ticket
         )
 
-    def parse_cas_response(self, cas_response_body):
-        user = None
-        attributes = {}
-        try:
-            root = ET.fromstring(cas_response_body)
-            if not root.tag.endswith("serviceResponse"):
-                raise Exception("root of CAS response is not serviceResponse")
-            success = root[0].tag.endswith("authenticationSuccess")
-            for child in root[0]:
-                if child.tag.endswith("user"):
-                    user = child.text
-                if child.tag.endswith("attributes"):
-                    for attribute in child:
-                        # ElementTree library expands the namespace in
-                        # attribute tags to the full URL of the namespace.
-                        # We don't care about namespace here and it will always
-                        # be encased in curly braces, so we remove them.
-                        tag = attribute.tag
-                        if "}" in tag:
-                            tag = tag.split("}")[1]
-                        attributes[tag] = attribute.text
-            if user is None:
-                raise Exception("CAS response does not contain user")
-        except Exception:
-            logger.exception("Error parsing CAS response")
-            raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
-        if not success:
-            raise LoginError(
-                401, "Unsuccessful CAS response", errcode=Codes.UNAUTHORIZED
-            )
-        return user, attributes
-
 
 class SAMLRedirectServlet(BaseSSORedirectServlet):
     PATTERNS = client_patterns("/login/sso/redirect", v1=True)
@@ -533,65 +449,10 @@ class SAMLRedirectServlet(BaseSSORedirectServlet):
     def __init__(self, hs):
         self._saml_handler = hs.get_saml_handler()
 
-    def get_sso_url(self, client_redirect_url):
+    def get_sso_url(self, client_redirect_url: bytes) -> bytes:
         return self._saml_handler.handle_redirect_request(client_redirect_url)
 
 
-class SSOAuthHandler(object):
-    """
-    Utility class for Resources and Servlets which handle the response from a SSO
-    service
-
-    Args:
-        hs (synapse.server.HomeServer)
-    """
-
-    def __init__(self, hs):
-        self._hostname = hs.hostname
-        self._auth_handler = hs.get_auth_handler()
-        self._registration_handler = hs.get_registration_handler()
-        self._macaroon_gen = hs.get_macaroon_generator()
-
-        # cast to tuple for use with str.startswith
-        self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist)
-
-    async def on_successful_auth(
-        self, username, request, client_redirect_url, user_display_name=None
-    ):
-        """Called once the user has successfully authenticated with the SSO.
-
-        Registers the user if necessary, and then returns a redirect (with
-        a login token) to the client.
-
-        Args:
-            username (unicode|bytes): the remote user id. We'll map this onto
-                something sane for a MXID localpath.
-
-            request (SynapseRequest): the incoming request from the browser. We'll
-                respond to it with a redirect.
-
-            client_redirect_url (unicode): the redirect_url the client gave us when
-                it first started the process.
-
-            user_display_name (unicode|None): if set, and we have to register a new user,
-                we will set their displayname to this.
-
-        Returns:
-            Deferred[none]: Completes once we have handled the request.
-        """
-        localpart = map_username_to_mxid_localpart(username)
-        user_id = UserID(localpart, self._hostname).to_string()
-        registered_user_id = await self._auth_handler.check_user_exists(user_id)
-        if not registered_user_id:
-            registered_user_id = await self._registration_handler.register_user(
-                localpart=localpart, default_display_name=user_display_name
-            )
-
-        self._auth_handler.complete_sso_login(
-            registered_user_id, request, client_redirect_url
-        )
-
-
 def register_servlets(hs, http_server):
     LoginRestServlet(hs).register(http_server)
     if hs.config.cas_enabled:
diff --git a/synapse/server.py b/synapse/server.py
index d0d80e8ac5..c7ca2bda0d 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -56,6 +56,7 @@ from synapse.handlers.account_validity import AccountValidityHandler
 from synapse.handlers.acme import AcmeHandler
 from synapse.handlers.appservice import ApplicationServicesHandler
 from synapse.handlers.auth import AuthHandler, MacaroonGenerator
+from synapse.handlers.cas_handler import CasHandler
 from synapse.handlers.deactivate_account import DeactivateAccountHandler
 from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler
 from synapse.handlers.devicemessage import DeviceMessageHandler
@@ -198,6 +199,7 @@ class HomeServer(object):
         "sendmail",
         "registration_handler",
         "account_validity_handler",
+        "cas_handler",
         "saml_handler",
         "event_client_serializer",
         "password_policy_handler",
@@ -529,6 +531,9 @@ class HomeServer(object):
     def build_account_validity_handler(self):
         return AccountValidityHandler(self)
 
+    def build_cas_handler(self):
+        return CasHandler(self)
+
     def build_saml_handler(self):
         from synapse.handlers.saml_handler import SamlHandler
 
diff --git a/tox.ini b/tox.ini
index 8e3f09e638..a79fc93b57 100644
--- a/tox.ini
+++ b/tox.ini
@@ -186,6 +186,7 @@ commands = mypy \
             synapse/federation/sender \
             synapse/federation/transport \
             synapse/handlers/auth.py \
+            synapse/handlers/cas_handler.py \
             synapse/handlers/directory.py \
             synapse/handlers/presence.py \
             synapse/handlers/sync.py \
-- 
cgit 1.4.1
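
As a reference for the refactored parser, here is a minimal standalone sketch of how the namespace stripping in `_parse_cas_response` behaves on a typical CAS 2.0 payload (the sample XML and its values are illustrative, not taken from the patch):

```python
import xml.etree.ElementTree as ET

SAMPLE = """<cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">
  <cas:authenticationSuccess>
    <cas:user>jsmith</cas:user>
    <cas:attributes>
      <cas:displayName>Jane Smith</cas:displayName>
    </cas:attributes>
  </cas:authenticationSuccess>
</cas:serviceResponse>"""

root = ET.fromstring(SAMPLE)
# ElementTree expands the "cas:" prefix to "{http://www.yale.edu/tp/cas}...",
# hence the endswith()/split("}") tricks in the handler.
assert root.tag.endswith("serviceResponse")
user, attributes = None, {}
for child in root[0]:
    if child.tag.endswith("user"):
        user = child.text
    if child.tag.endswith("attributes"):
        for attribute in child:
            attributes[attribute.tag.split("}")[-1]] = attribute.text

print(user, attributes)  # jsmith {'displayName': 'Jane Smith'}
```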


From 665630fcaab8f09e83ff77f35d5244a718e20701 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Fri, 27 Mar 2020 11:39:43 +0000
Subject: Add tests for outbound device pokes

---
 changelog.d/7157.misc                      |   1 +
 tests/federation/test_federation_sender.py | 303 ++++++++++++++++++++++++++++-
 tests/unittest.py                          |   1 +
 3 files changed, 302 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/7157.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7157.misc b/changelog.d/7157.misc
new file mode 100644
index 0000000000..0eb1128c7a
--- /dev/null
+++ b/changelog.d/7157.misc
@@ -0,0 +1 @@
+Add tests for outbound device pokes.
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index d456267b87..7763b12159 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -12,19 +12,25 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Optional
 
 from mock import Mock
 
+from signedjson import key, sign
+from signedjson.types import BaseKey, SigningKey
+
 from twisted.internet import defer
 
-from synapse.types import ReadReceipt
+from synapse.rest import admin
+from synapse.rest.client.v1 import login
+from synapse.types import JsonDict, ReadReceipt
 
 from tests.unittest import HomeserverTestCase, override_config
 
 
-class FederationSenderTestCases(HomeserverTestCase):
+class FederationSenderReceiptsTestCases(HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
-        return super(FederationSenderTestCases, self).setup_test_homeserver(
+        return self.setup_test_homeserver(
             state_handler=Mock(spec=["get_current_hosts_in_room"]),
             federation_transport_client=Mock(spec=["send_transaction"]),
         )
@@ -147,3 +153,294 @@ class FederationSenderTestCases(HomeserverTestCase):
                 }
             ],
         )
+
+
+class FederationSenderDevicesTestCases(HomeserverTestCase):
+    servlets = [
+        admin.register_servlets,
+        login.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        return self.setup_test_homeserver(
+            state_handler=Mock(spec=["get_current_hosts_in_room"]),
+            federation_transport_client=Mock(spec=["send_transaction"]),
+        )
+
+    def default_config(self):
+        c = super().default_config()
+        c["send_federation"] = True
+        return c
+
+    def prepare(self, reactor, clock, hs):
+        # stub out get_current_hosts_in_room
+        mock_state_handler = hs.get_state_handler()
+        mock_state_handler.get_current_hosts_in_room.return_value = ["test", "host2"]
+
+        # stub out get_users_who_share_room_with_user so that it claims that
+        # `@user2:host2` is in the room
+        def get_users_who_share_room_with_user(user_id):
+            return defer.succeed({"@user2:host2"})
+
+        hs.get_datastore().get_users_who_share_room_with_user = (
+            get_users_who_share_room_with_user
+        )
+
+        # whenever send_transaction is called, record the edu data
+        self.edus = []
+        self.hs.get_federation_transport_client().send_transaction.side_effect = (
+            self.record_transaction
+        )
+
+    def record_transaction(self, txn, json_cb):
+        data = json_cb()
+        self.edus.extend(data["edus"])
+        return defer.succeed({})
+
+    def test_send_device_updates(self):
+        """Basic case: each device update should result in an EDU"""
+        # create a device
+        u1 = self.register_user("user", "pass")
+        self.login(u1, "pass", device_id="D1")
+
+        # expect one edu
+        self.assertEqual(len(self.edus), 1)
+        stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", None)
+
+        # a second call should produce no new device EDUs
+        self.hs.get_federation_sender().send_device_messages("host2")
+        self.pump()
+        self.assertEqual(self.edus, [])
+
+        # a second device
+        self.login("user", "pass", device_id="D2")
+
+        self.assertEqual(len(self.edus), 1)
+        self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id)
+
+    def test_upload_signatures(self):
+        """Uploading signatures on some devices should produce updates for that user"""
+
+        e2e_handler = self.hs.get_e2e_keys_handler()
+
+        # register two devices
+        u1 = self.register_user("user", "pass")
+        self.login(u1, "pass", device_id="D1")
+        self.login(u1, "pass", device_id="D2")
+
+        # expect two edus
+        self.assertEqual(len(self.edus), 2)
+        stream_id = None
+        stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", stream_id)
+        stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id)
+
+        # upload signing keys for each device
+        device1_signing_key = self.generate_and_upload_device_signing_key(u1, "D1")
+        device2_signing_key = self.generate_and_upload_device_signing_key(u1, "D2")
+
+        # expect two more edus
+        self.assertEqual(len(self.edus), 2)
+        stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", stream_id)
+        stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id)
+
+        # upload master key and self-signing key
+        master_signing_key = generate_self_id_key()
+        master_key = {
+            "user_id": u1,
+            "usage": ["master"],
+            "keys": {key_id(master_signing_key): encode_pubkey(master_signing_key)},
+        }
+
+        # private key: HvQBbU+hc2Zr+JP1sE0XwBe1pfZZEYtJNPJLZJtS+F8
+        selfsigning_signing_key = generate_self_id_key()
+        selfsigning_key = {
+            "user_id": u1,
+            "usage": ["self_signing"],
+            "keys": {
+                key_id(selfsigning_signing_key): encode_pubkey(selfsigning_signing_key)
+            },
+        }
+        sign.sign_json(selfsigning_key, u1, master_signing_key)
+
+        cross_signing_keys = {
+            "master_key": master_key,
+            "self_signing_key": selfsigning_key,
+        }
+
+        self.get_success(
+            e2e_handler.upload_signing_keys_for_user(u1, cross_signing_keys)
+        )
+
+        # expect signing key update edu
+        self.assertEqual(len(self.edus), 1)
+        self.assertEqual(self.edus.pop(0)["edu_type"], "org.matrix.signing_key_update")
+
+        # sign the devices
+        d1_json = build_device_dict(u1, "D1", device1_signing_key)
+        sign.sign_json(d1_json, u1, selfsigning_signing_key)
+        d2_json = build_device_dict(u1, "D2", device2_signing_key)
+        sign.sign_json(d2_json, u1, selfsigning_signing_key)
+
+        ret = self.get_success(
+            e2e_handler.upload_signatures_for_device_keys(
+                u1, {u1: {"D1": d1_json, "D2": d2_json}},
+            )
+        )
+        self.assertEqual(ret["failures"], {})
+
+        # expect two edus, in one or two transactions. We don't know what order the
+        # devices will be updated.
+        self.assertEqual(len(self.edus), 2)
+        stream_id = None  # FIXME: there is a discontinuity in the stream IDs: see #7142
+        for edu in self.edus:
+            self.assertEqual(edu["edu_type"], "m.device_list_update")
+            c = edu["content"]
+            if stream_id is not None:
+                self.assertEqual(c["prev_id"], [stream_id])
+            stream_id = c["stream_id"]
+        devices = {edu["content"]["device_id"] for edu in self.edus}
+        self.assertEqual({"D1", "D2"}, devices)
+
+    def test_delete_devices(self):
+        """If devices are deleted, that should result in EDUs too"""
+
+        # create devices
+        u1 = self.register_user("user", "pass")
+        self.login("user", "pass", device_id="D1")
+        self.login("user", "pass", device_id="D2")
+        self.login("user", "pass", device_id="D3")
+
+        # expect three edus
+        self.assertEqual(len(self.edus), 3)
+        stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", None)
+        stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id)
+        stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D3", stream_id)
+
+        # delete them again
+        self.get_success(
+            self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"])
+        )
+
+        # expect three edus, in an unknown order
+        self.assertEqual(len(self.edus), 3)
+        for edu in self.edus:
+            self.assertEqual(edu["edu_type"], "m.device_list_update")
+            c = edu["content"]
+            self.assertGreaterEqual(
+                c.items(),
+                {"user_id": u1, "prev_id": [stream_id], "deleted": True}.items(),
+            )
+            stream_id = c["stream_id"]
+        devices = {edu["content"]["device_id"] for edu in self.edus}
+        self.assertEqual({"D1", "D2", "D3"}, devices)
+
+    def test_unreachable_server(self):
+        """If the destination server is unreachable, all the updates should get sent on
+        recovery
+        """
+        mock_send_txn = self.hs.get_federation_transport_client().send_transaction
+        mock_send_txn.side_effect = lambda t, cb: defer.fail("fail")
+
+        # create devices
+        u1 = self.register_user("user", "pass")
+        self.login("user", "pass", device_id="D1")
+        self.login("user", "pass", device_id="D2")
+        self.login("user", "pass", device_id="D3")
+
+        # delete them again
+        self.get_success(
+            self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"])
+        )
+
+        self.assertGreaterEqual(mock_send_txn.call_count, 4)
+
+        # recover the server
+        mock_send_txn.side_effect = self.record_transaction
+        self.hs.get_federation_sender().send_device_messages("host2")
+        self.pump()
+
+        # for each device, there should be a single update
+        self.assertEqual(len(self.edus), 3)
+        stream_id = None
+        for edu in self.edus:
+            self.assertEqual(edu["edu_type"], "m.device_list_update")
+            c = edu["content"]
+            self.assertEqual(c["prev_id"], [stream_id] if stream_id is not None else [])
+            stream_id = c["stream_id"]
+        devices = {edu["content"]["device_id"] for edu in self.edus}
+        self.assertEqual({"D1", "D2", "D3"}, devices)
+
+    def check_device_update_edu(
+        self,
+        edu: JsonDict,
+        user_id: str,
+        device_id: str,
+        prev_stream_id: Optional[int],
+    ) -> int:
+        """Check that the given EDU is an update for the given device
+        Returns the stream_id.
+        """
+        self.assertEqual(edu["edu_type"], "m.device_list_update")
+        content = edu["content"]
+
+        expected = {
+            "user_id": user_id,
+            "device_id": device_id,
+            "prev_id": [prev_stream_id] if prev_stream_id is not None else [],
+        }
+
+        self.assertLessEqual(expected.items(), content.items())
+        return content["stream_id"]
+
+    def check_signing_key_update_txn(self, txn: JsonDict,) -> None:
+        """Check that the txn has an EDU with a signing key update.
+        """
+        edus = txn["edus"]
+        self.assertEqual(len(edus), 1)
+
+    def generate_and_upload_device_signing_key(
+        self, user_id: str, device_id: str
+    ) -> SigningKey:
+        """Generate a signing keypair for the given device, and upload it"""
+        sk = key.generate_signing_key(device_id)
+
+        device_dict = build_device_dict(user_id, device_id, sk)
+
+        self.get_success(
+            self.hs.get_e2e_keys_handler().upload_keys_for_user(
+                user_id, device_id, {"device_keys": device_dict},
+            )
+        )
+        return sk
+
+
+def generate_self_id_key() -> SigningKey:
+    """generate a signing key whose version is its public key
+
+    ... as used by the cross-signing-keys.
+    """
+    k = key.generate_signing_key("x")
+    k.version = encode_pubkey(k)
+    return k
+
+
+def key_id(k: BaseKey) -> str:
+    return "%s:%s" % (k.alg, k.version)
+
+
+def encode_pubkey(sk: SigningKey) -> str:
+    """Encode the public key corresponding to the given signing key as base64"""
+    return key.encode_verify_key_base64(key.get_verify_key(sk))
+
+
+def build_device_dict(user_id: str, device_id: str, sk: SigningKey):
+    """Build a dict representing the given device"""
+    return {
+        "user_id": user_id,
+        "device_id": device_id,
+        "algorithms": ["m.olm.curve25519-aes-sha256", "m.megolm.v1.aes-sha"],
+        "keys": {
+            "curve25519:" + device_id: "curve25519+key",
+            key_id(sk): encode_pubkey(sk),
+        },
+    }
diff --git a/tests/unittest.py b/tests/unittest.py
index 23b59bea22..3d57b77a5d 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -490,6 +490,7 @@ class HomeserverTestCase(TestCase):
                 "password": password,
                 "admin": admin,
                 "mac": want_mac,
+                "inhibit_login": True,
             }
         )
         request, channel = self.make_request(
-- 
cgit 1.4.1
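
For orientation, a hedged sketch of the `m.device_list_update` EDU shape that `check_device_update_edu` asserts against; the field values here are illustrative, but the keys follow the assertions in the tests above:

```python
edu = {
    "edu_type": "m.device_list_update",
    "content": {
        "user_id": "@user:test",  # owner of the updated device
        "device_id": "D2",
        "stream_id": 2,           # position in the device-list stream
        "prev_id": [1],           # stream_ids of the preceding updates
    },
}

# The tests' subset check: every expected key/value must appear in content.
expected = {"user_id": "@user:test", "device_id": "D2", "prev_id": [1]}
assert expected.items() <= edu["content"].items()
```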


From d9965fb8d678837947cabd41c410127fb59d1b82 Mon Sep 17 00:00:00 2001
From: David Baker 
Date: Fri, 27 Mar 2020 12:30:59 +0000
Subject: changelog

---
 changelog.d/7160.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/7160.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7160.feature b/changelog.d/7160.feature
new file mode 100644
index 0000000000..c1205969a1
--- /dev/null
+++ b/changelog.d/7160.feature
@@ -0,0 +1 @@
+Always send users their own device updates.
-- 
cgit 1.4.1


From fbf0782c63bd2aba3c504dabd04abdf10d269a22 Mon Sep 17 00:00:00 2001
From: David Vo 
Date: Sat, 28 Mar 2020 00:20:00 +1100
Subject: Only import sqlite3 when type checking (#7155)

Fixes: #7127
Signed-off-by: David Vo 
---
 changelog.d/7155.bugfix           | 1 +
 synapse/storage/engines/sqlite.py | 7 +++++--
 2 files changed, 6 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/7155.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7155.bugfix b/changelog.d/7155.bugfix
new file mode 100644
index 0000000000..0bf51e7aba
--- /dev/null
+++ b/changelog.d/7155.bugfix
@@ -0,0 +1 @@
+Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo.
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index 2bfeefd54e..3bc2e8b986 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -12,14 +12,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import sqlite3
 import struct
 import threading
+import typing
 
 from synapse.storage.engines import BaseDatabaseEngine
 
+if typing.TYPE_CHECKING:
+    import sqlite3  # noqa: F401
 
-class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
+
+class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]):
     def __init__(self, database_module, database_config):
         super().__init__(database_module, database_config)
 
-- 
cgit 1.4.1
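
The change above uses the standard pattern for keeping an import around for type annotations only; a minimal sketch (the `describe` helper is hypothetical):

```python
import typing

if typing.TYPE_CHECKING:
    # Evaluated by mypy, skipped at runtime -- so a postgres-only
    # deployment never imports sqlite3 at all.
    import sqlite3  # noqa: F401


def describe(conn: "sqlite3.Connection") -> str:
    # The quoted ("forward") annotation is a plain string at runtime,
    # so it does not require the sqlite3 module to be importable.
    return "sqlite connection: %r" % (conn,)
```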


From 12aa5a7fa761a729364d324405a033cf78da26de Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 27 Mar 2020 13:30:22 +0000
Subject: Ensure is_verified on /_matrix/client/r0/room_keys/keys is a boolean
 (#7150)

---
 changelog.d/7150.bugfix                           | 1 +
 synapse/rest/client/v2_alpha/room_keys.py         | 2 +-
 synapse/storage/data_stores/main/e2e_room_keys.py | 3 ++-
 3 files changed, 4 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/7150.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7150.bugfix b/changelog.d/7150.bugfix
new file mode 100644
index 0000000000..1feb294799
--- /dev/null
+++ b/changelog.d/7150.bugfix
@@ -0,0 +1 @@
+Ensure `is_verified` is a boolean in responses to `GET /_matrix/client/r0/room_keys/keys`. Also warn the user if they forgot the `version` query param.
\ No newline at end of file
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index 38952a1d27..59529707df 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -188,7 +188,7 @@ class RoomKeysServlet(RestServlet):
         """
         requester = await self.auth.get_user_by_req(request, allow_guest=False)
         user_id = requester.user.to_string()
-        version = parse_string(request, "version")
+        version = parse_string(request, "version", required=True)
 
         room_keys = await self.e2e_room_keys_handler.get_room_keys(
             user_id, version, room_id, session_id
diff --git a/synapse/storage/data_stores/main/e2e_room_keys.py b/synapse/storage/data_stores/main/e2e_room_keys.py
index 84594cf0a9..23f4570c4b 100644
--- a/synapse/storage/data_stores/main/e2e_room_keys.py
+++ b/synapse/storage/data_stores/main/e2e_room_keys.py
@@ -146,7 +146,8 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             room_entry["sessions"][row["session_id"]] = {
                 "first_message_index": row["first_message_index"],
                 "forwarded_count": row["forwarded_count"],
-                "is_verified": row["is_verified"],
+                # is_verified must be returned to the client as a boolean
+                "is_verified": bool(row["is_verified"]),
                 "session_data": json.loads(row["session_data"]),
             }
 
-- 
cgit 1.4.1
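
A hedged illustration of the bug being fixed: SQLite has no real boolean type, so rows come back with `is_verified` as `0`/`1`, which would serialise to JSON as numbers without the `bool()` coercion:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE room_keys (is_verified BOOLEAN)")
conn.execute("INSERT INTO room_keys VALUES (1)")
(row,) = conn.execute("SELECT is_verified FROM room_keys").fetchall()

print(row[0], type(row[0]))  # 1 <class 'int'> -- serialises as a JSON number
print(bool(row[0]))          # True -- what clients expect
```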


From ae219fb41127b43f56605dcdc902f66e0bc7b2e7 Mon Sep 17 00:00:00 2001
From: txt-file <44214237+txt-file@users.noreply.github.com>
Date: Fri, 27 Mar 2020 16:02:00 +0100
Subject: update debian installation instructions to recommend installing
 `virtualenv` instead of `python3-virtualenv` (#6892)

* change debian package from python3-virtualenv to virtualenv

The virtualenv package is needed for the virtualenv command. The
virtualenv package depends on python3-virtualenv (at least since
Debian Jessie), so there is no need to specify python3-virtualenv
separately.

Signed-off-by: Vieno Hakkerinen 

* Add changelog

Co-authored-by: Andrew Morgan 
---
 INSTALL.md           | 2 +-
 changelog.d/6892.doc | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6892.doc

(limited to 'changelog.d')

diff --git a/INSTALL.md b/INSTALL.md
index af9a5ef439..9c6f507db8 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -112,7 +112,7 @@ Installing prerequisites on Ubuntu or Debian:
 ```
 sudo apt-get install build-essential python3-dev libffi-dev \
                      python3-pip python3-setuptools sqlite3 \
-                     libssl-dev python3-virtualenv libjpeg-dev libxslt1-dev
+                     libssl-dev virtualenv libjpeg-dev libxslt1-dev
 ```
 
 #### ArchLinux
diff --git a/changelog.d/6892.doc b/changelog.d/6892.doc
new file mode 100644
index 0000000000..0d04cf0bdb
--- /dev/null
+++ b/changelog.d/6892.doc
@@ -0,0 +1 @@
+Update Debian installation instructions to recommend installing the `virtualenv` package instead of `python3-virtualenv`.
\ No newline at end of file
-- 
cgit 1.4.1


From 8327eb9280cbcb492e05652a96be9f1cd1c0e7c4 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Fri, 27 Mar 2020 20:15:23 +0100
Subject: Add options to prevent users from changing their profile. (#7096)

---
 changelog.d/7096.feature                   |   1 +
 docs/sample_config.yaml                    |  23 +++
 synapse/config/registration.py             |  27 +++
 synapse/handlers/profile.py                |  16 ++
 synapse/rest/client/v2_alpha/account.py    |  16 ++
 tests/handlers/test_profile.py             |  65 ++++++-
 tests/rest/client/v2_alpha/test_account.py | 302 +++++++++++++++++++++++++++++
 7 files changed, 449 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7096.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7096.feature b/changelog.d/7096.feature
new file mode 100644
index 0000000000..00f47b2a14
--- /dev/null
+++ b/changelog.d/7096.feature
@@ -0,0 +1 @@
+Add options to prevent users from changing their profile or associated 3PIDs.
\ No newline at end of file
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 1a1d061759..545226f753 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1086,6 +1086,29 @@ account_threepid_delegates:
     #email: https://example.com     # Delegate email sending to example.com
     #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
 
+# Whether users are allowed to change their displayname after it has
+# been initially set. Useful when provisioning users based on the
+# contents of a third-party directory.
+#
+# Does not apply to server administrators. Defaults to 'true'
+#
+#enable_set_displayname: false
+
+# Whether users are allowed to change their avatar after it has been
+# initially set. Useful when provisioning users based on the contents
+# of a third-party directory.
+#
+# Does not apply to server administrators. Defaults to 'true'
+#
+#enable_set_avatar_url: false
+
+# Whether users can change the 3PIDs associated with their accounts
+# (email address and msisdn).
+#
+# Defaults to 'true'
+#
+#enable_3pid_changes: false
+
 # Users who register on this homeserver will automatically be joined
 # to these rooms
 #
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 9bb3beedbc..e7ea3a01cb 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -129,6 +129,10 @@ class RegistrationConfig(Config):
                 raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,))
         self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
 
+        self.enable_set_displayname = config.get("enable_set_displayname", True)
+        self.enable_set_avatar_url = config.get("enable_set_avatar_url", True)
+        self.enable_3pid_changes = config.get("enable_3pid_changes", True)
+
         self.disable_msisdn_registration = config.get(
             "disable_msisdn_registration", False
         )
@@ -330,6 +334,29 @@ class RegistrationConfig(Config):
             #email: https://example.com     # Delegate email sending to example.com
             #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
 
+        # Whether users are allowed to change their displayname after it has
+        # been initially set. Useful when provisioning users based on the
+        # contents of a third-party directory.
+        #
+        # Does not apply to server administrators. Defaults to 'true'
+        #
+        #enable_set_displayname: false
+
+        # Whether users are allowed to change their avatar after it has been
+        # initially set. Useful when provisioning users based on the contents
+        # of a third-party directory.
+        #
+        # Does not apply to server administrators. Defaults to 'true'
+        #
+        #enable_set_avatar_url: false
+
+        # Whether users can change the 3PIDs associated with their accounts
+        # (email address and msisdn).
+        #
+        # Defaults to 'true'
+        #
+        #enable_3pid_changes: false
+
         # Users who register on this homeserver will automatically be joined
         # to these rooms
         #
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 50ce0c585b..6aa1c0f5e0 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -157,6 +157,15 @@ class BaseProfileHandler(BaseHandler):
         if not by_admin and target_user != requester.user:
             raise AuthError(400, "Cannot set another user's displayname")
 
+        if not by_admin and not self.hs.config.enable_set_displayname:
+            profile = yield self.store.get_profileinfo(target_user.localpart)
+            if profile.display_name:
+                raise SynapseError(
+                    400,
+                    "Changing display name is disabled on this server",
+                    Codes.FORBIDDEN,
+                )
+
         if len(new_displayname) > MAX_DISPLAYNAME_LEN:
             raise SynapseError(
                 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,)
@@ -218,6 +227,13 @@ class BaseProfileHandler(BaseHandler):
         if not by_admin and target_user != requester.user:
             raise AuthError(400, "Cannot set another user's avatar_url")
 
+        if not by_admin and not self.hs.config.enable_set_avatar_url:
+            profile = yield self.store.get_profileinfo(target_user.localpart)
+            if profile.avatar_url:
+                raise SynapseError(
+                    400, "Changing avatar is disabled on this server", Codes.FORBIDDEN
+                )
+
         if len(new_avatar_url) > MAX_AVATAR_URL_LEN:
             raise SynapseError(
                 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,)
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index b1249b664c..f80b5e40ea 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -605,6 +605,11 @@ class ThreepidRestServlet(RestServlet):
         return 200, {"threepids": threepids}
 
     async def on_POST(self, request):
+        if not self.hs.config.enable_3pid_changes:
+            raise SynapseError(
+                400, "3PID changes are disabled on this server", Codes.FORBIDDEN
+            )
+
         requester = await self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
         body = parse_json_object_from_request(request)
@@ -649,6 +654,11 @@ class ThreepidAddRestServlet(RestServlet):
 
     @interactive_auth_handler
     async def on_POST(self, request):
+        if not self.hs.config.enable_3pid_changes:
+            raise SynapseError(
+                400, "3PID changes are disabled on this server", Codes.FORBIDDEN
+            )
+
         requester = await self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
         body = parse_json_object_from_request(request)
@@ -744,10 +754,16 @@ class ThreepidDeleteRestServlet(RestServlet):
 
     def __init__(self, hs):
         super(ThreepidDeleteRestServlet, self).__init__()
+        self.hs = hs
         self.auth = hs.get_auth()
         self.auth_handler = hs.get_auth_handler()
 
     async def on_POST(self, request):
+        if not self.hs.config.enable_3pid_changes:
+            raise SynapseError(
+                400, "3PID changes are disabled on this server", Codes.FORBIDDEN
+            )
+
         body = parse_json_object_from_request(request)
         assert_params_in_dict(body, ["medium", "address"])
 
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index d60c124eec..be665262c6 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -19,7 +19,7 @@ from mock import Mock, NonCallableMock
 from twisted.internet import defer
 
 import synapse.types
-from synapse.api.errors import AuthError
+from synapse.api.errors import AuthError, SynapseError
 from synapse.handlers.profile import MasterProfileHandler
 from synapse.types import UserID
 
@@ -70,6 +70,7 @@ class ProfileTestCase(unittest.TestCase):
         yield self.store.create_profile(self.frank.localpart)
 
         self.handler = hs.get_profile_handler()
+        self.hs = hs
 
     @defer.inlineCallbacks
     def test_get_my_name(self):
@@ -90,6 +91,33 @@ class ProfileTestCase(unittest.TestCase):
             "Frank Jr.",
         )
 
+        # Set displayname again
+        yield self.handler.set_displayname(
+            self.frank, synapse.types.create_requester(self.frank), "Frank"
+        )
+
+        self.assertEquals(
+            (yield self.store.get_profile_displayname(self.frank.localpart)), "Frank",
+        )
+
+    @defer.inlineCallbacks
+    def test_set_my_name_if_disabled(self):
+        self.hs.config.enable_set_displayname = False
+
+        # Setting displayname for the first time is allowed
+        yield self.store.set_profile_displayname(self.frank.localpart, "Frank")
+
+        self.assertEquals(
+            (yield self.store.get_profile_displayname(self.frank.localpart)), "Frank",
+        )
+
+        # Setting displayname a second time is forbidden
+        d = self.handler.set_displayname(
+            self.frank, synapse.types.create_requester(self.frank), "Frank Jr."
+        )
+
+        yield self.assertFailure(d, SynapseError)
+
     @defer.inlineCallbacks
     def test_set_my_name_noauth(self):
         d = self.handler.set_displayname(
@@ -147,3 +175,38 @@ class ProfileTestCase(unittest.TestCase):
             (yield self.store.get_profile_avatar_url(self.frank.localpart)),
             "http://my.server/pic.gif",
         )
+
+        # Set avatar again
+        yield self.handler.set_avatar_url(
+            self.frank,
+            synapse.types.create_requester(self.frank),
+            "http://my.server/me.png",
+        )
+
+        self.assertEquals(
+            (yield self.store.get_profile_avatar_url(self.frank.localpart)),
+            "http://my.server/me.png",
+        )
+
+    @defer.inlineCallbacks
+    def test_set_my_avatar_if_disabled(self):
+        self.hs.config.enable_set_avatar_url = False
+
+        # Setting the avatar for the first time is allowed
+        yield self.store.set_profile_avatar_url(
+            self.frank.localpart, "http://my.server/me.png"
+        )
+
+        self.assertEquals(
+            (yield self.store.get_profile_avatar_url(self.frank.localpart)),
+            "http://my.server/me.png",
+        )
+
+        # Setting the avatar a second time is forbidden
+        d = self.handler.set_avatar_url(
+            self.frank,
+            synapse.types.create_requester(self.frank),
+            "http://my.server/pic.gif",
+        )
+
+        yield self.assertFailure(d, SynapseError)
diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py
index c3facc00eb..45a9d445f8 100644
--- a/tests/rest/client/v2_alpha/test_account.py
+++ b/tests/rest/client/v2_alpha/test_account.py
@@ -24,6 +24,7 @@ import pkg_resources
 
 import synapse.rest.admin
 from synapse.api.constants import LoginType, Membership
+from synapse.api.errors import Codes
 from synapse.rest.client.v1 import login, room
 from synapse.rest.client.v2_alpha import account, register
 
@@ -325,3 +326,304 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
         )
         self.render(request)
         self.assertEqual(request.code, 200)
+
+
+class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        account.register_servlets,
+        login.register_servlets,
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        config = self.default_config()
+
+        # Email config.
+        self.email_attempts = []
+
+        def sendmail(smtphost, from_addr, to_addrs, msg, **kwargs):
+            self.email_attempts.append(msg)
+
+        config["email"] = {
+            "enable_notifs": False,
+            "template_dir": os.path.abspath(
+                pkg_resources.resource_filename("synapse", "res/templates")
+            ),
+            "smtp_host": "127.0.0.1",
+            "smtp_port": 20,
+            "require_transport_security": False,
+            "smtp_user": None,
+            "smtp_pass": None,
+            "notif_from": "test@example.com",
+        }
+        config["public_baseurl"] = "https://example.com"
+
+        self.hs = self.setup_test_homeserver(config=config, sendmail=sendmail)
+        return self.hs
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+
+        self.user_id = self.register_user("kermit", "test")
+        self.user_id_tok = self.login("kermit", "test")
+        self.email = "test@example.com"
+        self.url_3pid = b"account/3pid"
+
+    def test_add_email(self):
+        """Test adding an email to profile
+        """
+        client_secret = "foobar"
+        session_id = self._request_token(self.email, client_secret)
+
+        self.assertEquals(len(self.email_attempts), 1)
+        link = self._get_link_from_email()
+
+        self._validate_token(link)
+
+        request, channel = self.make_request(
+            "POST",
+            b"/_matrix/client/unstable/account/3pid/add",
+            {
+                "client_secret": client_secret,
+                "sid": session_id,
+                "auth": {
+                    "type": "m.login.password",
+                    "user": self.user_id,
+                    "password": "test",
+                },
+            },
+            access_token=self.user_id_tok,
+        )
+
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_3pid, access_token=self.user_id_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual(self.email, channel.json_body["threepids"][0]["address"])
+
+    def test_add_email_if_disabled(self):
+        """Test adding email to profile when doing so is disallowed
+        """
+        self.hs.config.enable_3pid_changes = False
+
+        client_secret = "foobar"
+        session_id = self._request_token(self.email, client_secret)
+
+        self.assertEquals(len(self.email_attempts), 1)
+        link = self._get_link_from_email()
+
+        self._validate_token(link)
+
+        request, channel = self.make_request(
+            "POST",
+            b"/_matrix/client/unstable/account/3pid/add",
+            {
+                "client_secret": client_secret,
+                "sid": session_id,
+                "auth": {
+                    "type": "m.login.password",
+                    "user": self.user_id,
+                    "password": "test",
+                },
+            },
+            access_token=self.user_id_tok,
+        )
+        self.render(request)
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_3pid, access_token=self.user_id_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertFalse(channel.json_body["threepids"])
+
+    def test_delete_email(self):
+        """Test deleting an email from profile
+        """
+        # Add a threepid
+        self.get_success(
+            self.store.user_add_threepid(
+                user_id=self.user_id,
+                medium="email",
+                address=self.email,
+                validated_at=0,
+                added_at=0,
+            )
+        )
+
+        request, channel = self.make_request(
+            "POST",
+            b"account/3pid/delete",
+            {"medium": "email", "address": self.email},
+            access_token=self.user_id_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_3pid, access_token=self.user_id_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertFalse(channel.json_body["threepids"])
+
+    def test_delete_email_if_disabled(self):
+        """Test deleting an email from profile when disallowed
+        """
+        self.hs.config.enable_3pid_changes = False
+
+        # Add a threepid
+        self.get_success(
+            self.store.user_add_threepid(
+                user_id=self.user_id,
+                medium="email",
+                address=self.email,
+                validated_at=0,
+                added_at=0,
+            )
+        )
+
+        request, channel = self.make_request(
+            "POST",
+            b"account/3pid/delete",
+            {"medium": "email", "address": self.email},
+            access_token=self.user_id_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_3pid, access_token=self.user_id_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual(self.email, channel.json_body["threepids"][0]["address"])
+
+    def test_cant_add_email_without_clicking_link(self):
+        """Test that we do actually need to click the link in the email
+        """
+        client_secret = "foobar"
+        session_id = self._request_token(self.email, client_secret)
+
+        self.assertEquals(len(self.email_attempts), 1)
+
+        # Attempt to add email without clicking the link
+        request, channel = self.make_request(
+            "POST",
+            b"/_matrix/client/unstable/account/3pid/add",
+            {
+                "client_secret": client_secret,
+                "sid": session_id,
+                "auth": {
+                    "type": "m.login.password",
+                    "user": self.user_id,
+                    "password": "test",
+                },
+            },
+            access_token=self.user_id_tok,
+        )
+        self.render(request)
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_3pid, access_token=self.user_id_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertFalse(channel.json_body["threepids"])
+
+    def test_no_valid_token(self):
+        """Test that we do actually need to request a token and can't just
+        make a session up.
+        """
+        client_secret = "foobar"
+        session_id = "weasle"
+
+        # Attempt to add email without even requesting an email
+        request, channel = self.make_request(
+            "POST",
+            b"/_matrix/client/unstable/account/3pid/add",
+            {
+                "client_secret": client_secret,
+                "sid": session_id,
+                "auth": {
+                    "type": "m.login.password",
+                    "user": self.user_id,
+                    "password": "test",
+                },
+            },
+            access_token=self.user_id_tok,
+        )
+        self.render(request)
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_3pid, access_token=self.user_id_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertFalse(channel.json_body["threepids"])
+
+    def _request_token(self, email, client_secret):
+        request, channel = self.make_request(
+            "POST",
+            b"account/3pid/email/requestToken",
+            {"client_secret": client_secret, "email": email, "send_attempt": 1},
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.result)
+
+        return channel.json_body["sid"]
+
+    def _validate_token(self, link):
+        # Remove the host
+        path = link.replace("https://example.com", "")
+
+        request, channel = self.make_request("GET", path, shorthand=False)
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.result)
+
+    def _get_link_from_email(self):
+        assert self.email_attempts, "No emails have been sent"
+
+        raw_msg = self.email_attempts[-1].decode("UTF-8")
+        mail = Parser().parsestr(raw_msg)
+
+        text = None
+        for part in mail.walk():
+            if part.get_content_type() == "text/plain":
+                text = part.get_payload(decode=True).decode("UTF-8")
+                break
+
+        if not text:
+            self.fail("Could not find text portion of email to parse")
+
+        match = re.search(r"https://example.com\S+", text)
+        assert match, "Could not find link in email"
+
+        return match.group(0)
-- 
cgit 1.4.1
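
A minimal sketch of the guard pattern this change introduces in the profile handler (the plain function and `PermissionError` here stand in for the handler plumbing): the first write is always allowed so accounts can still be provisioned, later changes are rejected when the option is off, and admins bypass the check:

```python
from typing import Optional


def check_displayname_change(
    enable_set_displayname: bool,
    current_displayname: Optional[str],
    by_admin: bool,
) -> None:
    # Mirrors the logic added to synapse/handlers/profile.py above.
    if not by_admin and not enable_set_displayname and current_displayname:
        raise PermissionError("Changing display name is disabled on this server")


check_displayname_change(False, None, by_admin=False)       # first set: allowed
check_displayname_change(False, "Frank", by_admin=True)     # admin: allowed
# check_displayname_change(False, "Frank", by_admin=False)  # would raise
```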


From fb69690761762092c8e44d509d4f72408c4c67e0 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Fri, 27 Mar 2020 20:16:43 +0100
Subject: Admin API to join users to a room. (#7051)

---
 changelog.d/7051.feature          |   1 +
 docs/admin_api/room_membership.md |  34 +++++
 synapse/rest/admin/__init__.py    |   7 +-
 synapse/rest/admin/rooms.py       |  79 ++++++++++-
 tests/rest/admin/test_room.py     | 288 ++++++++++++++++++++++++++++++++++++++
 5 files changed, 405 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/7051.feature
 create mode 100644 docs/admin_api/room_membership.md
 create mode 100644 tests/rest/admin/test_room.py

(limited to 'changelog.d')

diff --git a/changelog.d/7051.feature b/changelog.d/7051.feature
new file mode 100644
index 0000000000..3e36a3f65e
--- /dev/null
+++ b/changelog.d/7051.feature
@@ -0,0 +1 @@
+Admin API `POST /_synapse/admin/v1/join/<room_id_or_alias>` to join users to a room, similar to `auto_join_rooms` at user creation.
\ No newline at end of file
diff --git a/docs/admin_api/room_membership.md b/docs/admin_api/room_membership.md
new file mode 100644
index 0000000000..16736d3d37
--- /dev/null
+++ b/docs/admin_api/room_membership.md
@@ -0,0 +1,34 @@
+# Edit Room Membership API
+
+This API allows an administrator to join a user account with a given `user_id`
+to a room with a given `room_id_or_alias`. You can only modify the membership of
+local users. The server administrator must be in the room and have permission to
+invite users.
+
+## Parameters
+
+The following parameters are available:
+
+* `user_id` - Fully qualified user ID: for example, `@user:server.com`.
+* `room_id_or_alias` - The room identifier or alias to join: for example,
+  `!636q39766251:server.com`.
+
+## Usage
+
+```
+POST /_synapse/admin/v1/join/<room_id_or_alias>
+
+{
+  "user_id": "@user:server.com"
+}
+```
+
+The request must include an `access_token` belonging to a server admin.
+
+Response:
+
+```
+{
+  "room_id": "!636q39766251:server.com"
+}
+```
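
As a usage illustration (not part of the patch itself), the endpoint could be called from Python roughly as follows; the homeserver URL, access token and identifiers below are placeholders:

```python
# Hypothetical call to the join endpoint using the `requests` library.
# URL, token and IDs are placeholders; error handling is omitted.
import requests

resp = requests.post(
    "https://homeserver.example/_synapse/admin/v1/join/!636q39766251:server.com",
    headers={"Authorization": "Bearer <admin_access_token>"},
    json={"user_id": "@user:server.com"},
)
resp.raise_for_status()
print(resp.json())  # expected: {"room_id": "!636q39766251:server.com"}
```
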
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 42cc2b062a..ed70d448a1 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -29,7 +29,11 @@ from synapse.rest.admin._base import (
 from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
 from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
 from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet
-from synapse.rest.admin.rooms import ListRoomRestServlet, ShutdownRoomRestServlet
+from synapse.rest.admin.rooms import (
+    JoinRoomAliasServlet,
+    ListRoomRestServlet,
+    ShutdownRoomRestServlet,
+)
 from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
 from synapse.rest.admin.users import (
     AccountValidityRenewServlet,
@@ -189,6 +193,7 @@ def register_servlets(hs, http_server):
     """
     register_servlets_for_client_rest_resource(hs, http_server)
     ListRoomRestServlet(hs).register(http_server)
+    JoinRoomAliasServlet(hs).register(http_server)
     PurgeRoomServlet(hs).register(http_server)
     SendServerNoticeServlet(hs).register(http_server)
     VersionServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index f9b8c0a4f0..659b8a10ee 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -13,9 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import List, Optional
 
-from synapse.api.constants import Membership
-from synapse.api.errors import Codes, SynapseError
+from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.servlet import (
     RestServlet,
     assert_params_in_dict,
@@ -29,7 +30,7 @@ from synapse.rest.admin._base import (
     historical_admin_path_patterns,
 )
 from synapse.storage.data_stores.main.room import RoomSortOrder
-from synapse.types import create_requester
+from synapse.types import RoomAlias, RoomID, UserID, create_requester
 from synapse.util.async_helpers import maybe_awaitable
 
 logger = logging.getLogger(__name__)
@@ -237,3 +238,75 @@ class ListRoomRestServlet(RestServlet):
                 response["prev_batch"] = 0
 
         return 200, response
+
+
+class JoinRoomAliasServlet(RestServlet):
+
+    PATTERNS = admin_patterns("/join/(?P<room_identifier>[^/]*)")
+
+    def __init__(self, hs):
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.room_member_handler = hs.get_room_member_handler()
+        self.admin_handler = hs.get_handlers().admin_handler
+        self.state_handler = hs.get_state_handler()
+
+    async def on_POST(self, request, room_identifier):
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        content = parse_json_object_from_request(request)
+
+        assert_params_in_dict(content, ["user_id"])
+        target_user = UserID.from_string(content["user_id"])
+
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "This endpoint can only be used with local users")
+
+        if not await self.admin_handler.get_user(target_user):
+            raise NotFoundError("User not found")
+
+        if RoomID.is_valid(room_identifier):
+            room_id = room_identifier
+            try:
+                remote_room_hosts = [
+                    x.decode("ascii") for x in request.args[b"server_name"]
+                ]  # type: Optional[List[str]]
+            except Exception:
+                remote_room_hosts = None
+        elif RoomAlias.is_valid(room_identifier):
+            handler = self.room_member_handler
+            room_alias = RoomAlias.from_string(room_identifier)
+            room_id, remote_room_hosts = await handler.lookup_room_alias(room_alias)
+            room_id = room_id.to_string()
+        else:
+            raise SynapseError(
+                400, "%s was not legal room ID or room alias" % (room_identifier,)
+            )
+
+        fake_requester = create_requester(target_user)
+
+        # If the room's join rules are not public (e.g. "JoinRules.INVITE"), the
+        # target user needs an invite from the admin requester before they can join.
+        room_state = await self.state_handler.get_current_state(room_id)
+        join_rules_event = room_state.get((EventTypes.JoinRules, ""))
+        if join_rules_event:
+            if not (join_rules_event.content.get("join_rule") == JoinRules.PUBLIC):
+                await self.room_member_handler.update_membership(
+                    requester=requester,
+                    target=fake_requester.user,
+                    room_id=room_id,
+                    action="invite",
+                    remote_room_hosts=remote_room_hosts,
+                    ratelimit=False,
+                )
+
+        await self.room_member_handler.update_membership(
+            requester=fake_requester,
+            target=fake_requester.user,
+            room_id=room_id,
+            action="join",
+            remote_room_hosts=remote_room_hosts,
+            ratelimit=False,
+        )
+
+        return 200, {"room_id": room_id}
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
new file mode 100644
index 0000000000..672cc3eac5
--- /dev/null
+++ b/tests/rest/admin/test_room.py
@@ -0,0 +1,288 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Dirk Klimpel
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+import synapse.rest.admin
+from synapse.api.errors import Codes
+from synapse.rest.client.v1 import login, room
+
+from tests import unittest
+
+"""Tests admin REST events for /rooms paths."""
+
+
+class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, homeserver):
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.creator = self.register_user("creator", "test")
+        self.creator_tok = self.login("creator", "test")
+
+        self.second_user_id = self.register_user("second", "test")
+        self.second_tok = self.login("second", "test")
+
+        self.public_room_id = self.helper.create_room_as(
+            self.creator, tok=self.creator_tok, is_public=True
+        )
+        self.url = "/_synapse/admin/v1/join/{}".format(self.public_room_id)
+
+    def test_requester_is_no_admin(self):
+        """
+        If the user is not a server admin, an error 403 is returned.
+        """
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.second_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+    def test_invalid_parameter(self):
+        """
+        If a parameter is missing, a 400 error is returned.
+        """
+        body = json.dumps({"unknown_parameter": "@unknown:test"})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
+
+    def test_local_user_does_not_exist(self):
+        """
+        Tests that a lookup for a user that does not exist returns a 404
+        """
+        body = json.dumps({"user_id": "@unknown:test"})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+
+    def test_remote_user(self):
+        """
+        Check that only local users can be joined to rooms.
+        """
+        body = json.dumps({"user_id": "@not:exist.bla"})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(
+            "This endpoint can only be used with local users",
+            channel.json_body["error"],
+        )
+
+    def test_room_does_not_exist(self):
+        """
+        Check that unknown rooms/servers return a 404 error.
+        """
+        body = json.dumps({"user_id": self.second_user_id})
+        url = "/_synapse/admin/v1/join/!unknown:test"
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("No known servers", channel.json_body["error"])
+
+    def test_room_is_not_valid(self):
+        """
+        Check that invalid room names return a 400 error.
+        """
+        body = json.dumps({"user_id": self.second_user_id})
+        url = "/_synapse/admin/v1/join/invalidroom"
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(
+            "invalidroom was not legal room ID or room alias",
+            channel.json_body["error"],
+        )
+
+    def test_join_public_room(self):
+        """
+        Test joining a local user to a public room with "JoinRules.PUBLIC".
+        """
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(self.public_room_id, channel.json_body["room_id"])
+
+        # Validate if user is a member of the room
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok,
+        )
+        self.render(request)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(self.public_room_id, channel.json_body["joined_rooms"][0])
+
+    def test_join_private_room_if_not_member(self):
+        """
+        Test joining a local user to a private room with "JoinRules.INVITE"
+        when the server admin is not a member of this room.
+        """
+        private_room_id = self.helper.create_room_as(
+            self.creator, tok=self.creator_tok, is_public=False
+        )
+        url = "/_synapse/admin/v1/join/{}".format(private_room_id)
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+    def test_join_private_room_if_member(self):
+        """
+        Test joining a local user to a private room with "JoinRules.INVITE",
+        when the server admin is a member of this room.
+        """
+        private_room_id = self.helper.create_room_as(
+            self.creator, tok=self.creator_tok, is_public=False
+        )
+        self.helper.invite(
+            room=private_room_id,
+            src=self.creator,
+            targ=self.admin_user,
+            tok=self.creator_tok,
+        )
+        self.helper.join(
+            room=private_room_id, user=self.admin_user, tok=self.admin_user_tok
+        )
+
+        # Validate if server admin is a member of the room
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
+
+        # Join user to room.
+
+        url = "/_synapse/admin/v1/join/{}".format(private_room_id)
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["room_id"])
+
+        # Validate if user is a member of the room
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok,
+        )
+        self.render(request)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
+
+    def test_join_private_room_if_owner(self):
+        """
+        Test joining a local user to a private room with "JoinRules.INVITE",
+        when the server admin is the owner of this room.
+        """
+        private_room_id = self.helper.create_room_as(
+            self.admin_user, tok=self.admin_user_tok, is_public=False
+        )
+        url = "/_synapse/admin/v1/join/{}".format(private_room_id)
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["room_id"])
+
+        # Validate if user is a member of the room
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok,
+        )
+        self.render(request)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
-- 
cgit 1.4.1


From 84f7eaed16a8169f1d70d047c9354c8232b9fb9f Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Fri, 27 Mar 2020 15:44:13 +0100
Subject: Improve the UX of the login fallback when using SSO (#7152)

* Don't show the login forms if we're currently logging in with a
  password or a token.
* Submit the SSO login form directly, showing only a spinner to the
  user, in order to eliminate the clunkiness of SSO through this
  fallback.
---
 changelog.d/7152.feature                |  1 +
 synapse/static/client/login/index.html  |  2 +-
 synapse/static/client/login/js/login.js | 51 +++++++++++++++++++--------------
 3 files changed, 32 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/7152.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7152.feature b/changelog.d/7152.feature
new file mode 100644
index 0000000000..fafa79c7e7
--- /dev/null
+++ b/changelog.d/7152.feature
@@ -0,0 +1 @@
+Improve the support for SSO authentication on the login fallback page.
diff --git a/synapse/static/client/login/index.html b/synapse/static/client/login/index.html
index bcb6bc6bb7..712b0e3980 100644
--- a/synapse/static/client/login/index.html
+++ b/synapse/static/client/login/index.html
@@ -9,7 +9,7 @@
 <body onload="matrixLogin.onLoad()">
     <center>
         <br/>
-        <h1>Log in with one of the following methods</h1>
+        <h1 id="title"></h1>
 
         <span id="feedback" style="display: none"></span>
 
diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js
index 276c271bbe..debe464371 100644
--- a/synapse/static/client/login/js/login.js
+++ b/synapse/static/client/login/js/login.js
@@ -1,37 +1,41 @@
 window.matrixLogin = {
     endpoint: location.origin + "/_matrix/client/r0/login",
     serverAcceptsPassword: false,
-    serverAcceptsCas: false,
     serverAcceptsSso: false,
 };
 
+var title_pre_auth = "Log in with one of the following methods";
+var title_post_auth = "Logging in...";
+
 var submitPassword = function(user, pwd) {
     console.log("Logging in with password...");
+    set_title(title_post_auth);
     var data = {
         type: "m.login.password",
         user: user,
         password: pwd,
     };
     $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) {
-        show_login();
         matrixLogin.onLogin(response);
     }).error(errorFunc);
 };
 
 var submitToken = function(loginToken) {
     console.log("Logging in with login token...");
+    set_title(title_post_auth);
     var data = {
         type: "m.login.token",
         token: loginToken
     };
     $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) {
-        show_login();
         matrixLogin.onLogin(response);
     }).error(errorFunc);
 };
 
 var errorFunc = function(err) {
-    show_login();
+    // We want to show the error to the user rather than redirecting immediately to the
+    // SSO portal (if SSO is the only login option), so we inhibit the redirect.
+    show_login(true);
 
     if (err.responseJSON && err.responseJSON.error) {
         setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")");
@@ -45,26 +49,33 @@ var setFeedbackString = function(text) {
     $("#feedback").text(text);
 };
 
-var show_login = function() {
-    $("#loading").hide();
-
+var show_login = function(inhibit_redirect) {
     var this_page = window.location.origin + window.location.pathname;
     $("#sso_redirect_url").val(this_page);
 
-    if (matrixLogin.serverAcceptsPassword) {
-        $("#password_flow").show();
+    // If inhibit_redirect is false, and SSO is the only supported login method, we can
+    // redirect straight to the SSO page
+    if (matrixLogin.serverAcceptsSso) {
+        if (!inhibit_redirect && !matrixLogin.serverAcceptsPassword) {
+            $("#sso_form").submit();
+            return;
+        }
+
+        // Otherwise, show the SSO form
+        $("#sso_form").show();
     }
 
-    if (matrixLogin.serverAcceptsSso) {
-        $("#sso_flow").show();
-    } else if (matrixLogin.serverAcceptsCas) {
-        $("#sso_form").attr("action", "/_matrix/client/r0/login/cas/redirect");
-        $("#sso_flow").show();
+    if (matrixLogin.serverAcceptsPassword) {
+        $("#password_flow").show();
     }
 
-    if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas && !matrixLogin.serverAcceptsSso) {
+    if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsSso) {
        $("#no_login_types").show();
     }
+
+    set_title(title_pre_auth);
+
+    $("#loading").hide();
 };
 
 var show_spinner = function() {
@@ -74,17 +85,15 @@ var show_spinner = function() {
     $("#loading").show();
 };
 
+var set_title = function(title) {
+    $("#title").text(title);
+};
 
 var fetch_info = function(cb) {
     $.get(matrixLogin.endpoint, function(response) {
         var serverAcceptsPassword = false;
-        var serverAcceptsCas = false;
         for (var i=0; i<response.flows.length; i++) {
-- 
cgit 1.4.1


From: Brendan Abolivier
Date: Fri, 27 Mar 2020 20:24:52 +0000
Subject: Always whitelist the login fallback for SSO (#7153)

That fallback sets the redirect URL to itself (so it can process the login
token then return gracefully to the client). This would make it pointless
to ask the user for confirmation, since the URL the confirmation page would
be showing wouldn't be the client's.
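
For context on why whitelisting matters here: when an SSO login completes, the redirect URL supplied by the client is checked against `sso_client_whitelist`, and the interstitial confirmation page is skipped on a match. A minimal sketch of such a prefix check follows; the helper name is illustrative and not taken from the patch:

```python
# Sketch of a prefix-based whitelist test; entries like "https://my.client/"
# are treated as URL prefixes (hence the advice to end them with a slash).
def is_client_whitelisted(redirect_url: str, client_whitelist: list) -> bool:
    return any(redirect_url.startswith(entry) for entry in client_whitelist)
```
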
---
 changelog.d/7153.feature           |  1 +
 docs/sample_config.yaml            |  4 ++++
 synapse/config/sso.py              | 15 +++++++++++++++
 tests/rest/client/v1/test_login.py |  9 ++++++++-
 4 files changed, 28 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7153.feature

(limited to 'changelog.d')

diff --git a/changelog.d/7153.feature b/changelog.d/7153.feature
new file mode 100644
index 0000000000..414ebe1f69
--- /dev/null
+++ b/changelog.d/7153.feature
@@ -0,0 +1 @@
+Always whitelist the login fallback in the SSO configuration if `public_baseurl` is set.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 545226f753..743949945a 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1444,6 +1444,10 @@ sso:
     # phishing attacks from evil.site. To avoid this, include a slash after the
     # hostname: "https://my.client/".
     #
+    # If public_baseurl is set, then the login fallback page (used by clients
+    # that don't natively support the required login flows) is whitelisted in
+    # addition to any URLs in this list.
+    #
     # By default, this list is empty.
     #
     #client_whitelist:
diff --git a/synapse/config/sso.py b/synapse/config/sso.py
index 95762689bc..ec3dca9efc 100644
--- a/synapse/config/sso.py
+++ b/synapse/config/sso.py
@@ -39,6 +39,17 @@ class SSOConfig(Config):
 
         self.sso_client_whitelist = sso_config.get("client_whitelist") or []
 
+        # Attempt to also whitelist the server's login fallback, since that fallback sets
+        # the redirect URL to itself (so it can process the login token then return
+        # gracefully to the client). This would make it pointless to ask the user for
+        # confirmation, since the URL the confirmation page would be showing wouldn't be
+        # the client's.
+        # public_baseurl is an optional setting, so we only add the fallback's URL to the
+        # list if it's provided (because we can't figure out what that URL is otherwise).
+        if self.public_baseurl:
+            login_fallback_url = self.public_baseurl + "_matrix/static/client/login"
+            self.sso_client_whitelist.append(login_fallback_url)
+
     def generate_config_section(self, **kwargs):
         return """\
         # Additional settings to use with single-sign on systems such as SAML2 and CAS.
@@ -54,6 +65,10 @@ class SSOConfig(Config):
         # phishing attacks from evil.site. To avoid this, include a slash after the
         # hostname: "https://my.client/".
         #
+        # If public_baseurl is set, then the login fallback page (used by clients
+        # that don't natively support the required login flows) is whitelisted in
+        # addition to any URLs in this list.
+        #
         # By default, this list is empty.
         #
         #client_whitelist:
diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py
index da2c9bfa1e..aed8853d6e 100644
--- a/tests/rest/client/v1/test_login.py
+++ b/tests/rest/client/v1/test_login.py
@@ -350,7 +350,14 @@ class CASRedirectConfirmTestCase(unittest.HomeserverTestCase):
     def test_cas_redirect_whitelisted(self):
         """Tests that the SSO login flow serves a redirect to a whitelisted url
         """
-        redirect_url = "https://legit-site.com/"
+        self._test_redirect("https://legit-site.com/")
+
+    @override_config({"public_baseurl": "https://example.com"})
+    def test_cas_redirect_login_fallback(self):
+        self._test_redirect("https://example.com/_matrix/static/client/login")
+
+    def _test_redirect(self, redirect_url):
+        """Tests that the SSO login flow serves a redirect for the given redirect URL."""
         cas_ticket_url = (
             "/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket"
             % (urllib.parse.quote(redirect_url))
-- 
cgit 1.4.1


From c5f89fba55b2529b2c8a76e272a21d551ffa82fe Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Mon, 30 Mar 2020 07:28:42 -0400
Subject: Add developer documentation for running a local CAS server (#7147)

---
 changelog.d/7147.doc |  1 +
 docs/dev/cas.md      | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 docs/dev/saml.md     |  8 +++++--
 3 files changed, 71 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/7147.doc
 create mode 100644 docs/dev/cas.md

(limited to 'changelog.d')

diff --git a/changelog.d/7147.doc b/changelog.d/7147.doc
new file mode 100644
index 0000000000..2c855ff5f7
--- /dev/null
+++ b/changelog.d/7147.doc
@@ -0,0 +1 @@
+Add documentation for running a local CAS server for testing.
diff --git a/docs/dev/cas.md b/docs/dev/cas.md
new file mode 100644
index 0000000000..f8d02cc82c
--- /dev/null
+++ b/docs/dev/cas.md
@@ -0,0 +1,64 @@
+# How to test CAS as a developer without a server
+
+The [django-mama-cas](https://github.com/jbittel/django-mama-cas) project is an
+easy-to-run CAS implementation built on top of Django.
+
+## Prerequisites
+
+1. Create a new virtualenv: `python3 -m venv <your virtualenv directory>`
+2. Activate your virtualenv: `source /path/to/your/virtualenv/bin/activate`
+3. Install Django and django-mama-cas:
+   ```
+   python -m pip install "django<3" "django-mama-cas==2.4.0"
+   ```
+4. Create a Django project in the current directory:
+   ```
+   django-admin startproject cas_test .
+   ```
+5. Follow the [install directions](https://django-mama-cas.readthedocs.io/en/latest/installation.html#configuring) for django-mama-cas
+6. Setup the SQLite database: `python manage.py migrate`
+7. Create a user:
+   ```
+   python manage.py createsuperuser
+   ```
+   1. Use whatever you want as the username and password.
+   2. Leave the other fields blank.
+8. Use the built-in Django test server to serve the CAS endpoints on port 8000:
+   ```
+   python manage.py runserver
+   ```
+
+You should now have a Django project configured to serve CAS authentication with
+a single user created.
+
+## Configure Synapse (and Riot) to use CAS
+
+1. Modify your `homeserver.yaml` to enable CAS and point it to your locally
+   running Django test server:
+   ```yaml
+   cas_config:
+     enabled: true
+     server_url: "http://localhost:8000"
+     service_url: "http://localhost:8081"
+     #displayname_attribute: name
+     #required_attributes:
+     #  name: value
+   ```
+2. Restart Synapse.
+
+Note that the above configuration assumes the homeserver is running on port 8081
+and that the CAS server is on port 8000, both on localhost.
+
+## Testing the configuration
+
+Then in Riot:
+
+1. Visit the login page with a Riot pointing at your homeserver.
+2. Click the Single Sign-On button.
+3. Login using the credentials created with `createsuperuser`.
+4. You should be logged in.
+
+If you want to repeat this process you'll need to manually logout first:
+
+1. http://localhost:8000/admin/
+2. Click "logout" in the top right.
diff --git a/docs/dev/saml.md b/docs/dev/saml.md
index f41aadce47..a9bfd2dc05 100644
--- a/docs/dev/saml.md
+++ b/docs/dev/saml.md
@@ -18,9 +18,13 @@ To make Synapse (and therefore Riot) use it:
        metadata:
          local: ["samling.xml"]
    ```
-5. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure
+5. Ensure that your `homeserver.yaml` has a setting for `public_baseurl`:
+   ```yaml
+   public_baseurl: http://localhost:8080/
+   ```
+6. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure
    the dependencies are installed and ready to go.
-6. Restart Synapse.
+7. Restart Synapse.
 
 Then in Riot:
-- 
cgit 1.4.1


From 4f21c33be301b8ea6369039c3ad8baa51878e4d5 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 30 Mar 2020 16:37:24 +0100
Subject: Remove usage of "conn_id" for presence. (#7128)

* Remove `conn_id` usage for UserSyncCommand.

Each tcp replication connection is assigned a "conn_id", which is used
to give an ID to a remotely connected worker. In a redis world, there
will no longer be a one to one mapping between connection and instance,
so instead we need to replace such usages with an ID generated by the
remote instances and included in the replication commands. This really
only affects UserSyncCommand.

* Add CLEAR_USER_SYNCS command that is sent on shutdown.

This should help with the case where a synchrotron gets restarted
gracefully, rather than relying on the 5 minute timeout.
---
 changelog.d/7128.misc               |  1 +
 docs/tcp_replication.md             |  6 ++++++
 synapse/app/generic_worker.py       | 20 ++++++++++++++++----
 synapse/replication/tcp/client.py   |  6 ++++--
 synapse/replication/tcp/commands.py | 36 ++++++++++++++++++++++++++++++++----
 synapse/replication/tcp/protocol.py |  9 +++++++--
 synapse/replication/tcp/resource.py | 17 +++++++----------
 synapse/server.py                   | 11 +++++++++++
 synapse/server.pyi                  |  2 ++
 9 files changed, 86 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/7128.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7128.misc b/changelog.d/7128.misc
new file mode 100644
index 0000000000..5703f6d2ec
--- /dev/null
+++ b/changelog.d/7128.misc
@@ -0,0 +1 @@
+Add explicit `instance_id` for USER_SYNC commands and remove implicit `conn_id` usage.
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
index d4f7d9ec18..3be8e50c4c 100644
--- a/docs/tcp_replication.md
+++ b/docs/tcp_replication.md
@@ -198,6 +198,12 @@ Asks the server for the current position of all streams.
 
    A user has started or stopped syncing
 
+#### CLEAR_USER_SYNC (C)
+
+   The server should clear all associated user sync data from the worker.
+
+   This is used when a worker is shutting down.
+
 #### FEDERATION_ACK (C)
 
    Acknowledge receipt of some federation data
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index fba7ad9551..1ee266f7c5 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -65,6 +65,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.replication.tcp.commands import ClearUserSyncsCommand
 from synapse.replication.tcp.streams import (
     AccountDataStream,
     DeviceListsStream,
@@ -124,7 +125,6 @@ from synapse.types import ReadReceipt
 from synapse.util.async_helpers import Linearizer
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
-from synapse.util.stringutils import random_string
 from synapse.util.versionstring import get_version_string
 
 logger = logging.getLogger("synapse.app.generic_worker")
@@ -233,6 +233,7 @@ class GenericWorkerPresence(object):
         self.user_to_num_current_syncs = {}
         self.clock = hs.get_clock()
         self.notifier = hs.get_notifier()
+        self.instance_id = hs.get_instance_id()
 
         active_presence = self.store.take_presence_startup_info()
         self.user_to_current_state = {state.user_id: state for state in active_presence}
@@ -245,13 +246,24 @@ class GenericWorkerPresence(object):
             self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
         )
 
-        self.process_id = random_string(16)
-        logger.info("Presence process_id is %r", self.process_id)
+        hs.get_reactor().addSystemEventTrigger(
+            "before",
+            "shutdown",
+            run_as_background_process,
+            "generic_presence.on_shutdown",
+            self._on_shutdown,
+        )
+
+    def _on_shutdown(self):
+        if self.hs.config.use_presence:
+            self.hs.get_tcp_replication().send_command(
+                ClearUserSyncsCommand(self.instance_id)
+            )
 
     def send_user_sync(self, user_id, is_syncing, last_sync_ms):
         if self.hs.config.use_presence:
             self.hs.get_tcp_replication().send_user_sync(
-                user_id, is_syncing, last_sync_ms
+                self.instance_id, user_id, is_syncing, last_sync_ms
             )
 
     def mark_as_coming_online(self, user_id):
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 7e7ad0f798..e86d9805f1 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -189,10 +189,12 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
         """
         self.send_command(FederationAckCommand(token))
 
-    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
+    def send_user_sync(self, instance_id, user_id, is_syncing, last_sync_ms):
         """Poke the master that a user has started/stopped syncing.
""" - self.send_command(UserSyncCommand(user_id, is_syncing, last_sync_ms)) + self.send_command( + UserSyncCommand(instance_id, user_id, is_syncing, last_sync_ms) + ) def send_remove_pusher(self, app_id, push_key, user_id): """Poke the master to remove a pusher for a user diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 5a6b734094..e4eec643f7 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -207,30 +207,32 @@ class UserSyncCommand(Command): Format:: - USER_SYNC + USER_SYNC Where is either "start" or "stop" """ NAME = "USER_SYNC" - def __init__(self, user_id, is_syncing, last_sync_ms): + def __init__(self, instance_id, user_id, is_syncing, last_sync_ms): + self.instance_id = instance_id self.user_id = user_id self.is_syncing = is_syncing self.last_sync_ms = last_sync_ms @classmethod def from_line(cls, line): - user_id, state, last_sync_ms = line.split(" ", 2) + instance_id, user_id, state, last_sync_ms = line.split(" ", 3) if state not in ("start", "end"): raise Exception("Invalid USER_SYNC state %r" % (state,)) - return cls(user_id, state == "start", int(last_sync_ms)) + return cls(instance_id, user_id, state == "start", int(last_sync_ms)) def to_line(self): return " ".join( ( + self.instance_id, self.user_id, "start" if self.is_syncing else "end", str(self.last_sync_ms), @@ -238,6 +240,30 @@ class UserSyncCommand(Command): ) +class ClearUserSyncsCommand(Command): + """Sent by the client to inform the server that it should drop all + information about syncing users sent by the client. + + Mainly used when client is about to shut down. + + Format:: + + CLEAR_USER_SYNC + """ + + NAME = "CLEAR_USER_SYNC" + + def __init__(self, instance_id): + self.instance_id = instance_id + + @classmethod + def from_line(cls, line): + return cls(line) + + def to_line(self): + return self.instance_id + + class FederationAckCommand(Command): """Sent by the client when it has processed up to a given point in the federation stream. This allows the master to drop in-memory caches of the @@ -398,6 +424,7 @@ _COMMANDS = ( InvalidateCacheCommand, UserIpCommand, RemoteServerUpCommand, + ClearUserSyncsCommand, ) # type: Tuple[Type[Command], ...] # Map of command name to command type. @@ -420,6 +447,7 @@ VALID_CLIENT_COMMANDS = ( ReplicateCommand.NAME, PingCommand.NAME, UserSyncCommand.NAME, + ClearUserSyncsCommand.NAME, FederationAckCommand.NAME, RemovePusherCommand.NAME, InvalidateCacheCommand.NAME, diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index f81d2e2442..dae246825f 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -423,9 +423,12 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): async def on_USER_SYNC(self, cmd): await self.streamer.on_user_sync( - self.conn_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms + cmd.instance_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms ) + async def on_CLEAR_USER_SYNC(self, cmd): + await self.streamer.on_clear_user_syncs(cmd.instance_id) + async def on_REPLICATE(self, cmd): # Subscribe to all streams we're publishing to. 
         for stream_name in self.streamer.streams_by_name:
@@ -551,6 +554,8 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
     ):
         BaseReplicationStreamProtocol.__init__(self, clock)
 
+        self.instance_id = hs.get_instance_id()
+
         self.client_name = client_name
         self.server_name = server_name
         self.handler = handler
@@ -580,7 +585,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
         currently_syncing = self.handler.get_currently_syncing_users()
         now = self.clock.time_msec()
         for user_id in currently_syncing:
-            self.send_command(UserSyncCommand(user_id, True, now))
+            self.send_command(UserSyncCommand(self.instance_id, user_id, True, now))
 
         # We've now finished connecting to so inform the client handler
         self.handler.update_connection(self)
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index 4374e99e32..8b6067e20d 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -251,14 +251,19 @@ class ReplicationStreamer(object):
             self.federation_sender.federation_ack(token)
 
     @measure_func("repl.on_user_sync")
-    async def on_user_sync(self, conn_id, user_id, is_syncing, last_sync_ms):
+    async def on_user_sync(self, instance_id, user_id, is_syncing, last_sync_ms):
         """A client has started/stopped syncing on a worker.
         """
         user_sync_counter.inc()
         await self.presence_handler.update_external_syncs_row(
-            conn_id, user_id, is_syncing, last_sync_ms
+            instance_id, user_id, is_syncing, last_sync_ms
         )
 
+    async def on_clear_user_syncs(self, instance_id):
+        """A replication client wants us to drop all their UserSync data.
+        """
+        await self.presence_handler.update_external_syncs_clear(instance_id)
+
     @measure_func("repl.on_remove_pusher")
     async def on_remove_pusher(self, app_id, push_key, user_id):
         """A client has asked us to remove a pusher
@@ -321,14 +326,6 @@ class ReplicationStreamer(object):
             except ValueError:
                 pass
 
-        # We need to tell the presence handler that the connection has been
-        # lost so that it can handle any ongoing syncs on that connection.
-        run_as_background_process(
-            "update_external_syncs_clear",
-            self.presence_handler.update_external_syncs_clear,
-            connection.conn_id,
-        )
-
 
 def _batch_updates(updates):
     """Takes a list of updates of form [(token, row)] and sets the token to
diff --git a/synapse/server.py b/synapse/server.py
index c7ca2bda0d..cd86475d6b 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -103,6 +103,7 @@ from synapse.storage import DataStores, Storage
 from synapse.streams.events import EventSources
 from synapse.util import Clock
 from synapse.util.distributor import Distributor
+from synapse.util.stringutils import random_string
 
 logger = logging.getLogger(__name__)
 
@@ -230,6 +231,8 @@ class HomeServer(object):
         self._listening_services = []
         self.start_time = None
 
+        self.instance_id = random_string(5)
+
         self.clock = Clock(reactor)
         self.distributor = Distributor()
         self.ratelimiter = Ratelimiter()
@@ -242,6 +245,14 @@ class HomeServer(object):
         for depname in kwargs:
             setattr(self, depname, kwargs[depname])
 
+    def get_instance_id(self):
+        """A unique ID for this synapse process instance.
+
+        This is used to distinguish running instances in worker-based
+        deployments.
+ """ + return self.instance_id + def setup(self): logger.info("Setting up.") self.start_time = int(self.get_clock().time()) diff --git a/synapse/server.pyi b/synapse/server.pyi index 3844f0e12f..9d1dfa71e7 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -114,3 +114,5 @@ class HomeServer(object): pass def is_mine_id(self, domain_id: str) -> bool: pass + def get_instance_id(self) -> str: + pass -- cgit 1.4.1 From d9f29f8daef2f49464382b0e80ee93ff38681e99 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 30 Mar 2020 17:38:21 +0100 Subject: Fix a small typo in the `metrics_flags` config option. (#7171) --- changelog.d/7171.doc | 1 + docs/sample_config.yaml | 2 +- synapse/config/metrics.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7171.doc (limited to 'changelog.d') diff --git a/changelog.d/7171.doc b/changelog.d/7171.doc new file mode 100644 index 0000000000..25a3bd8ac6 --- /dev/null +++ b/changelog.d/7171.doc @@ -0,0 +1 @@ +Fix a small typo in the `metrics_flags` config option. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 743949945a..6a770508f9 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1144,7 +1144,7 @@ account_threepid_delegates: # enabled by default, either for performance reasons or limited use. # metrics_flags: - # Publish synapse_federation_known_servers, a g auge of the number of + # Publish synapse_federation_known_servers, a gauge of the number of # servers this homeserver knows about, including itself. May cause # performance problems on large homeservers. # diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py index 22538153e1..6f517a71d0 100644 --- a/synapse/config/metrics.py +++ b/synapse/config/metrics.py @@ -86,7 +86,7 @@ class MetricsConfig(Config): # enabled by default, either for performance reasons or limited use. # metrics_flags: - # Publish synapse_federation_known_servers, a g auge of the number of + # Publish synapse_federation_known_servers, a gauge of the number of # servers this homeserver knows about, including itself. May cause # performance problems on large homeservers. # -- cgit 1.4.1 From 7042840b3201644ee71ea3e446576aa347b6d2a3 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 30 Mar 2020 17:53:25 +0100 Subject: Transfer alias mappings when joining an upgraded room (#6946) --- changelog.d/6946.bugfix | 1 + synapse/handlers/room_member.py | 3 +++ synapse/storage/data_stores/main/directory.py | 26 +++++++++++++++++++++++--- 3 files changed, 27 insertions(+), 3 deletions(-) create mode 100644 changelog.d/6946.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6946.bugfix b/changelog.d/6946.bugfix new file mode 100644 index 0000000000..a238c83a18 --- /dev/null +++ b/changelog.d/6946.bugfix @@ -0,0 +1 @@ +Transfer alias mappings on room upgrade. 
\ No newline at end of file
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 4260426369..c3ee8db4f0 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -519,6 +519,9 @@ class RoomMemberHandler(object):
             yield self.store.set_room_is_public(old_room_id, False)
             yield self.store.set_room_is_public(room_id, True)
 
+        # Transfer alias mappings in the room directory
+        yield self.store.update_aliases_for_room(old_room_id, room_id)
+
         # Check if any groups we own contain the predecessor room
         local_group_ids = yield self.store.get_local_groups_for_room(old_room_id)
         for group_id in local_group_ids:
diff --git a/synapse/storage/data_stores/main/directory.py b/synapse/storage/data_stores/main/directory.py
index c9e7de7d12..e1d1bc3e05 100644
--- a/synapse/storage/data_stores/main/directory.py
+++ b/synapse/storage/data_stores/main/directory.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 from collections import namedtuple
+from typing import Optional
 
 from twisted.internet import defer
 
@@ -159,10 +160,29 @@ class DirectoryStore(DirectoryWorkerStore):
 
         return room_id
 
-    def update_aliases_for_room(self, old_room_id, new_room_id, creator):
+    def update_aliases_for_room(
+        self, old_room_id: str, new_room_id: str, creator: Optional[str] = None,
+    ):
+        """Repoint all of the aliases for a given room, to a different room.
+
+        Args:
+            old_room_id:
+            new_room_id:
+            creator: The user to record as the creator of the new mapping.
+                If None, the creator will be left unchanged.
+        """
+
        def _update_aliases_for_room_txn(txn):
-            sql = "UPDATE room_aliases SET room_id = ?, creator = ? WHERE room_id = ?"
-            txn.execute(sql, (new_room_id, creator, old_room_id))
+            update_creator_sql = ""
+            sql_params = (new_room_id, old_room_id)
+            if creator:
+                update_creator_sql = ", creator = ?"
+                sql_params = (new_room_id, creator, old_room_id)
+
+            sql = "UPDATE room_aliases SET room_id = ? %s WHERE room_id = ?" % (
+                update_creator_sql,
+            )
+            txn.execute(sql, sql_params)
             self._invalidate_cache_and_stream(
                 txn, self.get_aliases_for_room, (old_room_id,)
             )
-- 
cgit 1.4.1


From 7966a1cde9d4b598faa06620424844f2b35c94af Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 30 Mar 2020 19:06:52 +0100
Subject: Rewrite prune_old_outbound_device_pokes for efficiency (#7159)

make sure we clear out all but one update for the user
---
 changelog.d/7159.bugfix                     |  1 +
 synapse/handlers/federation.py              | 25 +-------
 synapse/storage/data_stores/main/devices.py | 71 ++++++++++++++++++----
 synapse/util/stringutils.py                 | 21 ++++++-
 tests/federation/test_federation_sender.py  | 92 +++++++++++++++++++++++++++++
 5 files changed, 173 insertions(+), 37 deletions(-)
 create mode 100644 changelog.d/7159.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7159.bugfix b/changelog.d/7159.bugfix
new file mode 100644
index 0000000000..1b341b127b
--- /dev/null
+++ b/changelog.d/7159.bugfix
@@ -0,0 +1 @@
+Fix excessive CPU usage by `prune_old_outbound_device_pokes` job.
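
The devices.py diff below implements the pruning rule the commit message describes: for each (destination, user) pair, keep exactly one row at the highest stream_id and delete the rest, so the remote server can still be told to resync. A toy in-memory sketch of the same idea, not part of the patch, may help before reading the SQL:

```python
# Toy model of the pruning rule applied by the SQL below: keep one
# (arbitrary) device at the highest stream_id per (destination, user_id).
def prune_pokes(pokes):
    """pokes: iterable of (destination, user_id, stream_id, device_id)."""
    keep = {}
    for dest, user, stream_id, device in pokes:
        best = keep.get((dest, user))
        if best is None or stream_id > best[0]:
            keep[(dest, user)] = (stream_id, device)
    return [(d, u, s, dev) for (d, u), (s, dev) in keep.items()]
```
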
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 38ab6a8fc3..c7aa7acf3b 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -49,6 +49,7 @@ from synapse.event_auth import auth_types_for_event
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.events.validator import EventValidator
+from synapse.handlers._base import BaseHandler
 from synapse.logging.context import (
     make_deferred_yieldable,
     nested_logging_context,
@@ -69,10 +70,9 @@ from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.distributor import user_joined_room
 from synapse.util.retryutils import NotRetryingDestination
+from synapse.util.stringutils import shortstr
 from synapse.visibility import filter_events_for_server
 
-from ._base import BaseHandler
-
 logger = logging.getLogger(__name__)
 
 
@@ -93,27 +93,6 @@ class _NewEventInfo:
     auth_events = attr.ib(type=Optional[StateMap[EventBase]], default=None)
 
 
-def shortstr(iterable, maxitems=5):
-    """If iterable has maxitems or fewer, return the stringification of a list
-    containing those items.
-
-    Otherwise, return the stringification of a a list with the first maxitems items,
-    followed by "...".
-
-    Args:
-        iterable (Iterable): iterable to truncate
-        maxitems (int): number of items to return before truncating
-
-    Returns:
-        unicode
-    """
-
-    items = list(itertools.islice(iterable, maxitems + 1))
-    if len(items) <= maxitems:
-        return str(items)
-    return "[" + ", ".join(repr(r) for r in items[:maxitems]) + ", ...]"
-
-
 class FederationHandler(BaseHandler):
     """Handles events that originated from federation.
     Responsible for:
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py
index 2d47cfd131..3140e1b722 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/data_stores/main/devices.py
@@ -41,6 +41,7 @@ from synapse.util.caches.descriptors import (
     cachedList,
 )
 from synapse.util.iterutils import batch_iter
+from synapse.util.stringutils import shortstr
 
 logger = logging.getLogger(__name__)
 
@@ -1092,18 +1093,47 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             ],
         )
 
-    def _prune_old_outbound_device_pokes(self):
+    def _prune_old_outbound_device_pokes(self, prune_age=24 * 60 * 60 * 1000):
         """Delete old entries out of the device_lists_outbound_pokes to ensure
-        that we don't fill up due to dead servers. We keep one entry per
-        (destination, user_id) tuple to ensure that the prev_ids remain correct
-        if the server does come back.
+        that we don't fill up due to dead servers.
+
+        Normally, we try to send device updates as a delta since a previous known point:
+        this is done by setting the prev_id in the m.device_list_update EDU. However,
+        for that to work, we have to have a complete record of each change to
+        each device, which can add up to quite a lot of data.
+
+        An alternative mechanism is that, if the remote server sees that it has missed
+        an entry in the stream_id sequence for a given user, it will request a full
+        list of that user's devices. Hence, we can reduce the amount of data we have to
+        store (and transmit in some future transaction), by clearing almost everything
+        for a given destination out of the database, and having the remote server
+        resync.
+
+        All we need to do is make sure we keep at least one row for each
+        (user, destination) pair, to remind us to send a m.device_list_update EDU for
+        that user when the destination comes back. It doesn't matter which device
+        we keep.
+        """
-        yesterday = self._clock.time_msec() - 24 * 60 * 60 * 1000
+        yesterday = self._clock.time_msec() - prune_age
 
         def _prune_txn(txn):
+            # look for (user, destination) pairs which have an update older than
+            # the cutoff.
+            #
+            # For each pair, we also need to know the most recent stream_id, and
+            # an arbitrary device_id at that stream_id.
             select_sql = """
-                SELECT destination, user_id, max(stream_id) as stream_id
-                FROM device_lists_outbound_pokes
+                SELECT
+                    dlop1.destination,
+                    dlop1.user_id,
+                    MAX(dlop1.stream_id) AS stream_id,
+                    (SELECT MIN(dlop2.device_id) AS device_id FROM
+                        device_lists_outbound_pokes dlop2
+                        WHERE dlop2.destination = dlop1.destination AND
+                          dlop2.user_id=dlop1.user_id AND
+                          dlop2.stream_id=MAX(dlop1.stream_id)
+                    )
+                FROM device_lists_outbound_pokes dlop1
                 GROUP BY destination, user_id
                 HAVING min(ts) < ? AND count(*) > 1
             """
@@ -1114,14 +1144,29 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             if not rows:
                 return
 
+            logger.info(
+                "Pruning old outbound device list updates for %i users/destinations: %s",
+                len(rows),
+                shortstr((row[0], row[1]) for row in rows),
+            )
+
+            # we want to keep the update with the highest stream_id for each user.
+            #
+            # there might be more than one update (with different device_ids) with the
+            # same stream_id, so we also delete all but one rows with the max stream id.
             delete_sql = """
                 DELETE FROM device_lists_outbound_pokes
-                WHERE ts < ? AND destination = ? AND user_id = ? AND stream_id < ?
+                WHERE destination = ? AND user_id = ? AND (
+                    stream_id < ? OR
+                    (stream_id = ? AND device_id != ?)
+                )
             """
-
-            txn.executemany(
-                delete_sql, ((yesterday, row[0], row[1], row[2]) for row in rows)
-            )
+            count = 0
+            for (destination, user_id, stream_id, device_id) in rows:
+                txn.execute(
+                    delete_sql, (destination, user_id, stream_id, stream_id, device_id)
+                )
+                count += txn.rowcount
 
             # Since we've deleted unsent deltas, we need to remove the entry
             # of last successful sent so that the prev_ids are correctly set.
@@ -1131,7 +1176,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             """
             txn.executemany(sql, ((row[0], row[1]) for row in rows))
 
-            logger.info("Pruned %d device list outbound pokes", txn.rowcount)
+            logger.info("Pruned %d device list outbound pokes", count)
 
         return run_as_background_process(
             "prune_old_outbound_device_pokes",
diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
index 2c0dcb5208..6899bcb788 100644
--- a/synapse/util/stringutils.py
+++ b/synapse/util/stringutils.py
@@ -13,10 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import itertools
 import random
 import re
 import string
+from collections import Iterable
 
 import six
 from six import PY2, PY3
@@ -126,3 +127,21 @@ def assert_valid_client_secret(client_secret):
         raise SynapseError(
             400, "Invalid client_secret parameter", errcode=Codes.INVALID_PARAM
         )
+
+
+def shortstr(iterable: Iterable, maxitems: int = 5) -> str:
+    """If iterable has maxitems or fewer, return the stringification of a list
+    containing those items.
+
+    Otherwise, return the stringification of a list with the first maxitems items,
+    followed by "...".
+
+    Args:
+        iterable: iterable to truncate
+        maxitems: number of items to return before truncating
+    """
+
+    items = list(itertools.islice(iterable, maxitems + 1))
+    if len(items) <= maxitems:
+        return str(items)
+    return "[" + ", ".join(repr(r) for r in items[:maxitems]) + ", ...]"
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index 7763b12159..a5fe5c6880 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -370,6 +370,98 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
         devices = {edu["content"]["device_id"] for edu in self.edus}
         self.assertEqual({"D1", "D2", "D3"}, devices)
 
+    def test_prune_outbound_device_pokes1(self):
+        """If a destination is unreachable, and the updates are pruned, we should get
+        a single update.
+
+        This case tests the behaviour when the server has never been reachable.
+        """
+        mock_send_txn = self.hs.get_federation_transport_client().send_transaction
+        mock_send_txn.side_effect = lambda t, cb: defer.fail("fail")
+
+        # create devices
+        u1 = self.register_user("user", "pass")
+        self.login("user", "pass", device_id="D1")
+        self.login("user", "pass", device_id="D2")
+        self.login("user", "pass", device_id="D3")
+
+        # delete them again
+        self.get_success(
+            self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"])
+        )
+
+        self.assertGreaterEqual(mock_send_txn.call_count, 4)
+
+        # run the prune job
+        self.reactor.advance(10)
+        self.get_success(
+            self.hs.get_datastore()._prune_old_outbound_device_pokes(prune_age=1)
+        )
+
+        # recover the server
+        mock_send_txn.side_effect = self.record_transaction
+        self.hs.get_federation_sender().send_device_messages("host2")
+        self.pump()
+
+        # there should be a single update for this user.
+        self.assertEqual(len(self.edus), 1)
+        edu = self.edus.pop(0)
+        self.assertEqual(edu["edu_type"], "m.device_list_update")
+        c = edu["content"]
+
+        # synapse uses an empty prev_id list to indicate "needs a full resync".
+        self.assertEqual(c["prev_id"], [])
+
+    def test_prune_outbound_device_pokes2(self):
+        """If a destination is unreachable, and the updates are pruned, we should get
+        a single update.
+
+        This case tests the behaviour when the server was reachable, but then goes
+        offline.
+        """
+
+        # create first device
+        u1 = self.register_user("user", "pass")
+        self.login("user", "pass", device_id="D1")
+
+        # expect the update EDU
+        self.assertEqual(len(self.edus), 1)
+        self.check_device_update_edu(self.edus.pop(0), u1, "D1", None)
+
+        # now the server goes offline
+        mock_send_txn = self.hs.get_federation_transport_client().send_transaction
+        mock_send_txn.side_effect = lambda t, cb: defer.fail("fail")
+
+        self.login("user", "pass", device_id="D2")
+        self.login("user", "pass", device_id="D3")
+
+        # delete them again
+        self.get_success(
+            self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"])
+        )
+
+        self.assertGreaterEqual(mock_send_txn.call_count, 3)
+
+        # run the prune job
+        self.reactor.advance(10)
+        self.get_success(
+            self.hs.get_datastore()._prune_old_outbound_device_pokes(prune_age=1)
+        )
+
+        # recover the server
+        mock_send_txn.side_effect = self.record_transaction
+        self.hs.get_federation_sender().send_device_messages("host2")
+        self.pump()
+
+        # ... and we should get a single update for this user.
+        self.assertEqual(len(self.edus), 1)
+        edu = self.edus.pop(0)
+        self.assertEqual(edu["edu_type"], "m.device_list_update")
+        c = edu["content"]
+
+        # synapse uses an empty prev_id list to indicate "needs a full resync".
+        self.assertEqual(c["prev_id"], [])
+
     def check_device_update_edu(
         self,
         edu: JsonDict,
-- 
cgit 1.4.1


From 5bd2b275254bcbf001bee20d821c0ef567b9587f Mon Sep 17 00:00:00 2001
From: David Vo
Date: Fri, 27 Mar 2020 12:26:55 +1100
Subject: Only import sqlite3 when type checking

Fixes: #7127

Signed-off-by: David Vo
---
 changelog.d/7155.bugfix           | 1 +
 synapse/storage/engines/sqlite.py | 7 +++++--
 2 files changed, 6 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/7155.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7155.bugfix b/changelog.d/7155.bugfix
new file mode 100644
index 0000000000..0bf51e7aba
--- /dev/null
+++ b/changelog.d/7155.bugfix
@@ -0,0 +1 @@
+Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo.
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index 2bfeefd54e..3bc2e8b986 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -12,14 +12,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import sqlite3
 import struct
 import threading
+import typing
 
 from synapse.storage.engines import BaseDatabaseEngine
 
+if typing.TYPE_CHECKING:
+    import sqlite3  # noqa: F401
 
-class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
+
+class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]):
     def __init__(self, database_module, database_config):
         super().__init__(database_module, database_config)
-- 
cgit 1.4.1


From 2cb38ca871a65eaa4236a908c34b2b9873371b93 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Mon, 30 Mar 2020 19:15:06 +0100
Subject: Add changelog

---
 changelog.d/7177.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/7177.bugfix

(limited to 'changelog.d')

diff --git a/changelog.d/7177.bugfix b/changelog.d/7177.bugfix
new file mode 100644
index 0000000000..908801c360
--- /dev/null
+++ b/changelog.d/7177.bugfix
@@ -0,0 +1 @@
+Mitigation for a bug in `_get_e2e_device_keys_txn` which broke federation for some servers.
\ No newline at end of file
-- 
cgit 1.4.1


From 2cf115f0ea3932bb65cbe3e1e563dca69aa642d7 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Tue, 31 Mar 2020 11:20:07 +0100
Subject: Rewrite changelog

---
 changelog.d/7177.bugfix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'changelog.d')

diff --git a/changelog.d/7177.bugfix b/changelog.d/7177.bugfix
index 908801c360..329a96cb0b 100644
--- a/changelog.d/7177.bugfix
+++ b/changelog.d/7177.bugfix
@@ -1 +1 @@
-Mitigation for a bug in `_get_e2e_device_keys_txn` which broke federation for some servers.
\ No newline at end of file
+Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature.
\ No newline at end of file -- cgit 1.4.1 From 5d99bde7883201d2c6011ca40ee87dbc64b938e0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 Mar 2020 10:36:44 +0000 Subject: Newsfile --- changelog.d/7133.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7133.bugfix (limited to 'changelog.d') diff --git a/changelog.d/7133.bugfix b/changelog.d/7133.bugfix new file mode 100644 index 0000000000..61a86fd34e --- /dev/null +++ b/changelog.d/7133.bugfix @@ -0,0 +1 @@ +Fix starting workers when federation sending not split out. -- cgit 1.4.1 From 3fb9fc40f59e3688f82672410f812022a1af9daa Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 31 Mar 2020 11:49:43 +0100 Subject: 1.12.1rc1 --- CHANGES.md | 11 +++++++++++ changelog.d/7133.bugfix | 1 - changelog.d/7155.bugfix | 1 - changelog.d/7177.bugfix | 1 - synapse/__init__.py | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/7133.bugfix delete mode 100644 changelog.d/7155.bugfix delete mode 100644 changelog.d/7177.bugfix (limited to 'changelog.d') diff --git a/CHANGES.md b/CHANGES.md index f794c585b7..5b97d7ff82 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,14 @@ +Synapse 1.12.1rc1 (2020-03-31) +============================== + +Bugfixes +-------- + +- Fix starting workers when federation sending not split out. ([\#7133](https://github.com/matrix-org/synapse/issues/7133)) +- Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo. ([\#7155](https://github.com/matrix-org/synapse/issues/7155)) +- Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature. ([\#7177](https://github.com/matrix-org/synapse/issues/7177)) + + Synapse 1.12.0 (2020-03-23) =========================== diff --git a/changelog.d/7133.bugfix b/changelog.d/7133.bugfix deleted file mode 100644 index 61a86fd34e..0000000000 --- a/changelog.d/7133.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix starting workers when federation sending not split out. diff --git a/changelog.d/7155.bugfix b/changelog.d/7155.bugfix deleted file mode 100644 index 0bf51e7aba..0000000000 --- a/changelog.d/7155.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo. diff --git a/changelog.d/7177.bugfix b/changelog.d/7177.bugfix deleted file mode 100644 index 329a96cb0b..0000000000 --- a/changelog.d/7177.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature. \ No newline at end of file diff --git a/synapse/__init__.py b/synapse/__init__.py index 5b86008945..c3c5b20f11 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.12.0" +__version__ = "1.12.1rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when -- cgit 1.4.1 From 62a7289133840b4f4a55844b4f24ec664c3d917b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 31 Mar 2020 13:09:16 +0100 Subject: Fix a bug which could cause incorrect 'cyclic dependency' error. (#7178) If there was an exception setting up one of the attributes of the Homeserver god object, then future attempts to fetch that attribute would raise a confusing "Cyclic dependency" error. 
Let's make sure that we clear the `building` flag so that we just get the original exception. Ref: #7169 --- changelog.d/7178.bugfix | 1 + synapse/server.py | 22 ++++++++++------------ 2 files changed, 11 insertions(+), 12 deletions(-) create mode 100644 changelog.d/7178.bugfix (limited to 'changelog.d') diff --git a/changelog.d/7178.bugfix b/changelog.d/7178.bugfix new file mode 100644 index 0000000000..35ea645d75 --- /dev/null +++ b/changelog.d/7178.bugfix @@ -0,0 +1 @@ +Fix a bug which could cause incorrect 'cyclic dependency' error. diff --git a/synapse/server.py b/synapse/server.py index cd86475d6b..9228e1c892 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -583,24 +583,22 @@ def _make_dependency_method(depname): try: builder = getattr(hs, "build_%s" % (depname)) except AttributeError: - builder = None + raise NotImplementedError( + "%s has no %s nor a builder for it" % (type(hs).__name__, depname) + ) - if builder: - # Prevent cyclic dependencies from deadlocking - if depname in hs._building: - raise ValueError("Cyclic dependency while building %s" % (depname,)) - hs._building[depname] = 1 + # Prevent cyclic dependencies from deadlocking + if depname in hs._building: + raise ValueError("Cyclic dependency while building %s" % (depname,)) + hs._building[depname] = 1 + try: dep = builder() setattr(hs, depname, dep) - + finally: del hs._building[depname] - return dep - - raise NotImplementedError( - "%s has no %s nor a builder for it" % (type(hs).__name__, depname) - ) + return dep setattr(HomeServer, "get_%s" % (depname), _get) -- cgit 1.4.1 From 0a7b0882c1d1f52bde46d6f367f265bc330e8bd0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 31 Mar 2020 09:33:02 -0400 Subject: Fix use of async/await in media code (#7184) --- changelog.d/7184.misc | 1 + synapse/storage/data_stores/main/media_repository.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7184.misc (limited to 'changelog.d') diff --git a/changelog.d/7184.misc b/changelog.d/7184.misc new file mode 100644 index 0000000000..fac5bc0403 --- /dev/null +++ b/changelog.d/7184.misc @@ -0,0 +1 @@ +Convert some of synapse.rest.media to async/await. 
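The hunk below turns `delete_url_cache` into a proper coroutine that awaits `runInteraction`. The bug class it fixes: an `async def` that returns another coroutine call without awaiting it hands callers an unawaited coroutine object instead of the result, and any exception raised by the query is silently lost. A toy asyncio sketch of the difference (standard-library stand-ins, not Synapse's Twisted `Database` API):

```python
import asyncio

class FakeDb:
    """Hypothetical stand-in for a database wrapper with a runInteraction-style API."""
    async def run_interaction(self, desc, func):
        await asyncio.sleep(0)  # pretend to run the transaction
        return func()

async def delete_url_cache_broken(db, media_ids):
    # BUG: returns the coroutine object itself; nothing runs, errors are lost.
    return db.run_interaction("delete_url_cache", lambda: len(media_ids))

async def delete_url_cache_fixed(db, media_ids):
    # Awaiting runs the interaction and propagates its result and exceptions.
    return await db.run_interaction("delete_url_cache", lambda: len(media_ids))

async def main():
    db = FakeDb()
    res = await delete_url_cache_broken(db, ["m1", "m2"])
    print(type(res).__name__)  # 'coroutine' -- the latent bug
    await res                  # silence the "never awaited" warning
    print(await delete_url_cache_fixed(db, ["m1", "m2"]))  # 2

asyncio.run(main())
```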
diff --git a/synapse/storage/data_stores/main/media_repository.py b/synapse/storage/data_stores/main/media_repository.py index 80ca36dedf..cf195f8aa6 100644 --- a/synapse/storage/data_stores/main/media_repository.py +++ b/synapse/storage/data_stores/main/media_repository.py @@ -340,7 +340,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "get_expired_url_cache", _get_expired_url_cache_txn ) - def delete_url_cache(self, media_ids): + async def delete_url_cache(self, media_ids): if len(media_ids) == 0: return @@ -349,7 +349,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): def _delete_url_cache_txn(txn): txn.executemany(sql, [(media_id,) for media_id in media_ids]) - return self.db.runInteraction("delete_url_cache", _delete_url_cache_txn) + return await self.db.runInteraction("delete_url_cache", _delete_url_cache_txn) def get_url_cache_media_before(self, before_ts): sql = ( -- cgit 1.4.1 From b994e86e359fd095f82feabbf38fb18a5d10e0ae Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 31 Mar 2020 14:51:22 +0100 Subject: Only setdefault for signatures if device has key_json (#7177) --- changelog.d/7177.bugfix | 1 + synapse/storage/data_stores/main/devices.py | 24 ++++++++++++++---------- 2 files changed, 15 insertions(+), 10 deletions(-) create mode 100644 changelog.d/7177.bugfix (limited to 'changelog.d') diff --git a/changelog.d/7177.bugfix b/changelog.d/7177.bugfix new file mode 100644 index 0000000000..329a96cb0b --- /dev/null +++ b/changelog.d/7177.bugfix @@ -0,0 +1 @@ +Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature. \ No newline at end of file diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 3140e1b722..20995e1b78 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -286,14 +286,16 @@ class DeviceWorkerStore(SQLBaseStore): key_json = device.get("key_json", None) if key_json: result["keys"] = db_to_json(key_json) + + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) + device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name - if "signatures" in device: - for sig_user_id, sigs in device["signatures"].items(): - result["keys"].setdefault("signatures", {}).setdefault( - sig_user_id, {} - ).update(sigs) else: result["deleted"] = True @@ -494,14 +496,16 @@ class DeviceWorkerStore(SQLBaseStore): key_json = device.get("key_json", None) if key_json: result["keys"] = db_to_json(key_json) + + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) + device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name - if "signatures" in device: - for sig_user_id, sigs in device["signatures"].items(): - result["keys"].setdefault("signatures", {}).setdefault( - sig_user_id, {} - ).update(sigs) results.append(result) -- cgit 1.4.1 From fe1580bfd91151c2c375d3c403ed911828f3899e Mon Sep 17 00:00:00 2001 From: Karlinde Date: Tue, 31 Mar 2020 16:08:56 +0200 Subject: Fill in the 'default' field for user-defined push rules (#6639) 
Signed-off-by: Karl Linderhed --- changelog.d/6639.bugfix | 1 + synapse/storage/data_stores/main/push_rule.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/6639.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6639.bugfix b/changelog.d/6639.bugfix new file mode 100644 index 0000000000..c7593a6e84 --- /dev/null +++ b/changelog.d/6639.bugfix @@ -0,0 +1 @@ +Fix missing field `default` when fetching user-defined push rules. diff --git a/synapse/storage/data_stores/main/push_rule.py b/synapse/storage/data_stores/main/push_rule.py index 62ac88d9f2..46f9bda773 100644 --- a/synapse/storage/data_stores/main/push_rule.py +++ b/synapse/storage/data_stores/main/push_rule.py @@ -41,6 +41,7 @@ def _load_rules(rawrules, enabled_map): rule = dict(rawrule) rule["conditions"] = json.loads(rawrule["conditions"]) rule["actions"] = json.loads(rawrule["actions"]) + rule["default"] = False ruleslist.append(rule) # We're going to be mutating this a lot, so do a deep copy -- cgit 1.4.1 From 60adcbed919afd5c85442775eca822fec43d816d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 31 Mar 2020 15:18:41 +0100 Subject: Fix "'NoneType' has no attribute start|stop" logcontext errors (#7181) Fixes #7179. --- changelog.d/7181.misc | 1 + synapse/http/site.py | 13 ++++++------- synapse/logging/context.py | 5 +++++ 3 files changed, 12 insertions(+), 7 deletions(-) create mode 100644 changelog.d/7181.misc (limited to 'changelog.d') diff --git a/changelog.d/7181.misc b/changelog.d/7181.misc new file mode 100644 index 0000000000..731f4dcb52 --- /dev/null +++ b/changelog.d/7181.misc @@ -0,0 +1 @@ +Clean up some LoggingContext code. diff --git a/synapse/http/site.py b/synapse/http/site.py index e092193c9c..32feb0d968 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -193,6 +193,12 @@ class SynapseRequest(Request): self.finish_time = time.time() Request.connectionLost(self, reason) + if self.logcontext is None: + logger.info( + "Connection from %s lost before request headers were read", self.client + ) + return + # we only get here if the connection to the client drops before we send # the response. # @@ -236,13 +242,6 @@ class SynapseRequest(Request): def _finished_processing(self): """Log the completion of this request and update the metrics """ - - if self.logcontext is None: - # this can happen if the connection closed before we read the - # headers (so render was never called). In that case we'll already - # have logged a warning, so just bail out. - return - usage = self.logcontext.get_resource_usage() if self._processing_finished_time is None: diff --git a/synapse/logging/context.py b/synapse/logging/context.py index a8eafb1c7c..3254d6a8df 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -539,6 +539,11 @@ def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSe Returns: The context that was previously active """ + # everything blows up if we allow current_context to be set to None, so sanity-check + # that now. + if context is None: + raise TypeError("'context' argument may not be None") + current = current_context() if current is not context: -- cgit 1.4.1 From 2e826cd80c97cbdcec3e600b802c43ec27263e39 Mon Sep 17 00:00:00 2001 From: Jostein Kjønigsen Date: Tue, 31 Mar 2020 16:50:48 +0200 Subject: Improve TURN documentation. 
(#7167) --- changelog.d/7167.doc | 1 + docs/turn-howto.md | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/7167.doc (limited to 'changelog.d') diff --git a/changelog.d/7167.doc b/changelog.d/7167.doc new file mode 100644 index 0000000000..a7e7ba9b51 --- /dev/null +++ b/changelog.d/7167.doc @@ -0,0 +1 @@ +Improve README.md by being explicit about public IP recommendation for TURN relaying. diff --git a/docs/turn-howto.md b/docs/turn-howto.md index 1bd3943f54..b26e41f19e 100644 --- a/docs/turn-howto.md +++ b/docs/turn-howto.md @@ -11,6 +11,13 @@ TURN server. The following sections describe how to install [coturn]() (which implements the TURN REST API) and integrate it with synapse. +## Requirements + +For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP. + +Hosting TURN behind a NAT (even with appropriate port forwarding) is known to cause issues +and to often not work. + ## `coturn` Setup ### Initial installation -- cgit 1.4.1 From b413ab8aa64a3e1a01db8e3e6bce0c486f916618 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 31 Mar 2020 17:44:36 +0100 Subject: changelog --- changelog.d/7190.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7190.misc (limited to 'changelog.d') diff --git a/changelog.d/7190.misc b/changelog.d/7190.misc new file mode 100644 index 0000000000..34348873f1 --- /dev/null +++ b/changelog.d/7190.misc @@ -0,0 +1 @@ +Only run one background database update at a time. -- cgit 1.4.1 From dfa07822542da96b93ef9d871d43bf1a36dc4664 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 1 Apr 2020 10:40:46 +0100 Subject: Remove connections per replication stream metric. (#7195) This broke in a recent PR (#7024) and is no longer useful due to all replication clients implicitly subscribing to all streams, so let's just remove it. --- changelog.d/7195.misc | 1 + synapse/replication/tcp/resource.py | 16 ---------------- 2 files changed, 1 insertion(+), 16 deletions(-) create mode 100644 changelog.d/7195.misc (limited to 'changelog.d') diff --git a/changelog.d/7195.misc b/changelog.d/7195.misc new file mode 100644 index 0000000000..676f285377 --- /dev/null +++ b/changelog.d/7195.misc @@ -0,0 +1 @@ +Move catchup of replication streams logic to worker. 
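The hunk below removes a `LaterGauge` that reported connection counts per replication stream; after #7024, every connection implicitly subscribes to every stream, so the per-stream breakdown no longer carries any information. For context, a `LaterGauge` computes its value from a callback at scrape time rather than being set eagerly. A simplified sketch of that pattern (illustrative names, not the real Prometheus client wiring):

```python
class LaterGauge:
    """Gauge whose value is recomputed from a callback at scrape time (sketch)."""

    def __init__(self, name, desc, labels, caller):
        self.name, self.labels, self.caller = name, labels, caller

    def collect(self):
        # The Prometheus client would call this on every scrape.
        for label_values, value in self.caller().items():
            yield self.name, dict(zip(self.labels, label_values)), value

class Conn:
    def __init__(self, streams):
        self.replication_streams = streams

connections = [Conn({"events"}), Conn({"events", "typing"})]

gauge = LaterGauge(
    "connections_per_stream",
    "",
    ["stream_name"],
    lambda: {
        (name,): sum(1 for c in connections if name in c.replication_streams)
        for name in ("events", "typing")
    },
)

print(list(gauge.collect()))
# [('connections_per_stream', {'stream_name': 'events'}, 2),
#  ('connections_per_stream', {'stream_name': 'typing'}, 1)]
```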
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 8b6067e20d..30021ee309 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -99,22 +99,6 @@ class ReplicationStreamer(object): self.streams_by_name = {stream.NAME: stream for stream in self.streams} - LaterGauge( - "synapse_replication_tcp_resource_connections_per_stream", - "", - ["stream_name"], - lambda: { - (stream_name,): len( - [ - conn - for conn in self.connections - if stream_name in conn.replication_streams - ] - ) - for stream_name in self.streams_by_name - }, - ) - self.federation_sender = None if not hs.config.send_federation: self.federation_sender = hs.get_federation_sender() -- cgit 1.4.1 From 250f87d0dec15f33fced7d06252e27d9c258b90c Mon Sep 17 00:00:00 2001 From: siroccal <41478263+siroccal@users.noreply.github.com> Date: Wed, 1 Apr 2020 13:44:51 +0200 Subject: Update postgres.md (#7119) --- changelog.d/7119.doc | 1 + docs/postgres.md | 28 +++++++++++++++++++++++++++- 2 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7119.doc (limited to 'changelog.d') diff --git a/changelog.d/7119.doc b/changelog.d/7119.doc new file mode 100644 index 0000000000..05192966c3 --- /dev/null +++ b/changelog.d/7119.doc @@ -0,0 +1 @@ +Update postgres docs with login troubleshooting information. \ No newline at end of file diff --git a/docs/postgres.md b/docs/postgres.md index 04aa746051..70fe29cdcc 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -61,7 +61,33 @@ Note that the PostgreSQL database *must* have the correct encoding set You may need to enable password authentication so `synapse_user` can connect to the database. See -. +. + +If you get an error along the lines of `FATAL: Ident authentication failed for +user "synapse_user"`, you may need to use an authentication method other than +`ident`: + +* If the `synapse_user` user has a password, add the password to the `database:` + section of `homeserver.yaml`. Then add the following to `pg_hba.conf`: + + ``` + host synapse synapse_user ::1/128 md5 # or `scram-sha-256` instead of `md5` if you use that + ``` + +* If the `synapse_user` user does not have a password, then a password doesn't + have to be added to `homeserver.yaml`. But the following does need to be added + to `pg_hba.conf`: + + ``` + host synapse synapse_user ::1/128 trust + ``` + +Note that line order matters in `pg_hba.conf`, so make sure that if you do add a +new line, it is inserted before: + +``` +host all all ::1/128 ident +``` ### Fixing incorrect `COLLATE` or `CTYPE` -- cgit 1.4.1 From 468dcc767bf379ba2b4ed4b6d1c6537473175eab Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 1 Apr 2020 08:27:05 -0400 Subject: Allow admins to create aliases when they are not in the room (#7191) --- changelog.d/7191.feature | 1 + synapse/handlers/directory.py | 6 +++- tests/handlers/test_directory.py | 62 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 68 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7191.feature (limited to 'changelog.d') diff --git a/changelog.d/7191.feature b/changelog.d/7191.feature new file mode 100644 index 0000000000..83d5685bb2 --- /dev/null +++ b/changelog.d/7191.feature @@ -0,0 +1 @@ +Admin users are no longer required to be in a room to create an alias for it. 
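The change below exempts server admins from the requirement to be joined to a room before creating an alias for it. A condensed, self-contained sketch of the resulting authorisation rule (the `Auth`/`Store` stubs and the `:admin` naming are purely illustrative, not how Synapse identifies admins):

```python
import asyncio

class Auth:
    async def is_server_admin(self, user_id):
        return user_id.endswith(":admin")  # toy rule for the demo only

class Store:
    async def get_rooms_for_user(self, user_id):
        return {"!joined:test"}

async def can_create_alias(auth, store, user_id, room_id, check_membership=True):
    # Admins bypass the membership requirement; others must be in the room.
    if await auth.is_server_admin(user_id):
        return True
    if not check_membership:
        return True
    return room_id in await store.get_rooms_for_user(user_id)

async def main():
    auth, store = Auth(), Store()
    print(await can_create_alias(auth, store, "@u:test", "!other:test"))      # False
    print(await can_create_alias(auth, store, "@u:test", "!joined:test"))     # True
    print(await can_create_alias(auth, store, "@root:admin", "!other:test"))  # True

asyncio.run(main())
```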
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 1d842c369b..53e5f585d9 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -127,7 +127,11 @@ class DirectoryHandler(BaseHandler): errcode=Codes.EXCLUSIVE, ) else: - if self.require_membership and check_membership: + # Server admins are not subject to the same constraints as normal + # users when creating an alias (e.g. being in the room). + is_admin = yield self.auth.is_server_admin(requester.user) + + if (self.require_membership and check_membership) and not is_admin: rooms_for_user = yield self.store.get_rooms_for_user(user_id) if room_id not in rooms_for_user: raise AuthError( diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 5e40adba52..00bb776271 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -102,6 +102,68 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response) +class TestCreateAlias(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + directory.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.handler = hs.get_handlers().directory_handler + + # Create user + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + # Create a test room + self.room_id = self.helper.create_room_as( + self.admin_user, tok=self.admin_user_tok + ) + + self.test_alias = "#test:test" + self.room_alias = RoomAlias.from_string(self.test_alias) + + # Create a test user. + self.test_user = self.register_user("user", "pass", admin=False) + self.test_user_tok = self.login("user", "pass") + self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok) + + def test_create_alias_joined_room(self): + """A user can create an alias for a room they're in.""" + self.get_success( + self.handler.create_association( + create_requester(self.test_user), self.room_alias, self.room_id, + ) + ) + + def test_create_alias_other_room(self): + """A user cannot create an alias for a room they're NOT in.""" + other_room_id = self.helper.create_room_as( + self.admin_user, tok=self.admin_user_tok + ) + + self.get_failure( + self.handler.create_association( + create_requester(self.test_user), self.room_alias, other_room_id, + ), + synapse.api.errors.SynapseError, + ) + + def test_create_alias_admin(self): + """An admin can create an alias for a room they're NOT in.""" + other_room_id = self.helper.create_room_as( + self.test_user, tok=self.test_user_tok + ) + + self.get_success( + self.handler.create_association( + create_requester(self.admin_user), self.room_alias, other_room_id, + ) + ) + + class TestDeleteAlias(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, -- cgit 1.4.1 From b9930d24a05e47c36845d8607b12a45eea889be0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 1 Apr 2020 08:48:00 -0400 Subject: Support SAML in the user interactive authentication workflow. 
(#7102) --- CHANGES.md | 8 ++ changelog.d/7102.feature | 1 + synapse/api/constants.py | 1 + synapse/handlers/auth.py | 116 +++++++++++++++++++++++++++- synapse/handlers/saml_handler.py | 51 +++++++++--- synapse/res/templates/sso_auth_confirm.html | 14 ++++ synapse/rest/client/v2_alpha/account.py | 19 ++++- synapse/rest/client/v2_alpha/auth.py | 42 +++++----- synapse/rest/client/v2_alpha/devices.py | 12 ++- synapse/rest/client/v2_alpha/keys.py | 6 +- synapse/rest/client/v2_alpha/register.py | 1 + 11 files changed, 227 insertions(+), 44 deletions(-) create mode 100644 changelog.d/7102.feature create mode 100644 synapse/res/templates/sso_auth_confirm.html (limited to 'changelog.d') diff --git a/CHANGES.md b/CHANGES.md index f794c585b7..b997af1630 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,11 @@ +Next version +============ + +* A new template (`sso_auth_confirm.html`) was added to Synapse. If your Synapse + is configured to use SSO and a custom `sso_redirect_confirm_template_dir` + configuration then this template will need to be duplicated into that + directory. + Synapse 1.12.0 (2020-03-23) =========================== diff --git a/changelog.d/7102.feature b/changelog.d/7102.feature new file mode 100644 index 0000000000..01057aa396 --- /dev/null +++ b/changelog.d/7102.feature @@ -0,0 +1 @@ +Support SSO in the user interactive authentication workflow. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index cc8577552b..fda2c2e5bb 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -61,6 +61,7 @@ class LoginType(object): MSISDN = "m.login.msisdn" RECAPTCHA = "m.login.recaptcha" TERMS = "m.login.terms" + SSO = "org.matrix.login.sso" DUMMY = "m.login.dummy" # Only for C/S API v1 diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 2ce1425dfa..7c09d15a72 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -53,6 +53,31 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) +SUCCESS_TEMPLATE = """ + + +Success! + + + + + +
+<body>
+    <div>
+        <p>Thank you</p>
+        <p>You may now close this window and return to the application</p>
+    </div>
+ + +""" + + class AuthHandler(BaseHandler): SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000 @@ -91,6 +116,7 @@ class AuthHandler(BaseHandler): self.hs = hs # FIXME better possibility to access registrationHandler later? self.macaroon_gen = hs.get_macaroon_generator() self._password_enabled = hs.config.password_enabled + self._saml2_enabled = hs.config.saml2_enabled # we keep this as a list despite the O(N^2) implication so that we can # keep PASSWORD first and avoid confusing clients which pick the first @@ -106,6 +132,13 @@ class AuthHandler(BaseHandler): if t not in login_types: login_types.append(t) self._supported_login_types = login_types + # Login types and UI Auth types have a heavy overlap, but are not + # necessarily identical. Login types have SSO (and other login types) + # added in the rest layer, see synapse.rest.client.v1.login.LoginRestServerlet.on_GET. + ui_auth_types = login_types.copy() + if self._saml2_enabled: + ui_auth_types.append(LoginType.SSO) + self._supported_ui_auth_types = ui_auth_types # Ratelimiter for failed auth during UIA. Uses same ratelimit config # as per `rc_login.failed_attempts`. @@ -113,10 +146,21 @@ class AuthHandler(BaseHandler): self._clock = self.hs.get_clock() - # Load the SSO redirect confirmation page HTML template + # Load the SSO HTML templates. + + # The following template is shown to the user during a client login via SSO, + # after the SSO completes and before redirecting them back to their client. + # It notifies the user they are about to give access to their matrix account + # to the client. self._sso_redirect_confirm_template = load_jinja2_templates( hs.config.sso_redirect_confirm_template_dir, ["sso_redirect_confirm.html"], )[0] + # The following template is shown during user interactive authentication + # in the fallback auth scenario. It notifies the user that they are + # authenticating for an operation to occur on their account. + self._sso_auth_confirm_template = load_jinja2_templates( + hs.config.sso_redirect_confirm_template_dir, ["sso_auth_confirm.html"], + )[0] self._server_name = hs.config.server_name @@ -130,6 +174,7 @@ class AuthHandler(BaseHandler): request: SynapseRequest, request_body: Dict[str, Any], clientip: str, + description: str, ): """ Checks that the user is who they claim to be, via a UI auth. @@ -147,6 +192,9 @@ class AuthHandler(BaseHandler): clientip: The IP address of the client. + description: A human readable string to be displayed to the user that + describes the operation happening on their account. + Returns: defer.Deferred[dict]: the parameters for this request (which may have been given only in a previous call). @@ -175,11 +223,11 @@ class AuthHandler(BaseHandler): ) # build a list of supported flows - flows = [[login_type] for login_type in self._supported_login_types] + flows = [[login_type] for login_type in self._supported_ui_auth_types] try: result, params, _ = yield self.check_auth( - flows, request, request_body, clientip + flows, request, request_body, clientip, description ) except LoginError: # Update the ratelimite to say we failed (`can_do_action` doesn't raise). 
@@ -193,7 +241,7 @@ class AuthHandler(BaseHandler): raise # find the completed login type - for login_type in self._supported_login_types: + for login_type in self._supported_ui_auth_types: if login_type not in result: continue @@ -224,6 +272,7 @@ class AuthHandler(BaseHandler): request: SynapseRequest, clientdict: Dict[str, Any], clientip: str, + description: str, ): """ Takes a dictionary sent by the client in the login / registration @@ -250,6 +299,9 @@ class AuthHandler(BaseHandler): clientip: The IP address of the client. + description: A human readable string to be displayed to the user that + describes the operation happening on their account. + Returns: defer.Deferred[dict, dict, str]: a deferred tuple of (creds, params, session_id). @@ -299,12 +351,18 @@ class AuthHandler(BaseHandler): comparator = (request.uri, request.method, clientdict) if "ui_auth" not in session: session["ui_auth"] = comparator + self._save_session(session) elif session["ui_auth"] != comparator: raise SynapseError( 403, "Requested operation has changed during the UI authentication session.", ) + # Add a human readable description to the session. + if "description" not in session: + session["description"] = description + self._save_session(session) + if not authdict: raise InteractiveAuthIncompleteError( self._auth_dict_for_flows(flows, session) @@ -991,6 +1049,56 @@ class AuthHandler(BaseHandler): else: return defer.succeed(False) + def start_sso_ui_auth(self, redirect_url: str, session_id: str) -> str: + """ + Get the HTML for the SSO redirect confirmation page. + + Args: + redirect_url: The URL to redirect to the SSO provider. + session_id: The user interactive authentication session ID. + + Returns: + The HTML to render. + """ + session = self._get_session_info(session_id) + # Get the human readable operation of what is occurring, falling back to + # a generic message if it isn't available for some reason. + description = session.get("description", "modify your account") + return self._sso_auth_confirm_template.render( + description=description, redirect_url=redirect_url, + ) + + def complete_sso_ui_auth( + self, registered_user_id: str, session_id: str, request: SynapseRequest, + ): + """Having figured out a mxid for this user, complete the HTTP request + + Args: + registered_user_id: The registered user ID to complete SSO login for. + request: The request to complete. + client_redirect_url: The URL to which to redirect the user at the end of the + process. + """ + # Mark the stage of the authentication as successful. + sess = self._get_session_info(session_id) + if "creds" not in sess: + sess["creds"] = {} + creds = sess["creds"] + + # Save the user who authenticated with SSO, this will be used to ensure + # that the account be modified is also the person who logged in. + creds[LoginType.SSO] = registered_user_id + self._save_session(sess) + + # Render the HTML and return. + html_bytes = SUCCESS_TEMPLATE.encode("utf8") + request.setResponseCode(200) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) + + request.write(html_bytes) + finish_request(request) + def complete_sso_login( self, registered_user_id: str, diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index dc04b53f43..4741c82f61 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -14,7 +14,7 @@ # limitations under the License. 
import logging import re -from typing import Tuple +from typing import Optional, Tuple import attr import saml2 @@ -44,11 +44,15 @@ class Saml2SessionData: # time the session was created, in milliseconds creation_time = attr.ib() + # The user interactive authentication session ID associated with this SAML + # session (or None if this SAML session is for an initial login). + ui_auth_session_id = attr.ib(type=Optional[str], default=None) class SamlHandler: def __init__(self, hs): self._saml_client = Saml2Client(hs.config.saml2_sp_config) + self._auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() self._registration_handler = hs.get_registration_handler() @@ -77,12 +81,14 @@ class SamlHandler: self._error_html_content = hs.config.saml2_error_html_content - def handle_redirect_request(self, client_redirect_url): + def handle_redirect_request(self, client_redirect_url, ui_auth_session_id=None): """Handle an incoming request to /login/sso/redirect Args: client_redirect_url (bytes): the URL that we should redirect the client to when everything is done + ui_auth_session_id (Optional[str]): The session ID of the ongoing UI Auth (or + None if this is a login). Returns: bytes: URL to redirect to @@ -92,7 +98,9 @@ class SamlHandler: ) now = self._clock.time_msec() - self._outstanding_requests_dict[reqid] = Saml2SessionData(creation_time=now) + self._outstanding_requests_dict[reqid] = Saml2SessionData( + creation_time=now, ui_auth_session_id=ui_auth_session_id, + ) for key, value in info["headers"]: if key == "Location": @@ -119,7 +127,9 @@ class SamlHandler: self.expire_sessions() try: - user_id = await self._map_saml_response_to_user(resp_bytes, relay_state) + user_id, current_session = await self._map_saml_response_to_user( + resp_bytes, relay_state + ) except RedirectException: # Raise the exception as per the wishes of the SAML module response raise @@ -137,9 +147,28 @@ class SamlHandler: finish_request(request) return - self._auth_handler.complete_sso_login(user_id, request, relay_state) + # Complete the interactive auth session or the login. + if current_session and current_session.ui_auth_session_id: + self._auth_handler.complete_sso_ui_auth( + user_id, current_session.ui_auth_session_id, request + ) + + else: + self._auth_handler.complete_sso_login(user_id, request, relay_state) + + async def _map_saml_response_to_user( + self, resp_bytes: str, client_redirect_url: str + ) -> Tuple[str, Optional[Saml2SessionData]]: + """ + Given a sample response, retrieve the cached session and user for it. - async def _map_saml_response_to_user(self, resp_bytes, client_redirect_url): + Args: + resp_bytes: The SAML response. + client_redirect_url: The redirect URL passed in by the client. + + Returns: + Tuple of the user ID and SAML session associated with this response. 
+ """ try: saml2_auth = self._saml_client.parse_authn_request_response( resp_bytes, @@ -167,7 +196,9 @@ class SamlHandler: logger.info("SAML2 mapped attributes: %s", saml2_auth.ava) - self._outstanding_requests_dict.pop(saml2_auth.in_response_to, None) + current_session = self._outstanding_requests_dict.pop( + saml2_auth.in_response_to, None + ) remote_user_id = self._user_mapping_provider.get_remote_user_id( saml2_auth, client_redirect_url @@ -188,7 +219,7 @@ class SamlHandler: ) if registered_user_id is not None: logger.info("Found existing mapping %s", registered_user_id) - return registered_user_id + return registered_user_id, current_session # backwards-compatibility hack: see if there is an existing user with a # suitable mapping from the uid @@ -213,7 +244,7 @@ class SamlHandler: await self._datastore.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id ) - return registered_user_id + return registered_user_id, current_session # Map saml response to user attributes using the configured mapping provider for i in range(1000): @@ -260,7 +291,7 @@ class SamlHandler: await self._datastore.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id ) - return registered_user_id + return registered_user_id, current_session def expire_sessions(self): expire_before = self._clock.time_msec() - self._saml2_session_lifetime diff --git a/synapse/res/templates/sso_auth_confirm.html b/synapse/res/templates/sso_auth_confirm.html new file mode 100644 index 0000000000..0d9de9d465 --- /dev/null +++ b/synapse/res/templates/sso_auth_confirm.html @@ -0,0 +1,14 @@ + + + Authentication + + +
+    <body>
+        <div>
+            <p>
+                A client is trying to {{ description | e }}. To confirm this action,
+                re-authenticate with single sign-on.
+                If you did not expect this, your account may be compromised!
+            </p>
+        </div>
+ + diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index f80b5e40ea..31435b1e1c 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -234,7 +234,11 @@ class PasswordRestServlet(RestServlet): if self.auth.has_access_token(request): requester = await self.auth.get_user_by_req(request) params = await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "modify your account password", ) user_id = requester.user.to_string() else: @@ -244,6 +248,7 @@ class PasswordRestServlet(RestServlet): request, body, self.hs.get_ip_from_request(request), + "modify your account password", ) if LoginType.EMAIL_IDENTITY in result: @@ -311,7 +316,11 @@ class DeactivateAccountRestServlet(RestServlet): return 200, {} await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "deactivate your account", ) result = await self._deactivate_account_handler.deactivate_account( requester.user.to_string(), erase, id_server=body.get("id_server") @@ -669,7 +678,11 @@ class ThreepidAddRestServlet(RestServlet): assert_valid_client_secret(client_secret) await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "add a third-party identifier to your account", ) validation_session = await self.identity_handler.validate_threepid_session( diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 85cf5a14c6..1787562b90 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -18,6 +18,7 @@ import logging from synapse.api.constants import LoginType from synapse.api.errors import SynapseError from synapse.api.urls import CLIENT_API_PREFIX +from synapse.handlers.auth import SUCCESS_TEMPLATE from synapse.http.server import finish_request from synapse.http.servlet import RestServlet, parse_string @@ -89,30 +90,6 @@ TERMS_TEMPLATE = """ """ -SUCCESS_TEMPLATE = """ - - -Success! - - - - - -
-<body>
-    <div>
-        <p>Thank you</p>
-        <p>You may now close this window and return to the application</p>
-    </div>
- - -""" - class AuthRestServlet(RestServlet): """ @@ -130,6 +107,11 @@ class AuthRestServlet(RestServlet): self.auth_handler = hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() + # SSO configuration. + self._saml_enabled = hs.config.saml2_enabled + if self._saml_enabled: + self._saml_handler = hs.get_saml_handler() + def on_GET(self, request, stagetype): session = parse_string(request, "session") if not session: @@ -150,6 +132,15 @@ class AuthRestServlet(RestServlet): "myurl": "%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), } + + elif stagetype == LoginType.SSO and self._saml_enabled: + # Display a confirmation page which prompts the user to + # re-authenticate with their SSO provider. + client_redirect_url = "" + sso_redirect_url = self._saml_handler.handle_redirect_request( + client_redirect_url, session + ) + html = self.auth_handler.start_sso_ui_auth(sso_redirect_url, session) else: raise SynapseError(404, "Unknown auth stage type") @@ -210,6 +201,9 @@ class AuthRestServlet(RestServlet): "myurl": "%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), } + elif stagetype == LoginType.SSO: + # The SSO fallback workflow should not post here, + raise SynapseError(404, "Fallback SSO auth does not support POST requests.") else: raise SynapseError(404, "Unknown auth stage type") diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 119d979052..c0714fcfb1 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -81,7 +81,11 @@ class DeleteDevicesRestServlet(RestServlet): assert_params_in_dict(body, ["devices"]) await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "remove device(s) from your account", ) await self.device_handler.delete_devices( @@ -127,7 +131,11 @@ class DeviceRestServlet(RestServlet): raise await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "remove a device from your account", ) await self.device_handler.delete_device(requester.user.to_string(), device_id) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 5eb7ef35a4..8f41a3edbf 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -263,7 +263,11 @@ class SigningKeyUploadServlet(RestServlet): body = parse_json_object_from_request(request) await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "add a device signing key to your account", ) result = await self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 66fc8ec179..431ecf4f84 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -505,6 +505,7 @@ class RegisterRestServlet(RestServlet): request, body, self.hs.get_ip_from_request(request), + "register a new account", ) # Check that we're not trying to register a denied 3pid. 
-- cgit 1.4.1 From 6d7cec7a57ca258bcf28e7eb174d970670f7a652 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 3 Apr 2020 10:23:36 +0100 Subject: Fix the debian build in a better way. (#7212) --- changelog.d/7212.misc | 1 + debian/changelog | 7 +++++++ debian/rules | 33 +++++++++++++++++++++++++++------ synapse/python_dependencies.py | 4 +--- 4 files changed, 36 insertions(+), 9 deletions(-) create mode 100644 changelog.d/7212.misc (limited to 'changelog.d') diff --git a/changelog.d/7212.misc b/changelog.d/7212.misc new file mode 100644 index 0000000000..b57fc5f288 --- /dev/null +++ b/changelog.d/7212.misc @@ -0,0 +1 @@ +Roll back the pin to Pillow 7.0 which was introduced in Synapse 1.12.2. diff --git a/debian/changelog b/debian/changelog index 03b30cd12f..6bafe468d9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +matrix-synapse-py3 (1.12.2ubuntu1) UNRELEASED; urgency=medium + + * Update the Debian build scripts to handle the new installation paths + for the support libraries introduced by Pillow 7.1.1. + + -- Richard van der Hoff Thu, 02 Apr 2020 23:18:52 +0100 + matrix-synapse-py3 (1.12.2) stable; urgency=medium * New synapse release 1.12.2. diff --git a/debian/rules b/debian/rules index a4d2ce2ba4..c744060a57 100755 --- a/debian/rules +++ b/debian/rules @@ -15,17 +15,38 @@ override_dh_installinit: # we don't really want to strip the symbols from our object files. override_dh_strip: +# dh_shlibdeps calls dpkg-shlibdeps, which finds all the binary files +# (executables and shared libs) in the package, and looks for the shared +# libraries that they depend on. It then adds a dependency on the package that +# contains that library to the package. +# +# We make two modifications to that process... +# override_dh_shlibdeps: - # make the postgres package's dependencies a recommendation - # rather than a hard dependency. + # Firstly, postgres is not a hard dependency for us, so we want to make + # the things that psycopg2 depends on (such as libpq) be + # recommendations rather than hard dependencies. We do so by + # running dpkg-shlibdeps manually on psycopg2's libs. + # find debian/$(PACKAGE_NAME)/ -path '*/site-packages/psycopg2/*.so' | \ xargs dpkg-shlibdeps -Tdebian/$(PACKAGE_NAME).substvars \ -pshlibs1 -dRecommends - # all the other dependencies can be normal 'Depends' requirements, - # except for PIL's, which is self-contained and which confuses - # dpkg-shlibdeps. - dh_shlibdeps -X site-packages/PIL/.libs -X site-packages/psycopg2 + # secondly, we exclude PIL's libraries from the process. They are known + # to be self-contained, but they have interdependencies and + # dpkg-shlibdeps doesn't know how to resolve them. + # + # As of Pillow 7.1.0, these libraries are in + # site-packages/Pillow.libs. Previously, they were in + # site-packages/PIL/.libs. + # + # (we also need to exclude psycopg2, of course, since we've already + # dealt with that.) 
+ # + dh_shlibdeps \ + -X site-packages/PIL/.libs \ + -X site-packages/Pillow.libs \ + -X site-packages/psycopg2 override_dh_virtualenv: ./debian/build_virtualenv diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 3274eb9863..8de8cb2c12 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -61,9 +61,7 @@ REQUIREMENTS = [ "pyasn1-modules>=0.0.7", "daemonize>=2.3.1", "bcrypt>=3.1.0", - # Pillow 7.1.0 causes the following issue on debian buster: - # https://github.com/python-pillow/Pillow/issues/2377 - "pillow>=4.3.0,<7.1.0", + "pillow>=4.3.0", "sortedcontainers>=1.4.4", "pymacaroons>=0.13.0", "msgpack>=0.5.2", -- cgit 1.4.1 From daa1ac89a0be4dd3cc941da4caeb2ddcbd701eff Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 3 Apr 2020 10:40:22 +0100 Subject: Fix device list update stream ids going backward (#7158) Occasionally we could get a federation device list update transaction which looked like: ``` [ {'edu_type': 'm.device_list_update', 'content': {'user_id': '@user:test', 'device_id': 'D2', 'prev_id': [], 'stream_id': 12, 'deleted': True}}, {'edu_type': 'm.device_list_update', 'content': {'user_id': '@user:test', 'device_id': 'D1', 'prev_id': [12], 'stream_id': 11, 'deleted': True}}, {'edu_type': 'm.device_list_update', 'content': {'user_id': '@user:test', 'device_id': 'D3', 'prev_id': [11], 'stream_id': 13, 'deleted': True}} ] ``` Having `stream_ids` which are lower than `prev_ids` looks odd. It might work (I'm not actually sure), but in any case it doesn't seem like a reasonable thing to expect other implementations to support. --- changelog.d/7158.misc | 1 + synapse/storage/data_stores/main/devices.py | 10 ++++++++-- tests/federation/test_federation_sender.py | 6 ++++++ 3 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7158.misc (limited to 'changelog.d') diff --git a/changelog.d/7158.misc b/changelog.d/7158.misc new file mode 100644 index 0000000000..269b8daeb0 --- /dev/null +++ b/changelog.d/7158.misc @@ -0,0 +1 @@ +Fix device list update stream ids going backward. 
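The fix below sorts a user's pending device updates by `stream_id` before building the outbound EDUs, so each EDU's `prev_id` always points at an earlier stream position. A small illustration of the sort key, using toy data shaped like the `query_map` in the hunk:

```python
# (user_id, device_id) -> (stream_id, opentracing_context), as in the hunk below
query_map = {
    ("@user:test", "D2"): (12, None),
    ("@user:test", "D1"): (11, None),
    ("@user:test", "D3"): (13, None),
}
user_id = "@user:test"
user_devices = {"D2": {}, "D1": {}, "D3": {}}

# Without sorting, dict order could emit stream id 12 before 11.
device_ids = sorted(user_devices.keys(), key=lambda i: query_map[(user_id, i)][0])
print(device_ids)  # ['D1', 'D2', 'D3'] -> stream ids 11, 12, 13, in order
```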
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 20995e1b78..dd3561e9b2 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -165,7 +165,6 @@ class DeviceWorkerStore(SQLBaseStore): # the max stream_id across each set of duplicate entries # # maps (user_id, device_id) -> (stream_id, opentracing_context) - # as long as their stream_id does not match that of the last row # # opentracing_context contains the opentracing metadata for the request # that created the poke @@ -270,7 +269,14 @@ class DeviceWorkerStore(SQLBaseStore): prev_id = yield self._get_last_device_update_for_remote_user( destination, user_id, from_stream_id ) - for device_id, device in iteritems(user_devices): + + # make sure we go through the devices in stream order + device_ids = sorted( + user_devices.keys(), key=lambda i: query_map[(user_id, i)][0], + ) + + for device_id in device_ids: + device = user_devices[device_id] stream_id, opentracing_context = query_map[(user_id, device_id)] result = { "user_id": user_id, diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index a5fe5c6880..33105576af 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -297,6 +297,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): c = edu["content"] if stream_id is not None: self.assertEqual(c["prev_id"], [stream_id]) + self.assertGreaterEqual(c["stream_id"], stream_id) stream_id = c["stream_id"] devices = {edu["content"]["device_id"] for edu in self.edus} self.assertEqual({"D1", "D2"}, devices) @@ -330,6 +331,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): c.items(), {"user_id": u1, "prev_id": [stream_id], "deleted": True}.items(), ) + self.assertGreaterEqual(c["stream_id"], stream_id) stream_id = c["stream_id"] devices = {edu["content"]["device_id"] for edu in self.edus} self.assertEqual({"D1", "D2", "D3"}, devices) @@ -366,6 +368,8 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.assertEqual(edu["edu_type"], "m.device_list_update") c = edu["content"] self.assertEqual(c["prev_id"], [stream_id] if stream_id is not None else []) + if stream_id is not None: + self.assertGreaterEqual(c["stream_id"], stream_id) stream_id = c["stream_id"] devices = {edu["content"]["device_id"] for edu in self.edus} self.assertEqual({"D1", "D2", "D3"}, devices) @@ -482,6 +486,8 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): } self.assertLessEqual(expected.items(), content.items()) + if prev_stream_id is not None: + self.assertGreaterEqual(content["stream_id"], prev_stream_id) return content["stream_id"] def check_signing_key_update_txn(self, txn: JsonDict,) -> None: -- cgit 1.4.1 From 29ce90358c06ca2452b2ecb55670103de3557109 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 Apr 2020 10:57:07 +0100 Subject: 1.12.3 --- CHANGES.md | 9 +++++++++ changelog.d/7212.misc | 1 - debian/changelog | 8 ++++++-- synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/7212.misc (limited to 'changelog.d') diff --git a/CHANGES.md b/CHANGES.md index 5cec3d817d..e9ca767644 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.12.3 (2020-04-03) +=========================== + +Internal Changes +---------------- + +- Roll back the pin to Pillow 7.0 which was introduced in Synapse 1.12.2. 
([\#7212](https://github.com/matrix-org/synapse/issues/7212)) + + Synapse 1.12.2 (2020-04-02) =========================== diff --git a/changelog.d/7212.misc b/changelog.d/7212.misc deleted file mode 100644 index b57fc5f288..0000000000 --- a/changelog.d/7212.misc +++ /dev/null @@ -1 +0,0 @@ -Roll back the pin to Pillow 7.0 which was introduced in Synapse 1.12.2. diff --git a/debian/changelog b/debian/changelog index 6bafe468d9..642115fc5a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,9 +1,13 @@ -matrix-synapse-py3 (1.12.2ubuntu1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.12.3) stable; urgency=medium + [ Richard van der Hoff ] * Update the Debian build scripts to handle the new installation paths for the support libraries introduced by Pillow 7.1.1. - -- Richard van der Hoff Thu, 02 Apr 2020 23:18:52 +0100 + [ Synapse Packaging team ] + * New synapse release 1.12.3. + + -- Synapse Packaging team Fri, 03 Apr 2020 10:55:03 +0100 matrix-synapse-py3 (1.12.2) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index bdad75113d..3bf2d02450 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.12.2" +__version__ = "1.12.3" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when -- cgit 1.4.1 From bae32740daa5551b6613cafafb5d5bc1a73141ec Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 3 Apr 2020 12:29:30 +0100 Subject: Remove some `run_in_background` calls in replication code (#7203) By running this stuff with `run_in_background`, it won't be correctly reported against the relevant CPU usage stats. Fixes #7202 --- changelog.d/7203.bugfix | 1 + synapse/app/generic_worker.py | 16 ++++++++-------- 2 files changed, 9 insertions(+), 8 deletions(-) create mode 100644 changelog.d/7203.bugfix (limited to 'changelog.d') diff --git a/changelog.d/7203.bugfix b/changelog.d/7203.bugfix new file mode 100644 index 0000000000..8b383952e5 --- /dev/null +++ b/changelog.d/7203.bugfix @@ -0,0 +1 @@ +Fix some worker-mode replication handling not being correctly recorded in CPU usage stats. 
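The hunk below replaces `run_in_background(...)` with direct `await`s in the worker's replication handler. Work spawned in the background runs outside the calling logging context, so its cost is never attributed to the request that triggered it; awaited work is. A toy asyncio analogue of the two shapes (Synapse's real machinery is Twisted logcontexts, not asyncio tasks):

```python
import asyncio
import time

def cpu_work():
    start = time.process_time()
    sum(i * i for i in range(200_000))  # simulate CPU-bound row processing
    return time.process_time() - start

async def process(rows):
    print("processed %d rows, %.4fs CPU" % (len(rows), cpu_work()))

async def on_rdata_background(rows):
    # Fire and forget: this handler returns before the work runs, so the
    # cost cannot be attributed to it.
    asyncio.ensure_future(process(rows))

async def on_rdata_awaited(rows):
    # The cost lands inside this handler, where it can be accounted for.
    await process(rows)

async def main():
    await on_rdata_background([1])
    await on_rdata_awaited([2])
    await asyncio.sleep(0)  # let the background task run before shutdown

asyncio.run(main())
```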
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 1ee266f7c5..174bef360f 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -42,7 +42,7 @@ from synapse.handlers.presence import PresenceHandler, get_interested_parties from synapse.http.server import JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext, run_in_background +from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.slave.storage._base import BaseSlavedStore, __func__ @@ -635,7 +635,7 @@ class GenericWorkerReplicationHandler(ReplicationClientHandler): await super(GenericWorkerReplicationHandler, self).on_rdata( stream_name, token, rows ) - run_in_background(self.process_and_notify, stream_name, token, rows) + await self.process_and_notify(stream_name, token, rows) def get_streams_to_replicate(self): args = super(GenericWorkerReplicationHandler, self).get_streams_to_replicate() @@ -650,7 +650,9 @@ class GenericWorkerReplicationHandler(ReplicationClientHandler): async def process_and_notify(self, stream_name, token, rows): try: if self.send_handler: - self.send_handler.process_replication_rows(stream_name, token, rows) + await self.send_handler.process_replication_rows( + stream_name, token, rows + ) if stream_name == EventsStream.NAME: # We shouldn't get multiple rows per token for events stream, so @@ -782,12 +784,12 @@ class FederationSenderHandler(object): def stream_positions(self): return {"federation": self.federation_position} - def process_replication_rows(self, stream_name, token, rows): + async def process_replication_rows(self, stream_name, token, rows): # The federation stream contains things that we want to send out, e.g. # presence, typing, etc. if stream_name == "federation": send_queue.process_rows_for_federation(self.federation_sender, rows) - run_in_background(self.update_token, token) + await self.update_token(token) # We also need to poke the federation sender when new events happen elif stream_name == "events": @@ -795,9 +797,7 @@ class FederationSenderHandler(object): # ... and when new receipts happen elif stream_name == ReceiptsStream.NAME: - run_as_background_process( - "process_receipts_for_federation", self._on_new_receipts, rows - ) + await self._on_new_receipts(rows) # ... as well as device updates and messages elif stream_name == DeviceListsStream.NAME: -- cgit 1.4.1 From 0f05fd15304f1931ef167351de63cc8ffa1d3a98 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 3 Apr 2020 13:21:30 +0100 Subject: Reduce the number of calls to `resource.getrusage` (#7183) Let's just call `getrusage` once on each logcontext change, rather than twice. --- changelog.d/7183.misc | 1 + synapse/logging/context.py | 102 ++++++++++++++++++++++++++++----------------- 2 files changed, 64 insertions(+), 39 deletions(-) create mode 100644 changelog.d/7183.misc (limited to 'changelog.d') diff --git a/changelog.d/7183.misc b/changelog.d/7183.misc new file mode 100644 index 0000000000..731f4dcb52 --- /dev/null +++ b/changelog.d/7183.misc @@ -0,0 +1 @@ +Clean up some LoggingContext code. 
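The refactor below takes one `getrusage` reading per logcontext switch and hands the same reading to both the outgoing context's `stop()` and the incoming context's `start()`, instead of each side calling `getrusage` separately. A minimal sketch of that shape (Unix-only, since it uses the `resource` module; the `RUSAGE_SELF` fallback is an assumption for portability, not Synapse's behaviour):

```python
import resource

# RUSAGE_THREAD is Linux-specific; fall back to RUSAGE_SELF elsewhere (assumption).
RUSAGE = getattr(resource, "RUSAGE_THREAD", resource.RUSAGE_SELF)

class Ctx:
    def __init__(self, name):
        self.name, self.usage_start = name, None

    def start(self, rusage):
        self.usage_start = rusage

    def stop(self, rusage):
        if self.usage_start is not None:
            used = rusage.ru_utime - self.usage_start.ru_utime
            print("%s used %.6fs user CPU" % (self.name, used))
        self.usage_start = None

def set_current_context(current, new):
    rusage = resource.getrusage(RUSAGE)  # one syscall, shared by both contexts
    current.stop(rusage)
    new.start(rusage)

a, b = Ctx("ctx-a"), Ctx("ctx-b")
a.start(resource.getrusage(RUSAGE))
sum(i * i for i in range(100_000))  # some work attributed to ctx-a
set_current_context(a, b)
set_current_context(b, Ctx("ctx-c"))
```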
diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 3254d6a8df..a8f674d13d 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -51,7 +51,7 @@ try: is_thread_resource_usage_supported = True - def get_thread_resource_usage(): + def get_thread_resource_usage() -> "Optional[resource._RUsage]": return resource.getrusage(RUSAGE_THREAD) @@ -60,7 +60,7 @@ except Exception: # won't track resource usage. is_thread_resource_usage_supported = False - def get_thread_resource_usage(): + def get_thread_resource_usage() -> "Optional[resource._RUsage]": return None @@ -201,10 +201,10 @@ class _Sentinel(object): record["request"] = None record["scope"] = None - def start(self): + def start(self, rusage: "Optional[resource._RUsage]"): pass - def stop(self): + def stop(self, rusage: "Optional[resource._RUsage]"): pass def add_database_transaction(self, duration_sec): @@ -261,7 +261,7 @@ class LoggingContext(object): # The thread resource usage when the logcontext became active. None # if the context is not currently active. - self.usage_start = None + self.usage_start = None # type: Optional[resource._RUsage] self.main_thread = get_thread_id() self.request = None @@ -336,7 +336,17 @@ class LoggingContext(object): record["request"] = self.request record["scope"] = self.scope - def start(self) -> None: + def start(self, rusage: "Optional[resource._RUsage]") -> None: + """ + Record that this logcontext is currently running. + + This should not be called directly: use set_current_context + + Args: + rusage: the resources used by the current thread, at the point of + switching to this logcontext. May be None if this platform doesn't + support getrusuage. + """ if get_thread_id() != self.main_thread: logger.warning("Started logcontext %s on different thread", self) return @@ -349,36 +359,48 @@ class LoggingContext(object): if self.usage_start: logger.warning("Re-starting already-active log context %s", self) else: - self.usage_start = get_thread_resource_usage() + self.usage_start = rusage - def stop(self) -> None: - if get_thread_id() != self.main_thread: - logger.warning("Stopped logcontext %s on different thread", self) - return + def stop(self, rusage: "Optional[resource._RUsage]") -> None: + """ + Record that this logcontext is no longer running. + + This should not be called directly: use set_current_context + + Args: + rusage: the resources used by the current thread, at the point of + switching away from this logcontext. May be None if this platform + doesn't support getrusuage. 
+ """ + + try: + if get_thread_id() != self.main_thread: + logger.warning("Stopped logcontext %s on different thread", self) + return + + if not rusage: + return - # When we stop, let's record the cpu used since we started - if not self.usage_start: - # Log a warning on platforms that support thread usage tracking - if is_thread_resource_usage_supported: + # Record the cpu used since we started + if not self.usage_start: logger.warning( - "Called stop on logcontext %s without calling start", self + "Called stop on logcontext %s without recording a start rusage", + self, ) - return - - utime_delta, stime_delta = self._get_cputime() - self._resource_usage.ru_utime += utime_delta - self._resource_usage.ru_stime += stime_delta + return - self.usage_start = None + utime_delta, stime_delta = self._get_cputime(rusage) + self._resource_usage.ru_utime += utime_delta + self._resource_usage.ru_stime += stime_delta - # if we have a parent, pass our CPU usage stats on - if self.parent_context is not None and hasattr( - self.parent_context, "_resource_usage" - ): - self.parent_context._resource_usage += self._resource_usage + # if we have a parent, pass our CPU usage stats on + if self.parent_context: + self.parent_context._resource_usage += self._resource_usage - # reset them in case we get entered again - self._resource_usage.reset() + # reset them in case we get entered again + self._resource_usage.reset() + finally: + self.usage_start = None def get_resource_usage(self) -> ContextResourceUsage: """Get resources used by this logcontext so far. @@ -394,24 +416,24 @@ class LoggingContext(object): # can include resource usage so far. is_main_thread = get_thread_id() == self.main_thread if self.usage_start and is_main_thread: - utime_delta, stime_delta = self._get_cputime() + rusage = get_thread_resource_usage() + assert rusage is not None + utime_delta, stime_delta = self._get_cputime(rusage) res.ru_utime += utime_delta res.ru_stime += stime_delta return res - def _get_cputime(self) -> Tuple[float, float]: - """Get the cpu usage time so far + def _get_cputime(self, current: "resource._RUsage") -> Tuple[float, float]: + """Get the cpu usage time between start() and the given rusage + + Args: + rusage: the current resource usage Returns: Tuple[float, float]: seconds in user mode, seconds in system mode """ assert self.usage_start is not None - current = get_thread_resource_usage() - - # Indicate to mypy that we know that self.usage_start is None. 
- assert self.usage_start is not None - utime_delta = current.ru_utime - self.usage_start.ru_utime stime_delta = current.ru_stime - self.usage_start.ru_stime @@ -547,9 +569,11 @@ def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSe current = current_context() if current is not context: - current.stop() + rusage = get_thread_resource_usage() + current.stop(rusage) _thread_local.current_context = context - context.start() + context.start(rusage) + return current -- cgit 1.4.1 From 07b88c546de1b24f5cbc9b4cb6da98400a8155af Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 3 Apr 2020 14:26:07 +0100 Subject: Convert http.HTTPStatus objects to their int equivalent (#7188) --- changelog.d/7188.misc | 1 + synapse/api/errors.py | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7188.misc (limited to 'changelog.d') diff --git a/changelog.d/7188.misc b/changelog.d/7188.misc new file mode 100644 index 0000000000..f72955b95b --- /dev/null +++ b/changelog.d/7188.misc @@ -0,0 +1 @@ +Fix consistency of HTTP status codes reported in log lines. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 11da016ac5..d54dfb385d 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -86,7 +86,14 @@ class CodeMessageException(RuntimeError): def __init__(self, code, msg): super(CodeMessageException, self).__init__("%d: %s" % (code, msg)) - self.code = code + + # Some calls to this method pass instances of http.HTTPStatus for `code`. + # While HTTPStatus is a subclass of int, it has magic __str__ methods + # which emit `HTTPStatus.FORBIDDEN` when converted to a str, instead of `403`. + # This causes inconsistency in our log lines. + # + # To eliminate this behaviour, we convert them to their integer equivalents here. + self.code = int(code) self.msg = msg -- cgit 1.4.1 From 334bfdbc9088cfe2fbe43cfe1c349c27734bb341 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 4 Apr 2020 02:31:52 +1100 Subject: Add some benchmarks for LruCache (#6446) --- changelog.d/6446.misc | 1 + synmark/__main__.py | 16 +++++++++++++--- synmark/suites/__init__.py | 10 ++++++++-- synmark/suites/lrucache.py | 34 ++++++++++++++++++++++++++++++++++ synmark/suites/lrucache_evict.py | 35 +++++++++++++++++++++++++++++++++++ 5 files changed, 91 insertions(+), 5 deletions(-) create mode 100644 changelog.d/6446.misc create mode 100644 synmark/suites/lrucache.py create mode 100644 synmark/suites/lrucache_evict.py (limited to 'changelog.d') diff --git a/changelog.d/6446.misc b/changelog.d/6446.misc new file mode 100644 index 0000000000..c42df16f1a --- /dev/null +++ b/changelog.d/6446.misc @@ -0,0 +1 @@ +Add benchmarks for LruCache. diff --git a/synmark/__main__.py b/synmark/__main__.py index ac59befbd4..17df9ddeb7 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py @@ -14,6 +14,7 @@ # limitations under the License. 
import sys +from argparse import REMAINDER from contextlib import redirect_stderr from io import StringIO @@ -21,7 +22,7 @@ import pyperf from synmark import make_reactor from synmark.suites import SUITES -from twisted.internet.defer import ensureDeferred +from twisted.internet.defer import Deferred, ensureDeferred from twisted.logger import globalLogBeginner, textFileLogObserver from twisted.python.failure import Failure @@ -40,7 +41,8 @@ def make_test(main): file_out = StringIO() with redirect_stderr(file_out): - d = ensureDeferred(main(reactor, loops)) + d = Deferred() + d.addCallback(lambda _: ensureDeferred(main(reactor, loops))) def on_done(_): if isinstance(_, Failure): @@ -50,6 +52,7 @@ def make_test(main): return _ d.addBoth(on_done) + reactor.callWhenRunning(lambda: d.callback(True)) reactor.run() return d.result @@ -62,11 +65,13 @@ if __name__ == "__main__": def add_cmdline_args(cmd, args): if args.log: cmd.extend(["--log"]) + cmd.extend(args.tests) runner = pyperf.Runner( - processes=3, min_time=2, show_name=True, add_cmdline_args=add_cmdline_args + processes=3, min_time=1.5, show_name=True, add_cmdline_args=add_cmdline_args ) runner.argparser.add_argument("--log", action="store_true") + runner.argparser.add_argument("tests", nargs=REMAINDER) runner.parse_args() orig_loops = runner.args.loops @@ -79,6 +84,11 @@ if __name__ == "__main__": ) setupdb() + if runner.args.tests: + SUITES = list( + filter(lambda x: x[0].__name__.split(".")[-1] in runner.args.tests, SUITES) + ) + for suite, loops in SUITES: if loops: runner.args.loops = loops diff --git a/synmark/suites/__init__.py b/synmark/suites/__init__.py index cfa3b0ba38..d8445fc3df 100644 --- a/synmark/suites/__init__.py +++ b/synmark/suites/__init__.py @@ -1,3 +1,9 @@ -from . import logging +from . import logging, lrucache, lrucache_evict -SUITES = [(logging, 1000), (logging, 10000), (logging, None)] +SUITES = [ + (logging, 1000), + (logging, 10000), + (logging, None), + (lrucache, None), + (lrucache_evict, None), +] diff --git a/synmark/suites/lrucache.py b/synmark/suites/lrucache.py new file mode 100644 index 0000000000..69ab042ccc --- /dev/null +++ b/synmark/suites/lrucache.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pyperf import perf_counter + +from synapse.util.caches.lrucache import LruCache + + +async def main(reactor, loops): + """ + Benchmark `loops` number of insertions into LruCache without eviction. + """ + cache = LruCache(loops) + + start = perf_counter() + + for i in range(loops): + cache[i] = True + + end = perf_counter() - start + + return end diff --git a/synmark/suites/lrucache_evict.py b/synmark/suites/lrucache_evict.py new file mode 100644 index 0000000000..532b1cc702 --- /dev/null +++ b/synmark/suites/lrucache_evict.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pyperf import perf_counter + +from synapse.util.caches.lrucache import LruCache + + +async def main(reactor, loops): + """ + Benchmark `loops` number of insertions into LruCache where half of them are + evicted. + """ + cache = LruCache(loops // 2) + + start = perf_counter() + + for i in range(loops): + cache[i] = True + + end = perf_counter() - start + + return end -- cgit 1.4.1 From b0db928c633ad2e225623cffb20293629c5d5a43 Mon Sep 17 00:00:00 2001 From: Martin Milata Date: Fri, 3 Apr 2020 17:57:34 +0200 Subject: Extend web_client_location to handle absolute URLs (#7006) Log warning when filesystem path is used. Signed-off-by: Martin Milata --- changelog.d/7006.feature | 1 + docs/sample_config.yaml | 11 ++++++++--- synapse/app/homeserver.py | 16 +++++++++++++--- synapse/config/server.py | 11 ++++++++--- 4 files changed, 30 insertions(+), 9 deletions(-) create mode 100644 changelog.d/7006.feature (limited to 'changelog.d') diff --git a/changelog.d/7006.feature b/changelog.d/7006.feature new file mode 100644 index 0000000000..d2ce9dbaca --- /dev/null +++ b/changelog.d/7006.feature @@ -0,0 +1 @@ +Extend the `web_client_location` option to accept an absolute URL to use as a redirect. Adds a warning when running the web client on the same hostname as the homeserver. Contributed by Martin Milata. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 6a770508f9..be742969cc 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -33,10 +33,15 @@ server_name: "SERVERNAME" # pid_file: DATADIR/homeserver.pid -# The path to the web client which will be served at /_matrix/client/ -# if 'webclient' is configured under the 'listeners' configuration. +# The absolute URL to the web client which /_matrix/client will redirect +# to if 'webclient' is configured under the 'listeners' configuration. # -#web_client_location: "/path/to/web/root" +# This option can also be set to the filesystem path to the web client +# which will be served at /_matrix/client/ if 'webclient' is configured +# under the 'listeners' configuration, however this is a security risk: +# https://github.com/matrix-org/synapse#security-note +# +#web_client_location: https://riot.example.com/ # The public-facing base URL that clients use to access this HS # (not including _matrix/...). This is the same URL a user would diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index f2b56a636f..49df63acd0 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -241,16 +241,26 @@ class SynapseHomeServer(HomeServer): resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "webclient": - webclient_path = self.get_config().web_client_location + webclient_loc = self.get_config().web_client_location - if webclient_path is None: + if webclient_loc is None: logger.warning( "Not enabling webclient resource, as web_client_location is unset."
) + elif webclient_loc.startswith("http://") or webclient_loc.startswith( + "https://" + ): + resources[WEB_CLIENT_PREFIX] = RootRedirect(webclient_loc) else: + logger.warning( + "Running webclient on the same domain is not recommended: " + "https://github.com/matrix-org/synapse#security-note - " + "after you move the webclient to a different host you can set " + "web_client_location to its full URL to enable redirection." + ) # GZip is disabled here due to # https://twistedmatrix.com/trac/ticket/7678 - resources[WEB_CLIENT_PREFIX] = File(webclient_path) + resources[WEB_CLIENT_PREFIX] = File(webclient_loc) if name == "metrics" and self.get_config().enable_metrics: resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) diff --git a/synapse/config/server.py b/synapse/config/server.py index 7525765fee..28e2a031fb 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -604,10 +604,15 @@ class ServerConfig(Config): # pid_file: %(pid_file)s - # The path to the web client which will be served at /_matrix/client/ - # if 'webclient' is configured under the 'listeners' configuration. + # The absolute URL to the web client which /_matrix/client will redirect + # to if 'webclient' is configured under the 'listeners' configuration. # - #web_client_location: "/path/to/web/root" + # This option can also be set to the filesystem path to the web client + # which will be served at /_matrix/client/ if 'webclient' is configured + # under the 'listeners' configuration, however this is a security risk: + # https://github.com/matrix-org/synapse#security-note + # + #web_client_location: https://riot.example.com/ # The public-facing base URL that clients use to access this HS # (not including _matrix/...). This is the same URL a user would -- cgit 1.4.1 From 694d8bed0e56366f080a49db0f930d635ca6cdf4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 3 Apr 2020 15:35:05 -0400 Subject: Support CAS in UI Auth flows. (#7186) --- changelog.d/7186.feature | 1 + synapse/handlers/auth.py | 4 +- synapse/handlers/cas_handler.py | 161 +++++++++++++++++++---------- synapse/rest/client/v1/login.py | 20 ++++- synapse/rest/client/v2_alpha/auth.py | 28 ++++-- 5 files changed, 131 insertions(+), 83 deletions(-) create mode 100644 changelog.d/7186.feature (limited to 'changelog.d') diff --git a/changelog.d/7186.feature b/changelog.d/7186.feature new file mode 100644 index 0000000000..01057aa396 --- /dev/null +++ b/changelog.d/7186.feature @@ -0,0 +1 @@ +Support SSO in the user interactive authentication workflow. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 7c09d15a72..892adb00b9 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -116,7 +116,7 @@ class AuthHandler(BaseHandler): self.hs = hs # FIXME better possibility to access registrationHandler later? self.macaroon_gen = hs.get_macaroon_generator() self._password_enabled = hs.config.password_enabled - self._saml2_enabled = hs.config.saml2_enabled + self._sso_enabled = hs.config.saml2_enabled or hs.config.cas_enabled # we keep this as a list despite the O(N^2) implication so that we can # keep PASSWORD first and avoid confusing clients which pick the first @@ -136,7 +136,7 @@ class AuthHandler(BaseHandler): # necessarily identical. Login types have SSO (and other login types) # added in the rest layer, see synapse.rest.client.v1.login.LoginRestServerlet.on_GET.
ui_auth_types = login_types.copy() - if self._saml2_enabled: + if self._sso_enabled: ui_auth_types.append(LoginType.SSO) self._supported_ui_auth_types = ui_auth_types diff --git a/synapse/handlers/cas_handler.py b/synapse/handlers/cas_handler.py index f8dc274b78..d977badf35 100644 --- a/synapse/handlers/cas_handler.py +++ b/synapse/handlers/cas_handler.py @@ -15,7 +15,7 @@ import logging import xml.etree.ElementTree as ET -from typing import AnyStr, Dict, Optional, Tuple +from typing import Dict, Optional, Tuple from six.moves import urllib @@ -48,26 +48,47 @@ class CasHandler: self._http_client = hs.get_proxied_http_client() - def _build_service_param(self, client_redirect_url: AnyStr) -> str: + def _build_service_param(self, args: Dict[str, str]) -> str: + """ + Generates a value to use as the "service" parameter when redirecting or + querying the CAS service. + + Args: + args: Additional arguments to include in the final redirect URL. + + Returns: + The URL to use as a "service" parameter. + """ return "%s%s?%s" % ( self._cas_service_url, "/_matrix/client/r0/login/cas/ticket", - urllib.parse.urlencode({"redirectUrl": client_redirect_url}), + urllib.parse.urlencode(args), ) - async def _handle_cas_response( - self, request: SynapseRequest, cas_response_body: str, client_redirect_url: str - ) -> None: + async def _validate_ticket( + self, ticket: str, service_args: Dict[str, str] + ) -> Tuple[str, Optional[str]]: """ - Retrieves the user and display name from the CAS response and continues with the authentication. + Validate a CAS ticket with the server, parse the response, and return the user and display name. Args: - request: The original client request. - cas_response_body: The response from the CAS server. - client_redirect_url: The URl to redirect the client to when - everything is done. + ticket: The CAS ticket from the client. + service_args: Additional arguments to include in the service URL. + Should be the same as those passed to `get_redirect_url`. """ - user, attributes = self._parse_cas_response(cas_response_body) + uri = self._cas_server_url + "/proxyValidate" + args = { + "ticket": ticket, + "service": self._build_service_param(service_args), + } + try: + body = await self._http_client.get_raw(uri, args) + except PartialDownloadError as pde: + # Twisted raises this error if the connection is closed, + # even if that's being used old-http style to signal end-of-data + body = pde.response + + user, attributes = self._parse_cas_response(body) displayname = attributes.pop(self._cas_displayname_attribute, None) for required_attribute, required_value in self._cas_required_attributes.items(): @@ -82,7 +103,7 @@ class CasHandler: if required_value != actual_value: raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED) - await self._on_successful_auth(user, request, client_redirect_url, displayname) + return user, displayname def _parse_cas_response( self, cas_response_body: str @@ -127,78 +148,74 @@ class CasHandler: ) return user, attributes - async def _on_successful_auth( - self, - username: str, - request: SynapseRequest, - client_redirect_url: str, - user_display_name: Optional[str] = None, - ) -> None: - """Called once the user has successfully authenticated with the SSO. - - Registers the user if necessary, and then returns a redirect (with - a login token) to the client. + def get_redirect_url(self, service_args: Dict[str, str]) -> str: + """ + Generates a URL for the CAS server where the client should be redirected. Args: - username: the remote user id. 
We'll map this onto - something sane for a MXID localpath. + service_args: Additional arguments to include in the final redirect URL. - request: the incoming request from the browser. We'll - respond to it with a redirect. + Returns: + The URL to redirect the client to. + """ + args = urllib.parse.urlencode( + {"service": self._build_service_param(service_args)} + ) - client_redirect_url: the redirect_url the client gave us when - it first started the process. + return "%s/login?%s" % (self._cas_server_url, args) - user_display_name: if set, and we have to register a new user, - we will set their displayname to this. + async def handle_ticket( + self, + request: SynapseRequest, + ticket: str, + client_redirect_url: Optional[str], + session: Optional[str], + ) -> None: """ - localpart = map_username_to_mxid_localpart(username) - user_id = UserID(localpart, self._hostname).to_string() - registered_user_id = await self._auth_handler.check_user_exists(user_id) - if not registered_user_id: - registered_user_id = await self._registration_handler.register_user( - localpart=localpart, default_display_name=user_display_name - ) + Called once the user has successfully authenticated with the SSO. + Validates a CAS ticket sent by the client and completes the auth process. - self._auth_handler.complete_sso_login( - registered_user_id, request, client_redirect_url - ) + If the user interactive authentication session is provided, marks the + UI Auth session as complete, then returns an HTML page notifying the + user they are done. - def handle_redirect_request(self, client_redirect_url: bytes) -> bytes: - """ - Generates a URL to the CAS server where the client should be redirected. + Otherwise, this registers the user if necessary, and then returns a + redirect (with a login token) to the client. Args: - client_redirect_url: The final URL the client should go to after the - user has negotiated SSO. + request: the incoming request from the browser. We'll + respond to it with a redirect or an HTML page. - Returns: - The URL to redirect to. - """ - args = urllib.parse.urlencode( - {"service": self._build_service_param(client_redirect_url)} - ) + ticket: The CAS ticket provided by the client. - return ("%s/login?%s" % (self._cas_server_url, args)).encode("ascii") + client_redirect_url: the redirectUrl parameter from the `/cas/ticket` HTTP request, if given. + This should be the same as the redirectUrl from the original `/login/sso/redirect` request. - async def handle_ticket_request( - self, request: SynapseRequest, client_redirect_url: str, ticket: str - ) -> None: + session: The session parameter from the `/cas/ticket` HTTP request, if given. + This should be the UI Auth session id. """ - Validates a CAS ticket sent by the client for login/registration. + args = {} + if client_redirect_url: + args["redirectUrl"] = client_redirect_url + if session: + args["session"] = session + username, user_display_name = await self._validate_ticket(ticket, args) - On a successful request, writes a redirect to the request. 
- """ - uri = self._cas_server_url + "/proxyValidate" - args = { - "ticket": ticket, - "service": self._build_service_param(client_redirect_url), - } - try: - body = await self._http_client.get_raw(uri, args) - except PartialDownloadError as pde: - # Twisted raises this error if the connection is closed, - # even if that's being used old-http style to signal end-of-data - body = pde.response + localpart = map_username_to_mxid_localpart(username) + user_id = UserID(localpart, self._hostname).to_string() + registered_user_id = await self._auth_handler.check_user_exists(user_id) - await self._handle_cas_response(request, body, client_redirect_url) + if session: + self._auth_handler.complete_sso_ui_auth( + registered_user_id, session, request, + ) + + else: + if not registered_user_id: + registered_user_id = await self._registration_handler.register_user( + localpart=localpart, default_display_name=user_display_name + ) + + self._auth_handler.complete_sso_login( + registered_user_id, request, client_redirect_url + ) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 59593cbf6e..4de2f97d06 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -425,7 +425,9 @@ class CasRedirectServlet(BaseSSORedirectServlet): self._cas_handler = hs.get_cas_handler() def get_sso_url(self, client_redirect_url: bytes) -> bytes: - return self._cas_handler.handle_redirect_request(client_redirect_url) + return self._cas_handler.get_redirect_url( + {"redirectUrl": client_redirect_url} + ).encode("ascii") class CasTicketServlet(RestServlet): @@ -436,10 +438,20 @@ class CasTicketServlet(RestServlet): self._cas_handler = hs.get_cas_handler() async def on_GET(self, request: SynapseRequest) -> None: - client_redirect_url = parse_string(request, "redirectUrl", required=True) + client_redirect_url = parse_string(request, "redirectUrl") ticket = parse_string(request, "ticket", required=True) - await self._cas_handler.handle_ticket_request( - request, client_redirect_url, ticket + + # Maybe get a session ID (if this ticket is from user interactive + # authentication). + session = parse_string(request, "session") + + # Either client_redirect_url or session must be provided. + if not client_redirect_url and not session: + message = "Missing string query parameter redirectUrl or session" + raise SynapseError(400, message, errcode=Codes.MISSING_PARAM) + + await self._cas_handler.handle_ticket( + request, ticket, client_redirect_url, session ) diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 1787562b90..13f9604407 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -111,6 +111,11 @@ class AuthRestServlet(RestServlet): self._saml_enabled = hs.config.saml2_enabled if self._saml_enabled: self._saml_handler = hs.get_saml_handler() + self._cas_enabled = hs.config.cas_enabled + if self._cas_enabled: + self._cas_handler = hs.get_cas_handler() + self._cas_server_url = hs.config.cas_server_url + self._cas_service_url = hs.config.cas_service_url def on_GET(self, request, stagetype): session = parse_string(request, "session") @@ -133,14 +138,27 @@ class AuthRestServlet(RestServlet): % (CLIENT_API_PREFIX, LoginType.TERMS), } - elif stagetype == LoginType.SSO and self._saml_enabled: + elif stagetype == LoginType.SSO: # Display a confirmation page which prompts the user to # re-authenticate with their SSO provider. 
- client_redirect_url = "" - sso_redirect_url = self._saml_handler.handle_redirect_request( - client_redirect_url, session - ) + if self._cas_enabled: + # Generate a request to CAS that redirects back to an endpoint + # to verify the successful authentication. + sso_redirect_url = self._cas_handler.get_redirect_url( + {"session": session}, + ) + + elif self._saml_enabled: + client_redirect_url = "" + sso_redirect_url = self._saml_handler.handle_redirect_request( + client_redirect_url, session + ) + + else: + raise SynapseError(400, "Homeserver not configured for SSO.") + html = self.auth_handler.start_sso_ui_auth(sso_redirect_url, session) + else: raise SynapseError(404, "Unknown auth stage type") -- cgit 1.4.1 From d73bf18d13031d9f9c0375b83f2cc5ff6f415251 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Sat, 4 Apr 2020 17:27:45 +0200 Subject: Server notices: Dissociate room creation/lookup from invite (#7199) Fixes #6815 Before figuring out whether we should alert a user on MAU, we call get_notice_room_for_user to get some info on the existing server notices room for this user. This function, if the room doesn't exist, creates it and invites the user in it. This means that, if we decide later that no server notice is needed, the user gets invited in a room with no message in it. This happens at every restart of the server, since the room ID returned by get_notice_room_for_user is cached. This PR fixes that by moving the inviting bit to a dedicated function, that's only called when the server actually needs to send a notice to the user. A potential issue with this approach is that the room that's created by get_notice_room_for_user doesn't match how that same function looks for an existing room (i.e. it creates a room that doesn't have an invite or a join for the current user in it, so it could lead to a new room being created each time a user syncs), but I'm not sure this is a problem given it's cached until the server restarts, so that function won't run very often. It also renames get_notice_room_for_user into get_or_create_notice_room_for_user to make what it does clearer. --- changelog.d/7199.bugfix | 1 + .../resource_limits_server_notices.py | 4 +- synapse/server_notices/server_notices_manager.py | 51 +++++++-- .../test_resource_limits_server_notices.py | 120 ++++++++++++++++++--- 4 files changed, 154 insertions(+), 22 deletions(-) create mode 100644 changelog.d/7199.bugfix (limited to 'changelog.d') diff --git a/changelog.d/7199.bugfix b/changelog.d/7199.bugfix new file mode 100644 index 0000000000..b234163ea8 --- /dev/null +++ b/changelog.d/7199.bugfix @@ -0,0 +1 @@ +Fix a bug that could cause a user to be invited to a server notices (aka System Alerts) room without any notice being sent. 
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 9fae2e0afe..ce4a828894 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -80,7 +80,9 @@ class ResourceLimitsServerNotices(object): # In practice, not sure we can ever get here return - room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id) + room_id = yield self._server_notices_manager.get_or_create_notice_room_for_user( + user_id + ) if not room_id: logger.warning("Failed to get server notices room") diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index f7432c8d2f..bf0943f265 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer from synapse.api.constants import EventTypes, Membership, RoomCreationPreset -from synapse.types import create_requester +from synapse.types import UserID, create_requester from synapse.util.caches.descriptors import cachedInlineCallbacks logger = logging.getLogger(__name__) @@ -36,10 +36,12 @@ class ServerNoticesManager(object): self._store = hs.get_datastore() self._config = hs.config self._room_creation_handler = hs.get_room_creation_handler() + self._room_member_handler = hs.get_room_member_handler() self._event_creation_handler = hs.get_event_creation_handler() self._is_mine_id = hs.is_mine_id self._notifier = hs.get_notifier() + self.server_notices_mxid = self._config.server_notices_mxid def is_enabled(self): """Checks if server notices are enabled on this server. @@ -66,7 +68,8 @@ class ServerNoticesManager(object): Returns: Deferred[FrozenEvent] """ - room_id = yield self.get_notice_room_for_user(user_id) + room_id = yield self.get_or_create_notice_room_for_user(user_id) + yield self.maybe_invite_user_to_room(user_id, room_id) system_mxid = self._config.server_notices_mxid requester = create_requester(system_mxid) @@ -89,10 +92,11 @@ class ServerNoticesManager(object): return res @cachedInlineCallbacks() - def get_notice_room_for_user(self, user_id): + def get_or_create_notice_room_for_user(self, user_id): """Get the room for notices for a given user - If we have not yet created a notice room for this user, create it + If we have not yet created a notice room for this user, create it, but don't + invite the user to it. Args: user_id (str): complete user id for the user we want a room for @@ -108,7 +112,6 @@ class ServerNoticesManager(object): rooms = yield self._store.get_rooms_for_local_user_where_membership_is( user_id, [Membership.INVITE, Membership.JOIN] ) - system_mxid = self._config.server_notices_mxid for room in rooms: # it's worth noting that there is an asymmetry here in that we # expect the user to be invited or joined, but the system user must @@ -116,10 +119,14 @@ class ServerNoticesManager(object): # manages to invite the system user to a room, that doesn't make it # the server notices room. 
user_ids = yield self._store.get_users_in_room(room.room_id) - if system_mxid in user_ids: + if self.server_notices_mxid in user_ids: # we found a room which our user shares with the system notice # user - logger.info("Using room %s", room.room_id) + logger.info( + "Using existing server notices room %s for user %s", + room.room_id, + user_id, + ) return room.room_id # apparently no existing notice room: create a new one @@ -138,14 +145,13 @@ class ServerNoticesManager(object): "avatar_url": self._config.server_notices_mxid_avatar_url, } - requester = create_requester(system_mxid) + requester = create_requester(self.server_notices_mxid) info = yield self._room_creation_handler.create_room( requester, config={ "preset": RoomCreationPreset.PRIVATE_CHAT, "name": self._config.server_notices_room_name, "power_level_content_override": {"users_default": -10}, - "invite": (user_id,), }, ratelimit=False, creator_join_profile=join_profile, @@ -159,3 +165,30 @@ class ServerNoticesManager(object): logger.info("Created server notices room %s for %s", room_id, user_id) return room_id + + @defer.inlineCallbacks + def maybe_invite_user_to_room(self, user_id: str, room_id: str): + """Invite the given user to the given server room, unless the user has already + joined or been invited to it. + + Args: + user_id: The ID of the user to invite. + room_id: The ID of the room to invite the user to. + """ + requester = create_requester(self.server_notices_mxid) + + # Check whether the user has already joined or been invited to this room. If + # that's the case, there is no need to re-invite them. + joined_rooms = yield self._store.get_rooms_for_local_user_where_membership_is( + user_id, [Membership.INVITE, Membership.JOIN] + ) + for room in joined_rooms: + if room.room_id == room_id: + return + + yield self._room_member_handler.update_membership( + requester=requester, + target=UserID.from_string(user_id), + room_id=room_id, + action="invite", + ) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 0d27b92a86..93eb053b8c 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -19,6 +19,9 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, LimitBlockingTypes, ServerNoticeMsgType from synapse.api.errors import ResourceLimitError +from synapse.rest import admin +from synapse.rest.client.v1 import login, room +from synapse.rest.client.v2_alpha import sync from synapse.server_notices.resource_limits_server_notices import ( ResourceLimitsServerNotices, ) @@ -67,7 +70,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): # self.server_notices_mxid_avatar_url = None # self.server_notices_room_name = "Server Notices" - self._rlsn._server_notices_manager.get_notice_room_for_user = Mock( + self._rlsn._server_notices_manager.get_or_create_notice_room_for_user = Mock( returnValue="" ) self._rlsn._store.add_tag_to_room = Mock() @@ -215,6 +218,26 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + sync.register_servlets, + ] + + def default_config(self): + c = super().default_config() + c["server_notices"] = { + "system_mxid_localpart": "server", + "system_mxid_display_name": None, + "system_mxid_avatar_url": 
None, + "room_name": "Test Server Notice Room", + } + c["limit_usage_by_mau"] = True + c["max_mau_value"] = 5 + c["admin_contact"] = "mailto:user@test.com" + return c + def prepare(self, reactor, clock, hs): self.store = self.hs.get_datastore() self.server_notices_sender = self.hs.get_server_notices_sender() @@ -228,18 +251,8 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): if not isinstance(self._rlsn, ResourceLimitsServerNotices): raise Exception("Failed to find reference to ResourceLimitsServerNotices") - self.hs.config.limit_usage_by_mau = True - self.hs.config.hs_disabled = False - self.hs.config.max_mau_value = 5 - self.hs.config.server_notices_mxid = "@server:test" - self.hs.config.server_notices_mxid_display_name = None - self.hs.config.server_notices_mxid_avatar_url = None - self.hs.config.server_notices_room_name = "Test Server Notice Room" - self.user_id = "@user_id:test" - self.hs.config.admin_contact = "mailto:user@test.com" - def test_server_notice_only_sent_once(self): self.store.get_monthly_active_count = Mock(return_value=1000) @@ -253,7 +266,7 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): # Now lets get the last load of messages in the service notice room and # check that there is only one server notice room_id = self.get_success( - self.server_notices_manager.get_notice_room_for_user(self.user_id) + self.server_notices_manager.get_or_create_notice_room_for_user(self.user_id) ) token = self.get_success(self.event_source.get_current_token()) @@ -273,3 +286,86 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): count += 1 self.assertEqual(count, 1) + + def test_no_invite_without_notice(self): + """Tests that a user doesn't get invited to a server notices room without a + server notice being sent. + + The scenario for this test is a single user on a server where the MAU limit + hasn't been reached (since it's the only user and the limit is 5), so users + shouldn't receive a server notice. + """ + self.register_user("user", "password") + tok = self.login("user", "password") + + request, channel = self.make_request("GET", "/sync?timeout=0", access_token=tok) + self.render(request) + + invites = channel.json_body["rooms"]["invite"] + self.assertEqual(len(invites), 0, invites) + + def test_invite_with_notice(self): + """Tests that, if the MAU limit is hit, the server notices user invites each user + to a room in which it has sent a notice. + """ + user_id, tok, room_id = self._trigger_notice_and_join() + + # Sync again to retrieve the events in the room, so we can check whether this + # room has a notice in it. + request, channel = self.make_request("GET", "/sync?timeout=0", access_token=tok) + self.render(request) + + # Scan the events in the room to search for a message from the server notices + # user. + events = channel.json_body["rooms"]["join"][room_id]["timeline"]["events"] + notice_in_room = False + for event in events: + if ( + event["type"] == EventTypes.Message + and event["sender"] == self.hs.config.server_notices_mxid + ): + notice_in_room = True + + self.assertTrue(notice_in_room, "No server notice in room") + + def _trigger_notice_and_join(self): + """Creates enough active users to hit the MAU limit and trigger a system notice + about it, then joins the system notices room with one of the users created. + + Returns: + user_id (str): The ID of the user that joined the room. + tok (str): The access token of the user that joined the room. 
+ room_id (str): The ID of the room that's been joined. + """ + user_id = None + tok = None + invites = [] + + # Register as many users as the MAU limit allows. + for i in range(self.hs.config.max_mau_value): + localpart = "user%d" % i + user_id = self.register_user(localpart, "password") + tok = self.login(localpart, "password") + + # Sync with the user's token to mark the user as active. + request, channel = self.make_request( + "GET", "/sync?timeout=0", access_token=tok, + ) + self.render(request) + + # Also retrieves the list of invites for this user. We don't care about that + # one except if we're processing the last user, which should have received an + # invite to a room with a server notice about the MAU limit being reached. + # We could also pick another user and sync with it, which would return an + # invite to a system notices room, but it doesn't matter which user we're + # using so we use the last one because it saves us an extra sync. + invites = channel.json_body["rooms"]["invite"] + + # Make sure we have an invite to process. + self.assertEqual(len(invites), 1, invites) + + # Join the room. + room_id = list(invites.keys())[0] + self.helper.join(room=room_id, user=user_id, tok=tok) + + return user_id, tok, room_id -- cgit 1.4.1 From 5016b162fcf0372fe35404c64f80aeaf21461f31 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 6 Apr 2020 09:58:42 +0100 Subject: Move client command handling out of TCP protocol (#7185) The aim here is to move the command handling out of the TCP protocol classes and to also merge the client and server command handling (so that we can reuse them for redis protocol). This PR simply moves the client paths to the new `ReplicationCommandHandler`, a future PR will move the server paths too. --- changelog.d/7185.misc | 1 + synapse/app/admin_cmd.py | 12 -- synapse/app/generic_worker.py | 9 +- synapse/replication/tcp/__init__.py | 30 ++- synapse/replication/tcp/client.py | 179 +++--------------- synapse/replication/tcp/handler.py | 252 +++++++++++++++++++++++++ synapse/replication/tcp/protocol.py | 197 +++---------------- synapse/server.py | 8 +- synapse/server.pyi | 7 +- tests/replication/slave/storage/_base.py | 15 +- tests/replication/tcp/streams/_base.py | 38 ++-- tests/replication/tcp/streams/test_receipts.py | 1 - 12 files changed, 378 insertions(+), 371 deletions(-) create mode 100644 changelog.d/7185.misc create mode 100644 synapse/replication/tcp/handler.py (limited to 'changelog.d') diff --git a/changelog.d/7185.misc b/changelog.d/7185.misc new file mode 100644 index 0000000000..deb9ca7021 --- /dev/null +++ b/changelog.d/7185.misc @@ -0,0 +1 @@ +Move client command handling out of TCP protocol. 
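Besides the straight code motion, the new ReplicationCommandHandler below carries over the RDATA batching convention: a row arriving with a token of None is part of a batch and is buffered until a row with a real token closes the batch, at which point all buffered rows are delivered together. A standalone sketch of just that convention (simplified; not the class added below):

    from typing import Any, Dict, List, Optional

    class BatchingRdataStub:
        """Buffers token-less RDATA rows until a token closes the batch."""

        def __init__(self):
            # stream name -> rows buffered for the batch currently in flight
            self._pending_batches: Dict[str, List[Any]] = {}

        async def on_rdata_command(
            self, stream_name: str, token: Optional[int], row: Any
        ):
            if token is None:
                # Part of a batch: hold the row until the closing command.
                self._pending_batches.setdefault(stream_name, []).append(row)
                return
            # A real token ends the batch: deliver everything at once.
            rows = self._pending_batches.pop(stream_name, [])
            rows.append(row)
            await self.on_rdata(stream_name, token, rows)

        async def on_rdata(self, stream_name: str, token: int, rows: List[Any]):
            print("%s: %d row(s) up to token %d" % (stream_name, len(rows), token))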
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 1c7c6ec0c8..a37818fe9a 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -43,7 +43,6 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.room import RoomStore -from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer from synapse.util.logcontext import LoggingContext from synapse.util.versionstring import get_version_string @@ -79,17 +78,6 @@ class AdminCmdServer(HomeServer): def start_listening(self, listeners): pass - def build_tcp_replication(self): - return AdminCmdReplicationHandler(self) - - -class AdminCmdReplicationHandler(ReplicationClientHandler): - async def on_rdata(self, stream_name, token, rows): - pass - - def get_streams_to_replicate(self): - return {} - @defer.inlineCallbacks def export_data_command(hs, args): diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 174bef360f..dcd0709a02 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -64,7 +64,7 @@ from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import SlavedTransactionStore -from synapse.replication.tcp.client import ReplicationClientHandler +from synapse.replication.tcp.client import ReplicationDataHandler from synapse.replication.tcp.commands import ClearUserSyncsCommand from synapse.replication.tcp.streams import ( AccountDataStream, @@ -603,7 +603,7 @@ class GenericWorkerServer(HomeServer): def remove_pusher(self, app_id, push_key, user_id): self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id) - def build_tcp_replication(self): + def build_replication_data_handler(self): return GenericWorkerReplicationHandler(self) def build_presence_handler(self): @@ -613,7 +613,7 @@ class GenericWorkerServer(HomeServer): return GenericWorkerTyping(self) -class GenericWorkerReplicationHandler(ReplicationClientHandler): +class GenericWorkerReplicationHandler(ReplicationDataHandler): def __init__(self, hs): super(GenericWorkerReplicationHandler, self).__init__(hs.get_datastore()) @@ -644,9 +644,6 @@ class GenericWorkerReplicationHandler(ReplicationClientHandler): args.update(self.send_handler.stream_positions()) return args - def get_currently_syncing_users(self): - return self.presence_handler.get_currently_syncing_users() - async def process_and_notify(self, stream_name, token, rows): try: if self.send_handler: diff --git a/synapse/replication/tcp/__init__.py b/synapse/replication/tcp/__init__.py index 81c2ea7ee9..523a1358d4 100644 --- a/synapse/replication/tcp/__init__.py +++ b/synapse/replication/tcp/__init__.py @@ -20,11 +20,31 @@ Further details can be found in docs/tcp_replication.rst Structure of the module: - * client.py - the client classes used for workers to connect to master + * handler.py - the classes used to handle sending/receiving commands to + replication * command.py - the definitions of all the valid commands - * protocol.py - contains bot the client and server protocol implementations, - these should not be used directly - * resource.py - the server classes 
that accepts and handle client connections - * streams.py - the definitons of all the valid streams + * protocol.py - the TCP protocol classes + * resource.py - handles streaming stream updates to replications + * streams/ - the definitons of all the valid streams + +The general interaction of the classes are: + + +---------------------+ + | ReplicationStreamer | + +---------------------+ + | + v + +---------------------------+ +----------------------+ + | ReplicationCommandHandler |---->|ReplicationDataHandler| + +---------------------------+ +----------------------+ + | ^ + v | + +-------------+ + | Protocols | + | (TCP/redis) | + +-------------+ + +Where the ReplicationDataHandler (or subclasses) handles incoming stream +updates. """ diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index e86d9805f1..700ae79158 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -16,26 +16,16 @@ """ import logging -from typing import Dict, List, Optional +from typing import TYPE_CHECKING, Dict -from twisted.internet import defer from twisted.internet.protocol import ReconnectingClientFactory from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.tcp.protocol import ( - AbstractReplicationClientHandler, - ClientReplicationStreamProtocol, -) - -from .commands import ( - Command, - FederationAckCommand, - InvalidateCacheCommand, - RemoteServerUpCommand, - RemovePusherCommand, - UserIpCommand, - UserSyncCommand, -) +from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol + +if TYPE_CHECKING: + from synapse.server import HomeServer + from synapse.replication.tcp.handler import ReplicationCommandHandler logger = logging.getLogger(__name__) @@ -44,16 +34,20 @@ class ReplicationClientFactory(ReconnectingClientFactory): """Factory for building connections to the master. Will reconnect if the connection is lost. - Accepts a handler that will be called when new data is available or data - is required. + Accepts a handler that is passed to `ClientReplicationStreamProtocol`. """ initialDelay = 0.1 maxDelay = 1 # Try at least once every N seconds - def __init__(self, hs, client_name, handler: AbstractReplicationClientHandler): + def __init__( + self, + hs: "HomeServer", + client_name: str, + command_handler: "ReplicationCommandHandler", + ): self.client_name = client_name - self.handler = handler + self.command_handler = command_handler self.server_name = hs.config.server_name self.hs = hs self._clock = hs.get_clock() # As self.clock is defined in super class @@ -66,7 +60,11 @@ class ReplicationClientFactory(ReconnectingClientFactory): def buildProtocol(self, addr): logger.info("Connected to replication: %r", addr) return ClientReplicationStreamProtocol( - self.hs, self.client_name, self.server_name, self._clock, self.handler, + self.hs, + self.client_name, + self.server_name, + self._clock, + self.command_handler, ) def clientConnectionLost(self, connector, reason): @@ -78,41 +76,17 @@ class ReplicationClientFactory(ReconnectingClientFactory): ReconnectingClientFactory.clientConnectionFailed(self, connector, reason) -class ReplicationClientHandler(AbstractReplicationClientHandler): - """A base handler that can be passed to the ReplicationClientFactory. +class ReplicationDataHandler: + """Handles incoming stream updates from replication. - By default proxies incoming replication data to the SlaveStore. + This instance notifies the slave data store about updates. 
Can be subclassed + to handle updates in additional ways. """ def __init__(self, store: BaseSlavedStore): self.store = store - # The current connection. None if we are currently (re)connecting - self.connection = None - - # Any pending commands to be sent once a new connection has been - # established - self.pending_commands = [] # type: List[Command] - - # Map from string -> deferred, to wake up when receiveing a SYNC with - # the given string. - # Used for tests. - self.awaiting_syncs = {} # type: Dict[str, defer.Deferred] - - # The factory used to create connections. - self.factory = None # type: Optional[ReplicationClientFactory] - - def start_replication(self, hs): - """Helper method to start a replication connection to the remote server - using TCP. - """ - client_name = hs.config.worker_name - self.factory = ReplicationClientFactory(hs, client_name, self) - host = hs.config.worker_replication_host - port = hs.config.worker_replication_port - hs.get_reactor().connectTCP(host, port, self.factory) - - async def on_rdata(self, stream_name, token, rows): + async def on_rdata(self, stream_name: str, token: int, rows: list): """Called to handle a batch of replication data with a given stream token. By default this just pokes the slave store. Can be overridden in subclasses to @@ -124,30 +98,8 @@ class ReplicationClientHandler(AbstractReplicationClientHandler): rows (list): a list of Stream.ROW_TYPE objects as returned by Stream.parse_row. """ - logger.debug("Received rdata %s -> %s", stream_name, token) self.store.process_replication_rows(stream_name, token, rows) - async def on_position(self, stream_name, token): - """Called when we get new position data. By default this just pokes - the slave store. - - Can be overriden in subclasses to handle more. - """ - self.store.process_replication_rows(stream_name, token, []) - - def on_sync(self, data): - """When we received a SYNC we wake up any deferreds that were waiting - for the sync with the given data. - - Used by tests. - """ - d = self.awaiting_syncs.pop(data, None) - if d: - d.callback(data) - - def on_remote_server_up(self, server: str): - """Called when get a new REMOTE_SERVER_UP command.""" - def get_streams_to_replicate(self) -> Dict[str, int]: """Called when a new connection has been established and we need to subscribe to streams. @@ -163,85 +115,10 @@ class ReplicationClientHandler(AbstractReplicationClientHandler): args["account_data"] = user_account_data elif room_account_data: args["account_data"] = room_account_data - return args - def get_currently_syncing_users(self): - """Get the list of currently syncing users (if any). This is called - when a connection has been established and we need to send the - currently syncing users. (Overriden by the synchrotron's only) - """ - return [] - - def send_command(self, cmd): - """Send a command to master (when we get establish a connection if we - don't have one already.) - """ - if self.connection: - self.connection.send_command(cmd) - else: - logger.warning("Queuing command as not connected: %r", cmd.NAME) - self.pending_commands.append(cmd) - - def send_federation_ack(self, token): - """Ack data for the federation stream. This allows the master to drop - data stored purely in memory. - """ - self.send_command(FederationAckCommand(token)) - - def send_user_sync(self, instance_id, user_id, is_syncing, last_sync_ms): - """Poke the master that a user has started/stopped syncing. 
- """ - self.send_command( - UserSyncCommand(instance_id, user_id, is_syncing, last_sync_ms) - ) - - def send_remove_pusher(self, app_id, push_key, user_id): - """Poke the master to remove a pusher for a user - """ - cmd = RemovePusherCommand(app_id, push_key, user_id) - self.send_command(cmd) - - def send_invalidate_cache(self, cache_func, keys): - """Poke the master to invalidate a cache. - """ - cmd = InvalidateCacheCommand(cache_func.__name__, keys) - self.send_command(cmd) - - def send_user_ip(self, user_id, access_token, ip, user_agent, device_id, last_seen): - """Tell the master that the user made a request. - """ - cmd = UserIpCommand(user_id, access_token, ip, user_agent, device_id, last_seen) - self.send_command(cmd) - - def send_remote_server_up(self, server: str): - self.send_command(RemoteServerUpCommand(server)) - - def await_sync(self, data): - """Returns a deferred that is resolved when we receive a SYNC command - with given data. - - [Not currently] used by tests. - """ - return self.awaiting_syncs.setdefault(data, defer.Deferred()) - - def update_connection(self, connection): - """Called when a connection has been established (or lost with None). - """ - self.connection = connection - if connection: - for cmd in self.pending_commands: - connection.send_command(cmd) - self.pending_commands = [] - - def finished_connecting(self): - """Called when we have successfully subscribed and caught up to all - streams we're interested in. - """ - logger.info("Finished connecting to server") + async def on_position(self, stream_name: str, token: int): + self.store.process_replication_rows(stream_name, token, []) - # We don't reset the delay any earlier as otherwise if there is a - # problem during start up we'll end up tight looping connecting to the - # server. - if self.factory: - self.factory.resetDelay() + def on_remote_server_up(self, server: str): + """Called when get a new REMOTE_SERVER_UP command.""" diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py new file mode 100644 index 0000000000..12a1cfd6d1 --- /dev/null +++ b/synapse/replication/tcp/handler.py @@ -0,0 +1,252 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 Vector Creations Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +from typing import Any, Callable, Dict, List, Optional, Set + +from prometheus_client import Counter + +from synapse.replication.tcp.client import ReplicationClientFactory +from synapse.replication.tcp.commands import ( + Command, + FederationAckCommand, + InvalidateCacheCommand, + PositionCommand, + RdataCommand, + RemoteServerUpCommand, + RemovePusherCommand, + SyncCommand, + UserIpCommand, + UserSyncCommand, +) +from synapse.replication.tcp.streams import STREAMS_MAP, Stream +from synapse.util.async_helpers import Linearizer + +logger = logging.getLogger(__name__) + + +# number of updates received for each RDATA stream +inbound_rdata_count = Counter( + "synapse_replication_tcp_protocol_inbound_rdata_count", "", ["stream_name"] +) + + +class ReplicationCommandHandler: + """Handles incoming commands from replication as well as sending commands + back out to connections. + """ + + def __init__(self, hs): + self._replication_data_handler = hs.get_replication_data_handler() + self._presence_handler = hs.get_presence_handler() + + # Set of streams that we've caught up with. + self._streams_connected = set() # type: Set[str] + + self._streams = { + stream.NAME: stream(hs) for stream in STREAMS_MAP.values() + } # type: Dict[str, Stream] + + self._position_linearizer = Linearizer("replication_position") + + # Map of stream to batched updates. See RdataCommand for info on how + # batching works. + self._pending_batches = {} # type: Dict[str, List[Any]] + + # The factory used to create connections. + self._factory = None # type: Optional[ReplicationClientFactory] + + # The current connection. None if we are currently (re)connecting + self._connection = None + + def start_replication(self, hs): + """Helper method to start a replication connection to the remote server + using TCP. + """ + client_name = hs.config.worker_name + self._factory = ReplicationClientFactory(hs, client_name, self) + host = hs.config.worker_replication_host + port = hs.config.worker_replication_port + hs.get_reactor().connectTCP(host, port, self._factory) + + async def on_RDATA(self, cmd: RdataCommand): + stream_name = cmd.stream_name + inbound_rdata_count.labels(stream_name).inc() + + try: + row = STREAMS_MAP[stream_name].parse_row(cmd.row) + except Exception: + logger.exception("Failed to parse RDATA: %r %r", stream_name, cmd.row) + raise + + if cmd.token is None or stream_name not in self._streams_connected: + # I.e. either this is part of a batch of updates for this stream (in + # which case batch until we get an update for the stream with a non + # None token) or we're currently connecting so we queue up rows. + self._pending_batches.setdefault(stream_name, []).append(row) + else: + # Check if this is the last of a batch of updates + rows = self._pending_batches.pop(stream_name, []) + rows.append(row) + await self.on_rdata(stream_name, cmd.token, rows) + + async def on_rdata(self, stream_name: str, token: int, rows: list): + """Called to handle a batch of replication data with a given stream token. + + Args: + stream_name: name of the replication stream for this batch of rows + token: stream token for this batch of rows + rows: a list of Stream.ROW_TYPE objects as returned by + Stream.parse_row. 
+ """ + logger.debug("Received rdata %s -> %s", stream_name, token) + await self._replication_data_handler.on_rdata(stream_name, token, rows) + + async def on_POSITION(self, cmd: PositionCommand): + stream = self._streams.get(cmd.stream_name) + if not stream: + logger.error("Got POSITION for unknown stream: %s", cmd.stream_name) + return + + # We protect catching up with a linearizer in case the replication + # connection reconnects under us. + with await self._position_linearizer.queue(cmd.stream_name): + # We're about to go and catch up with the stream, so mark as connecting + # to stop RDATA being handled at the same time by removing stream from + # list of connected streams. We also clear any batched up RDATA from + # before we got the POSITION. + self._streams_connected.discard(cmd.stream_name) + self._pending_batches.clear() + + # Find where we previously streamed up to. + current_token = self._replication_data_handler.get_streams_to_replicate().get( + cmd.stream_name + ) + if current_token is None: + logger.warning( + "Got POSITION for stream we're not subscribed to: %s", + cmd.stream_name, + ) + return + + # Fetch all updates between then and now. + limited = True + while limited: + updates, current_token, limited = await stream.get_updates_since( + current_token, cmd.token + ) + if updates: + await self.on_rdata( + cmd.stream_name, + current_token, + [stream.parse_row(update[1]) for update in updates], + ) + + # We've now caught up to position sent to us, notify handler. + await self._replication_data_handler.on_position(cmd.stream_name, cmd.token) + + # Handle any RDATA that came in while we were catching up. + rows = self._pending_batches.pop(cmd.stream_name, []) + if rows: + await self._replication_data_handler.on_rdata( + cmd.stream_name, rows[-1].token, rows + ) + + self._streams_connected.add(cmd.stream_name) + + async def on_SYNC(self, cmd: SyncCommand): + pass + + async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand): + """"Called when get a new REMOTE_SERVER_UP command.""" + self._replication_data_handler.on_remote_server_up(cmd.data) + + def get_currently_syncing_users(self): + """Get the list of currently syncing users (if any). This is called + when a connection has been established and we need to send the + currently syncing users. + """ + return self._presence_handler.get_currently_syncing_users() + + def update_connection(self, connection): + """Called when a connection has been established (or lost with None). + """ + self._connection = connection + + def finished_connecting(self): + """Called when we have successfully subscribed and caught up to all + streams we're interested in. + """ + logger.info("Finished connecting to server") + + # We don't reset the delay any earlier as otherwise if there is a + # problem during start up we'll end up tight looping connecting to the + # server. + if self._factory: + self._factory.resetDelay() + + def send_command(self, cmd: Command): + """Send a command to master (when we get establish a connection if we + don't have one already.) + """ + if self._connection: + self._connection.send_command(cmd) + else: + logger.warning("Dropping command as not connected: %r", cmd.NAME) + + def send_federation_ack(self, token: int): + """Ack data for the federation stream. This allows the master to drop + data stored purely in memory. 
+ """ + self.send_command(FederationAckCommand(token)) + + def send_user_sync( + self, instance_id: str, user_id: str, is_syncing: bool, last_sync_ms: int + ): + """Poke the master that a user has started/stopped syncing. + """ + self.send_command( + UserSyncCommand(instance_id, user_id, is_syncing, last_sync_ms) + ) + + def send_remove_pusher(self, app_id: str, push_key: str, user_id: str): + """Poke the master to remove a pusher for a user + """ + cmd = RemovePusherCommand(app_id, push_key, user_id) + self.send_command(cmd) + + def send_invalidate_cache(self, cache_func: Callable, keys: tuple): + """Poke the master to invalidate a cache. + """ + cmd = InvalidateCacheCommand(cache_func.__name__, keys) + self.send_command(cmd) + + def send_user_ip( + self, + user_id: str, + access_token: str, + ip: str, + user_agent: str, + device_id: str, + last_seen: int, + ): + """Tell the master that the user made a request. + """ + cmd = UserIpCommand(user_id, access_token, ip, user_agent, device_id, last_seen) + self.send_command(cmd) + + def send_remote_server_up(self, server: str): + self.send_command(RemoteServerUpCommand(server)) diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index dae246825f..f2a37f568e 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -46,12 +46,11 @@ indicate which side is sending, these are *not* included on the wire:: > ERROR server stopping * connection closed by server * """ -import abc import fcntl import logging import struct from collections import defaultdict -from typing import Any, DefaultDict, Dict, List, Set +from typing import TYPE_CHECKING, DefaultDict, List from six import iteritems @@ -78,13 +77,12 @@ from synapse.replication.tcp.commands import ( SyncCommand, UserSyncCommand, ) -from synapse.replication.tcp.streams import STREAMS_MAP, Stream from synapse.types import Collection from synapse.util import Clock from synapse.util.stringutils import random_string -MYPY = False -if MYPY: +if TYPE_CHECKING: + from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.server import HomeServer @@ -475,71 +473,6 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): self.streamer.lost_connection(self) -class AbstractReplicationClientHandler(metaclass=abc.ABCMeta): - """ - The interface for the handler that should be passed to - ClientReplicationStreamProtocol - """ - - @abc.abstractmethod - async def on_rdata(self, stream_name, token, rows): - """Called to handle a batch of replication data with a given stream token. - - Args: - stream_name (str): name of the replication stream for this batch of rows - token (int): stream token for this batch of rows - rows (list): a list of Stream.ROW_TYPE objects as returned by - Stream.parse_row. - """ - raise NotImplementedError() - - @abc.abstractmethod - async def on_position(self, stream_name, token): - """Called when we get new position data.""" - raise NotImplementedError() - - @abc.abstractmethod - def on_sync(self, data): - """Called when get a new SYNC command.""" - raise NotImplementedError() - - @abc.abstractmethod - async def on_remote_server_up(self, server: str): - """Called when get a new REMOTE_SERVER_UP command.""" - raise NotImplementedError() - - @abc.abstractmethod - def get_streams_to_replicate(self): - """Called when a new connection has been established and we need to - subscribe to streams. 
- - Returns: - map from stream name to the most recent update we have for - that stream (ie, the point we want to start replicating from) - """ - raise NotImplementedError() - - @abc.abstractmethod - def get_currently_syncing_users(self): - """Get the list of currently syncing users (if any). This is called - when a connection has been established and we need to send the - currently syncing users.""" - raise NotImplementedError() - - @abc.abstractmethod - def update_connection(self, connection): - """Called when a connection has been established (or lost with None). - """ - raise NotImplementedError() - - @abc.abstractmethod - def finished_connecting(self): - """Called when we have successfully subscribed and caught up to all - streams we're interested in. - """ - raise NotImplementedError() - - class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): VALID_INBOUND_COMMANDS = VALID_SERVER_COMMANDS VALID_OUTBOUND_COMMANDS = VALID_CLIENT_COMMANDS @@ -550,7 +483,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): client_name: str, server_name: str, clock: Clock, - handler: AbstractReplicationClientHandler, + command_handler: "ReplicationCommandHandler", ): BaseReplicationStreamProtocol.__init__(self, clock) @@ -558,20 +491,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): self.client_name = client_name self.server_name = server_name - self.handler = handler - - self.streams = { - stream.NAME: stream(hs) for stream in STREAMS_MAP.values() - } # type: Dict[str, Stream] - - # Set of stream names that have been subscribe to, but haven't yet - # caught up with. This is used to track when the client has been fully - # connected to the remote. - self.streams_connecting = set(STREAMS_MAP) # type: Set[str] - - # Map of stream to batched updates. See RdataCommand for info on how - # batching works. - self.pending_batches = {} # type: Dict[str, List[Any]] + self.handler = command_handler def connectionMade(self): self.send_command(NameCommand(self.client_name)) @@ -589,89 +509,39 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): # We've now finished connecting to so inform the client handler self.handler.update_connection(self) + self.handler.finished_connecting() - async def on_SERVER(self, cmd): - if cmd.data != self.server_name: - logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data) - self.send_error("Wrong remote") - - async def on_RDATA(self, cmd): - stream_name = cmd.stream_name - inbound_rdata_count.labels(stream_name).inc() - - try: - row = STREAMS_MAP[stream_name].parse_row(cmd.row) - except Exception: - logger.exception( - "[%s] Failed to parse RDATA: %r %r", self.id(), stream_name, cmd.row - ) - raise - - if cmd.token is None or stream_name in self.streams_connecting: - # I.e. this is part of a batch of updates for this stream. Batch - # until we get an update for the stream with a non None token - self.pending_batches.setdefault(stream_name, []).append(row) - else: - # Check if this is the last of a batch of updates - rows = self.pending_batches.pop(stream_name, []) - rows.append(row) - await self.handler.on_rdata(stream_name, cmd.token, rows) - - async def on_POSITION(self, cmd: PositionCommand): - stream = self.streams.get(cmd.stream_name) - if not stream: - logger.error("Got POSITION for unknown stream: %s", cmd.stream_name) - return - - # Find where we previously streamed up to. 
- current_token = self.handler.get_streams_to_replicate().get(cmd.stream_name)
- if current_token is None:
- logger.warning(
- "Got POSITION for stream we're not subscribed to: %s", cmd.stream_name
- )
- return
-
- # Fetch all updates between then and now.
- limited = True
- while limited:
- updates, current_token, limited = await stream.get_updates_since(
- current_token, cmd.token
- )
-
- # Check if the connection was closed underneath us, if so we bail
- # rather than risk having concurrent catch ups going on.
- if self.state == ConnectionStates.CLOSED:
- return
-
- if updates:
- await self.handler.on_rdata(
- cmd.stream_name,
- current_token,
- [stream.parse_row(update[1]) for update in updates],
- )
+ async def handle_command(self, cmd: Command):
+ """Handle a command we have received over the replication stream.
- # We've now caught up to position sent to us, notify handler.
- await self.handler.on_position(cmd.stream_name, cmd.token)
+ Delegates to `command_handler.on_<COMMAND_NAME>`, which must return an
+ awaitable.
- self.streams_connecting.discard(cmd.stream_name)
- if not self.streams_connecting:
- self.handler.finished_connecting()
+ Args:
+ cmd: received command
+ """
+ handled = False
- # Check if the connection was closed underneath us, if so we bail
- # rather than risk having concurrent catch ups going on.
- if self.state == ConnectionStates.CLOSED:
- return
+ # First call any command handlers on this instance. These are for TCP
+ # specific handling.
+ cmd_func = getattr(self, "on_%s" % (cmd.NAME,), None)
+ if cmd_func:
+ await cmd_func(cmd)
+ handled = True
- # Handle any RDATA that came in while we were catching up.
- rows = self.pending_batches.pop(cmd.stream_name, [])
- if rows:
- await self.handler.on_rdata(cmd.stream_name, rows[-1].token, rows)
+ # Then call out to the handler.
+ cmd_func = getattr(self.handler, "on_%s" % (cmd.NAME,), None) + if cmd_func: + await cmd_func(cmd) + handled = True - async def on_SYNC(self, cmd): - self.handler.on_sync(cmd.data) + if not handled: + logger.warning("Unhandled command: %r", cmd) - async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand): - self.handler.on_remote_server_up(cmd.data) + async def on_SERVER(self, cmd): + if cmd.data != self.server_name: + logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data) + self.send_error("Wrong remote") def replicate(self): """Send the subscription request to the server @@ -768,8 +638,3 @@ tcp_outbound_commands = LaterGauge( for k, count in iteritems(p.outbound_commands_counter) }, ) - -# number of updates received for each RDATA stream -inbound_rdata_count = Counter( - "synapse_replication_tcp_protocol_inbound_rdata_count", "", ["stream_name"] -) diff --git a/synapse/server.py b/synapse/server.py index 9228e1c892..9d273c980c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -87,6 +87,8 @@ from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.notifier import Notifier from synapse.push.action_generator import ActionGenerator from synapse.push.pusherpool import PusherPool +from synapse.replication.tcp.client import ReplicationDataHandler +from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.replication.tcp.resource import ReplicationStreamer from synapse.rest.media.v1.media_repository import ( MediaRepository, @@ -206,6 +208,7 @@ class HomeServer(object): "password_policy_handler", "storage", "replication_streamer", + "replication_data_handler", ] REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"] @@ -468,7 +471,7 @@ class HomeServer(object): return ReadMarkerHandler(self) def build_tcp_replication(self): - raise NotImplementedError() + return ReplicationCommandHandler(self) def build_action_generator(self): return ActionGenerator(self) @@ -562,6 +565,9 @@ class HomeServer(object): def build_replication_streamer(self) -> ReplicationStreamer: return ReplicationStreamer(self) + def build_replication_data_handler(self): + return ReplicationDataHandler(self.get_datastore()) + def remove_pusher(self, app_id, push_key, user_id): return self.get_pusherpool().remove_pusher(app_id, push_key, user_id) diff --git a/synapse/server.pyi b/synapse/server.pyi index 9d1dfa71e7..9013e9bac9 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -19,6 +19,7 @@ import synapse.handlers.set_password import synapse.http.client import synapse.notifier import synapse.replication.tcp.client +import synapse.replication.tcp.handler import synapse.rest.media.v1.media_repository import synapse.server_notices.server_notices_manager import synapse.server_notices.server_notices_sender @@ -106,7 +107,11 @@ class HomeServer(object): pass def get_tcp_replication( self, - ) -> synapse.replication.tcp.client.ReplicationClientHandler: + ) -> synapse.replication.tcp.handler.ReplicationCommandHandler: + pass + def get_replication_data_handler( + self, + ) -> synapse.replication.tcp.client.ReplicationDataHandler: pass def get_federation_registry( self, diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index 2a1e7c7166..8902a5ab69 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -17,8 +17,9 @@ from mock import Mock, NonCallableMock from synapse.replication.tcp.client import ( ReplicationClientFactory, - 
ReplicationClientHandler, + ReplicationDataHandler, ) +from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.storage.database import make_conn @@ -51,15 +52,19 @@ class BaseSlavedStoreTestCase(unittest.HomeserverTestCase): self.event_id = 0 server_factory = ReplicationStreamProtocolFactory(self.hs) - self.streamer = server_factory.streamer + self.streamer = hs.get_replication_streamer() - handler_factory = Mock() - self.replication_handler = ReplicationClientHandler(self.slaved_store) - self.replication_handler.factory = handler_factory + # We now do some gut wrenching so that we have a client that is based + # off of the slave store rather than the main store. + self.replication_handler = ReplicationCommandHandler(self.hs) + self.replication_handler._replication_data_handler = ReplicationDataHandler( + self.slaved_store + ) client_factory = ReplicationClientFactory( self.hs, "client_name", self.replication_handler ) + client_factory.handler = self.replication_handler server = server_factory.buildProtocol(None) client = client_factory.buildProtocol(None) diff --git a/tests/replication/tcp/streams/_base.py b/tests/replication/tcp/streams/_base.py index a755fe2879..32238fe79a 100644 --- a/tests/replication/tcp/streams/_base.py +++ b/tests/replication/tcp/streams/_base.py @@ -15,7 +15,7 @@ from mock import Mock -from synapse.replication.tcp.commands import ReplicateCommand +from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory @@ -26,15 +26,20 @@ from tests.server import FakeTransport class BaseStreamTestCase(unittest.HomeserverTestCase): """Base class for tests of the replication streams""" + def make_homeserver(self, reactor, clock): + self.test_handler = Mock(wraps=TestReplicationDataHandler()) + return self.setup_test_homeserver(replication_data_handler=self.test_handler) + def prepare(self, reactor, clock, hs): # build a replication server - server_factory = ReplicationStreamProtocolFactory(self.hs) - self.streamer = server_factory.streamer + server_factory = ReplicationStreamProtocolFactory(hs) + self.streamer = hs.get_replication_streamer() self.server = server_factory.buildProtocol(None) - self.test_handler = Mock(wraps=TestReplicationClientHandler()) + repl_handler = ReplicationCommandHandler(hs) + repl_handler.handler = self.test_handler self.client = ClientReplicationStreamProtocol( - hs, "client", "test", clock, self.test_handler, + hs, "client", "test", clock, repl_handler, ) self._client_transport = None @@ -69,13 +74,9 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): self.streamer.on_notifier_poke() self.pump(0.1) - def replicate_stream(self): - """Make the client end a REPLICATE command to set up a subscription to a stream""" - self.client.send_command(ReplicateCommand()) - -class TestReplicationClientHandler(object): - """Drop-in for ReplicationClientHandler which just collects RDATA rows""" +class TestReplicationDataHandler: + """Drop-in for ReplicationDataHandler which just collects RDATA rows""" def __init__(self): self.streams = set() @@ -88,18 +89,9 @@ class TestReplicationClientHandler(object): positions[stream] = max(token, positions.get(stream, 0)) return positions - def get_currently_syncing_users(self): - return [] - - def update_connection(self, connection): - pass - - def 
finished_connecting(self): - pass - - async def on_position(self, stream_name, token): - """Called when we get new position data.""" - async def on_rdata(self, stream_name, token, rows): for r in rows: self._received_rdata_rows.append((stream_name, token, r)) + + async def on_position(self, stream_name, token): + pass diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py index 0ec0825a0e..a0206f7363 100644 --- a/tests/replication/tcp/streams/test_receipts.py +++ b/tests/replication/tcp/streams/test_receipts.py @@ -24,7 +24,6 @@ class ReceiptsStreamTestCase(BaseStreamTestCase): self.reconnect() # make the client subscribe to the receipts stream - self.replicate_stream() self.test_handler.streams.add("receipts") # tell the master to send a new receipt -- cgit 1.4.1 From b21000a44fa8b6f5d28a2089033f76767dff868b Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 6 Apr 2020 12:35:30 +0100 Subject: Improve error responses when a remote server doesn't allow you to access its public rooms list (#6899) --- changelog.d/6899.bugfix | 1 + synapse/handlers/room_list.py | 23 ++++++++++++----------- synapse/rest/client/v1/room.py | 33 ++++++++++++++++++++------------- 3 files changed, 33 insertions(+), 24 deletions(-) create mode 100644 changelog.d/6899.bugfix (limited to 'changelog.d') diff --git a/changelog.d/6899.bugfix b/changelog.d/6899.bugfix new file mode 100644 index 0000000000..efa8a40b1f --- /dev/null +++ b/changelog.d/6899.bugfix @@ -0,0 +1 @@ +Improve error responses when accessing remote public room lists. \ No newline at end of file diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 0b7d3da680..59c9906b31 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -15,6 +15,7 @@ import logging from collections import namedtuple +from typing import Any, Dict, Optional from six import iteritems @@ -105,22 +106,22 @@ class RoomListHandler(BaseHandler): @defer.inlineCallbacks def _get_public_room_list( self, - limit=None, - since_token=None, - search_filter=None, - network_tuple=EMPTY_THIRD_PARTY_ID, - from_federation=False, - ): + limit: Optional[int] = None, + since_token: Optional[str] = None, + search_filter: Optional[Dict] = None, + network_tuple: ThirdPartyInstanceID = EMPTY_THIRD_PARTY_ID, + from_federation: bool = False, + ) -> Dict[str, Any]: """Generate a public room list. Args: - limit (int|None): Maximum amount of rooms to return. - since_token (str|None) - search_filter (dict|None): Dictionary to filter rooms by. - network_tuple (ThirdPartyInstanceID): Which public list to use. + limit: Maximum amount of rooms to return. + since_token: + search_filter: Dictionary to filter rooms by. + network_tuple: Which public list to use. This can be (None, None) to indicate the main list, or a particular appservice and network id to use an appservice specific one. Setting to None returns all public rooms across all lists. - from_federation (bool): Whether this request originated from a + from_federation: Whether this request originated from a federating server or a client. Used for room filtering. 
""" diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index bffd43de5f..6b5830cc3f 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -27,6 +27,7 @@ from synapse.api.constants import EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, + HttpResponseException, InvalidClientCredentialsError, SynapseError, ) @@ -364,10 +365,13 @@ class PublicRoomListRestServlet(TransactionRestServlet): limit = None handler = self.hs.get_room_list_handler() - if server: - data = await handler.get_remote_public_room_list( - server, limit=limit, since_token=since_token - ) + if server and server != self.hs.config.server_name: + try: + data = await handler.get_remote_public_room_list( + server, limit=limit, since_token=since_token + ) + except HttpResponseException as e: + raise e.to_synapse_error() else: data = await handler.get_local_public_room_list( limit=limit, since_token=since_token @@ -404,15 +408,18 @@ class PublicRoomListRestServlet(TransactionRestServlet): limit = None handler = self.hs.get_room_list_handler() - if server: - data = await handler.get_remote_public_room_list( - server, - limit=limit, - since_token=since_token, - search_filter=search_filter, - include_all_networks=include_all_networks, - third_party_instance_id=third_party_instance_id, - ) + if server and server != self.hs.config.server_name: + try: + data = await handler.get_remote_public_room_list( + server, + limit=limit, + since_token=since_token, + search_filter=search_filter, + include_all_networks=include_all_networks, + third_party_instance_id=third_party_instance_id, + ) + except HttpResponseException as e: + raise e.to_synapse_error() else: data = await handler.get_local_public_room_list( limit=limit, -- cgit 1.4.1 From 4b0f00ad0c6bbe153f82b95980a2ba16238b4449 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 6 Apr 2020 12:40:34 +0100 Subject: Remove stream before/after debug log lines (#7207) --- changelog.d/7207.misc | 1 + synapse/storage/data_stores/main/stream.py | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) create mode 100644 changelog.d/7207.misc (limited to 'changelog.d') diff --git a/changelog.d/7207.misc b/changelog.d/7207.misc new file mode 100644 index 0000000000..4f9b6a1089 --- /dev/null +++ b/changelog.d/7207.misc @@ -0,0 +1 @@ +Remove some extraneous debugging log lines. \ No newline at end of file diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py index ada5cce6c2..e89f0bffb5 100644 --- a/synapse/storage/data_stores/main/stream.py +++ b/synapse/storage/data_stores/main/stream.py @@ -481,11 +481,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): room_id, limit, end_token ) - logger.debug("stream before") events = yield self.get_events_as_list( [r.event_id for r in rows], get_prev_content=True ) - logger.debug("stream after") self._set_before_and_after(events, rows) -- cgit 1.4.1 From 82498ee9019747eb86ed753e08fac0990d4ac8b9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Apr 2020 10:51:07 +0100 Subject: Move server command handling out of TCP protocol (#7187) This completes the merging of server and client command processing. 
--- changelog.d/7187.misc | 1 + synapse/replication/tcp/handler.py | 177 ++++++++++++++++++++++++++++++++---- synapse/replication/tcp/protocol.py | 165 +++++++++++---------------------- synapse/replication/tcp/resource.py | 163 ++++++--------------------------- 4 files changed, 237 insertions(+), 269 deletions(-) create mode 100644 changelog.d/7187.misc (limited to 'changelog.d') diff --git a/changelog.d/7187.misc b/changelog.d/7187.misc new file mode 100644 index 0000000000..60d68ae877 --- /dev/null +++ b/changelog.d/7187.misc @@ -0,0 +1 @@ +Move server command handling out of TCP protocol. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 12a1cfd6d1..8ec0119697 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -19,8 +19,10 @@ from typing import Any, Callable, Dict, List, Optional, Set from prometheus_client import Counter +from synapse.metrics import LaterGauge from synapse.replication.tcp.client import ReplicationClientFactory from synapse.replication.tcp.commands import ( + ClearUserSyncsCommand, Command, FederationAckCommand, InvalidateCacheCommand, @@ -28,10 +30,12 @@ from synapse.replication.tcp.commands import ( RdataCommand, RemoteServerUpCommand, RemovePusherCommand, + ReplicateCommand, SyncCommand, UserIpCommand, UserSyncCommand, ) +from synapse.replication.tcp.protocol import AbstractConnection from synapse.replication.tcp.streams import STREAMS_MAP, Stream from synapse.util.async_helpers import Linearizer @@ -42,6 +46,13 @@ logger = logging.getLogger(__name__) inbound_rdata_count = Counter( "synapse_replication_tcp_protocol_inbound_rdata_count", "", ["stream_name"] ) +user_sync_counter = Counter("synapse_replication_tcp_resource_user_sync", "") +federation_ack_counter = Counter("synapse_replication_tcp_resource_federation_ack", "") +remove_pusher_counter = Counter("synapse_replication_tcp_resource_remove_pusher", "") +invalidate_cache_counter = Counter( + "synapse_replication_tcp_resource_invalidate_cache", "" +) +user_ip_cache_counter = Counter("synapse_replication_tcp_resource_user_ip_cache", "") class ReplicationCommandHandler: @@ -52,6 +63,10 @@ class ReplicationCommandHandler: def __init__(self, hs): self._replication_data_handler = hs.get_replication_data_handler() self._presence_handler = hs.get_presence_handler() + self._store = hs.get_datastore() + self._notifier = hs.get_notifier() + self._clock = hs.get_clock() + self._instance_id = hs.get_instance_id() # Set of streams that we've caught up with. self._streams_connected = set() # type: Set[str] @@ -69,8 +84,26 @@ class ReplicationCommandHandler: # The factory used to create connections. self._factory = None # type: Optional[ReplicationClientFactory] - # The current connection. None if we are currently (re)connecting - self._connection = None + # The currently connected connections. 
+ self._connections = [] # type: List[AbstractConnection]
+
+ LaterGauge(
+ "synapse_replication_tcp_resource_total_connections",
+ "",
+ [],
+ lambda: len(self._connections),
+ )
+
+ self._is_master = hs.config.worker_app is None
+
+ self._federation_sender = None
+ if self._is_master and not hs.config.send_federation:
+ self._federation_sender = hs.get_federation_sender()
+
+ self._server_notices_sender = None
+ if self._is_master:
+ self._server_notices_sender = hs.get_server_notices_sender()
+ self._notifier.add_remote_server_up_callback(self.send_remote_server_up)
 def start_replication(self, hs):
 """Helper method to start a replication connection to the remote server
@@ -82,6 +115,70 @@ class ReplicationCommandHandler:
 port = hs.config.worker_replication_port
 hs.get_reactor().connectTCP(host, port, self._factory)
+ async def on_REPLICATE(self, cmd: ReplicateCommand):
+ # We only want to announce positions by the writer of the streams.
+ # Currently this is just the master process.
+ if not self._is_master:
+ return
+
+ for stream_name, stream in self._streams.items():
+ current_token = stream.current_token()
+ self.send_command(PositionCommand(stream_name, current_token))
+
+ async def on_USER_SYNC(self, cmd: UserSyncCommand):
+ user_sync_counter.inc()
+
+ if self._is_master:
+ await self._presence_handler.update_external_syncs_row(
+ cmd.instance_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms
+ )
+
+ async def on_CLEAR_USER_SYNC(self, cmd: ClearUserSyncsCommand):
+ if self._is_master:
+ await self._presence_handler.update_external_syncs_clear(cmd.instance_id)
+
+ async def on_FEDERATION_ACK(self, cmd: FederationAckCommand):
+ federation_ack_counter.inc()
+
+ if self._federation_sender:
+ self._federation_sender.federation_ack(cmd.token)
+
+ async def on_REMOVE_PUSHER(self, cmd: RemovePusherCommand):
+ remove_pusher_counter.inc()
+
+ if self._is_master:
+ await self._store.delete_pusher_by_app_id_pushkey_user_id(
+ app_id=cmd.app_id, pushkey=cmd.push_key, user_id=cmd.user_id
+ )
+
+ self._notifier.on_new_replication_data()
+
+ async def on_INVALIDATE_CACHE(self, cmd: InvalidateCacheCommand):
+ invalidate_cache_counter.inc()
+
+ if self._is_master:
+ # We invalidate the cache locally, but then also stream that to other
+ # workers.
+ await self._store.invalidate_cache_and_stream(
+ cmd.cache_func, tuple(cmd.keys)
+ )
+
+ async def on_USER_IP(self, cmd: UserIpCommand):
+ user_ip_cache_counter.inc()
+
+ if self._is_master:
+ await self._store.insert_client_ip(
+ cmd.user_id,
+ cmd.access_token,
+ cmd.ip,
+ cmd.user_agent,
+ cmd.device_id,
+ cmd.last_seen,
+ )
+
+ if self._server_notices_sender:
+ await self._server_notices_sender.on_user_ip(cmd.user_id)
 async def on_RDATA(self, cmd: RdataCommand):
 stream_name = cmd.stream_name
 inbound_rdata_count.labels(stream_name).inc()
@@ -174,6 +271,9 @@
 """Called when we get a new REMOTE_SERVER_UP command."""
 self._replication_data_handler.on_remote_server_up(cmd.data)
+ if self._is_master:
+ self._notifier.notify_remote_server_up(cmd.data)
+
 def get_currently_syncing_users(self):
 """Get the list of currently syncing users (if any). This is called
 when a connection has been established and we need to send the
@@ -181,29 +281,63 @@
 """
 return self._presence_handler.get_currently_syncing_users()
- def update_connection(self, connection):
- """Called when a connection has been established (or lost with None).
+ def new_connection(self, connection: AbstractConnection):
+ """Called when we have a new connection.
 """
- self._connection = connection
+ self._connections.append(connection)
+
+ # If we are connected to replication as a client (rather than a server)
+ # we need to reset the reconnection delay on the client factory (which
+ # is used to do exponential back off when the connection drops).
+ #
+ # Ideally we would reset the delay when we've "fully established" the
+ # connection (for some definition thereof) to stop us from tightlooping
+ # on reconnection if something fails after this point and we drop the
+ # connection. Unfortunately, we don't really have a better definition of
+ # "fully established" than the connection being established.
+ if self._factory:
+ self._factory.resetDelay()
+
+ # Tell the server if we have any users currently syncing (should only
+ # happen on synchrotrons)
+ currently_syncing = self.get_currently_syncing_users()
+ now = self._clock.time_msec()
+ for user_id in currently_syncing:
+ connection.send_command(
+ UserSyncCommand(self._instance_id, user_id, True, now)
+ )
- def finished_connecting(self):
- """Called when we have successfully subscribed and caught up to all
- streams we're interested in.
+ def lost_connection(self, connection: AbstractConnection):
+ """Called when a connection is closed/lost.
 """
- logger.info("Finished connecting to server")
+ try:
+ self._connections.remove(connection)
+ except ValueError:
+ pass
- # We don't reset the delay any earlier as otherwise if there is a
- # problem during start up we'll end up tight looping connecting to the
- # server.
- if self._factory:
- self._factory.resetDelay()
+ def connected(self) -> bool:
+ """Do we have any replication connections open?
+
+ Is used by e.g. `ReplicationStreamer` to no-op if nothing is connected.
+ """
+ return bool(self._connections)
 def send_command(self, cmd: Command):
- """Send a command to master (the command is dropped if we are not
- currently connected.)
+ """Send a command to all connected connections.
 """
- if self._connection:
- self._connection.send_command(cmd)
+ if self._connections:
+ for connection in self._connections:
+ try:
+ connection.send_command(cmd)
+ except Exception:
+ # We probably want to catch some types of exceptions here
+ # and log them as warnings (e.g. connection gone), but I
+ # can't find what those exception types would be.
+ logger.exception(
+ "Failed to write command %s to connection %s",
+ cmd.NAME,
+ connection,
+ )
 else:
 logger.warning("Dropping command as not connected: %r", cmd.NAME)
@@ -250,3 +384,10 @@
 def send_remote_server_up(self, server: str):
 self.send_command(RemoteServerUpCommand(server))
+
+ def stream_update(self, stream_name: str, token: str, data: Any):
+ """Called when a new update is available to stream to clients.
+
+ We need to check if the client is interested in the stream or not
+ """
+ self.send_command(RdataCommand(stream_name, token, data))
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index f2a37f568e..9aabb9c586 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -46,6 +46,7 @@ indicate which side is sending, these are *not* included on the wire::
 > ERROR server stopping
 * connection closed by server *
 """
+import abc
 import fcntl
 import logging
 import struct
@@ -69,13 +70,8 @@ from synapse.replication.tcp.commands import (
 ErrorCommand,
 NameCommand,
 PingCommand,
- PositionCommand,
- RdataCommand,
- RemoteServerUpCommand,
 ReplicateCommand,
 ServerCommand,
- SyncCommand,
- UserSyncCommand,
 )
 from synapse.types import Collection
 from synapse.util import Clock
@@ -118,7 +114,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
 are only sent by the server.
 On receiving a new command it calls `on_<COMMAND_NAME>` with the parsed
- command.
+ command before delegating to `ReplicationCommandHandler.on_<COMMAND_NAME>`.
 It also sends `PING` periodically, and correctly times out remote connections
 (if they send a `PING` command)
@@ -134,8 +130,9 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
 max_line_buffer = 10000
- def __init__(self, clock):
+ def __init__(self, clock: Clock, handler: "ReplicationCommandHandler"):
 self.clock = clock
+ self.command_handler = handler
 self.last_received_command = self.clock.time_msec()
 self.last_sent_command = 0
@@ -175,6 +172,8 @@
 # can time us out.
 self.send_command(PingCommand(self.clock.time_msec()))
+ self.command_handler.new_connection(self)
+
 def send_ping(self):
 """Periodically sends a ping and checks if we should close the connection
 due to the other side timing out.
@@ -243,13 +242,31 @@
 async def handle_command(self, cmd: Command):
 """Handle a command we have received over the replication stream.
- By default delegates to on_<COMMAND_NAME>, which should return an awaitable.
+ First calls `self.on_<COMMAND_NAME>` if it exists, then calls
+ `self.command_handler.on_<COMMAND_NAME>` if it exists. This allows for
+ protocol level handling of commands (e.g. PINGs), before delegating to
+ the handler.
 Args:
 cmd: received command
 """
- handler = getattr(self, "on_%s" % (cmd.NAME,))
- await handler(cmd)
+ handled = False
+
+ # First call any command handlers on this instance. These are for TCP
+ # specific handling.
+ cmd_func = getattr(self, "on_%s" % (cmd.NAME,), None)
+ if cmd_func:
+ await cmd_func(cmd)
+ handled = True
+
+ # Then call out to the handler.
+ cmd_func = getattr(self.command_handler, "on_%s" % (cmd.NAME,), None) + if cmd_func: + await cmd_func(cmd) + handled = True + + if not handled: + logger.warning("Unhandled command: %r", cmd) def close(self): logger.warning("[%s] Closing connection", self.id()) @@ -378,6 +395,8 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): self.state = ConnectionStates.CLOSED self.pending_commands = [] + self.command_handler.lost_connection(self) + if self.transport: self.transport.unregisterProducer() @@ -404,74 +423,21 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS VALID_OUTBOUND_COMMANDS = VALID_SERVER_COMMANDS - def __init__(self, server_name, clock, streamer): - BaseReplicationStreamProtocol.__init__(self, clock) # Old style class + def __init__( + self, server_name: str, clock: Clock, handler: "ReplicationCommandHandler" + ): + super().__init__(clock, handler) self.server_name = server_name - self.streamer = streamer def connectionMade(self): self.send_command(ServerCommand(self.server_name)) - BaseReplicationStreamProtocol.connectionMade(self) - self.streamer.new_connection(self) + super().connectionMade() async def on_NAME(self, cmd): logger.info("[%s] Renamed to %r", self.id(), cmd.data) self.name = cmd.data - async def on_USER_SYNC(self, cmd): - await self.streamer.on_user_sync( - cmd.instance_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms - ) - - async def on_CLEAR_USER_SYNC(self, cmd): - await self.streamer.on_clear_user_syncs(cmd.instance_id) - - async def on_REPLICATE(self, cmd): - # Subscribe to all streams we're publishing to. - for stream_name in self.streamer.streams_by_name: - current_token = self.streamer.get_stream_token(stream_name) - self.send_command(PositionCommand(stream_name, current_token)) - - async def on_FEDERATION_ACK(self, cmd): - self.streamer.federation_ack(cmd.token) - - async def on_REMOVE_PUSHER(self, cmd): - await self.streamer.on_remove_pusher(cmd.app_id, cmd.push_key, cmd.user_id) - - async def on_INVALIDATE_CACHE(self, cmd): - await self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys) - - async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand): - self.streamer.on_remote_server_up(cmd.data) - - async def on_USER_IP(self, cmd): - self.streamer.on_user_ip( - cmd.user_id, - cmd.access_token, - cmd.ip, - cmd.user_agent, - cmd.device_id, - cmd.last_seen, - ) - - def stream_update(self, stream_name, token, data): - """Called when a new update is available to stream to clients. 
- - We need to check if the client is interested in the stream or not - """ - self.send_command(RdataCommand(stream_name, token, data)) - - def send_sync(self, data): - self.send_command(SyncCommand(data)) - - def send_remote_server_up(self, server: str): - self.send_command(RemoteServerUpCommand(server)) - - def on_connection_closed(self): - BaseReplicationStreamProtocol.on_connection_closed(self) - self.streamer.lost_connection(self) - class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): VALID_INBOUND_COMMANDS = VALID_SERVER_COMMANDS @@ -485,59 +451,18 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): clock: Clock, command_handler: "ReplicationCommandHandler", ): - BaseReplicationStreamProtocol.__init__(self, clock) - - self.instance_id = hs.get_instance_id() + super().__init__(clock, command_handler) self.client_name = client_name self.server_name = server_name - self.handler = command_handler def connectionMade(self): self.send_command(NameCommand(self.client_name)) - BaseReplicationStreamProtocol.connectionMade(self) + super().connectionMade() # Once we've connected subscribe to the necessary streams self.replicate() - # Tell the server if we have any users currently syncing (should only - # happen on synchrotrons) - currently_syncing = self.handler.get_currently_syncing_users() - now = self.clock.time_msec() - for user_id in currently_syncing: - self.send_command(UserSyncCommand(self.instance_id, user_id, True, now)) - - # We've now finished connecting to so inform the client handler - self.handler.update_connection(self) - self.handler.finished_connecting() - - async def handle_command(self, cmd: Command): - """Handle a command we have received over the replication stream. - - Delegates to `command_handler.on_`, which must return an - awaitable. - - Args: - cmd: received command - """ - handled = False - - # First call any command handlers on this instance. These are for TCP - # specific handling. - cmd_func = getattr(self, "on_%s" % (cmd.NAME,), None) - if cmd_func: - await cmd_func(cmd) - handled = True - - # Then call out to the handler. - cmd_func = getattr(self.handler, "on_%s" % (cmd.NAME,), None) - if cmd_func: - await cmd_func(cmd) - handled = True - - if not handled: - logger.warning("Unhandled command: %r", cmd) - async def on_SERVER(self, cmd): if cmd.data != self.server_name: logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data) @@ -550,9 +475,21 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): self.send_command(ReplicateCommand()) - def on_connection_closed(self): - BaseReplicationStreamProtocol.on_connection_closed(self) - self.handler.update_connection(None) + +class AbstractConnection(abc.ABC): + """An interface for replication connections. + """ + + @abc.abstractmethod + def send_command(self, cmd: Command): + """Send the command down the connection + """ + pass + + +# This tells python that `BaseReplicationStreamProtocol` implements the +# interface. 
+AbstractConnection.register(BaseReplicationStreamProtocol)
 # The following simply registers metrics for the replication connections
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index 30021ee309..b2d6baa2a2 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -17,7 +17,7 @@
 import logging
 import random
-from typing import Any, Dict, List
+from typing import Dict
 from six import itervalues
@@ -25,24 +25,14 @@
 from prometheus_client import Counter
 from twisted.internet.protocol import Factory
-from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.util.metrics import Measure, measure_func
-
-from .protocol import ServerReplicationStreamProtocol
-from .streams import STREAMS_MAP, Stream
-from .streams.federation import FederationStream
+from synapse.replication.tcp.protocol import ServerReplicationStreamProtocol
+from synapse.replication.tcp.streams import STREAMS_MAP, FederationStream, Stream
+from synapse.util.metrics import Measure
 stream_updates_counter = Counter(
 "synapse_replication_tcp_resource_stream_updates", "", ["stream_name"]
 )
-user_sync_counter = Counter("synapse_replication_tcp_resource_user_sync", "")
-federation_ack_counter = Counter("synapse_replication_tcp_resource_federation_ack", "")
-remove_pusher_counter = Counter("synapse_replication_tcp_resource_remove_pusher", "")
-invalidate_cache_counter = Counter(
- "synapse_replication_tcp_resource_invalidate_cache", ""
-)
-user_ip_cache_counter = Counter("synapse_replication_tcp_resource_user_ip_cache", "")
 logger = logging.getLogger(__name__)
@@ -52,13 +42,23 @@ class ReplicationStreamProtocolFactory(Factory):
 """
 def __init__(self, hs):
- self.streamer = hs.get_replication_streamer()
+ self.command_handler = hs.get_tcp_replication()
 self.clock = hs.get_clock()
 self.server_name = hs.config.server_name
+ # If we've created a `ReplicationStreamProtocolFactory` then we're
+ # almost certainly registering a replication listener, so let's ensure
+ # that we've started a `ReplicationStreamer` instance to actually push
+ # data.
+ #
+ # (This is a bit of a weird place to do this, but the alternatives such
+ # as putting this in `HomeServer.setup()` require either passing the
+ # listener config again or always starting a `ReplicationStreamer`.)
+ hs.get_replication_streamer()
+
 def buildProtocol(self, addr):
 return ServerReplicationStreamProtocol(
- self.server_name, self.clock, self.streamer
+ self.server_name, self.clock, self.command_handler
 )
@@ -78,16 +78,6 @@ class ReplicationStreamer(object):
 self._replication_torture_level = hs.config.replication_torture_level
- # Current connections.
- self.connections = [] # type: List[ServerReplicationStreamProtocol]
-
- LaterGauge(
- "synapse_replication_tcp_resource_total_connections",
- "",
- [],
- lambda: len(self.connections),
- )
-
 # List of streams that clients can subscribe to.
 # We only support federation stream if federation sending has been
 # disabled on the master.
@@ -104,18 +94,12 @@ class ReplicationStreamer(object):
 self.federation_sender = hs.get_federation_sender()
 self.notifier.add_replication_callback(self.on_notifier_poke)
- self.notifier.add_remote_server_up_callback(self.send_remote_server_up)
 # Keeps track of whether we are currently checking for updates
 self.is_looping = False
 self.pending_updates = False
- hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.on_shutdown)
-
- def on_shutdown(self):
- # close all connections on shutdown
- for conn in self.connections:
- conn.send_error("server shutting down")
+ self.command_handler = hs.get_tcp_replication()
 def get_streams(self) -> Dict[str, Stream]:
 """Get a map from stream name to stream instance.
@@ -129,7 +113,7 @@ class ReplicationStreamer(object):
 This should get called each time new data is available, even if it
 is currently being executed, so that nothing gets missed
 """
- if not self.connections:
+ if not self.command_handler.connected():
 # Don't bother if nothing is listening. We still need to advance
 # the stream tokens otherwise they'll fall behind forever
 for stream in self.streams:
@@ -186,9 +170,7 @@ class ReplicationStreamer(object):
 raise
 logger.debug(
- "Sending %d updates to %d connections",
- len(updates),
- len(self.connections),
+ "Sending %d updates", len(updates),
 )
 if updates:
@@ -204,112 +186,19 @@ class ReplicationStreamer(object):
 # token. See RdataCommand for more details.
 batched_updates = _batch_updates(updates)
- for conn in self.connections:
- for token, row in batched_updates:
- try:
- conn.stream_update(stream.NAME, token, row)
- except Exception:
- logger.exception("Failed to replicate")
+ for token, row in batched_updates:
+ try:
+ self.command_handler.stream_update(
+ stream.NAME, token, row
+ )
+ except Exception:
+ logger.exception("Failed to replicate")
 logger.debug("No more pending updates, breaking poke loop")
 finally:
 self.pending_updates = False
 self.is_looping = False
- def get_stream_token(self, stream_name):
- """For a given stream get all updates since token. This is called when
- a client first subscribes to a stream.
- """
- stream = self.streams_by_name.get(stream_name, None)
- if not stream:
- raise Exception("unknown stream %s", stream_name)
-
- return stream.current_token()
-
- @measure_func("repl.federation_ack")
- def federation_ack(self, token):
- """We've received an ack for federation stream from a client.
- """
- federation_ack_counter.inc()
- if self.federation_sender:
- self.federation_sender.federation_ack(token)
-
- @measure_func("repl.on_user_sync")
- async def on_user_sync(self, instance_id, user_id, is_syncing, last_sync_ms):
- """A client has started/stopped syncing on a worker.
- """
- user_sync_counter.inc()
- await self.presence_handler.update_external_syncs_row(
- instance_id, user_id, is_syncing, last_sync_ms
- )
-
- async def on_clear_user_syncs(self, instance_id):
- """A replication client wants us to drop all their UserSync data.
- """ - await self.presence_handler.update_external_syncs_clear(instance_id) - - @measure_func("repl.on_remove_pusher") - async def on_remove_pusher(self, app_id, push_key, user_id): - """A client has asked us to remove a pusher - """ - remove_pusher_counter.inc() - await self.store.delete_pusher_by_app_id_pushkey_user_id( - app_id=app_id, pushkey=push_key, user_id=user_id - ) - - self.notifier.on_new_replication_data() - - @measure_func("repl.on_invalidate_cache") - async def on_invalidate_cache(self, cache_func: str, keys: List[Any]): - """The client has asked us to invalidate a cache - """ - invalidate_cache_counter.inc() - - # We invalidate the cache locally, but then also stream that to other - # workers. - await self.store.invalidate_cache_and_stream(cache_func, tuple(keys)) - - @measure_func("repl.on_user_ip") - async def on_user_ip( - self, user_id, access_token, ip, user_agent, device_id, last_seen - ): - """The client saw a user request - """ - user_ip_cache_counter.inc() - await self.store.insert_client_ip( - user_id, access_token, ip, user_agent, device_id, last_seen - ) - await self._server_notices_sender.on_user_ip(user_id) - - @measure_func("repl.on_remote_server_up") - def on_remote_server_up(self, server: str): - self.notifier.notify_remote_server_up(server) - - def send_remote_server_up(self, server: str): - for conn in self.connections: - conn.send_remote_server_up(server) - - def send_sync_to_all_connections(self, data): - """Sends a SYNC command to all clients. - - Used in tests. - """ - for conn in self.connections: - conn.send_sync(data) - - def new_connection(self, connection): - """A new client connection has been established - """ - self.connections.append(connection) - - def lost_connection(self, connection): - """A client connection has been lost - """ - try: - self.connections.remove(connection) - except ValueError: - pass - def _batch_updates(updates): """Takes a list of updates of form [(token, row)] and sets the token to -- cgit 1.4.1 From ce72355d7f67a986d60a7d86489b1b40f93fb152 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Apr 2020 11:01:04 +0100 Subject: Fix race in replication (#7226) Fixes a race between handling `POSITION` and `RDATA` commands. We do this by simply linearizing handling of them. --- changelog.d/7226.misc | 1 + synapse/replication/tcp/handler.py | 73 +++++++++++++++++---------- synapse/replication/tcp/streams/_base.py | 3 +- synapse/storage/data_stores/main/push_rule.py | 40 +++++++-------- 4 files changed, 68 insertions(+), 49 deletions(-) create mode 100644 changelog.d/7226.misc (limited to 'changelog.d') diff --git a/changelog.d/7226.misc b/changelog.d/7226.misc new file mode 100644 index 0000000000..676f285377 --- /dev/null +++ b/changelog.d/7226.misc @@ -0,0 +1 @@ +Move catchup of replication streams logic to worker. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 8ec0119697..dd71d1bc34 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -189,16 +189,34 @@ class ReplicationCommandHandler: logger.exception("Failed to parse RDATA: %r %r", stream_name, cmd.row) raise - if cmd.token is None or stream_name not in self._streams_connected: - # I.e. either this is part of a batch of updates for this stream (in - # which case batch until we get an update for the stream with a non - # None token) or we're currently connecting so we queue up rows. 
- self._pending_batches.setdefault(stream_name, []).append(row) - else: - # Check if this is the last of a batch of updates - rows = self._pending_batches.pop(stream_name, []) - rows.append(row) - await self.on_rdata(stream_name, cmd.token, rows) + # We linearize here for two reasons: + # 1. so we don't try and concurrently handle multiple rows for the + # same stream, and + # 2. so we don't race with getting a POSITION command and fetching + # missing RDATA. + with await self._position_linearizer.queue(cmd.stream_name): + if stream_name not in self._streams_connected: + # If the stream isn't marked as connected then we haven't seen a + # `POSITION` command yet, and so we may have missed some rows. + # Let's drop the row for now, on the assumption we'll receive a + # `POSITION` soon and we'll catch up correctly then. + logger.warning( + "Discarding RDATA for unconnected stream %s -> %s", + stream_name, + cmd.token, + ) + return + + if cmd.token is None: + # I.e. this is part of a batch of updates for this stream (in + # which case batch until we get an update for the stream with a non + # None token). + self._pending_batches.setdefault(stream_name, []).append(row) + else: + # Check if this is the last of a batch of updates + rows = self._pending_batches.pop(stream_name, []) + rows.append(row) + await self.on_rdata(stream_name, cmd.token, rows) async def on_rdata(self, stream_name: str, token: int, rows: list): """Called to handle a batch of replication data with a given stream token. @@ -221,12 +239,13 @@ class ReplicationCommandHandler: # We protect catching up with a linearizer in case the replication # connection reconnects under us. with await self._position_linearizer.queue(cmd.stream_name): - # We're about to go and catch up with the stream, so mark as connecting - # to stop RDATA being handled at the same time by removing stream from - # list of connected streams. We also clear any batched up RDATA from - # before we got the POSITION. + # We're about to go and catch up with the stream, so remove from set + # of connected streams. self._streams_connected.discard(cmd.stream_name) - self._pending_batches.clear() + + # We clear the pending batches for the stream as the fetching of the + # missing updates below will fetch all rows in the batch. + self._pending_batches.pop(cmd.stream_name, []) # Find where we previously streamed up to. current_token = self._replication_data_handler.get_streams_to_replicate().get( @@ -239,12 +258,17 @@ class ReplicationCommandHandler: ) return - # Fetch all updates between then and now. - limited = True - while limited: - updates, current_token, limited = await stream.get_updates_since( - current_token, cmd.token - ) + # If the position token matches our current token then we're up to + # date and there's nothing to do. Otherwise, fetch all updates + # between then and now. + missing_updates = cmd.token != current_token + while missing_updates: + ( + updates, + current_token, + missing_updates, + ) = await stream.get_updates_since(current_token, cmd.token) + if updates: await self.on_rdata( cmd.stream_name, @@ -255,13 +279,6 @@ class ReplicationCommandHandler: # We've now caught up to position sent to us, notify handler. await self._replication_data_handler.on_position(cmd.stream_name, cmd.token) - # Handle any RDATA that came in while we were catching up. 
- rows = self._pending_batches.pop(cmd.stream_name, [])
- if rows:
- await self._replication_data_handler.on_rdata(
- cmd.stream_name, rows[-1].token, rows
- )
-
 self._streams_connected.add(cmd.stream_name)
 async def on_SYNC(self, cmd: SyncCommand):
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index c14dff6c64..f56a0fd4b5 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -168,12 +168,13 @@ def make_http_update_function(
 async def update_function(
 from_token: int, upto_token: int, limit: int
 ) -> Tuple[List[Tuple[int, tuple]], int, bool]:
- return await client(
+ result = await client(
 stream_name=stream_name,
 from_token=from_token,
 upto_token=upto_token,
 limit=limit,
 )
+ return result["updates"], result["upto_token"], result["limited"]
 return update_function
diff --git a/synapse/storage/data_stores/main/push_rule.py b/synapse/storage/data_stores/main/push_rule.py
index 46f9bda773..b3faafa0a4 100644
--- a/synapse/storage/data_stores/main/push_rule.py
+++ b/synapse/storage/data_stores/main/push_rule.py
@@ -334,6 +334,26 @@ class PushRulesWorkerStore(
 results.setdefault(row["user_name"], {})[row["rule_id"]] = enabled
 return results
+ def get_all_push_rule_updates(self, last_id, current_id, limit):
+ """Get all the push rules changes that have happened on the server"""
+ if last_id == current_id:
+ return defer.succeed([])
+
+ def get_all_push_rule_updates_txn(txn):
+ sql = (
+ "SELECT stream_id, event_stream_ordering, user_id, rule_id,"
+ " op, priority_class, priority, conditions, actions"
+ " FROM push_rules_stream"
+ " WHERE ? < stream_id AND stream_id <= ?"
+ " ORDER BY stream_id ASC LIMIT ?"
+ )
+ txn.execute(sql, (last_id, current_id, limit))
+ return txn.fetchall()
+
+ return self.db.runInteraction(
+ "get_all_push_rule_updates", get_all_push_rule_updates_txn
+ )
+
 class PushRuleStore(PushRulesWorkerStore):
 @defer.inlineCallbacks
@@ -685,26 +705,6 @@ class PushRuleStore(PushRulesWorkerStore):
 self.push_rules_stream_cache.entity_has_changed, user_id, stream_id
 )
- def get_all_push_rule_updates(self, last_id, current_id, limit):
- """Get all the push rules changes that have happend on the server"""
- if last_id == current_id:
- return defer.succeed([])
-
- def get_all_push_rule_updates_txn(txn):
- sql = (
- "SELECT stream_id, event_stream_ordering, user_id, rule_id,"
- " op, priority_class, priority, conditions, actions"
- " FROM push_rules_stream"
- " WHERE ? < stream_id AND stream_id <= ?"
- " ORDER BY stream_id ASC LIMIT ?"
- )
- txn.execute(sql, (last_id, current_id, limit))
- return txn.fetchall()
-
- return self.db.runInteraction(
- "get_all_push_rule_updates", get_all_push_rule_updates_txn
- )
-
 def get_push_rules_stream_token(self):
 """Get the position of the push rules stream.
 Returns a pair of a stream id for the push_rules stream and the
-- cgit 1.4.1


From 2e105c156be036ebd408b8fbb87b5c218574726e Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 7 Apr 2020 15:19:19 +0100
Subject: Remove sent outbound device list pokes from the database (#7192)

They just get in the way.
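For the race fixed in ce72355 above, the key idea is that RDATA handling and POSITION catch-up for the same stream must be serialised: otherwise a row can be applied while the catch-up is still fetching the updates that precede it. A minimal sketch of that linearization, with invented method names around the real Linearizer API (the `with await ...queue()` form is what the patch itself uses):

    from synapse.util.async_helpers import Linearizer


    class SketchReplicationHandler:
        """Hypothetical trimmed-down handler showing only the locking."""

        def __init__(self):
            self._position_linearizer = Linearizer("replication_position")
            self._streams_connected = set()

        async def on_rdata_command(self, stream_name, token, row):
            # Queue behind any in-flight POSITION catch-up for this stream.
            with await self._position_linearizer.queue(stream_name):
                if stream_name not in self._streams_connected:
                    # No POSITION seen yet: drop the row, since the catch-up
                    # triggered by the next POSITION will re-fetch it.
                    return
                await self.process_row(stream_name, token, row)  # assumed helper

Dropping not-yet-connected rows is safe precisely because the POSITION handler re-fetches everything between the last-seen token and the advertised position before marking the stream as connected.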
--- changelog.d/7192.misc | 1 + synapse/storage/data_stores/main/devices.py | 4 ++-- .../schema/delta/57/remove_sent_outbound_pokes.sql | 21 +++++++++++++++++++++ 3 files changed, 24 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7192.misc create mode 100644 synapse/storage/data_stores/main/schema/delta/57/remove_sent_outbound_pokes.sql (limited to 'changelog.d') diff --git a/changelog.d/7192.misc b/changelog.d/7192.misc new file mode 100644 index 0000000000..e401e36399 --- /dev/null +++ b/changelog.d/7192.misc @@ -0,0 +1 @@ +Remove sent outbound device list pokes from the database. diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index dd3561e9b2..4c5bea4a5c 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -227,11 +227,11 @@ class DeviceWorkerStore(SQLBaseStore): # get the list of device updates that need to be sent sql = """ SELECT user_id, device_id, stream_id, opentracing_context FROM device_lists_outbound_pokes - WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ? + WHERE destination = ? AND ? < stream_id AND stream_id <= ? ORDER BY stream_id LIMIT ? """ - txn.execute(sql, (destination, from_stream_id, now_stream_id, False, limit)) + txn.execute(sql, (destination, from_stream_id, now_stream_id, limit)) return list(txn) diff --git a/synapse/storage/data_stores/main/schema/delta/57/remove_sent_outbound_pokes.sql b/synapse/storage/data_stores/main/schema/delta/57/remove_sent_outbound_pokes.sql new file mode 100644 index 0000000000..133d80af35 --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/57/remove_sent_outbound_pokes.sql @@ -0,0 +1,21 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- we no longer keep sent outbound device pokes in the db; clear them out +-- so that we don't have to worry about them. +-- +-- This is a sequence scan, but it doesn't take too long. + +DELETE FROM device_lists_outbound_pokes WHERE sent; -- cgit 1.4.1 From ec5ac8e2b129f645a06f441143c2dcd2fb1c7037 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 7 Apr 2020 18:31:50 +0200 Subject: Fix typo in the login fallback javascript (#7235) * Fix typo in the login fallback javascript * Changelog --- changelog.d/7235.bugfix | 1 + synapse/static/client/login/js/login.js | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7235.bugfix (limited to 'changelog.d') diff --git a/changelog.d/7235.bugfix b/changelog.d/7235.bugfix new file mode 100644 index 0000000000..d185efe537 --- /dev/null +++ b/changelog.d/7235.bugfix @@ -0,0 +1 @@ +Fix a bug causing the login fallback to not display the SSO login form. 
diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js index debe464371..5ca0317755 100644 --- a/synapse/static/client/login/js/login.js +++ b/synapse/static/client/login/js/login.js @@ -62,7 +62,7 @@ var show_login = function(inhibit_redirect) { } // Otherwise, show the SSO form - $("#sso_form").show(); + $("#sso_flow").show(); } if (matrixLogin.serverAcceptsPassword) { -- cgit 1.4.1 From bd2ea3432b617537a2596f1704de4478cda60dde Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 7 Apr 2020 17:44:51 +0100 Subject: changelog --- changelog.d/7329.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7329.misc (limited to 'changelog.d') diff --git a/changelog.d/7329.misc b/changelog.d/7329.misc new file mode 100644 index 0000000000..676f285377 --- /dev/null +++ b/changelog.d/7329.misc @@ -0,0 +1 @@ +Move catchup of replication streams logic to worker. -- cgit 1.4.1 From d78cb31588e01468ab06a36e6120a80fb6fbf097 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 7 Apr 2020 15:03:23 -0400 Subject: Add typing information to federation_server. (#7219) --- changelog.d/7219.misc | 1 + synapse/federation/federation_server.py | 173 ++++++++++++++++++++------------ tox.ini | 1 + 3 files changed, 109 insertions(+), 66 deletions(-) create mode 100644 changelog.d/7219.misc (limited to 'changelog.d') diff --git a/changelog.d/7219.misc b/changelog.d/7219.misc new file mode 100644 index 0000000000..4af5da8646 --- /dev/null +++ b/changelog.d/7219.misc @@ -0,0 +1 @@ +Add typing information to federation server code. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 89d521bc31..32a8a2ee46 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Dict +from typing import Any, Callable, Dict, List, Match, Optional, Tuple, Union import six from six import iteritems @@ -38,6 +38,7 @@ from synapse.api.errors import ( UnsupportedRoomVersionError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.events import EventBase from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction @@ -94,7 +95,9 @@ class FederationServer(FederationBase): # come in waves. self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000) - async def on_backfill_request(self, origin, room_id, versions, limit): + async def on_backfill_request( + self, origin: str, room_id: str, versions: List[str], limit: int + ) -> Tuple[int, Dict[str, Any]]: with (await self._server_linearizer.queue((origin, room_id))): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) @@ -107,23 +110,25 @@ class FederationServer(FederationBase): return 200, res - async def on_incoming_transaction(self, origin, transaction_data): + async def on_incoming_transaction( + self, origin: str, transaction_data: JsonDict + ) -> Tuple[int, Dict[str, Any]]: # keep this as early as possible to make the calculated origin ts as # accurate as possible. 
request_time = self._clock.time_msec() transaction = Transaction(**transaction_data) - if not transaction.transaction_id: + if not transaction.transaction_id: # type: ignore raise Exception("Transaction missing transaction_id") - logger.debug("[%s] Got transaction", transaction.transaction_id) + logger.debug("[%s] Got transaction", transaction.transaction_id) # type: ignore # use a linearizer to ensure that we don't process the same transaction # multiple times in parallel. with ( await self._transaction_linearizer.queue( - (origin, transaction.transaction_id) + (origin, transaction.transaction_id) # type: ignore ) ): result = await self._handle_incoming_transaction( @@ -132,31 +137,33 @@ class FederationServer(FederationBase): return result - async def _handle_incoming_transaction(self, origin, transaction, request_time): + async def _handle_incoming_transaction( + self, origin: str, transaction: Transaction, request_time: int + ) -> Tuple[int, Dict[str, Any]]: """ Process an incoming transaction and return the HTTP response Args: - origin (unicode): the server making the request - transaction (Transaction): incoming transaction - request_time (int): timestamp that the HTTP request arrived at + origin: the server making the request + transaction: incoming transaction + request_time: timestamp that the HTTP request arrived at Returns: - Deferred[(int, object)]: http response code and body + HTTP response code and body """ response = await self.transaction_actions.have_responded(origin, transaction) if response: logger.debug( "[%s] We've already responded to this request", - transaction.transaction_id, + transaction.transaction_id, # type: ignore ) return response - logger.debug("[%s] Transaction is new", transaction.transaction_id) + logger.debug("[%s] Transaction is new", transaction.transaction_id) # type: ignore # Reject if PDU count > 50 or EDU count > 100 - if len(transaction.pdus) > 50 or ( - hasattr(transaction, "edus") and len(transaction.edus) > 100 + if len(transaction.pdus) > 50 or ( # type: ignore + hasattr(transaction, "edus") and len(transaction.edus) > 100 # type: ignore ): logger.info("Transaction PDU or EDU count too large. Returning 400") @@ -204,13 +211,13 @@ class FederationServer(FederationBase): report back to the sending server. """ - received_pdus_counter.inc(len(transaction.pdus)) + received_pdus_counter.inc(len(transaction.pdus)) # type: ignore origin_host, _ = parse_server_name(origin) - pdus_by_room = {} + pdus_by_room = {} # type: Dict[str, List[EventBase]] - for p in transaction.pdus: + for p in transaction.pdus: # type: ignore if "unsigned" in p: unsigned = p["unsigned"] if "age" in unsigned: @@ -254,7 +261,7 @@ class FederationServer(FederationBase): # require callouts to other servers to fetch missing events), but # impose a limit to avoid going too crazy with ram/cpu. 
- async def process_pdus_for_room(room_id): + async def process_pdus_for_room(room_id: str): logger.debug("Processing PDUs for %s", room_id) try: await self.check_server_matches_acl(origin_host, room_id) @@ -310,7 +317,9 @@ class FederationServer(FederationBase): TRANSACTION_CONCURRENCY_LIMIT, ) - async def on_context_state_request(self, origin, room_id, event_id): + async def on_context_state_request( + self, origin: str, room_id: str, event_id: str + ) -> Tuple[int, Dict[str, Any]]: origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) @@ -338,7 +347,9 @@ class FederationServer(FederationBase): return 200, resp - async def on_state_ids_request(self, origin, room_id, event_id): + async def on_state_ids_request( + self, origin: str, room_id: str, event_id: str + ) -> Tuple[int, Dict[str, Any]]: if not event_id: raise NotImplementedError("Specify an event") @@ -354,7 +365,9 @@ class FederationServer(FederationBase): return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids} - async def _on_context_state_request_compute(self, room_id, event_id): + async def _on_context_state_request_compute( + self, room_id: str, event_id: str + ) -> Dict[str, list]: if event_id: pdus = await self.handler.get_state_for_pdu(room_id, event_id) else: @@ -367,7 +380,9 @@ class FederationServer(FederationBase): "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], } - async def on_pdu_request(self, origin, event_id): + async def on_pdu_request( + self, origin: str, event_id: str + ) -> Tuple[int, Union[JsonDict, str]]: pdu = await self.handler.get_persisted_pdu(origin, event_id) if pdu: @@ -375,12 +390,16 @@ class FederationServer(FederationBase): else: return 404, "" - async def on_query_request(self, query_type, args): + async def on_query_request( + self, query_type: str, args: Dict[str, str] + ) -> Tuple[int, Dict[str, Any]]: received_queries_counter.labels(query_type).inc() resp = await self.registry.on_query(query_type, args) return 200, resp - async def on_make_join_request(self, origin, room_id, user_id, supported_versions): + async def on_make_join_request( + self, origin: str, room_id: str, user_id: str, supported_versions: List[str] + ) -> Dict[str, Any]: origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) @@ -397,7 +416,7 @@ class FederationServer(FederationBase): async def on_invite_request( self, origin: str, content: JsonDict, room_version_id: str - ): + ) -> Dict[str, Any]: room_version = KNOWN_ROOM_VERSIONS.get(room_version_id) if not room_version: raise SynapseError( @@ -414,7 +433,9 @@ class FederationServer(FederationBase): time_now = self._clock.time_msec() return {"event": ret_pdu.get_pdu_json(time_now)} - async def on_send_join_request(self, origin, content, room_id): + async def on_send_join_request( + self, origin: str, content: JsonDict, room_id: str + ) -> Dict[str, Any]: logger.debug("on_send_join_request: content: %s", content) room_version = await self.store.get_room_version(room_id) @@ -434,7 +455,9 @@ class FederationServer(FederationBase): "auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]], } - async def on_make_leave_request(self, origin, room_id, user_id): + async def on_make_leave_request( + self, origin: str, room_id: str, user_id: str + ) -> Dict[str, Any]: origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) pdu = await self.handler.on_make_leave_request(origin, room_id, user_id) @@ -444,7 +467,9 
@@ class FederationServer(FederationBase): time_now = self._clock.time_msec() return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} - async def on_send_leave_request(self, origin, content, room_id): + async def on_send_leave_request( + self, origin: str, content: JsonDict, room_id: str + ) -> dict: logger.debug("on_send_leave_request: content: %s", content) room_version = await self.store.get_room_version(room_id) @@ -460,7 +485,9 @@ class FederationServer(FederationBase): await self.handler.on_send_leave_request(origin, pdu) return {} - async def on_event_auth(self, origin, room_id, event_id): + async def on_event_auth( + self, origin: str, room_id: str, event_id: str + ) -> Tuple[int, Dict[str, Any]]: with (await self._server_linearizer.queue((origin, room_id))): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) @@ -471,15 +498,21 @@ class FederationServer(FederationBase): return 200, res @log_function - def on_query_client_keys(self, origin, content): - return self.on_query_request("client_keys", content) - - async def on_query_user_devices(self, origin: str, user_id: str): + async def on_query_client_keys( + self, origin: str, content: Dict[str, str] + ) -> Tuple[int, Dict[str, Any]]: + return await self.on_query_request("client_keys", content) + + async def on_query_user_devices( + self, origin: str, user_id: str + ) -> Tuple[int, Dict[str, Any]]: keys = await self.device_handler.on_federation_query_user_devices(user_id) return 200, keys @trace - async def on_claim_client_keys(self, origin, content): + async def on_claim_client_keys( + self, origin: str, content: JsonDict + ) -> Dict[str, Any]: query = [] for user_id, device_keys in content.get("one_time_keys", {}).items(): for device_id, algorithm in device_keys.items(): @@ -488,7 +521,7 @@ class FederationServer(FederationBase): log_kv({"message": "Claiming one time keys.", "user, device pairs": query}) results = await self.store.claim_e2e_one_time_keys(query) - json_result = {} + json_result = {} # type: Dict[str, Dict[str, dict]] for user_id, device_keys in results.items(): for device_id, keys in device_keys.items(): for key_id, json_bytes in keys.items(): @@ -511,8 +544,13 @@ class FederationServer(FederationBase): return {"one_time_keys": json_result} async def on_get_missing_events( - self, origin, room_id, earliest_events, latest_events, limit - ): + self, + origin: str, + room_id: str, + earliest_events: List[str], + latest_events: List[str], + limit: int, + ) -> Dict[str, list]: with (await self._server_linearizer.queue((origin, room_id))): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) @@ -541,11 +579,11 @@ class FederationServer(FederationBase): return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]} @log_function - def on_openid_userinfo(self, token): + async def on_openid_userinfo(self, token: str) -> Optional[str]: ts_now_ms = self._clock.time_msec() - return self.store.get_user_id_for_open_id_token(token, ts_now_ms) + return await self.store.get_user_id_for_open_id_token(token, ts_now_ms) - def _transaction_from_pdus(self, pdu_list): + def _transaction_from_pdus(self, pdu_list: List[EventBase]) -> Transaction: """Returns a new Transaction containing the given PDUs suitable for transmission. 
""" @@ -558,7 +596,7 @@ class FederationServer(FederationBase): destination=None, ) - async def _handle_received_pdu(self, origin, pdu): + async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None: """ Process a PDU received in a federation /send/ transaction. If the event is invalid, then this method throws a FederationError. @@ -579,10 +617,8 @@ class FederationServer(FederationBase): until we try to backfill across the discontinuity. Args: - origin (str): server which sent the pdu - pdu (FrozenEvent): received pdu - - Returns (Deferred): completes with None + origin: server which sent the pdu + pdu: received pdu Raises: FederationError if the signatures / hash do not match, or if the event was unacceptable for any other reason (eg, too large, @@ -625,25 +661,27 @@ class FederationServer(FederationBase): return "" % self.server_name async def exchange_third_party_invite( - self, sender_user_id, target_user_id, room_id, signed + self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict ): ret = await self.handler.exchange_third_party_invite( sender_user_id, target_user_id, room_id, signed ) return ret - async def on_exchange_third_party_invite_request(self, room_id, event_dict): + async def on_exchange_third_party_invite_request( + self, room_id: str, event_dict: Dict + ): ret = await self.handler.on_exchange_third_party_invite_request( room_id, event_dict ) return ret - async def check_server_matches_acl(self, server_name, room_id): + async def check_server_matches_acl(self, server_name: str, room_id: str): """Check if the given server is allowed by the server ACLs in the room Args: - server_name (str): name of server, *without any port part* - room_id (str): ID of the room to check + server_name: name of server, *without any port part* + room_id: ID of the room to check Raises: AuthError if the server does not match the ACL @@ -661,15 +699,15 @@ class FederationServer(FederationBase): raise AuthError(code=403, msg="Server is banned from room") -def server_matches_acl_event(server_name, acl_event): +def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool: """Check if the given server is allowed by the ACL event Args: - server_name (str): name of server, without any port part - acl_event (EventBase): m.room.server_acl event + server_name: name of server, without any port part + acl_event: m.room.server_acl event Returns: - bool: True if this server is allowed by the ACLs + True if this server is allowed by the ACLs """ logger.debug("Checking %s against acl %s", server_name, acl_event.content) @@ -713,7 +751,7 @@ def server_matches_acl_event(server_name, acl_event): return False -def _acl_entry_matches(server_name, acl_entry): +def _acl_entry_matches(server_name: str, acl_entry: str) -> Match: if not isinstance(acl_entry, six.string_types): logger.warning( "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry) @@ -732,13 +770,13 @@ class FederationHandlerRegistry(object): self.edu_handlers = {} self.query_handlers = {} - def register_edu_handler(self, edu_type, handler): + def register_edu_handler(self, edu_type: str, handler: Callable[[str, dict], None]): """Sets the handler callable that will be used to handle an incoming federation EDU of the given type. 
Args: - edu_type (str): The type of the incoming EDU to register handler for - handler (Callable[[str, dict]]): A callable invoked on incoming EDU + edu_type: The type of the incoming EDU to register handler for + handler: A callable invoked on incoming EDU of the given type. The arguments are the origin server name and the EDU contents. """ @@ -749,14 +787,16 @@ class FederationHandlerRegistry(object): self.edu_handlers[edu_type] = handler - def register_query_handler(self, query_type, handler): + def register_query_handler( + self, query_type: str, handler: Callable[[dict], defer.Deferred] + ): """Sets the handler callable that will be used to handle an incoming federation query of the given type. Args: - query_type (str): Category name of the query, which should match + query_type: Category name of the query, which should match the string used by make_query. - handler (Callable[[dict], Deferred[dict]]): Invoked to handle + handler: Invoked to handle incoming queries of this type. The return will be yielded on and the result used as the response to the query request. """ @@ -767,10 +807,11 @@ class FederationHandlerRegistry(object): self.query_handlers[query_type] = handler - async def on_edu(self, edu_type, origin, content): + async def on_edu(self, edu_type: str, origin: str, content: dict): handler = self.edu_handlers.get(edu_type) if not handler: logger.warning("No handler registered for EDU type %s", edu_type) + return with start_active_span_from_edu(content, "handle_edu"): try: @@ -780,7 +821,7 @@ class FederationHandlerRegistry(object): except Exception: logger.exception("Failed to handle edu %r", edu_type) - def on_query(self, query_type, args): + def on_query(self, query_type: str, args: dict) -> defer.Deferred: handler = self.query_handlers.get(query_type) if not handler: logger.warning("No handler registered for query type %s", query_type) @@ -807,7 +848,7 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry): super(ReplicationFederationHandlerRegistry, self).__init__() - async def on_edu(self, edu_type, origin, content): + async def on_edu(self, edu_type: str, origin: str, content: dict): """Overrides FederationHandlerRegistry """ if not self.config.use_presence and edu_type == "m.presence": @@ -821,7 +862,7 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry): return await self._send_edu(edu_type=edu_type, origin=origin, content=content) - async def on_query(self, query_type, args): + async def on_query(self, query_type: str, args: dict): """Overrides FederationHandlerRegistry """ handler = self.query_handlers.get(query_type) diff --git a/tox.ini b/tox.ini index a79fc93b57..763c8463d9 100644 --- a/tox.ini +++ b/tox.ini @@ -183,6 +183,7 @@ commands = mypy \ synapse/events/spamcheck.py \ synapse/federation/federation_base.py \ synapse/federation/federation_client.py \ + synapse/federation/federation_server.py \ synapse/federation/sender \ synapse/federation/transport \ synapse/handlers/auth.py \ -- cgit 1.4.1 From 1722b8a527b8caa0f76706bf4acaf240e167daf4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 7 Apr 2020 16:56:34 -0400 Subject: Convert delete_url_cache_media to async/await. 
(#7241) --- changelog.d/7241.misc | 1 + synapse/storage/data_stores/main/media_repository.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7241.misc (limited to 'changelog.d') diff --git a/changelog.d/7241.misc b/changelog.d/7241.misc new file mode 100644 index 0000000000..fac5bc0403 --- /dev/null +++ b/changelog.d/7241.misc @@ -0,0 +1 @@ +Convert some of synapse.rest.media to async/await. diff --git a/synapse/storage/data_stores/main/media_repository.py b/synapse/storage/data_stores/main/media_repository.py index cf195f8aa6..8aecd414c2 100644 --- a/synapse/storage/data_stores/main/media_repository.py +++ b/synapse/storage/data_stores/main/media_repository.py @@ -367,7 +367,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "get_url_cache_media_before", _get_url_cache_media_before_txn ) - def delete_url_cache_media(self, media_ids): + async def delete_url_cache_media(self, media_ids): if len(media_ids) == 0: return @@ -380,6 +380,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): txn.executemany(sql, [(media_id,) for media_id in media_ids]) - return self.db.runInteraction( + return await self.db.runInteraction( "delete_url_cache_media", _delete_url_cache_media_txn ) -- cgit 1.4.1 From f31e65a749f84f8b3278c91784509d908d4fb342 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 7 Apr 2020 23:06:39 +0100 Subject: bg update to clear out duplicate outbound_device_list_pokes (#7193) We seem to have some duplicates, which could do with being cleared out. --- changelog.d/7193.misc | 1 + synapse/storage/data_stores/main/client_ips.py | 16 ++--- synapse/storage/data_stores/main/devices.py | 73 ++++++++++++++++++- .../delta/58/02remove_dup_outbound_pokes.sql | 22 ++++++ synapse/storage/database.py | 83 +++++++++++++++++++++- tests/storage/test_database.py | 52 ++++++++++++++ 6 files changed, 234 insertions(+), 13 deletions(-) create mode 100644 changelog.d/7193.misc create mode 100644 synapse/storage/data_stores/main/schema/delta/58/02remove_dup_outbound_pokes.sql create mode 100644 tests/storage/test_database.py (limited to 'changelog.d') diff --git a/changelog.d/7193.misc b/changelog.d/7193.misc new file mode 100644 index 0000000000..383a738e64 --- /dev/null +++ b/changelog.d/7193.misc @@ -0,0 +1 @@ +Add a background database update job to clear out duplicate `device_lists_outbound_pokes`. diff --git a/synapse/storage/data_stores/main/client_ips.py b/synapse/storage/data_stores/main/client_ips.py index e1ccb27142..92bc06919b 100644 --- a/synapse/storage/data_stores/main/client_ips.py +++ b/synapse/storage/data_stores/main/client_ips.py @@ -21,7 +21,7 @@ from twisted.internet import defer from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore -from synapse.storage.database import Database +from synapse.storage.database import Database, make_tuple_comparison_clause from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.caches.descriptors import Cache @@ -303,16 +303,10 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): # we'll just end up updating the same device row multiple # times, which is fine. - if self.database_engine.supports_tuple_comparison: - where_clause = "(user_id, device_id) > (?, ?)" - where_args = [last_user_id, last_device_id] - else: - # We explicitly do a `user_id >= ? AND (...)` here to ensure - # that an index is used, as doing `user_id > ? OR (user_id = ? 
AND ...)` - # makes it hard for query optimiser to tell that it can use the - # index on user_id - where_clause = "user_id >= ? AND (user_id > ? OR device_id > ?)" - where_args = [last_user_id, last_user_id, last_device_id] + where_clause, where_args = make_tuple_comparison_clause( + self.database_engine, + [("user_id", last_user_id), ("device_id", last_device_id)], + ) sql = """ SELECT diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 4c5bea4a5c..ee3a2ab031 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -32,7 +32,11 @@ from synapse.logging.opentracing import ( ) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause -from synapse.storage.database import Database, LoggingTransaction +from synapse.storage.database import ( + Database, + LoggingTransaction, + make_tuple_comparison_clause, +) from synapse.types import Collection, get_verify_key_from_cross_signing_key from synapse.util.caches.descriptors import ( Cache, @@ -49,6 +53,8 @@ DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = ( "drop_device_list_streams_non_unique_indexes" ) +BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES = "remove_dup_outbound_pokes" + class DeviceWorkerStore(SQLBaseStore): def get_device(self, user_id, device_id): @@ -714,6 +720,11 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): self._drop_device_list_streams_non_unique_indexes, ) + # clear out duplicate device list outbound pokes + self.db.updates.register_background_update_handler( + BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES, self._remove_duplicate_outbound_pokes, + ) + @defer.inlineCallbacks def _drop_device_list_streams_non_unique_indexes(self, progress, batch_size): def f(conn): @@ -728,6 +739,66 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): ) return 1 + async def _remove_duplicate_outbound_pokes(self, progress, batch_size): + # for some reason, we have accumulated duplicate entries in + # device_lists_outbound_pokes, which makes prune_outbound_device_list_pokes less + # efficient. + # + # For each duplicate, we delete all the existing rows and put one back. + + KEY_COLS = ["stream_id", "destination", "user_id", "device_id"] + last_row = progress.get( + "last_row", + {"stream_id": 0, "destination": "", "user_id": "", "device_id": ""}, + ) + + def _txn(txn): + clause, args = make_tuple_comparison_clause( + self.db.engine, [(x, last_row[x]) for x in KEY_COLS] + ) + sql = """ + SELECT stream_id, destination, user_id, device_id, MAX(ts) AS ts + FROM device_lists_outbound_pokes + WHERE %s + GROUP BY %s + HAVING count(*) > 1 + ORDER BY %s + LIMIT ? 
+ """ % ( + clause, # WHERE + ",".join(KEY_COLS), # GROUP BY + ",".join(KEY_COLS), # ORDER BY + ) + txn.execute(sql, args + [batch_size]) + rows = self.db.cursor_to_dict(txn) + + row = None + for row in rows: + self.db.simple_delete_txn( + txn, "device_lists_outbound_pokes", {x: row[x] for x in KEY_COLS}, + ) + + row["sent"] = False + self.db.simple_insert_txn( + txn, "device_lists_outbound_pokes", row, + ) + + if row: + self.db.updates._background_update_progress_txn( + txn, BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES, {"last_row": row}, + ) + + return len(rows) + + rows = await self.db.runInteraction(BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES, _txn) + + if not rows: + await self.db.updates._end_background_update( + BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES + ) + + return rows + class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): def __init__(self, database: Database, db_conn, hs): diff --git a/synapse/storage/data_stores/main/schema/delta/58/02remove_dup_outbound_pokes.sql b/synapse/storage/data_stores/main/schema/delta/58/02remove_dup_outbound_pokes.sql new file mode 100644 index 0000000000..fdc39e9ba5 --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/58/02remove_dup_outbound_pokes.sql @@ -0,0 +1,22 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + /* for some reason, we have accumulated duplicate entries in + * device_lists_outbound_pokes, which makes prune_outbound_device_list_pokes less + * efficient. + */ + +INSERT INTO background_updates (ordering, update_name, progress_json) + VALUES (5800, 'remove_dup_outbound_pokes', '{}'); diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 715c0346dd..a7cd97b0b0 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -17,7 +17,17 @@ import logging import time from time import monotonic as monotonic_time -from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Tuple, + TypeVar, +) from six import iteritems, iterkeys, itervalues from six.moves import intern, range @@ -1557,3 +1567,74 @@ def make_in_list_sql_clause( return "%s = ANY(?)" % (column,), [list(iterable)] else: return "%s IN (%s)" % (column, ",".join("?" for _ in iterable)), list(iterable) + + +KV = TypeVar("KV") + + +def make_tuple_comparison_clause( + database_engine: BaseDatabaseEngine, keys: List[Tuple[str, KV]] +) -> Tuple[str, List[KV]]: + """Returns a tuple comparison SQL clause + + Depending what the SQL engine supports, builds a SQL clause that looks like either + "(a, b) > (?, ?)", or "(a > ?) OR (a == ? AND b > ?)". + + Args: + database_engine + keys: A set of (column, value) pairs to be compared. + + Returns: + A tuple of SQL query and the args + """ + if database_engine.supports_tuple_comparison: + return ( + "(%s) > (%s)" % (",".join(k[0] for k in keys), ",".join("?" 
for _ in keys)), + [k[1] for k in keys], + ) + + # we want to build a clause + # (a > ?) OR + # (a == ? AND b > ?) OR + # (a == ? AND b == ? AND c > ?) + # ... + # (a == ? AND b == ? AND ... AND z > ?) + # + # or, equivalently: + # + # (a > ? OR (a == ? AND + # (b > ? OR (b == ? AND + # ... + # (y > ? OR (y == ? AND + # z > ? + # )) + # ... + # )) + # )) + # + # which itself is equivalent to (and apparently easier for the query optimiser): + # + # (a >= ? AND (a > ? OR + # (b >= ? AND (b > ? OR + # ... + # (y >= ? AND (y > ? OR + # z > ? + # )) + # ... + # )) + # )) + # + # + + clause = "" + args = [] # type: List[KV] + for k, v in keys[:-1]: + clause = clause + "(%s >= ? AND (%s > ? OR " % (k, k) + args.extend([v, v]) + + (k, v) = keys[-1] + clause += "%s > ?" % (k,) + args.append(v) + + clause += "))" * (len(keys) - 1) + return clause, args diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py new file mode 100644 index 0000000000..5a77c84962 --- /dev/null +++ b/tests/storage/test_database.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.storage.database import make_tuple_comparison_clause +from synapse.storage.engines import BaseDatabaseEngine + +from tests import unittest + + +def _stub_db_engine(**kwargs) -> BaseDatabaseEngine: + # returns a DatabaseEngine, circumventing the abc mechanism + # any kwargs are set as attributes on the class before instantiating it + t = type( + "TestBaseDatabaseEngine", + (BaseDatabaseEngine,), + dict(BaseDatabaseEngine.__dict__), + ) + # defeat the abc mechanism + t.__abstractmethods__ = set() + for k, v in kwargs.items(): + setattr(t, k, v) + return t(None, None) + + +class TupleComparisonClauseTestCase(unittest.TestCase): + def test_native_tuple_comparison(self): + db_engine = _stub_db_engine(supports_tuple_comparison=True) + clause, args = make_tuple_comparison_clause(db_engine, [("a", 1), ("b", 2)]) + self.assertEqual(clause, "(a,b) > (?,?)") + self.assertEqual(args, [1, 2]) + + def test_emulated_tuple_comparison(self): + db_engine = _stub_db_engine(supports_tuple_comparison=False) + clause, args = make_tuple_comparison_clause( + db_engine, [("a", 1), ("b", 2), ("c", 3)] + ) + self.assertEqual( + clause, "(a >= ? AND (a > ? OR (b >= ? AND (b > ? 
OR c > ?))))" + ) + self.assertEqual(args, [1, 1, 2, 2, 3]) -- cgit 1.4.1 From 29b7e22b939c473649c8619fdfbecec0cee6b029 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 8 Apr 2020 00:46:50 +0100 Subject: Add documentation to password_providers config option (#7238) --- changelog.d/7238.doc | 1 + docs/password_auth_providers.md | 5 ++++- docs/sample_config.yaml | 14 +++++++++++++- synapse/config/password_auth_providers.py | 16 ++++++++++++++-- 4 files changed, 32 insertions(+), 4 deletions(-) create mode 100644 changelog.d/7238.doc (limited to 'changelog.d') diff --git a/changelog.d/7238.doc b/changelog.d/7238.doc new file mode 100644 index 0000000000..0e3b4be428 --- /dev/null +++ b/changelog.d/7238.doc @@ -0,0 +1 @@ +Add documentation to the `password_providers` config option. Add known password provider implementations to docs. \ No newline at end of file diff --git a/docs/password_auth_providers.md b/docs/password_auth_providers.md index 0db1a3804a..96f9841b7a 100644 --- a/docs/password_auth_providers.md +++ b/docs/password_auth_providers.md @@ -9,7 +9,10 @@ into Synapse, and provides a number of methods by which it can integrate with the authentication system. This document serves as a reference for those looking to implement their -own password auth providers. +own password auth providers. Additionally, here is a list of known +password auth provider module implementations: + +* [matrix-synapse-ldap3](https://github.com/matrix-org/matrix-synapse-ldap3/) ## Required methods diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index be742969cc..3417813750 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1657,7 +1657,19 @@ email: #template_dir: "res/templates" -#password_providers: +# Password providers allow homeserver administrators to integrate +# their Synapse installation with existing authentication methods +# ex. LDAP, external tokens, etc. +# +# For more information and known implementations, please see +# https://github.com/matrix-org/synapse/blob/master/docs/password_auth_providers.md +# +# Note: instances wishing to use SAML or CAS authentication should +# instead use the `saml2_config` or `cas_config` options, +# respectively. +# +password_providers: +# # Example config for an LDAP auth provider # - module: "ldap_auth_provider.LdapAuthProvider" # config: # enabled: true diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py index 9746bbc681..4fda8ae987 100644 --- a/synapse/config/password_auth_providers.py +++ b/synapse/config/password_auth_providers.py @@ -35,7 +35,7 @@ class PasswordAuthProviderConfig(Config): if ldap_config.get("enabled", False): providers.append({"module": LDAP_PROVIDER, "config": ldap_config}) - providers.extend(config.get("password_providers", [])) + providers.extend(config.get("password_providers") or []) for provider in providers: mod_name = provider["module"] @@ -52,7 +52,19 @@ class PasswordAuthProviderConfig(Config): def generate_config_section(self, **kwargs): return """\ - #password_providers: + # Password providers allow homeserver administrators to integrate + # their Synapse installation with existing authentication methods + # ex. LDAP, external tokens, etc. 
+ # + # For more information and known implementations, please see + # https://github.com/matrix-org/synapse/blob/master/docs/password_auth_providers.md + # + # Note: instances wishing to use SAML or CAS authentication should + # instead use the `saml2_config` or `cas_config` options, + # respectively. + # + password_providers: + # # Example config for an LDAP auth provider # - module: "ldap_auth_provider.LdapAuthProvider" # config: # enabled: true -- cgit 1.4.1 From c11d24d48c2f0b0b57d70087e5659290b9ddd154 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 8 Apr 2020 11:59:51 +0200 Subject: Fix changelog for #7235 --- changelog.d/7235.bugfix | 1 - changelog.d/7235.feature | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/7235.bugfix create mode 100644 changelog.d/7235.feature (limited to 'changelog.d') diff --git a/changelog.d/7235.bugfix b/changelog.d/7235.bugfix deleted file mode 100644 index d185efe537..0000000000 --- a/changelog.d/7235.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug causing the login fallback to not display the SSO login form. diff --git a/changelog.d/7235.feature b/changelog.d/7235.feature new file mode 100644 index 0000000000..fafa79c7e7 --- /dev/null +++ b/changelog.d/7235.feature @@ -0,0 +1 @@ +Improve the support for SSO authentication on the login fallback page. -- cgit 1.4.1 From cae412148483763a108c3dd797c92ad89f5c1568 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 8 Apr 2020 11:59:26 +0100 Subject: Make systemd-with-workers doc official (#7234) Simplify and update this documentation, and make it part of the core dist. --- changelog.d/7234.doc | 1 + contrib/systemd-with-workers/README.md | 152 +-------------------- .../system/matrix-synapse-worker@.service | 19 --- .../system/matrix-synapse.service | 18 --- contrib/systemd-with-workers/system/matrix.target | 7 - .../workers/federation_reader.yaml | 14 -- docs/systemd-with-workers/README.md | 67 +++++++++ .../system/matrix-synapse-worker@.service | 20 +++ .../system/matrix-synapse.service | 21 +++ .../system/matrix-synapse.target | 6 + .../workers/federation_reader.yaml | 13 ++ docs/workers.md | 48 +++++-- 12 files changed, 163 insertions(+), 223 deletions(-) create mode 100644 changelog.d/7234.doc delete mode 100644 contrib/systemd-with-workers/system/matrix-synapse-worker@.service delete mode 100644 contrib/systemd-with-workers/system/matrix-synapse.service delete mode 100644 contrib/systemd-with-workers/system/matrix.target delete mode 100644 contrib/systemd-with-workers/workers/federation_reader.yaml create mode 100644 docs/systemd-with-workers/README.md create mode 100644 docs/systemd-with-workers/system/matrix-synapse-worker@.service create mode 100644 docs/systemd-with-workers/system/matrix-synapse.service create mode 100644 docs/systemd-with-workers/system/matrix-synapse.target create mode 100644 docs/systemd-with-workers/workers/federation_reader.yaml (limited to 'changelog.d') diff --git a/changelog.d/7234.doc b/changelog.d/7234.doc new file mode 100644 index 0000000000..d284f1422b --- /dev/null +++ b/changelog.d/7234.doc @@ -0,0 +1 @@ +Update the contributed documentation on managing synapse workers with systemd, and bring it into the core distribution. 
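One detail of the #7238 change above worth spelling out: the sample config now ships with `password_providers:` present but with all of its entries commented out, which YAML loads as `None`. That appears to be why the loader switched from a plain lookup to `or []`; a minimal demonstration:

```python
# "password_providers:" with only commented-out children parses to None.
config = {"password_providers": None}

# A plain .get() default doesn't help when the key is present but null:
try:
    [].extend(config.get("password_providers", []))
except TypeError as e:
    print("with a .get() default:", e)  # 'NoneType' object is not iterable

# The pattern now used in password_auth_providers.py tolerates the null value:
providers = []
providers.extend(config.get("password_providers") or [])
print("with `or []`:", providers)  # []
```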
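Looking back at #7193 above, the emulated tuple comparison is a pure string-building function, so it can be checked in isolation. Here is a standalone copy of the non-native branch (assuming the same semantics as `make_tuple_comparison_clause`), asserted against the expected output from `tests/storage/test_database.py`:

```python
from typing import Any, List, Tuple

def emulated_tuple_clause(keys: List[Tuple[str, Any]]) -> Tuple[str, List[Any]]:
    # Same expansion used above for engines without native tuple comparison:
    # (a,b,c) > (?,?,?) becomes (a >= ? AND (a > ? OR (b >= ? AND (b > ? OR c > ?)))).
    clause = ""
    args: List[Any] = []
    for k, v in keys[:-1]:
        clause += "(%s >= ? AND (%s > ? OR " % (k, k)
        args.extend([v, v])
    k, v = keys[-1]
    clause += "%s > ?" % (k,)
    args.append(v)
    clause += "))" * (len(keys) - 1)
    return clause, args

clause, args = emulated_tuple_clause([("a", 1), ("b", 2), ("c", 3)])
assert clause == "(a >= ? AND (a > ? OR (b >= ? AND (b > ? OR c > ?))))"
assert args == [1, 1, 2, 2, 3]
print(clause, args)
```

The redundant-looking `a >= ?` guards are the point: they let the query planner keep using an index on the leading column, as the removed comment in `client_ips.py` explained.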
diff --git a/contrib/systemd-with-workers/README.md b/contrib/systemd-with-workers/README.md index 74b261e9fb..8d21d532bd 100644 --- a/contrib/systemd-with-workers/README.md +++ b/contrib/systemd-with-workers/README.md @@ -1,150 +1,2 @@ -# Setup Synapse with Workers and Systemd - -This is a setup for managing synapse with systemd including support for -managing workers. It provides a `matrix-synapse`, as well as a -`matrix-synapse-worker@` service for any workers you require. Additionally to -group the required services it sets up a `matrix.target`. You can use this to -automatically start any bot- or bridge-services. More on this in -[Bots and Bridges](#bots-and-bridges). - -See the folder [system](system) for any service and target files. - -The folder [workers](workers) contains an example configuration for the -`federation_reader` worker. Pay special attention to the name of the -configuration file. In order to work with the `matrix-synapse-worker@.service` -service, it needs to have the exact same name as the worker app. - -This setup expects neither the homeserver nor any workers to fork. Forking is -handled by systemd. - -## Setup - -1. Adjust your matrix configs. Make sure that the worker config files have the -exact same name as the worker app. Compare `matrix-synapse-worker@.service` for -why. You can find an example worker config in the [workers](workers) folder. See -below for relevant settings in the `homeserver.yaml`. -2. Copy the `*.service` and `*.target` files in [system](system) to -`/etc/systemd/system`. -3. `systemctl enable matrix-synapse.service` this adds the homeserver -app to the `matrix.target` -4. *Optional.* `systemctl enable -matrix-synapse-worker@federation_reader.service` this adds the federation_reader -app to the `matrix-synapse.service` -5. *Optional.* Repeat step 4 for any additional workers you require. -6. *Optional.* Add any bots or bridges by enabling them. -7. Start all matrix related services via `systemctl start matrix.target` -8. *Optional.* Enable autostart of all matrix related services on system boot -via `systemctl enable matrix.target` - -## Usage - -After you have setup you can use the following commands to manage your synapse -installation: - -``` -# Start matrix-synapse, all workers and any enabled bots or bridges. -systemctl start matrix.target - -# Restart matrix-synapse and all workers (not necessarily restarting bots -# or bridges, see "Bots and Bridges") -systemctl restart matrix-synapse.service - -# Stop matrix-synapse and all workers (not necessarily restarting bots -# or bridges, see "Bots and Bridges") -systemctl stop matrix-synapse.service - -# Restart a specific worker (i. e. federation_reader), the homeserver is -# unaffected by this. -systemctl restart matrix-synapse-worker@federation_reader.service - -# Add a new worker (assuming all configs are setup already) -systemctl enable matrix-synapse-worker@federation_writer.service -systemctl restart matrix-synapse.service -``` - -## The Configs - -Make sure the `worker_app` is set in the `homeserver.yaml` and it does not fork. - -``` -worker_app: synapse.app.homeserver -daemonize: false -``` - -None of the workers should fork, as forking is handled by systemd. Hence make -sure this is present in all worker config files. - -``` -worker_daemonize: false -``` - -The config files of all workers are expected to be located in -`/etc/matrix-synapse/workers`. If you want to use a different location you have -to edit the provided `*.service` files accordingly. 
- -## Bots and Bridges - -Most bots and bridges do not care if the homeserver goes down or is restarted. -Depending on the implementation this may crash them though. So look up the docs -or ask the community of the specific bridge or bot you want to run to make sure -you choose the correct setup. - -Whichever configuration you choose, after the setup the following will enable -automatically starting (and potentially restarting) your bot/bridge with the -`matrix.target`. - -``` -systemctl enable .service -``` - -**Note** that from an inactive synapse the bots/bridges will only be started with -synapse if you start the `matrix.target`, not if you start the -`matrix-synapse.service`. This is on purpose. Think of `matrix-synapse.service` -as *just* synapse, but `matrix.target` being anything matrix related, including -synapse and any and all enabled bots and bridges. - -### Start with synapse but ignore synapse going down - -If the bridge can handle shutdowns of the homeserver you'll want to install the -service in the `matrix.target` and optionally add a -`After=matrix-synapse.service` dependency to have the bot/bridge start after -synapse on starting everything. - -In this case the service file should look like this. - -``` -[Unit] -# ... -# Optional, this will only ensure that if you start everything, synapse will -# be started before the bot/bridge will be started. -After=matrix-synapse.service - -[Service] -# ... - -[Install] -WantedBy=matrix.target -``` - -### Stop/restart when synapse stops/restarts - -If the bridge can't handle shutdowns of the homeserver you'll still want to -install the service in the `matrix.target` but also have to specify the -`After=matrix-synapse.service` *and* `BindsTo=matrix-synapse.service` -dependencies to have the bot/bridge stop/restart with synapse. - -In this case the service file should look like this. - -``` -[Unit] -# ... -# Mandatory -After=matrix-synapse.service -BindsTo=matrix-synapse.service - -[Service] -# ... - -[Install] -WantedBy=matrix.target -``` +The documentation for using systemd to manage synapse workers is now part of +the main synapse distribution. See [docs/systemd-with-workers](../../docs/systemd-with-workers). 
diff --git a/contrib/systemd-with-workers/system/matrix-synapse-worker@.service b/contrib/systemd-with-workers/system/matrix-synapse-worker@.service deleted file mode 100644 index 3507e2e989..0000000000 --- a/contrib/systemd-with-workers/system/matrix-synapse-worker@.service +++ /dev/null @@ -1,19 +0,0 @@ -[Unit] -Description=Synapse Matrix Worker -After=matrix-synapse.service -BindsTo=matrix-synapse.service - -[Service] -Type=notify -NotifyAccess=main -User=matrix-synapse -WorkingDirectory=/var/lib/matrix-synapse -EnvironmentFile=/etc/default/matrix-synapse -ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.%i --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml -ExecReload=/bin/kill -HUP $MAINPID -Restart=always -RestartSec=3 -SyslogIdentifier=matrix-synapse-%i - -[Install] -WantedBy=matrix-synapse.service diff --git a/contrib/systemd-with-workers/system/matrix-synapse.service b/contrib/systemd-with-workers/system/matrix-synapse.service deleted file mode 100644 index 68e8991f18..0000000000 --- a/contrib/systemd-with-workers/system/matrix-synapse.service +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -Description=Synapse Matrix Homeserver - -[Service] -Type=notify -NotifyAccess=main -User=matrix-synapse -WorkingDirectory=/var/lib/matrix-synapse -EnvironmentFile=/etc/default/matrix-synapse -ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys -ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ -ExecReload=/bin/kill -HUP $MAINPID -Restart=always -RestartSec=3 -SyslogIdentifier=matrix-synapse - -[Install] -WantedBy=matrix.target diff --git a/contrib/systemd-with-workers/system/matrix.target b/contrib/systemd-with-workers/system/matrix.target deleted file mode 100644 index aff97d03ef..0000000000 --- a/contrib/systemd-with-workers/system/matrix.target +++ /dev/null @@ -1,7 +0,0 @@ -[Unit] -Description=Contains matrix services like synapse, bridges and bots -After=network.target -AllowIsolate=no - -[Install] -WantedBy=multi-user.target diff --git a/contrib/systemd-with-workers/workers/federation_reader.yaml b/contrib/systemd-with-workers/workers/federation_reader.yaml deleted file mode 100644 index 47c54ec0d4..0000000000 --- a/contrib/systemd-with-workers/workers/federation_reader.yaml +++ /dev/null @@ -1,14 +0,0 @@ -worker_app: synapse.app.federation_reader - -worker_replication_host: 127.0.0.1 -worker_replication_port: 9092 -worker_replication_http_port: 9093 - -worker_listeners: - - type: http - port: 8011 - resources: - - names: [federation] - -worker_daemonize: false -worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml diff --git a/docs/systemd-with-workers/README.md b/docs/systemd-with-workers/README.md new file mode 100644 index 0000000000..257c09446f --- /dev/null +++ b/docs/systemd-with-workers/README.md @@ -0,0 +1,67 @@ +# Setting up Synapse with Workers and Systemd + +This is a setup for managing synapse with systemd, including support for +managing workers. It provides a `matrix-synapse` service for the master, as +well as a `matrix-synapse-worker@` service template for any workers you +require. Additionally, to group the required services, it sets up a +`matrix-synapse.target`. 
+
+See the folder [system](system) for the systemd unit files.
+
+The folder [workers](workers) contains an example configuration for the
+`federation_reader` worker.
+
+## Synapse configuration files
+
+See [workers.md](../workers.md) for information on how to set up the
+configuration files and reverse-proxy correctly. You can find an example worker
+config in the [workers](workers) folder.
+
+Systemd manages daemonization itself, so ensure that none of the configuration
+files set either `daemonize` or `worker_daemonize`.
+
+The config files of all workers are expected to be located in
+`/etc/matrix-synapse/workers`. If you want to use a different location, edit
+the provided `*.service` files accordingly.
+
+There is no need for a separate configuration file for the master process.
+
+## Set up
+
+1. Adjust synapse configuration files as above.
+1. Copy the `*.service` and `*.target` files in [system](system) to
+`/etc/systemd/system`.
+1. Run `systemctl daemon-reload` to tell systemd to load the new unit files.
+1. Run `systemctl enable matrix-synapse.service`. This will configure the
+synapse master process to be started as part of the `matrix-synapse.target`
+target.
+1. For each worker process to be enabled, run `systemctl enable
+matrix-synapse-worker@<worker_name>.service`. For each `<worker_name>`, there
+should be a corresponding configuration file
+`/etc/matrix-synapse/workers/<worker_name>.yaml`.
+1. Start all the synapse processes with `systemctl start matrix-synapse.target`.
+1. Tell systemd to start synapse on boot with `systemctl enable matrix-synapse.target`.
+
+## Usage
+
+Once the services are correctly set up, you can use the following commands
+to manage your synapse installation:
+
+```sh
+# Restart Synapse master and all workers
+systemctl restart matrix-synapse.target
+
+# Stop Synapse and all workers
+systemctl stop matrix-synapse.target
+
+# Restart the master alone
+systemctl start matrix-synapse.service
+
+# Restart a specific worker (eg. federation_reader); the master is
+# unaffected by this.
+systemctl restart matrix-synapse-worker@federation_reader.service
+
+# Add a new worker (assuming all configs are set up already)
+systemctl enable matrix-synapse-worker@federation_writer.service
+systemctl restart matrix-synapse.target
+```
diff --git a/docs/systemd-with-workers/system/matrix-synapse-worker@.service b/docs/systemd-with-workers/system/matrix-synapse-worker@.service
new file mode 100644
index 0000000000..70589a7a51
--- /dev/null
+++ b/docs/systemd-with-workers/system/matrix-synapse-worker@.service
@@ -0,0 +1,20 @@
+[Unit]
+Description=Synapse %i
+
+# This service should be restarted when the synapse target is restarted.
+PartOf=matrix-synapse.target + +[Service] +Type=notify +NotifyAccess=main +User=matrix-synapse +WorkingDirectory=/var/lib/matrix-synapse +EnvironmentFile=/etc/default/matrix-synapse +ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.generic_worker --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +RestartSec=3 +SyslogIdentifier=matrix-synapse-%i + +[Install] +WantedBy=matrix-synapse.target diff --git a/docs/systemd-with-workers/system/matrix-synapse.service b/docs/systemd-with-workers/system/matrix-synapse.service new file mode 100644 index 0000000000..c7b5ddfa49 --- /dev/null +++ b/docs/systemd-with-workers/system/matrix-synapse.service @@ -0,0 +1,21 @@ +[Unit] +Description=Synapse master + +# This service should be restarted when the synapse target is restarted. +PartOf=matrix-synapse.target + +[Service] +Type=notify +NotifyAccess=main +User=matrix-synapse +WorkingDirectory=/var/lib/matrix-synapse +EnvironmentFile=/etc/default/matrix-synapse +ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys +ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +RestartSec=3 +SyslogIdentifier=matrix-synapse + +[Install] +WantedBy=matrix-synapse.target diff --git a/docs/systemd-with-workers/system/matrix-synapse.target b/docs/systemd-with-workers/system/matrix-synapse.target new file mode 100644 index 0000000000..e0eba1b342 --- /dev/null +++ b/docs/systemd-with-workers/system/matrix-synapse.target @@ -0,0 +1,6 @@ +[Unit] +Description=Synapse parent target +After=network.target + +[Install] +WantedBy=multi-user.target diff --git a/docs/systemd-with-workers/workers/federation_reader.yaml b/docs/systemd-with-workers/workers/federation_reader.yaml new file mode 100644 index 0000000000..5b65c7040d --- /dev/null +++ b/docs/systemd-with-workers/workers/federation_reader.yaml @@ -0,0 +1,13 @@ +worker_app: synapse.app.federation_reader + +worker_replication_host: 127.0.0.1 +worker_replication_port: 9092 +worker_replication_http_port: 9093 + +worker_listeners: + - type: http + port: 8011 + resources: + - names: [federation] + +worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml diff --git a/docs/workers.md b/docs/workers.md index cf460283d5..2ce2259b22 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -52,24 +52,20 @@ synapse process.) You then create a set of configs for the various worker processes. These should be worker configuration files, and should be stored in a dedicated -subdirectory, to allow synctl to manipulate them. An additional configuration -for the master synapse process will need to be created because the process will -not be started automatically. That configuration should look like this: - - worker_app: synapse.app.homeserver - daemonize: true +subdirectory, to allow synctl to manipulate them. Each worker configuration file inherits the configuration of the main homeserver configuration file. You can then override configuration specific to that worker, e.g. the HTTP listener that it provides (if any); logging configuration; etc. You should minimise the number of overrides though to maintain a usable config. 
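The inheritance rule described above — worker config files layered on top of the main homeserver config — can be pictured as a shallow dict merge. This is a conceptual sketch only (Synapse's real config loader is considerably more involved), with invented example values:

```python
def load_worker_config(main: dict, worker_overrides: dict) -> dict:
    # Conceptual only: start from the homeserver config, then apply the
    # worker file's handful of overrides on top.
    merged = dict(main)
    merged.update(worker_overrides)
    return merged

homeserver = {"server_name": "example.com", "listeners": [{"port": 8008}]}
synchrotron = {
    "worker_app": "synapse.app.synchrotron",
    "worker_listeners": [{"type": "http", "port": 8083}],
    "worker_log_config": "/home/matrix/synapse/config/synchrotron_log_config.yaml",
}

config = load_worker_config(homeserver, synchrotron)
print(config["server_name"], config["worker_app"])
# example.com synapse.app.synchrotron
```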
-You must specify the type of worker application (`worker_app`). The currently
-available worker applications are listed below. You must also specify the
-replication endpoints that it's talking to on the main synapse process.
-`worker_replication_host` should specify the host of the main synapse,
-`worker_replication_port` should point to the TCP replication listener port and
-`worker_replication_http_port` should point to the HTTP replication port.
+In the config file for each worker, you must specify the type of worker
+application (`worker_app`). The currently available worker applications are
+listed below. You must also specify the replication endpoints that it's talking
+to on the main synapse process. `worker_replication_host` should specify the
+host of the main synapse, `worker_replication_port` should point to the TCP
+replication listener port and `worker_replication_http_port` should point to
+the HTTP replication port.
 
 Currently, the `event_creator` and `federation_reader` workers require specifying
 `worker_replication_http_port`.
@@ -90,8 +86,6 @@ For instance:
         - names:
             - client
 
-    worker_daemonize: True
-    worker_pid_file: /home/matrix/synapse/synchrotron.pid
     worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml
 
 ...is a full configuration for a synchrotron worker instance, which will expose a
@@ -101,7 +95,31 @@ by the main synapse.
 Obviously you should configure your reverse-proxy to route the relevant
 endpoints to the worker (`localhost:8083` in the above example).
 
-Finally, to actually run your worker-based synapse, you must pass synctl the -a
+Finally, you need to start your worker processes. This can be done with either
+`synctl` or your distribution's preferred service manager such as `systemd`. We
+recommend the use of `systemd` where available: for information on setting up
+`systemd` to start synapse workers, see
+[systemd-with-workers](systemd-with-workers). To use `synctl`, see below.
+
+### Using synctl
+
+If you want to use `synctl` to manage your synapse processes, you will need to
+create an additional configuration file for the master synapse process. That
+configuration should look like this:
+
+```yaml
+worker_app: synapse.app.homeserver
+```
+
+Additionally, each worker app must be configured with the name of a "pid file",
+to which it will write its process ID when it starts. For example, for a
+synchrotron, you might write:
+
+```yaml
+worker_pid_file: /home/matrix/synapse/synchrotron.pid
+```
+
+Finally, to actually run your worker-based synapse, you must pass synctl the `-a`
 commandline option to tell it to operate on all the worker configurations found
 in the given directory, e.g.:
-- cgit 1.4.1 


From 23f8d285ebd36d4091c2a03831d0b7c825f12e7e Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 8 Apr 2020 11:59:47 +0100
Subject: Remove redundant checks on `daemonize` from synctl (#7233)

We pass --daemonize on the commandline, which (since at least #4853)
overrides whatever the config file says, so there is no need for it to be
set in the config file.
---
 changelog.d/7233.misc |  1 +
 synctl                | 20 +++++++++++---------
 2 files changed, 12 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/7233.misc

(limited to 'changelog.d')

diff --git a/changelog.d/7233.misc b/changelog.d/7233.misc
new file mode 100644
index 0000000000..d9ad582726
--- /dev/null
+++ b/changelog.d/7233.misc
@@ -0,0 +1 @@
+Remove redundant checks on `daemonize` from synctl.
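To make the synctl change below concrete: after this patch, `start_worker`
always appends `--daemonize` to the worker's command line, so daemonization no
longer depends on either config file. A rough sketch of the resulting
invocation follows; the worker app name and the paths are illustrative, not
taken from the patch:

```sh
# Roughly what synctl execs for one worker after this change: the main config
# and the worker config are both passed with -c, and --daemonize is appended
# unconditionally. (synapse.app.federation_reader and the /etc paths are
# illustrative.)
python -B -m synapse.app.federation_reader \
    -c /etc/matrix-synapse/homeserver.yaml \
    -c /etc/matrix-synapse/workers/federation_reader.yaml \
    --daemonize
```

Since the commandline flag overrides whatever the config files say, the
`daemonize` and `worker_daemonize` assertions removed below were dead code.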
diff --git a/synctl b/synctl index 45acece30b..bbccd05290 100755 --- a/synctl +++ b/synctl @@ -117,7 +117,17 @@ def start_worker(app: str, configfile: str, worker_configfile: str) -> bool: False if there was an error starting the process """ - args = [sys.executable, "-B", "-m", app, "-c", configfile, "-c", worker_configfile] + args = [ + sys.executable, + "-B", + "-m", + app, + "-c", + configfile, + "-c", + worker_configfile, + "--daemonize", + ] try: subprocess.check_call(args) @@ -266,9 +276,6 @@ def main(): worker_cache_factors = ( worker_config.get("synctl_cache_factors") or cache_factors ) - daemonize = worker_config.get("daemonize") or config.get("daemonize") - assert daemonize, "Main process must have daemonize set to true" - # The master process doesn't support using worker_* config. for key in worker_config: if key == "worker_app": # But we allow worker_app @@ -278,11 +285,6 @@ def main(): ), "Main process cannot use worker_* config" else: worker_pidfile = worker_config["worker_pid_file"] - worker_daemonize = worker_config["worker_daemonize"] - assert worker_daemonize, "In config %r: expected '%s' to be True" % ( - worker_configfile, - "worker_daemonize", - ) worker_cache_factor = worker_config.get("synctl_cache_factor") worker_cache_factors = worker_config.get("synctl_cache_factors", {}) workers.append( -- cgit 1.4.1 From 55d46da59a9d0db58888243137b69ee342921b11 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 9 Apr 2020 12:23:30 +0100 Subject: Upgrade jQuery to 3.x on fallback login/registration screens (#7236) --- changelog.d/7236.misc | 1 + synapse/static/client/login/index.html | 3 ++- synapse/static/client/login/js/jquery-2.1.3.min.js | 4 ---- synapse/static/client/login/js/jquery-3.4.1.min.js | 2 ++ synapse/static/client/login/js/login.js | 6 +++--- synapse/static/client/register/index.html | 3 ++- synapse/static/client/register/js/jquery-2.1.3.min.js | 4 ---- synapse/static/client/register/js/jquery-3.4.1.min.js | 2 ++ synapse/static/client/register/js/register.js | 6 +++--- 9 files changed, 15 insertions(+), 16 deletions(-) create mode 100644 changelog.d/7236.misc delete mode 100644 synapse/static/client/login/js/jquery-2.1.3.min.js create mode 100644 synapse/static/client/login/js/jquery-3.4.1.min.js delete mode 100644 synapse/static/client/register/js/jquery-2.1.3.min.js create mode 100644 synapse/static/client/register/js/jquery-3.4.1.min.js (limited to 'changelog.d') diff --git a/changelog.d/7236.misc b/changelog.d/7236.misc new file mode 100644 index 0000000000..e4a2702b54 --- /dev/null +++ b/changelog.d/7236.misc @@ -0,0 +1 @@ +Upgrade jQuery to v3.4.1 on fallback login/registration pages. \ No newline at end of file diff --git a/synapse/static/client/login/index.html b/synapse/static/client/login/index.html index 712b0e3980..6fefdaaff7 100644 --- a/synapse/static/client/login/index.html +++ b/synapse/static/client/login/index.html @@ -1,9 +1,10 @@ + Login - + diff --git a/synapse/static/client/login/js/jquery-2.1.3.min.js b/synapse/static/client/login/js/jquery-2.1.3.min.js deleted file mode 100644 index 25714ed29a..0000000000 --- a/synapse/static/client/login/js/jquery-2.1.3.min.js +++ /dev/null @@ -1,4 +0,0 @@ -/*! jQuery v2.1.3 | (c) 2005, 2014 jQuery Foundation, Inc. 
| jquery.org/license */
-(minified jQuery v2.1.3 source elided)