summary refs log tree commit diff
diff options
context:
space:
mode:
authorOlivier Wilkinson (reivilibre) <oliverw@matrix.org>2022-05-26 15:49:57 +0100
committerOlivier Wilkinson (reivilibre) <oliverw@matrix.org>2022-05-26 15:49:57 +0100
commita2ae63e89ce1ff90207055d2b3e6b21dbe63d4d2 (patch)
tree04540ceaf246ee6d00dcb27d3cfd6208af8043ad
parentSplit workers' tests by regex into 4 different jobs for now (diff)
parentRemove backing code for groups/communities (#12558) (diff)
downloadsynapse-a2ae63e89ce1ff90207055d2b3e6b21dbe63d4d2.tar.xz
Merge branch 'develop' into rei/complement_workers_in_ci
-rw-r--r--.git-blame-ignore-revs3
-rw-r--r--CHANGES.md112
-rw-r--r--changelog.d/10533.misc1
-rw-r--r--changelog.d/12477.misc1
-rw-r--r--changelog.d/12498.misc1
-rw-r--r--changelog.d/12513.feature1
-rw-r--r--changelog.d/12553.removal1
-rw-r--r--changelog.d/12558.removal1
-rw-r--r--changelog.d/12567.misc1
-rw-r--r--changelog.d/12586.misc1
-rw-r--r--changelog.d/12588.misc1
-rw-r--r--changelog.d/12618.feature1
-rw-r--r--changelog.d/12623.feature1
-rw-r--r--changelog.d/12630.misc1
-rw-r--r--changelog.d/12672.misc1
-rw-r--r--changelog.d/12673.feature1
-rw-r--r--changelog.d/12676.misc1
-rw-r--r--changelog.d/12677.misc1
-rw-r--r--changelog.d/12679.misc1
-rw-r--r--changelog.d/12680.misc1
-rw-r--r--changelog.d/12683.bugfix1
-rw-r--r--changelog.d/12687.bugfix1
-rw-r--r--changelog.d/12689.misc1
-rw-r--r--changelog.d/12691.misc1
-rw-r--r--changelog.d/12693.misc1
-rw-r--r--changelog.d/12694.misc1
-rw-r--r--changelog.d/12695.misc1
-rw-r--r--changelog.d/12696.bugfix1
-rw-r--r--changelog.d/12698.misc1
-rw-r--r--changelog.d/12699.misc1
-rw-r--r--changelog.d/12700.misc1
-rw-r--r--changelog.d/12701.feature1
-rw-r--r--changelog.d/12705.misc1
-rw-r--r--changelog.d/12708.misc1
-rw-r--r--changelog.d/12709.removal1
-rw-r--r--changelog.d/12711.misc1
-rw-r--r--changelog.d/12713.bugfix1
-rw-r--r--changelog.d/12715.doc1
-rw-r--r--changelog.d/12716.misc1
-rw-r--r--changelog.d/12717.misc1
-rw-r--r--changelog.d/12720.misc1
-rw-r--r--changelog.d/12721.bugfix1
-rw-r--r--changelog.d/12723.misc1
-rw-r--r--changelog.d/12726.misc1
-rw-r--r--changelog.d/12727.doc1
-rw-r--r--changelog.d/12731.misc1
-rw-r--r--changelog.d/12734.misc1
-rw-r--r--changelog.d/12740.feature1
-rw-r--r--changelog.d/12742.doc1
-rw-r--r--changelog.d/12746.bugfix1
-rw-r--r--changelog.d/12747.bugfix1
-rw-r--r--changelog.d/12748.doc1
-rw-r--r--changelog.d/12749.doc1
-rw-r--r--changelog.d/12753.misc1
-rw-r--r--changelog.d/12759.doc1
-rw-r--r--changelog.d/12761.doc1
-rw-r--r--changelog.d/12762.misc1
-rw-r--r--changelog.d/12765.doc1
-rw-r--r--changelog.d/12769.misc1
-rw-r--r--changelog.d/12770.bugfix1
-rw-r--r--changelog.d/12772.misc1
-rw-r--r--changelog.d/12773.doc1
-rw-r--r--changelog.d/12774.misc1
-rw-r--r--changelog.d/12775.misc1
-rw-r--r--changelog.d/12776.doc2
-rw-r--r--changelog.d/12777.doc2
-rw-r--r--changelog.d/12779.bugfix1
-rw-r--r--changelog.d/12781.misc1
-rw-r--r--changelog.d/12783.misc1
-rw-r--r--changelog.d/12785.doc1
-rw-r--r--changelog.d/12786.feature1
-rw-r--r--changelog.d/12789.misc1
-rw-r--r--changelog.d/12790.misc1
-rw-r--r--changelog.d/12791.misc1
-rw-r--r--changelog.d/12792.feature1
-rw-r--r--changelog.d/12794.bugfix1
-rw-r--r--changelog.d/12803.bugfix1
-rw-r--r--changelog.d/12829.bugfix1
-rw-r--r--changelog.d/12843.bugfix1
-rw-r--r--changelog.d/12849.misc1
-rw-r--r--changelog.d/12851.misc1
-rw-r--r--changelog.d/12852.misc1
-rw-r--r--changelog.d/12856.misc1
-rw-r--r--changelog.d/12858.bugfix1
-rw-r--r--changelog.d/12859.feature1
-rw-r--r--changelog.d/12860.misc1
-rw-r--r--changelog.d/12863.doc1
-rw-r--r--changelog.d/12865.misc1
-rw-r--r--changelog.d/12866.misc1
-rw-r--r--changelog.d/12867.doc1
-rw-r--r--changelog.d/12868.misc1
-rw-r--r--changelog.d/12869.misc1
-rw-r--r--changelog.d/12871.misc1
-rw-r--r--changelog.d/12877.bugfix1
-rw-r--r--changelog.d/12879.misc1
-rw-r--r--debian/changelog6
-rw-r--r--docker/Dockerfile2
-rw-r--r--docker/complement/SynapseWorkers.Dockerfile12
-rw-r--r--docker/complement/conf-workers/caddy.complement.json72
-rw-r--r--docker/complement/conf-workers/caddy.supervisord.conf7
-rwxr-xr-xdocker/complement/conf-workers/start-complement-synapse-workers.sh23
-rw-r--r--docker/complement/conf-workers/workers-shared.yaml6
-rw-r--r--docker/conf-workers/nginx.conf.j216
-rw-r--r--docker/conf-workers/shared.yaml.j211
-rwxr-xr-xdocker/configure_workers_and_start.py21
-rw-r--r--docs/development/contributing_guide.md4
-rw-r--r--docs/message_retention_policies.md2
-rw-r--r--docs/modules/spam_checker_callbacks.md45
-rw-r--r--docs/sample_config.yaml10
-rw-r--r--docs/structured_logging.md2
-rw-r--r--docs/upgrade.md29
-rw-r--r--docs/usage/configuration/config_documentation.md19
-rw-r--r--docs/workers.md11
-rw-r--r--mypy.ini8
-rw-r--r--poetry.lock6
-rw-r--r--pyproject.toml2
-rwxr-xr-xscripts-dev/complement.sh9
-rw-r--r--synapse/api/constants.py5
-rw-r--r--synapse/api/errors.py7
-rw-r--r--synapse/app/generic_worker.py9
-rw-r--r--synapse/appservice/__init__.py43
-rw-r--r--synapse/appservice/api.py15
-rw-r--r--synapse/appservice/scheduler.py6
-rw-r--r--synapse/config/_base.pyi2
-rw-r--r--synapse/config/experimental.py6
-rw-r--r--synapse/config/groups.py39
-rw-r--r--synapse/config/homeserver.py2
-rw-r--r--synapse/events/spamcheck.py77
-rw-r--r--synapse/federation/federation_base.py5
-rw-r--r--synapse/federation/federation_server.py48
-rw-r--r--synapse/federation/sender/per_destination_queue.py2
-rw-r--r--synapse/federation/transport/client.py15
-rw-r--r--synapse/federation/transport/server/__init__.py48
-rw-r--r--synapse/federation/transport/server/federation.py5
-rw-r--r--synapse/federation/transport/server/groups_local.py115
-rw-r--r--synapse/federation/transport/server/groups_server.py755
-rw-r--r--synapse/groups/__init__.py0
-rw-r--r--synapse/groups/attestations.py218
-rw-r--r--synapse/groups/groups_server.py1019
-rw-r--r--synapse/handlers/admin.py4
-rw-r--r--synapse/handlers/device.py10
-rw-r--r--synapse/handlers/federation.py6
-rw-r--r--synapse/handlers/federation_event.py212
-rw-r--r--synapse/handlers/groups_local.py503
-rw-r--r--synapse/handlers/initial_sync.py6
-rw-r--r--synapse/handlers/message.py61
-rw-r--r--synapse/handlers/pagination.py6
-rw-r--r--synapse/handlers/room.py4
-rw-r--r--synapse/handlers/room_batch.py4
-rw-r--r--synapse/handlers/room_member.py11
-rw-r--r--synapse/handlers/room_summary.py6
-rw-r--r--synapse/handlers/search.py4
-rw-r--r--synapse/handlers/sync.py89
-rw-r--r--synapse/http/matrixfederationclient.py29
-rw-r--r--synapse/module_api/__init__.py10
-rw-r--r--synapse/module_api/errors.py2
-rw-r--r--synapse/push/baserules.py16
-rw-r--r--synapse/push/bulk_push_rule_evaluator.py71
-rw-r--r--synapse/push/clientformat.py4
-rw-r--r--synapse/push/mailer.py6
-rw-r--r--synapse/push/push_rule_evaluator.py50
-rw-r--r--synapse/replication/tcp/commands.py12
-rw-r--r--synapse/replication/tcp/redis.py6
-rw-r--r--synapse/rest/__init__.py3
-rw-r--r--synapse/rest/admin/__init__.py3
-rw-r--r--synapse/rest/admin/groups.py50
-rw-r--r--synapse/rest/client/groups.py962
-rw-r--r--synapse/rest/client/room.py7
-rw-r--r--synapse/rest/client/sync.py8
-rw-r--r--synapse/rest/media/v1/preview_html.py2
-rw-r--r--synapse/server.py39
-rw-r--r--synapse/spam_checker_api/__init__.py27
-rw-r--r--synapse/state/__init__.py36
-rw-r--r--synapse/storage/database.py2
-rw-r--r--synapse/storage/databases/main/event_federation.py2
-rw-r--r--synapse/storage/databases/main/event_push_actions.py2
-rw-r--r--synapse/storage/databases/main/events.py9
-rw-r--r--synapse/storage/databases/main/push_rule.py21
-rw-r--r--synapse/storage/databases/main/receipts.py5
-rw-r--r--synapse/storage/databases/main/relations.py53
-rw-r--r--synapse/storage/databases/main/room.py80
-rw-r--r--synapse/storage/databases/main/state.py59
-rw-r--r--synapse/storage/persist_events.py2
-rw-r--r--synapse/types.py29
-rw-r--r--synapse/util/caches/descriptors.py15
-rw-r--r--synapse/visibility.py6
-rw-r--r--tests/appservice/test_api.py102
-rw-r--r--tests/appservice/test_appservice.py2
-rw-r--r--tests/handlers/test_federation.py10
-rw-r--r--tests/handlers/test_room_summary.py20
-rw-r--r--tests/http/test_fedclient.py6
-rw-r--r--tests/http/test_servlet.py14
-rw-r--r--tests/http/test_site.py2
-rw-r--r--tests/push/test_push_rule_evaluator.py84
-rw-r--r--tests/rest/admin/test_admin.py90
-rw-r--r--tests/rest/client/test_groups.py56
-rw-r--r--tests/rest/client/test_relations.py8
-rw-r--r--tests/rest/client/test_retention.py35
-rw-r--r--tests/scripts/test_new_matrix_user.py13
-rw-r--r--tests/storage/test_base.py2
-rw-r--r--tests/storage/test_events.py43
-rw-r--r--tests/storage/test_roommember.py2
-rw-r--r--tests/test_state.py14
-rw-r--r--tests/test_types.py21
204 files changed, 1386 insertions, 4666 deletions
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index 83ddd568c2..50d28c68ee 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -6,3 +6,6 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56
 
 # Update black to 20.8b1 (#9381).
 0a00b7ff14890987f09112a2ae696c61001e6cf1
+
+# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
+c4268e3da64f1abb5b31deaeb5769adb6510c0a7
\ No newline at end of file
diff --git a/CHANGES.md b/CHANGES.md
index e10ac0314a..46ac3fce7a 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,115 @@
+Synapse 1.60.0rc1 (2022-05-24)
+==============================
+
+This release of Synapse adds a unique index to the `state_group_edges` table, in
+order to prevent accidentally introducing duplicate information (for example,
+because a database backup was restored multiple times). If your Synapse database
+already has duplicate rows in this table, this could fail with an error and
+require manual remediation.
+
+Additionally, the signature of the `check_event_for_spam` module callback has changed.
+The previous signature has been deprecated and remains working for now. Module authors
+should update their modules to use the new signature where possible.
+
+See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1600)
+for more details.
+
+Features
+--------
+
+- Measure the time taken in spam-checking callbacks and expose those measurements as metrics. ([\#12513](https://github.com/matrix-org/synapse/issues/12513))
+- Add a `default_power_level_content_override` config option to set default room power levels per room preset. ([\#12618](https://github.com/matrix-org/synapse/issues/12618))
+- Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787). ([\#12623](https://github.com/matrix-org/synapse/issues/12623))
+- Send `USER_IP` commands on a different Redis channel, in order to reduce traffic to workers that do not process these commands. ([\#12672](https://github.com/matrix-org/synapse/issues/12672), [\#12809](https://github.com/matrix-org/synapse/issues/12809))
+- Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal. ([\#12673](https://github.com/matrix-org/synapse/issues/12673))
+- Add a config options to allow for auto-tuning of caches. ([\#12701](https://github.com/matrix-org/synapse/issues/12701))
+- Update [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to process marker events from the current state to avoid markers being lost in timeline gaps for federated servers which would cause the imported history to be undiscovered. ([\#12718](https://github.com/matrix-org/synapse/issues/12718))
+- Add a `drop_federated_event` callback to `SpamChecker` to disregard inbound federated events before they take up much processing power, in an emergency. ([\#12744](https://github.com/matrix-org/synapse/issues/12744))
+- Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818). ([\#12786](https://github.com/matrix-org/synapse/issues/12786), [\#12792](https://github.com/matrix-org/synapse/issues/12792))
+- Update to the `check_event_for_spam` module callback. Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.7.0 that would prevent events from being sent to clients if there's a retention policy in the room when the support for retention policies is disabled. ([\#12611](https://github.com/matrix-org/synapse/issues/12611))
+- Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room. ([\#12683](https://github.com/matrix-org/synapse/issues/12683))
+- Add a unique index to `state_group_edges` to prevent duplicates being accidentally introduced and the consequential impact to performance. ([\#12687](https://github.com/matrix-org/synapse/issues/12687))
+- Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room. ([\#12696](https://github.com/matrix-org/synapse/issues/12696))
+- Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set. ([\#12713](https://github.com/matrix-org/synapse/issues/12713))
+- Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper. ([\#12721](https://github.com/matrix-org/synapse/issues/12721))
+- Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers. ([\#12747](https://github.com/matrix-org/synapse/issues/12747))
+- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12770](https://github.com/matrix-org/synapse/issues/12770))
+- Give a meaningful error message when a client tries to create a room with an invalid alias localpart. ([\#12779](https://github.com/matrix-org/synapse/issues/12779))
+- Fix a bug introduced in 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar. ([\#12794](https://github.com/matrix-org/synapse/issues/12794))
+- Fix a long-standing bug where finished log contexts would be re-started when failing to contact remote homeservers. ([\#12803](https://github.com/matrix-org/synapse/issues/12803))
+- Fix a bug, introduced in Synapse 1.21.0, that led to media thumbnails being unusable before the index has been added in the background. ([\#12823](https://github.com/matrix-org/synapse/issues/12823))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix the docker file after a dependency update. ([\#12853](https://github.com/matrix-org/synapse/issues/12853))
+
+
+Improved Documentation
+----------------------
+
+- Fix a typo in the Media Admin API documentation. ([\#12715](https://github.com/matrix-org/synapse/issues/12715))
+- Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh. ([\#12727](https://github.com/matrix-org/synapse/issues/12727))
+- Fix typo in server listener documentation. ([\#12742](https://github.com/matrix-org/synapse/issues/12742))
+- Link to the configuration manual from the welcome page of the documentation. ([\#12748](https://github.com/matrix-org/synapse/issues/12748))
+- Fix typo in `run_background_tasks_on` option name in configuration manual documentation. ([\#12749](https://github.com/matrix-org/synapse/issues/12749))
+- Add information regarding the `rc_invites` ratelimiting option to the configuration docs. ([\#12759](https://github.com/matrix-org/synapse/issues/12759))
+- Add documentation for cancellation of request processing. ([\#12761](https://github.com/matrix-org/synapse/issues/12761))
+- Recommend using docker to run tests against postgres. ([\#12765](https://github.com/matrix-org/synapse/issues/12765))
+- Add missing user directory endpoint from the generic worker documentation. Contributed by @olmari. ([\#12773](https://github.com/matrix-org/synapse/issues/12773))
+- Add additional info to documentation of config option `cache_autotuning`. ([\#12776](https://github.com/matrix-org/synapse/issues/12776))
+- Update configuration manual documentation to document size-related suffixes. ([\#12777](https://github.com/matrix-org/synapse/issues/12777))
+- Fix invalid YAML syntax in the example documentation for the `url_preview_accept_language` config option. ([\#12785](https://github.com/matrix-org/synapse/issues/12785))
+
+
+Deprecations and Removals
+-------------------------
+
+- Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts. ([\#12709](https://github.com/matrix-org/synapse/issues/12709))
+
+
+Internal Changes
+----------------
+
+- Improve event caching mechanism to avoid having multiple copies of an event in memory at a time. ([\#10533](https://github.com/matrix-org/synapse/issues/10533))
+- Preparation for faster-room-join work: return subsets of room state which we already have, immediately. ([\#12498](https://github.com/matrix-org/synapse/issues/12498))
+- Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect. ([\#12586](https://github.com/matrix-org/synapse/issues/12586), [\#12588](https://github.com/matrix-org/synapse/issues/12588), [\#12630](https://github.com/matrix-org/synapse/issues/12630), [\#12694](https://github.com/matrix-org/synapse/issues/12694), [\#12698](https://github.com/matrix-org/synapse/issues/12698), [\#12699](https://github.com/matrix-org/synapse/issues/12699), [\#12700](https://github.com/matrix-org/synapse/issues/12700), [\#12705](https://github.com/matrix-org/synapse/issues/12705))
+- Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests. ([\#12708](https://github.com/matrix-org/synapse/issues/12708))
+- Improve documentation of the `synapse.push` module. ([\#12676](https://github.com/matrix-org/synapse/issues/12676))
+- Refactor functions to on `PushRuleEvaluatorForEvent`. ([\#12677](https://github.com/matrix-org/synapse/issues/12677))
+- Preparation for database schema simplifications: stop writing to `event_reference_hashes`. ([\#12679](https://github.com/matrix-org/synapse/issues/12679))
+- Remove code which updates unused database column `application_services_state.last_txn`. ([\#12680](https://github.com/matrix-org/synapse/issues/12680))
+- Refactor `EventContext` class. ([\#12689](https://github.com/matrix-org/synapse/issues/12689))
+- Remove an unneeded class in the push code. ([\#12691](https://github.com/matrix-org/synapse/issues/12691))
+- Consolidate parsing of relation information from events. ([\#12693](https://github.com/matrix-org/synapse/issues/12693))
+- Convert namespace class `Codes` into a string enum. ([\#12703](https://github.com/matrix-org/synapse/issues/12703))
+- Optimize private read receipt filtering. ([\#12711](https://github.com/matrix-org/synapse/issues/12711))
+- Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG. ([\#12720](https://github.com/matrix-org/synapse/issues/12720))
+- Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports. ([\#12723](https://github.com/matrix-org/synapse/issues/12723))
+- Update configs used by Complement to allow more invites/3PID validations during tests. ([\#12731](https://github.com/matrix-org/synapse/issues/12731))
+- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
+- Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`. ([\#12769](https://github.com/matrix-org/synapse/issues/12769))
+- Move methods that call `add_push_rule` to the `PushRuleStore` class. ([\#12772](https://github.com/matrix-org/synapse/issues/12772))
+- Make handling of federation Authorization header (more) compliant with RFC7230. ([\#12774](https://github.com/matrix-org/synapse/issues/12774))
+- Refactor `resolve_state_groups_for_events` to not pull out full state when no state resolution happens. ([\#12775](https://github.com/matrix-org/synapse/issues/12775))
+- Do not keep going if there are 5 back-to-back background update failures. ([\#12781](https://github.com/matrix-org/synapse/issues/12781))
+- Fix federation when using the demo scripts. ([\#12783](https://github.com/matrix-org/synapse/issues/12783))
+- The `hash_password` script now fails when it is called without specifying a config file. Contributed by @jae1911. ([\#12789](https://github.com/matrix-org/synapse/issues/12789))
+- Improve and fix type hints. ([\#12567](https://github.com/matrix-org/synapse/issues/12567), [\#12477](https://github.com/matrix-org/synapse/issues/12477), [\#12717](https://github.com/matrix-org/synapse/issues/12717), [\#12753](https://github.com/matrix-org/synapse/issues/12753), [\#12695](https://github.com/matrix-org/synapse/issues/12695), [\#12734](https://github.com/matrix-org/synapse/issues/12734), [\#12716](https://github.com/matrix-org/synapse/issues/12716), [\#12726](https://github.com/matrix-org/synapse/issues/12726), [\#12790](https://github.com/matrix-org/synapse/issues/12790), [\#12833](https://github.com/matrix-org/synapse/issues/12833))
+- Update EventContext `get_current_event_ids` and `get_prev_event_ids` to accept state filters and update calls where possible. ([\#12791](https://github.com/matrix-org/synapse/issues/12791))
+- Remove Caddy from the Synapse workers image used in Complement. ([\#12818](https://github.com/matrix-org/synapse/issues/12818))
+- Add Complement's shared registration secret to the Complement worker image. This fixes tests that depend on it. ([\#12819](https://github.com/matrix-org/synapse/issues/12819))
+- Support registering Application Services when running with workers under Complement. ([\#12826](https://github.com/matrix-org/synapse/issues/12826))
+- Disable 'faster room join' Complement tests when testing against Synapse with workers. ([\#12842](https://github.com/matrix-org/synapse/issues/12842))
+
+
 Synapse 1.59.1 (2022-05-18)
 ===========================
 
diff --git a/changelog.d/10533.misc b/changelog.d/10533.misc
deleted file mode 100644
index f70dc6496f..0000000000
--- a/changelog.d/10533.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve event caching mechanism to avoid having multiple copies of an event in memory at a time.
diff --git a/changelog.d/12477.misc b/changelog.d/12477.misc
deleted file mode 100644
index e793d08e5e..0000000000
--- a/changelog.d/12477.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some type hints to datastore.
\ No newline at end of file
diff --git a/changelog.d/12498.misc b/changelog.d/12498.misc
deleted file mode 100644
index 8a00b94fbe..0000000000
--- a/changelog.d/12498.misc
+++ /dev/null
@@ -1 +0,0 @@
-Preparation for faster-room-join work: return subsets of room state which we already have, immediately.
diff --git a/changelog.d/12513.feature b/changelog.d/12513.feature
deleted file mode 100644
index 01bf1d9d2c..0000000000
--- a/changelog.d/12513.feature
+++ /dev/null
@@ -1 +0,0 @@
-Measure the time taken in spam-checking callbacks and expose those measurements as metrics.
diff --git a/changelog.d/12553.removal b/changelog.d/12553.removal
new file mode 100644
index 0000000000..41f6fae5da
--- /dev/null
+++ b/changelog.d/12553.removal
@@ -0,0 +1 @@
+Remove support for the non-standard groups/communities feature from Synapse.
diff --git a/changelog.d/12558.removal b/changelog.d/12558.removal
new file mode 100644
index 0000000000..41f6fae5da
--- /dev/null
+++ b/changelog.d/12558.removal
@@ -0,0 +1 @@
+Remove support for the non-standard groups/communities feature from Synapse.
diff --git a/changelog.d/12567.misc b/changelog.d/12567.misc
deleted file mode 100644
index 35f08569ba..0000000000
--- a/changelog.d/12567.misc
+++ /dev/null
@@ -1 +0,0 @@
-Replace string literal instances of stream key types with typed constants.
\ No newline at end of file
diff --git a/changelog.d/12586.misc b/changelog.d/12586.misc
deleted file mode 100644
index d26e332305..0000000000
--- a/changelog.d/12586.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect.
diff --git a/changelog.d/12588.misc b/changelog.d/12588.misc
deleted file mode 100644
index f62d5c8e21..0000000000
--- a/changelog.d/12588.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add ability to cancel disconnected requests to `SynapseRequest`.
diff --git a/changelog.d/12618.feature b/changelog.d/12618.feature
deleted file mode 100644
index 37fa03b3cb..0000000000
--- a/changelog.d/12618.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a `default_power_level_content_override` config option to set default room power levels per room preset.
diff --git a/changelog.d/12623.feature b/changelog.d/12623.feature
deleted file mode 100644
index cdee19fafa..0000000000
--- a/changelog.d/12623.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787).
\ No newline at end of file
diff --git a/changelog.d/12630.misc b/changelog.d/12630.misc
deleted file mode 100644
index 43e12603e2..0000000000
--- a/changelog.d/12630.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a helper class for testing request cancellation.
diff --git a/changelog.d/12672.misc b/changelog.d/12672.misc
deleted file mode 100644
index 265e0a801f..0000000000
--- a/changelog.d/12672.misc
+++ /dev/null
@@ -1 +0,0 @@
-Lay some foundation work to allow workers to only subscribe to some kinds of messages, reducing replication traffic.
\ No newline at end of file
diff --git a/changelog.d/12673.feature b/changelog.d/12673.feature
deleted file mode 100644
index f2bddd6e1c..0000000000
--- a/changelog.d/12673.feature
+++ /dev/null
@@ -1 +0,0 @@
-Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal.
diff --git a/changelog.d/12676.misc b/changelog.d/12676.misc
deleted file mode 100644
index 26490af00d..0000000000
--- a/changelog.d/12676.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve documentation of the `synapse.push` module.
diff --git a/changelog.d/12677.misc b/changelog.d/12677.misc
deleted file mode 100644
index eed12e69e9..0000000000
--- a/changelog.d/12677.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor functions to on `PushRuleEvaluatorForEvent`.
diff --git a/changelog.d/12679.misc b/changelog.d/12679.misc
deleted file mode 100644
index 6df1116b49..0000000000
--- a/changelog.d/12679.misc
+++ /dev/null
@@ -1 +0,0 @@
-Preparation for database schema simplifications: stop writing to `event_reference_hashes`.
diff --git a/changelog.d/12680.misc b/changelog.d/12680.misc
deleted file mode 100644
index dfd1f0a6c6..0000000000
--- a/changelog.d/12680.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove code which updates unused database column `application_services_state.last_txn`.
diff --git a/changelog.d/12683.bugfix b/changelog.d/12683.bugfix
deleted file mode 100644
index 2ce84a223a..0000000000
--- a/changelog.d/12683.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room.
diff --git a/changelog.d/12687.bugfix b/changelog.d/12687.bugfix
deleted file mode 100644
index 196d976670..0000000000
--- a/changelog.d/12687.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Add a unique index to `state_group_edges` to prevent duplicates being accidentally introduced and the consequential impact to performance.
\ No newline at end of file
diff --git a/changelog.d/12689.misc b/changelog.d/12689.misc
deleted file mode 100644
index daa484ea30..0000000000
--- a/changelog.d/12689.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor `EventContext` class.
diff --git a/changelog.d/12691.misc b/changelog.d/12691.misc
deleted file mode 100644
index c635434211..0000000000
--- a/changelog.d/12691.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove an unneeded class in the push code.
diff --git a/changelog.d/12693.misc b/changelog.d/12693.misc
deleted file mode 100644
index 8bd1e1cb0c..0000000000
--- a/changelog.d/12693.misc
+++ /dev/null
@@ -1 +0,0 @@
-Consolidate parsing of relation information from events.
diff --git a/changelog.d/12694.misc b/changelog.d/12694.misc
deleted file mode 100644
index e1e956a513..0000000000
--- a/changelog.d/12694.misc
+++ /dev/null
@@ -1 +0,0 @@
-Capture the `Deferred` for request cancellation in `_AsyncResource`.
diff --git a/changelog.d/12695.misc b/changelog.d/12695.misc
deleted file mode 100644
index 1b39d969a4..0000000000
--- a/changelog.d/12695.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fixes an incorrect type hint for `Filter._check_event_relations`.
diff --git a/changelog.d/12696.bugfix b/changelog.d/12696.bugfix
deleted file mode 100644
index e410184a22..0000000000
--- a/changelog.d/12696.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room.
diff --git a/changelog.d/12698.misc b/changelog.d/12698.misc
deleted file mode 100644
index 5d626352f9..0000000000
--- a/changelog.d/12698.misc
+++ /dev/null
@@ -1 +0,0 @@
-Respect the `@cancellable` flag for `DirectServe{Html,Json}Resource`s.
diff --git a/changelog.d/12699.misc b/changelog.d/12699.misc
deleted file mode 100644
index d278a956c7..0000000000
--- a/changelog.d/12699.misc
+++ /dev/null
@@ -1 +0,0 @@
-Respect the `@cancellable` flag for `RestServlet`s and `BaseFederationServlet`s.
diff --git a/changelog.d/12700.misc b/changelog.d/12700.misc
deleted file mode 100644
index d93eb5dada..0000000000
--- a/changelog.d/12700.misc
+++ /dev/null
@@ -1 +0,0 @@
-Respect the `@cancellable` flag for `ReplicationEndpoint`s.
diff --git a/changelog.d/12701.feature b/changelog.d/12701.feature
deleted file mode 100644
index bb2264602c..0000000000
--- a/changelog.d/12701.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a config options to allow for auto-tuning of caches.
diff --git a/changelog.d/12705.misc b/changelog.d/12705.misc
deleted file mode 100644
index a913d8bb85..0000000000
--- a/changelog.d/12705.misc
+++ /dev/null
@@ -1 +0,0 @@
-Complain if a federation endpoint has the `@cancellable` flag, since some of the wrapper code may not handle cancellation correctly yet.
diff --git a/changelog.d/12708.misc b/changelog.d/12708.misc
deleted file mode 100644
index aa99e7311b..0000000000
--- a/changelog.d/12708.misc
+++ /dev/null
@@ -1 +0,0 @@
-Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests.
diff --git a/changelog.d/12709.removal b/changelog.d/12709.removal
deleted file mode 100644
index 6bb03e2894..0000000000
--- a/changelog.d/12709.removal
+++ /dev/null
@@ -1 +0,0 @@
-Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts.
diff --git a/changelog.d/12711.misc b/changelog.d/12711.misc
deleted file mode 100644
index 0831ce0452..0000000000
--- a/changelog.d/12711.misc
+++ /dev/null
@@ -1 +0,0 @@
-Optimize private read receipt filtering.
diff --git a/changelog.d/12713.bugfix b/changelog.d/12713.bugfix
deleted file mode 100644
index 91e70f102c..0000000000
--- a/changelog.d/12713.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set.
diff --git a/changelog.d/12715.doc b/changelog.d/12715.doc
deleted file mode 100644
index 150d78c3f6..0000000000
--- a/changelog.d/12715.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix a typo in the Media Admin API documentation.
diff --git a/changelog.d/12716.misc b/changelog.d/12716.misc
deleted file mode 100644
index b07e1b52ee..0000000000
--- a/changelog.d/12716.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type annotations to increase the number of modules passing `disallow-untyped-defs`.
\ No newline at end of file
diff --git a/changelog.d/12717.misc b/changelog.d/12717.misc
deleted file mode 100644
index e793d08e5e..0000000000
--- a/changelog.d/12717.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some type hints to datastore.
\ No newline at end of file
diff --git a/changelog.d/12720.misc b/changelog.d/12720.misc
deleted file mode 100644
index 01b427f200..0000000000
--- a/changelog.d/12720.misc
+++ /dev/null
@@ -1 +0,0 @@
-Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG.
\ No newline at end of file
diff --git a/changelog.d/12721.bugfix b/changelog.d/12721.bugfix
deleted file mode 100644
index 6987f7ab15..0000000000
--- a/changelog.d/12721.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper.
diff --git a/changelog.d/12723.misc b/changelog.d/12723.misc
deleted file mode 100644
index 4f5bffeda6..0000000000
--- a/changelog.d/12723.misc
+++ /dev/null
@@ -1 +0,0 @@
-Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports.
diff --git a/changelog.d/12726.misc b/changelog.d/12726.misc
deleted file mode 100644
index b07e1b52ee..0000000000
--- a/changelog.d/12726.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type annotations to increase the number of modules passing `disallow-untyped-defs`.
\ No newline at end of file
diff --git a/changelog.d/12727.doc b/changelog.d/12727.doc
deleted file mode 100644
index c41e50c85b..0000000000
--- a/changelog.d/12727.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh.
diff --git a/changelog.d/12731.misc b/changelog.d/12731.misc
deleted file mode 100644
index 962100d516..0000000000
--- a/changelog.d/12731.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update configs used by Complement to allow more invites/3PID validations during tests.
\ No newline at end of file
diff --git a/changelog.d/12734.misc b/changelog.d/12734.misc
deleted file mode 100644
index ffbfb0d632..0000000000
--- a/changelog.d/12734.misc
+++ /dev/null
@@ -1 +0,0 @@
-Tidy up and type-hint the database engine modules.
diff --git a/changelog.d/12740.feature b/changelog.d/12740.feature
new file mode 100644
index 0000000000..e674c31ae8
--- /dev/null
+++ b/changelog.d/12740.feature
@@ -0,0 +1 @@
+Experimental support for [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772): Push rule for mutually related events.
diff --git a/changelog.d/12742.doc b/changelog.d/12742.doc
deleted file mode 100644
index 0084e27a7d..0000000000
--- a/changelog.d/12742.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix typo in server listener documentation.
\ No newline at end of file
diff --git a/changelog.d/12746.bugfix b/changelog.d/12746.bugfix
new file mode 100644
index 0000000000..67e7fc854c
--- /dev/null
+++ b/changelog.d/12746.bugfix
@@ -0,0 +1 @@
+Always send an `access_token` in `/thirdparty/` requests to appservices, as required by the [Matrix specification](https://spec.matrix.org/v1.1/application-service-api/#third-party-networks).
\ No newline at end of file
diff --git a/changelog.d/12747.bugfix b/changelog.d/12747.bugfix
deleted file mode 100644
index 0fb0059237..0000000000
--- a/changelog.d/12747.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers.
diff --git a/changelog.d/12748.doc b/changelog.d/12748.doc
deleted file mode 100644
index 996ad3a1b9..0000000000
--- a/changelog.d/12748.doc
+++ /dev/null
@@ -1 +0,0 @@
-Link to the configuration manual from the welcome page of the documentation.
diff --git a/changelog.d/12749.doc b/changelog.d/12749.doc
deleted file mode 100644
index 4560319ee4..0000000000
--- a/changelog.d/12749.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix typo in 'run_background_tasks_on' option name in configuration manual documentation.
diff --git a/changelog.d/12753.misc b/changelog.d/12753.misc
deleted file mode 100644
index e793d08e5e..0000000000
--- a/changelog.d/12753.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some type hints to datastore.
\ No newline at end of file
diff --git a/changelog.d/12759.doc b/changelog.d/12759.doc
deleted file mode 100644
index 45d1c9c0ca..0000000000
--- a/changelog.d/12759.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add information regarding the `rc_invites` ratelimiting option to the configuration docs.
diff --git a/changelog.d/12761.doc b/changelog.d/12761.doc
deleted file mode 100644
index 2eb2c0976f..0000000000
--- a/changelog.d/12761.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add documentation for cancellation of request processing.
diff --git a/changelog.d/12762.misc b/changelog.d/12762.misc
deleted file mode 100644
index 990fb6fe74..0000000000
--- a/changelog.d/12762.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar.
diff --git a/changelog.d/12765.doc b/changelog.d/12765.doc
deleted file mode 100644
index 277b037d6b..0000000000
--- a/changelog.d/12765.doc
+++ /dev/null
@@ -1 +0,0 @@
-Recommend using docker to run tests against postgres.
diff --git a/changelog.d/12769.misc b/changelog.d/12769.misc
deleted file mode 100644
index 27bd53abe3..0000000000
--- a/changelog.d/12769.misc
+++ /dev/null
@@ -1 +0,0 @@
-Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`.
diff --git a/changelog.d/12770.bugfix b/changelog.d/12770.bugfix
deleted file mode 100644
index a958f9a16b..0000000000
--- a/changelog.d/12770.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API.
diff --git a/changelog.d/12772.misc b/changelog.d/12772.misc
deleted file mode 100644
index da66f376fe..0000000000
--- a/changelog.d/12772.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move methods that call `add_push_rule` to the `PushRuleStore` class.
diff --git a/changelog.d/12773.doc b/changelog.d/12773.doc
deleted file mode 100644
index 6de3716534..0000000000
--- a/changelog.d/12773.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add missing user directory endpoint from the generic worker documentation. Contributed by @olmari.
\ No newline at end of file
diff --git a/changelog.d/12774.misc b/changelog.d/12774.misc
deleted file mode 100644
index 8651f2e0e0..0000000000
--- a/changelog.d/12774.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make handling of federation Authorization header (more) compliant with RFC7230.
diff --git a/changelog.d/12775.misc b/changelog.d/12775.misc
deleted file mode 100644
index eac326cde3..0000000000
--- a/changelog.d/12775.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor `resolve_state_groups_for_events` to not pull out full state when no state resolution happens.
\ No newline at end of file
diff --git a/changelog.d/12776.doc b/changelog.d/12776.doc
deleted file mode 100644
index c00489a8ce..0000000000
--- a/changelog.d/12776.doc
+++ /dev/null
@@ -1,2 +0,0 @@
-Add additional info to documentation of config option `cache_autotuning`.
-
diff --git a/changelog.d/12777.doc b/changelog.d/12777.doc
deleted file mode 100644
index cc9c07704d..0000000000
--- a/changelog.d/12777.doc
+++ /dev/null
@@ -1,2 +0,0 @@
-Update configuration manual documentation to document size-related suffixes.
-
diff --git a/changelog.d/12779.bugfix b/changelog.d/12779.bugfix
deleted file mode 100644
index 7cf7a1f65f..0000000000
--- a/changelog.d/12779.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Give a meaningful error message when a client tries to create a room with an invalid alias localpart.
\ No newline at end of file
diff --git a/changelog.d/12781.misc b/changelog.d/12781.misc
deleted file mode 100644
index 8a04571617..0000000000
--- a/changelog.d/12781.misc
+++ /dev/null
@@ -1 +0,0 @@
-Do not keep going if there are 5 back-to-back background update failures.
\ No newline at end of file
diff --git a/changelog.d/12783.misc b/changelog.d/12783.misc
deleted file mode 100644
index 97575608bb..0000000000
--- a/changelog.d/12783.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix federation when using the demo scripts.
diff --git a/changelog.d/12785.doc b/changelog.d/12785.doc
deleted file mode 100644
index 5209dfeb05..0000000000
--- a/changelog.d/12785.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix invalid YAML syntax in the example documentation for the `url_preview_accept_language` config option.
diff --git a/changelog.d/12786.feature b/changelog.d/12786.feature
deleted file mode 100644
index c90ddd411e..0000000000
--- a/changelog.d/12786.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818).
diff --git a/changelog.d/12789.misc b/changelog.d/12789.misc
deleted file mode 100644
index 3398d00110..0000000000
--- a/changelog.d/12789.misc
+++ /dev/null
@@ -1 +0,0 @@
-The `hash_password` script now fails when it is called without specifying a config file.
diff --git a/changelog.d/12790.misc b/changelog.d/12790.misc
deleted file mode 100644
index b78156cf4e..0000000000
--- a/changelog.d/12790.misc
+++ /dev/null
@@ -1 +0,0 @@
-Simplify `disallow_untyped_defs` config in `mypy.ini`.
diff --git a/changelog.d/12791.misc b/changelog.d/12791.misc
deleted file mode 100644
index b6e92b7eaf..0000000000
--- a/changelog.d/12791.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update EventContext `get_current_event_ids` and `get_prev_event_ids` to accept state filters and update calls where possible.
diff --git a/changelog.d/12792.feature b/changelog.d/12792.feature
deleted file mode 100644
index 4778b8a394..0000000000
--- a/changelog.d/12792.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818).
\ No newline at end of file
diff --git a/changelog.d/12794.bugfix b/changelog.d/12794.bugfix
deleted file mode 100644
index 2d1a2838e1..0000000000
--- a/changelog.d/12794.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar.
diff --git a/changelog.d/12803.bugfix b/changelog.d/12803.bugfix
deleted file mode 100644
index 6ddd3d24e0..0000000000
--- a/changelog.d/12803.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where finished log contexts would be re-started when failing to contact remote homeservers.
diff --git a/changelog.d/12829.bugfix b/changelog.d/12829.bugfix
new file mode 100644
index 0000000000..dfa1fed34e
--- /dev/null
+++ b/changelog.d/12829.bugfix
@@ -0,0 +1 @@
+Fix a bug where we did not correctly handle invalid device list updates over federation. Contributed by Carl Bordum Hansen.
diff --git a/changelog.d/12843.bugfix b/changelog.d/12843.bugfix
new file mode 100644
index 0000000000..f87c0799a0
--- /dev/null
+++ b/changelog.d/12843.bugfix
@@ -0,0 +1 @@
+Fix bug where servers using a Postgres database would fail to backfill from an insertion event when MSC2716 is enabled (`experimental_features.msc2716_enabled`).
diff --git a/changelog.d/12849.misc b/changelog.d/12849.misc
new file mode 100644
index 0000000000..4c2a15ce2b
--- /dev/null
+++ b/changelog.d/12849.misc
@@ -0,0 +1 @@
+Remove `dont_notify` from the `.m.rule.room.server_acl` rule.
\ No newline at end of file
diff --git a/changelog.d/12851.misc b/changelog.d/12851.misc
new file mode 100644
index 0000000000..ca6f48c369
--- /dev/null
+++ b/changelog.d/12851.misc
@@ -0,0 +1 @@
+Remove the unstable `/hierarchy` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946).
diff --git a/changelog.d/12852.misc b/changelog.d/12852.misc
new file mode 100644
index 0000000000..afca32471f
--- /dev/null
+++ b/changelog.d/12852.misc
@@ -0,0 +1 @@
+Pull out less state when handling gaps in room DAG.
diff --git a/changelog.d/12856.misc b/changelog.d/12856.misc
new file mode 100644
index 0000000000..19ecefd9af
--- /dev/null
+++ b/changelog.d/12856.misc
@@ -0,0 +1 @@
+Clean-up the push rules datastore.
diff --git a/changelog.d/12858.bugfix b/changelog.d/12858.bugfix
new file mode 100644
index 0000000000..8c95a3e3a3
--- /dev/null
+++ b/changelog.d/12858.bugfix
@@ -0,0 +1 @@
+Fix [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) rooms being omitted from room directory, room summary and space hierarchy responses.
diff --git a/changelog.d/12859.feature b/changelog.d/12859.feature
new file mode 100644
index 0000000000..e674c31ae8
--- /dev/null
+++ b/changelog.d/12859.feature
@@ -0,0 +1 @@
+Experimental support for [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772): Push rule for mutually related events.
diff --git a/changelog.d/12860.misc b/changelog.d/12860.misc
new file mode 100644
index 0000000000..b7d2943023
--- /dev/null
+++ b/changelog.d/12860.misc
@@ -0,0 +1 @@
+Correct a type annotation in the URL preview source code.
diff --git a/changelog.d/12863.doc b/changelog.d/12863.doc
new file mode 100644
index 0000000000..94f7b8371a
--- /dev/null
+++ b/changelog.d/12863.doc
@@ -0,0 +1 @@
+Fix typos in documentation.
\ No newline at end of file
diff --git a/changelog.d/12865.misc b/changelog.d/12865.misc
new file mode 100644
index 0000000000..d982ca7622
--- /dev/null
+++ b/changelog.d/12865.misc
@@ -0,0 +1 @@
+Update `pyjwt` dependency to [2.4.0](https://github.com/jpadilla/pyjwt/releases/tag/2.4.0).
diff --git a/changelog.d/12866.misc b/changelog.d/12866.misc
new file mode 100644
index 0000000000..3f7ef59253
--- /dev/null
+++ b/changelog.d/12866.misc
@@ -0,0 +1 @@
+Enable the `/account/whoami` endpoint on synapse worker processes. Contributed by Nick @ Beeper.
diff --git a/changelog.d/12867.doc b/changelog.d/12867.doc
new file mode 100644
index 0000000000..1caeb7a290
--- /dev/null
+++ b/changelog.d/12867.doc
@@ -0,0 +1 @@
+Fix documentation incorrectly stating the `sendToDevice` endpoint can be directed at generic workers. Contributed by Nick @ Beeper.
diff --git a/changelog.d/12868.misc b/changelog.d/12868.misc
new file mode 100644
index 0000000000..382a876dab
--- /dev/null
+++ b/changelog.d/12868.misc
@@ -0,0 +1 @@
+Enable the `batch_send` endpoint on synapse worker processes. Contributed by Nick @ Beeper.
diff --git a/changelog.d/12869.misc b/changelog.d/12869.misc
new file mode 100644
index 0000000000..1d9d1c8921
--- /dev/null
+++ b/changelog.d/12869.misc
@@ -0,0 +1 @@
+Don't generate empty AS transactions when the AS is flagged as down. Contributed by Nick @ Beeper.
diff --git a/changelog.d/12871.misc b/changelog.d/12871.misc
new file mode 100644
index 0000000000..94bd6c4974
--- /dev/null
+++ b/changelog.d/12871.misc
@@ -0,0 +1 @@
+Fix up the variable `state_store` naming.
diff --git a/changelog.d/12877.bugfix b/changelog.d/12877.bugfix
new file mode 100644
index 0000000000..1ecf448baf
--- /dev/null
+++ b/changelog.d/12877.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.54 which could sometimes cause exceptions when handling federated traffic.
diff --git a/changelog.d/12879.misc b/changelog.d/12879.misc
new file mode 100644
index 0000000000..24fa0d0de0
--- /dev/null
+++ b/changelog.d/12879.misc
@@ -0,0 +1 @@
+Avoid running queries which will never result in deletions.
diff --git a/debian/changelog b/debian/changelog
index dda342a630..6eba9b3a1b 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.60.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.60.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 24 May 2022 12:05:01 +0100
+
 matrix-synapse-py3 (1.59.1) stable; urgency=medium
 
   * New Synapse release 1.59.1.
diff --git a/docker/Dockerfile b/docker/Dockerfile
index ccc6a9f778..7af0e51f97 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -55,7 +55,7 @@ RUN \
 # NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
 # pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
 RUN --mount=type=cache,target=/root/.cache/pip \
-  pip install --user git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5
+  pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"
 
 WORKDIR /synapse
 
diff --git a/docker/complement/SynapseWorkers.Dockerfile b/docker/complement/SynapseWorkers.Dockerfile
index 9a4438e730..99a09cbc2b 100644
--- a/docker/complement/SynapseWorkers.Dockerfile
+++ b/docker/complement/SynapseWorkers.Dockerfile
@@ -6,12 +6,6 @@
 # https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
 FROM matrixdotorg/synapse-workers
 
-# Download a caddy server to stand in front of nginx and terminate TLS using Complement's
-# custom CA.
-# We include this near the top of the file in order to cache the result.
-RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/caddy_2.3.0_linux_amd64.tar.gz" && \
-  tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz && mv caddy /root
-
 # Install postgresql
 RUN apt-get update && \
   DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
@@ -31,16 +25,12 @@ COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml
 
 WORKDIR /data
 
-# Copy the caddy config
-COPY conf-workers/caddy.complement.json /root/caddy.json
-
 COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
-COPY conf-workers/caddy.supervisord.conf /etc/supervisor/conf.d/caddy.conf
 
 # Copy the entrypoint
 COPY conf-workers/start-complement-synapse-workers.sh /
 
-# Expose caddy's listener ports
+# Expose nginx's listener ports
 EXPOSE 8008 8448
 
 ENTRYPOINT ["/start-complement-synapse-workers.sh"]
diff --git a/docker/complement/conf-workers/caddy.complement.json b/docker/complement/conf-workers/caddy.complement.json
deleted file mode 100644
index 09e2136af2..0000000000
--- a/docker/complement/conf-workers/caddy.complement.json
+++ /dev/null
@@ -1,72 +0,0 @@
-{
-    "apps": {
-      "http": {
-        "servers": {
-          "srv0": {
-            "listen": [
-              ":8448"
-            ],
-            "routes": [
-              {
-                "match": [
-                  {
-                    "host": [
-                      "{{ server_name }}"
-                    ]
-                  }
-                ],
-                "handle": [
-                  {
-                    "handler": "subroute",
-                    "routes": [
-                      {
-                        "handle": [
-                          {
-                            "handler": "reverse_proxy",
-                            "upstreams": [
-                              {
-                                "dial": "localhost:8008"
-                              }
-                            ]
-                          }
-                        ]
-                      }
-                    ]
-                  }
-                ],
-                "terminal": true
-              }
-            ]
-          }
-        }
-      },
-      "tls": {
-        "automation": {
-          "policies": [
-            {
-              "subjects": [
-                "{{ server_name }}"
-              ],
-              "issuers": [
-                {
-                  "module": "internal"
-                }
-              ],
-              "on_demand": true
-            }
-          ]
-        }
-      },
-      "pki": {
-        "certificate_authorities": {
-          "local": {
-            "name": "Complement CA",
-            "root": {
-              "certificate": "/complement/ca/ca.crt",
-              "private_key": "/complement/ca/ca.key"
-            }
-          }
-        }
-      }
-    }
-  }
diff --git a/docker/complement/conf-workers/caddy.supervisord.conf b/docker/complement/conf-workers/caddy.supervisord.conf
deleted file mode 100644
index d9ddb51dac..0000000000
--- a/docker/complement/conf-workers/caddy.supervisord.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-[program:caddy]
-command=/usr/local/bin/prefix-log /root/caddy run --config /root/caddy.json
-autorestart=unexpected
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-stderr_logfile=/dev/stderr
-stderr_logfile_maxbytes=0
diff --git a/docker/complement/conf-workers/start-complement-synapse-workers.sh b/docker/complement/conf-workers/start-complement-synapse-workers.sh
index b9a6b55bbe..b7e2444000 100755
--- a/docker/complement/conf-workers/start-complement-synapse-workers.sh
+++ b/docker/complement/conf-workers/start-complement-synapse-workers.sh
@@ -9,9 +9,6 @@ function log {
     echo "$d $@"
 }
 
-# Replace the server name in the caddy config
-sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json
-
 # Set the server name of the homeserver
 export SYNAPSE_SERVER_NAME=${SERVER_NAME}
 
@@ -39,6 +36,26 @@ export SYNAPSE_WORKER_TYPES="\
     appservice, \
     pusher"
 
+# Add Complement's appservice registration directory, if there is one
+# (It can be absent when there are no application services in this test!)
+if [ -d /complement/appservice ]; then
+    export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
+fi
+
+# Generate a TLS key, then generate a certificate by having Complement's CA sign it
+# Note that both the key and certificate are in PEM format (not DER).
+openssl genrsa -out /conf/server.tls.key 2048
+
+openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
+  -subj "/CN=${SERVER_NAME}"
+
+openssl x509 -req -in /conf/server.tls.csr \
+  -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
+  -out /conf/server.tls.crt
+
+export SYNAPSE_TLS_CERT=/conf/server.tls.crt
+export SYNAPSE_TLS_KEY=/conf/server.tls.key
+
 # Run the script that writes the necessary config files and starts supervisord, which in turn
 # starts everything else
 exec /configure_workers_and_start.py
diff --git a/docker/complement/conf-workers/workers-shared.yaml b/docker/complement/conf-workers/workers-shared.yaml
index 86ee11ecd0..cd7b50c65c 100644
--- a/docker/complement/conf-workers/workers-shared.yaml
+++ b/docker/complement/conf-workers/workers-shared.yaml
@@ -5,6 +5,12 @@ enable_registration: true
 enable_registration_without_verification: true
 bcrypt_rounds: 4
 
+## Registration ##
+
+# Needed by Complement to register admin users
+# DO NOT USE in a production configuration! This should be a random secret.
+registration_shared_secret: complement
+
 ## Federation ##
 
 # trust certs signed by Complement's CA
diff --git a/docker/conf-workers/nginx.conf.j2 b/docker/conf-workers/nginx.conf.j2
index 1081979e06..967fc65e79 100644
--- a/docker/conf-workers/nginx.conf.j2
+++ b/docker/conf-workers/nginx.conf.j2
@@ -9,6 +9,22 @@ server {
     listen 8008;
     listen [::]:8008;
 
+    {% if tls_cert_path is not none and tls_key_path is not none %}
+        listen 8448 ssl;
+        listen [::]:8448 ssl;
+
+        ssl_certificate {{ tls_cert_path }};
+        ssl_certificate_key {{ tls_key_path }};
+
+        # Some directives from cipherlist.eu (fka cipherli.st):
+        ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
+        ssl_prefer_server_ciphers on;
+        ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
+        ssl_ecdh_curve secp384r1; # Requires nginx >= 1.1.0
+        ssl_session_cache shared:SSL:10m;
+        ssl_session_tickets off; # Requires nginx >= 1.5.9
+    {% endif %}
+
     server_name localhost;
 
     # Nginx by default only allows file uploads up to 1M in size
diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2
index f94b8c6aca..644ed788f3 100644
--- a/docker/conf-workers/shared.yaml.j2
+++ b/docker/conf-workers/shared.yaml.j2
@@ -6,4 +6,13 @@
 redis:
     enabled: true
 
-{{ shared_worker_config }}
\ No newline at end of file
+{% if appservice_registrations is not none %}
+## Application Services ##
+# A list of application service config files to use.
+app_service_config_files:
+{%- for path in appservice_registrations %}
+  - "{{ path }}"
+{%- endfor %}
+{%- endif %}
+
+{{ shared_worker_config }}
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index b2b7938ae8..f7dac90222 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -21,6 +21,11 @@
 #   * SYNAPSE_REPORT_STATS: Whether to report stats.
 #   * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
 #         below. Leave empty for no workers, or set to '*' for all possible workers.
+#   * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
+#         will be treated as Application Service registration files.
+#   * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
+#   * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
+#         Nginx will be configured to serve TLS on port 8448.
 #
 # NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
 # in the project's README), this script may be run multiple times, and functionality should
@@ -29,6 +34,7 @@
 import os
 import subprocess
 import sys
+from pathlib import Path
 from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set
 
 import jinja2
@@ -152,6 +158,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
             "^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
             "^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
+            "^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
         ],
         "shared_extra_conf": {},
         "worker_extra_conf": "",
@@ -488,11 +495,23 @@ def generate_worker_files(
     master_log_config = generate_worker_log_config(environ, "master", data_dir)
     shared_config["log_config"] = master_log_config
 
+    # Find application service registrations
+    appservice_registrations = None
+    appservice_registration_dir = os.environ.get("SYNAPSE_AS_REGISTRATION_DIR")
+    if appservice_registration_dir:
+        # Scan for all YAML files that should be application service registrations.
+        appservice_registrations = [
+            str(reg_path.resolve())
+            for reg_path in Path(appservice_registration_dir).iterdir()
+            if reg_path.suffix.lower() in (".yaml", ".yml")
+        ]
+
     # Shared homeserver config
     convert(
         "/conf/shared.yaml.j2",
         "/conf/workers/shared.yaml",
         shared_worker_config=yaml.dump(shared_config),
+        appservice_registrations=appservice_registrations,
     )
 
     # Nginx config
@@ -501,6 +520,8 @@ def generate_worker_files(
         "/etc/nginx/conf.d/matrix-synapse.conf",
         worker_locations=nginx_location_config,
         upstream_directives=nginx_upstream_config,
+        tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
+        tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
     )
 
     # Supervisord config
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index f55a1fbb90..2b3714df66 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -422,8 +422,8 @@ same lightweight approach that the Linux Kernel
 [submitting patches process](
 https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>),
 [Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
-projects use: the DCO (Developer Certificate of Origin:
-http://developercertificate.org/). This is a simple declaration that you wrote
+projects use: the DCO ([Developer Certificate of Origin](http://developercertificate.org/)).
+This is a simple declaration that you wrote
 the contribution or otherwise have the right to contribute it to Matrix:
 
 ```
diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md
index 9214d6d7e9..b52c4aaa24 100644
--- a/docs/message_retention_policies.md
+++ b/docs/message_retention_policies.md
@@ -117,7 +117,7 @@ In this example, we define three jobs:
 Note that this example is tailored to show different configurations and
 features slightly more jobs than it's probably necessary (in practice, a
 server admin would probably consider it better to replace the two last
-jobs with one that runs once a day and handles rooms which which
+jobs with one that runs once a day and handles rooms which
 policy's `max_lifetime` is greater than 3 days).
 
 Keep in mind, when configuring these jobs, that a purge job can become
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
index 472d957180..71f6f9f0ab 100644
--- a/docs/modules/spam_checker_callbacks.md
+++ b/docs/modules/spam_checker_callbacks.md
@@ -11,22 +11,29 @@ The available spam checker callbacks are:
 ### `check_event_for_spam`
 
 _First introduced in Synapse v1.37.0_
+_Signature extended to support Allow and Code in Synapse v1.60.0_
+_Boolean and string return value types deprecated in Synapse v1.60.0_
 
 ```python
-async def check_event_for_spam(event: "synapse.events.EventBase") -> Union[bool, str]
+async def check_event_for_spam(event: "synapse.module_api.EventBase") -> Union["synapse.module_api.ALLOW", "synapse.module_api.error.Codes", str, bool]
 ```
 
-Called when receiving an event from a client or via federation. The callback must return
-either:
-- an error message string, to indicate the event must be rejected because of spam and 
-  give a rejection reason to forward to clients;
-- the boolean `True`, to indicate that the event is spammy, but not provide further details; or
-- the booelan `False`, to indicate that the event is not considered spammy.
+Called when receiving an event from a client or via federation. The callback must return either:
+  - `synapse.module_api.ALLOW`, to allow the operation. Other callbacks
+    may still decide to reject it.
+  - `synapse.api.Codes` to reject the operation with an error code. In case
+    of doubt, `synapse.api.error.Codes.FORBIDDEN` is a good error code.
+  - (deprecated) a `str` to reject the operation and specify an error message. Note that clients
+    typically will not localize the error message to the user's preferred locale.
+  - (deprecated) on `False`, behave as `ALLOW`. Deprecated as confusing, as some
+    callbacks in expect `True` to allow and others `True` to reject.
+  - (deprecated) on `True`, behave as `synapse.api.error.Codes.FORBIDDEN`. Deprecated as confusing, as
+    some callbacks in expect `True` to allow and others `True` to reject.
 
 If multiple modules implement this callback, they will be considered in order. If a
-callback returns `False`, Synapse falls through to the next one. The value of the first
-callback that does not return `False` will be used. If this happens, Synapse will not call
-any of the subsequent implementations of this callback.
+callback returns `synapse.module_api.ALLOW`, Synapse falls through to the next one. The value of the
+first callback that does not return `synapse.module_api.ALLOW` will be used. If this happens, Synapse
+will not call any of the subsequent implementations of this callback.
 
 ### `user_may_join_room`
 
@@ -249,6 +256,24 @@ callback returns `False`, Synapse falls through to the next one. The value of th
 callback that does not return `False` will be used. If this happens, Synapse will not call
 any of the subsequent implementations of this callback.
 
+### `should_drop_federated_event`
+
+_First introduced in Synapse v1.60.0_
+
+```python
+async def should_drop_federated_event(event: "synapse.events.EventBase") -> bool
+```
+
+Called when checking whether a remote server can federate an event with us. **Returning
+`True` from this function will silently drop a federated event and split-brain our view
+of a room's DAG, and thus you shouldn't use this callback unless you know what you are
+doing.**
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `False`, Synapse falls through to the next one. The value of the first
+callback that does not return `False` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
 ## Example
 
 The example below is a module that implements the spam checker callback
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index ee98d193cb..4388a00df1 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -2521,16 +2521,6 @@ push:
 #        "events_default": 1
 
 
-# Uncomment to allow non-server-admin users to create groups on this server
-#
-#enable_group_creation: true
-
-# If enabled, non server admins can only create groups with local parts
-# starting with this prefix
-#
-#group_creation_prefix: "unofficial_"
-
-
 
 # User Directory configuration
 #
diff --git a/docs/structured_logging.md b/docs/structured_logging.md
index a6667e1a11..d43dc9eb6e 100644
--- a/docs/structured_logging.md
+++ b/docs/structured_logging.md
@@ -43,7 +43,7 @@ loggers:
 The above logging config will set Synapse as 'INFO' logging level by default,
 with the SQL layer at 'WARNING', and will log to a file, stored as JSON.
 
-It is also possible to figure Synapse to log to a remote endpoint by using the
+It is also possible to configure Synapse to log to a remote endpoint by using the
 `synapse.logging.RemoteHandler` class included with Synapse. It takes the
 following arguments:
 
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 92ca31b2f8..e7eadadb64 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -177,7 +177,36 @@ has queries that can be used to check a database for this problem in advance.
 
 </details>
 
+## SpamChecker API's `check_event_for_spam` has a new signature.
 
+The previous signature has been deprecated.
+
+Whereas `check_event_for_spam` callbacks used to return `Union[str, bool]`, they should now return `Union["synapse.module_api.Allow", "synapse.module_api.errors.Codes"]`.
+
+This is part of an ongoing refactoring of the SpamChecker API to make it less ambiguous and more powerful.
+
+If your module implements `check_event_for_spam` as follows:
+
+```python
+async def check_event_for_spam(event):
+    if ...:
+        # Event is spam
+        return True
+    # Event is not spam
+    return False
+```
+
+you should rewrite it as follows:
+
+```python
+async def check_event_for_spam(event):
+    if ...:
+        # Event is spam, mark it as forbidden (you may use some more precise error
+        # code if it is useful).
+        return synapse.module_api.errors.Codes.FORBIDDEN
+    # Event is not spam, mark it as `ALLOW`.
+    return synapse.module_api.ALLOW
+```
 
 # Upgrading to v1.59.0
 
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 0f5bda32b9..8724bf27e8 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -3145,25 +3145,6 @@ Example configuration:
 encryption_enabled_by_default_for_room_type: invite
 ```
 ---
-Config option: `enable_group_creation`
-
-Set to true to allow non-server-admin users to create groups on this server
-
-Example configuration:
-```yaml
-enable_group_creation: true
-```
----
-Config option: `group_creation_prefix`
-
-If enabled/present, non-server admins can only create groups with local parts
-starting with this prefix.
-
-Example configuration:
-```yaml
-group_creation_prefix: "unofficial_"
-```
----
 Config option: `user_directory`
 
 This setting defines options related to the user directory. 
diff --git a/docs/workers.md b/docs/workers.md
index 779069b817..78973a498c 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -1,6 +1,6 @@
 # Scaling synapse via workers
 
-For small instances it recommended to run Synapse in the default monolith mode.
+For small instances it is recommended to run Synapse in the default monolith mode.
 For larger instances where performance is a concern it can be helpful to split
 out functionality into multiple separate python processes. These processes are
 called 'workers', and are (eventually) intended to scale horizontally
@@ -193,7 +193,7 @@ information.
     ^/_matrix/federation/v1/user/devices/
     ^/_matrix/federation/v1/get_groups_publicised$
     ^/_matrix/key/v2/query
-    ^/_matrix/federation/(v1|unstable/org.matrix.msc2946)/hierarchy/
+    ^/_matrix/federation/v1/hierarchy/
 
     # Inbound federation transaction request
     ^/_matrix/federation/v1/send/
@@ -205,9 +205,11 @@ information.
     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$
     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
-    ^/_matrix/client/(v1|unstable/org.matrix.msc2946)/rooms/.*/hierarchy$
+    ^/_matrix/client/v1/rooms/.*/hierarchy$
+    ^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
     ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
     ^/_matrix/client/(r0|v3|unstable)/account/3pid$
+    ^/_matrix/client/(r0|v3|unstable)/account/whoami$
     ^/_matrix/client/(r0|v3|unstable)/devices$
     ^/_matrix/client/versions$
     ^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
@@ -237,9 +239,6 @@ information.
     ^/_matrix/client/(api/v1|r0|v3|unstable)/join/
     ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/
 
-    # Device requests
-    ^/_matrix/client/(r0|v3|unstable)/sendToDevice/
-
     # Account data requests
     ^/_matrix/client/(r0|v3|unstable)/.*/tags
     ^/_matrix/client/(r0|v3|unstable)/.*/account_data
diff --git a/mypy.ini b/mypy.ini
index df2622df98..fe3e3f9b8e 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -41,16 +41,11 @@ exclude = (?x)
    |tests/events/test_utils.py
    |tests/federation/test_federation_catch_up.py
    |tests/federation/test_federation_sender.py
-   |tests/federation/test_federation_server.py
    |tests/federation/transport/test_knocking.py
-   |tests/federation/transport/test_server.py
    |tests/handlers/test_typing.py
    |tests/http/federation/test_matrix_federation_agent.py
    |tests/http/federation/test_srv_resolver.py
-   |tests/http/test_fedclient.py
    |tests/http/test_proxyagent.py
-   |tests/http/test_servlet.py
-   |tests/http/test_site.py
    |tests/logging/__init__.py
    |tests/logging/test_terse_json.py
    |tests/module_api/test_api.py
@@ -59,12 +54,9 @@ exclude = (?x)
    |tests/push/test_push_rule_evaluator.py
    |tests/rest/client/test_transactions.py
    |tests/rest/media/v1/test_media_storage.py
-   |tests/scripts/test_new_matrix_user.py
    |tests/server.py
    |tests/server_notices/test_resource_limits_server_notices.py
    |tests/state/test_v2.py
-   |tests/storage/test_base.py
-   |tests/storage/test_roommember.py
    |tests/test_metrics.py
    |tests/test_server.py
    |tests/test_state.py
diff --git a/poetry.lock b/poetry.lock
index 49a912a589..f64d70941e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -813,7 +813,7 @@ python-versions = ">=3.5"
 
 [[package]]
 name = "pyjwt"
-version = "2.3.0"
+version = "2.4.0"
 description = "JSON Web Token implementation in Python"
 category = "main"
 optional = false
@@ -2264,8 +2264,8 @@ pygments = [
     {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"},
 ]
 pyjwt = [
-    {file = "PyJWT-2.3.0-py3-none-any.whl", hash = "sha256:e0c4bb8d9f0af0c7f5b1ec4c5036309617d03d56932877f2f7a0beeb5318322f"},
-    {file = "PyJWT-2.3.0.tar.gz", hash = "sha256:b888b4d56f06f6dcd777210c334e69c737be74755d3e5e9ee3fe67dc18a0ee41"},
+    {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"},
+    {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"},
 ]
 pymacaroons = [
     {file = "pymacaroons-0.13.0-py2.py3-none-any.whl", hash = "sha256:3e14dff6a262fdbf1a15e769ce635a8aea72e6f8f91e408f9a97166c53b91907"},
diff --git a/pyproject.toml b/pyproject.toml
index 5a5a2eaba7..9359d211f7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -54,7 +54,7 @@ skip_gitignore = true
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.59.1"
+version = "1.60.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 190df6909a..3c472c576e 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -45,6 +45,8 @@ docker build -t matrixdotorg/synapse -f "docker/Dockerfile" .
 
 extra_test_args=()
 
+test_tags="synapse_blacklist,msc2716,msc3030,msc3787"
+
 # If we're using workers, modify the docker files slightly.
 if [[ -n "$WORKERS" ]]; then
   # Build the workers docker image (from the base Synapse image).
@@ -65,6 +67,10 @@ if [[ -n "$WORKERS" ]]; then
 else
   export COMPLEMENT_BASE_IMAGE=complement-synapse
   COMPLEMENT_DOCKERFILE=Dockerfile
+
+  # We only test faster room joins on monoliths, because they are purposefully
+  # being developed without worker support to start with.
+  test_tags="$test_tags,faster_joins"
 fi
 
 # Build the Complement image from the Synapse image we just built.
@@ -73,4 +79,5 @@ docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERF
 # Run the tests!
 echo "Images built; running complement"
 cd "$COMPLEMENT_DIR"
-go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "${extra_test_args[@]}" "$@" ./tests/...
+
+go test -v -tags $test_tags -count=1 "${extra_test_args[@]}" "$@" ./tests/...
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 330de21f6b..4a0552e7e5 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -31,11 +31,6 @@ MAX_ALIAS_LENGTH = 255
 # the maximum length for a user id is 255 characters
 MAX_USERID_LENGTH = 255
 
-# The maximum length for a group id is 255 characters
-MAX_GROUPID_LENGTH = 255
-MAX_GROUP_CATEGORYID_LENGTH = 255
-MAX_GROUP_ROLEID_LENGTH = 255
-
 
 class Membership:
 
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index cb3b7323d5..6650e826d5 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -17,6 +17,7 @@
 
 import logging
 import typing
+from enum import Enum
 from http import HTTPStatus
 from typing import Any, Dict, List, Optional, Union
 
@@ -30,7 +31,11 @@ if typing.TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
-class Codes:
+class Codes(str, Enum):
+    """
+    All known error codes, as an enum of strings.
+    """
+
     UNRECOGNIZED = "M_UNRECOGNIZED"
     UNAUTHORIZED = "M_UNAUTHORIZED"
     FORBIDDEN = "M_FORBIDDEN"
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 2a9480a5c1..0a6dd618f6 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -69,7 +69,6 @@ from synapse.rest.admin import register_servlets_for_media_repo
 from synapse.rest.client import (
     account_data,
     events,
-    groups,
     initial_sync,
     login,
     presence,
@@ -78,6 +77,7 @@ from synapse.rest.client import (
     read_marker,
     receipts,
     room,
+    room_batch,
     room_keys,
     sendtodevice,
     sync,
@@ -87,7 +87,7 @@ from synapse.rest.client import (
     voip,
 )
 from synapse.rest.client._base import client_patterns
-from synapse.rest.client.account import ThreepidRestServlet
+from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
 from synapse.rest.client.devices import DevicesRestServlet
 from synapse.rest.client.keys import (
     KeyChangesServlet,
@@ -289,6 +289,7 @@ class GenericWorkerServer(HomeServer):
                     RegistrationTokenValidityRestServlet(self).register(resource)
                     login.register_servlets(self, resource)
                     ThreepidRestServlet(self).register(resource)
+                    WhoamiRestServlet(self).register(resource)
                     DevicesRestServlet(self).register(resource)
 
                     # Read-only
@@ -308,6 +309,7 @@ class GenericWorkerServer(HomeServer):
                     room.register_servlets(self, resource, is_worker=True)
                     room.register_deprecated_servlets(self, resource)
                     initial_sync.register_servlets(self, resource)
+                    room_batch.register_servlets(self, resource)
                     room_keys.register_servlets(self, resource)
                     tags.register_servlets(self, resource)
                     account_data.register_servlets(self, resource)
@@ -320,9 +322,6 @@ class GenericWorkerServer(HomeServer):
 
                     presence.register_servlets(self, resource)
 
-                    if self.config.experimental.groups_enabled:
-                        groups.register_servlets(self, resource)
-
                     resources.update({CLIENT_API_PREFIX: resource})
 
                     resources.update(build_synapse_client_resource_tree(self))
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index a610fb785d..ed92c2e910 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -23,13 +23,7 @@ from netaddr import IPSet
 
 from synapse.api.constants import EventTypes
 from synapse.events import EventBase
-from synapse.types import (
-    DeviceListUpdates,
-    GroupID,
-    JsonDict,
-    UserID,
-    get_domain_from_id,
-)
+from synapse.types import DeviceListUpdates, JsonDict, UserID
 from synapse.util.caches.descriptors import _CacheContext, cached
 
 if TYPE_CHECKING:
@@ -55,7 +49,6 @@ class ApplicationServiceState(Enum):
 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class Namespace:
     exclusive: bool
-    group_id: Optional[str]
     regex: Pattern[str]
 
 
@@ -141,30 +134,13 @@ class ApplicationService:
                 exclusive = regex_obj.get("exclusive")
                 if not isinstance(exclusive, bool):
                     raise ValueError("Expected bool for 'exclusive' in ns '%s'" % ns)
-                group_id = regex_obj.get("group_id")
-                if group_id:
-                    if not isinstance(group_id, str):
-                        raise ValueError(
-                            "Expected string for 'group_id' in ns '%s'" % ns
-                        )
-                    try:
-                        GroupID.from_string(group_id)
-                    except Exception:
-                        raise ValueError(
-                            "Expected valid group ID for 'group_id' in ns '%s'" % ns
-                        )
-
-                    if get_domain_from_id(group_id) != self.server_name:
-                        raise ValueError(
-                            "Expected 'group_id' to be this host in ns '%s'" % ns
-                        )
 
                 regex = regex_obj.get("regex")
                 if not isinstance(regex, str):
                     raise ValueError("Expected string for 'regex' in ns '%s'" % ns)
 
                 # Pre-compile regex.
-                result[ns].append(Namespace(exclusive, group_id, re.compile(regex)))
+                result[ns].append(Namespace(exclusive, re.compile(regex)))
 
         return result
 
@@ -369,21 +345,6 @@ class ApplicationService:
             if namespace.exclusive
         ]
 
-    def get_groups_for_user(self, user_id: str) -> Iterable[str]:
-        """Get the groups that this user is associated with by this AS
-
-        Args:
-            user_id: The ID of the user.
-
-        Returns:
-            An iterable that yields group_id strings.
-        """
-        return (
-            namespace.group_id
-            for namespace in self.namespaces[ApplicationService.NS_USERS]
-            if namespace.group_id and namespace.regex.match(user_id)
-        )
-
     def is_rate_limited(self) -> bool:
         return self.rate_limited
 
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index d19f8dd996..df1c214462 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 import logging
 import urllib.parse
-from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple
 
 from prometheus_client import Counter
 from typing_extensions import TypeGuard
@@ -155,6 +155,9 @@ class ApplicationServiceApi(SimpleHttpClient):
         if service.url is None:
             return []
 
+        # This is required by the configuration.
+        assert service.hs_token is not None
+
         uri = "%s%s/thirdparty/%s/%s" % (
             service.url,
             APP_SERVICE_PREFIX,
@@ -162,7 +165,11 @@ class ApplicationServiceApi(SimpleHttpClient):
             urllib.parse.quote(protocol),
         )
         try:
-            response = await self.get_json(uri, fields)
+            args: Mapping[Any, Any] = {
+                **fields,
+                b"access_token": service.hs_token,
+            }
+            response = await self.get_json(uri, args=args)
             if not isinstance(response, list):
                 logger.warning(
                     "query_3pe to %s returned an invalid response %r", uri, response
@@ -190,13 +197,15 @@ class ApplicationServiceApi(SimpleHttpClient):
             return {}
 
         async def _get() -> Optional[JsonDict]:
+            # This is required by the configuration.
+            assert service.hs_token is not None
             uri = "%s%s/thirdparty/protocol/%s" % (
                 service.url,
                 APP_SERVICE_PREFIX,
                 urllib.parse.quote(protocol),
             )
             try:
-                info = await self.get_json(uri)
+                info = await self.get_json(uri, {"access_token": service.hs_token})
 
                 if not _is_valid_3pe_metadata(info):
                     logger.warning(
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 3b49e60716..de5e5216c2 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -384,6 +384,11 @@ class _TransactionController:
             device_list_summary: The device list summary to include in the transaction.
         """
         try:
+            service_is_up = await self._is_service_up(service)
+            # Don't create empty txns when in recovery mode (ephemeral events are dropped)
+            if not service_is_up and not events:
+                return
+
             txn = await self.store.create_appservice_txn(
                 service=service,
                 events=events,
@@ -393,7 +398,6 @@ class _TransactionController:
                 unused_fallback_keys=unused_fallback_keys or {},
                 device_list_summary=device_list_summary or DeviceListUpdates(),
             )
-            service_is_up = await self._is_service_up(service)
             if service_is_up:
                 sent = await txn.send(self.as_api)
                 if sent:
diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
index 71d6655fda..01ea2b4dab 100644
--- a/synapse/config/_base.pyi
+++ b/synapse/config/_base.pyi
@@ -32,7 +32,6 @@ from synapse.config import (
     emailconfig,
     experimental,
     federation,
-    groups,
     jwt,
     key,
     logger,
@@ -107,7 +106,6 @@ class RootConfig:
     push: push.PushConfig
     spamchecker: spam_checker.SpamCheckerConfig
     room: room.RoomConfig
-    groups: groups.GroupsConfig
     userdirectory: user_directory.UserDirectoryConfig
     consent: consent.ConsentConfig
     stats: stats.StatsConfig
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index b20d949689..f2dfd49b07 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -73,9 +73,6 @@ class ExperimentalConfig(Config):
         # MSC3720 (Account status endpoint)
         self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
 
-        # The deprecated groups feature.
-        self.groups_enabled: bool = experimental.get("groups_enabled", False)
-
         # MSC2654: Unread counts
         self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)
 
@@ -84,3 +81,6 @@ class ExperimentalConfig(Config):
 
         # MSC3786 (Add a default push rule to ignore m.room.server_acl events)
         self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False)
+
+        # MSC3772: A push rule for mutual relations.
+        self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False)
diff --git a/synapse/config/groups.py b/synapse/config/groups.py
deleted file mode 100644
index c9b9c6daad..0000000000
--- a/synapse/config/groups.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2017 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Any
-
-from synapse.types import JsonDict
-
-from ._base import Config
-
-
-class GroupsConfig(Config):
-    section = "groups"
-
-    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
-        self.enable_group_creation = config.get("enable_group_creation", False)
-        self.group_creation_prefix = config.get("group_creation_prefix", "")
-
-    def generate_config_section(self, **kwargs: Any) -> str:
-        return """\
-        # Uncomment to allow non-server-admin users to create groups on this server
-        #
-        #enable_group_creation: true
-
-        # If enabled, non server admins can only create groups with local parts
-        # starting with this prefix
-        #
-        #group_creation_prefix: "unofficial_"
-        """
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index a4ec706908..4d2b298a70 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -25,7 +25,6 @@ from .database import DatabaseConfig
 from .emailconfig import EmailConfig
 from .experimental import ExperimentalConfig
 from .federation import FederationConfig
-from .groups import GroupsConfig
 from .jwt import JWTConfig
 from .key import KeyConfig
 from .logger import LoggingConfig
@@ -89,7 +88,6 @@ class HomeServerConfig(RootConfig):
         PushConfig,
         SpamCheckerConfig,
         RoomConfig,
-        GroupsConfig,
         UserDirectoryConfig,
         ConsentConfig,
         StatsConfig,
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index f30207376a..7984874e21 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -27,9 +27,10 @@ from typing import (
     Union,
 )
 
+from synapse.api.errors import Codes
 from synapse.rest.media.v1._base import FileInfo
 from synapse.rest.media.v1.media_storage import ReadableFileWrapper
-from synapse.spam_checker_api import RegistrationBehaviour
+from synapse.spam_checker_api import Allow, Decision, RegistrationBehaviour
 from synapse.types import RoomAlias, UserProfile
 from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
 from synapse.util.metrics import Measure
@@ -40,8 +41,22 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger(__name__)
 
+
 CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
     ["synapse.events.EventBase"],
+    Awaitable[
+        Union[
+            Allow,
+            Codes,
+            # Deprecated
+            bool,
+            # Deprecated
+            str,
+        ]
+    ],
+]
+SHOULD_DROP_FEDERATED_EVENT_CALLBACK = Callable[
+    ["synapse.events.EventBase"],
     Awaitable[Union[bool, str]],
 ]
 USER_MAY_JOIN_ROOM_CALLBACK = Callable[[str, str, bool], Awaitable[bool]]
@@ -168,6 +183,9 @@ class SpamChecker:
         self.clock = hs.get_clock()
 
         self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
+        self._should_drop_federated_event_callbacks: List[
+            SHOULD_DROP_FEDERATED_EVENT_CALLBACK
+        ] = []
         self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
         self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
         self._user_may_send_3pid_invite_callbacks: List[
@@ -191,6 +209,9 @@ class SpamChecker:
     def register_callbacks(
         self,
         check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
+        should_drop_federated_event: Optional[
+            SHOULD_DROP_FEDERATED_EVENT_CALLBACK
+        ] = None,
         user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None,
         user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
         user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None,
@@ -209,6 +230,11 @@ class SpamChecker:
         if check_event_for_spam is not None:
             self._check_event_for_spam_callbacks.append(check_event_for_spam)
 
+        if should_drop_federated_event is not None:
+            self._should_drop_federated_event_callbacks.append(
+                should_drop_federated_event
+            )
+
         if user_may_join_room is not None:
             self._user_may_join_room_callbacks.append(user_may_join_room)
 
@@ -244,7 +270,7 @@ class SpamChecker:
 
     async def check_event_for_spam(
         self, event: "synapse.events.EventBase"
-    ) -> Union[bool, str]:
+    ) -> Union[Decision, str]:
         """Checks if a given event is considered "spammy" by this server.
 
         If the server considers an event spammy, then it will be rejected if
@@ -255,13 +281,56 @@ class SpamChecker:
             event: the event to be checked
 
         Returns:
-            True or a string if the event is spammy. If a string is returned it
-            will be used as the error message returned to the user.
+            - on `ALLOW`, the event is considered good (non-spammy) and should
+                be let through. Other spamcheck filters may still reject it.
+            - on `Codes`, the event is considered spammy and is rejected with a specific
+                error message/code.
+            - on `str`, the event is considered spammy and the string is used as error
+                message. This usage is generally discouraged as it doesn't support
+                internationalization.
         """
         for callback in self._check_event_for_spam_callbacks:
             with Measure(
                 self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
             ):
+                res: Union[Decision, str, bool] = await delay_cancellation(
+                    callback(event)
+                )
+                if res is False or res is Allow.ALLOW:
+                    # This spam-checker accepts the event.
+                    # Other spam-checkers may reject it, though.
+                    continue
+                elif res is True:
+                    # This spam-checker rejects the event with deprecated
+                    # return value `True`
+                    return Codes.FORBIDDEN
+                else:
+                    # This spam-checker rejects the event either with a `str`
+                    # or with a `Codes`. In either case, we stop here.
+                    return res
+
+        # No spam-checker has rejected the event, let it pass.
+        return Allow.ALLOW
+
+    async def should_drop_federated_event(
+        self, event: "synapse.events.EventBase"
+    ) -> Union[bool, str]:
+        """Checks if a given federated event is considered "spammy" by this
+        server.
+
+        If the server considers an event spammy, it will be silently dropped,
+        and in doing so will split-brain our view of the room's DAG.
+
+        Args:
+            event: the event to be checked
+
+        Returns:
+            True if the event should be silently dropped
+        """
+        for callback in self._should_drop_federated_event_callbacks:
+            with Measure(
+                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
+            ):
                 res: Union[bool, str] = await delay_cancellation(callback(event))
             if res:
                 return res
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 41ac49fdc8..1e866b19d8 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -15,6 +15,7 @@
 import logging
 from typing import TYPE_CHECKING
 
+import synapse
 from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
 from synapse.api.errors import Codes, SynapseError
 from synapse.api.room_versions import EventFormatVersions, RoomVersion
@@ -98,9 +99,9 @@ class FederationBase:
                 )
             return redacted_event
 
-        result = await self.spam_checker.check_event_for_spam(pdu)
+        spam_check = await self.spam_checker.check_event_for_spam(pdu)
 
-        if result:
+        if spam_check is not synapse.spam_checker_api.Allow.ALLOW:
             logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
             # we redact (to save disk space) as well as soft-failing (to stop
             # using the event in prev_events).
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 884b5d60b4..b8232e5257 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -110,6 +110,7 @@ class FederationServer(FederationBase):
 
         self.handler = hs.get_federation_handler()
         self.storage = hs.get_storage()
+        self._spam_checker = hs.get_spam_checker()
         self._federation_event_handler = hs.get_federation_event_handler()
         self.state = hs.get_state_handler()
         self._event_auth_handler = hs.get_event_auth_handler()
@@ -1019,6 +1020,12 @@ class FederationServer(FederationBase):
         except SynapseError as e:
             raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)
 
+        if await self._spam_checker.should_drop_federated_event(pdu):
+            logger.warning(
+                "Unstaged federated event contains spam, dropping %s", pdu.event_id
+            )
+            return
+
         # Add the event to our staging area
         await self.store.insert_received_event_to_staging(origin, pdu)
 
@@ -1032,6 +1039,41 @@ class FederationServer(FederationBase):
                 pdu.room_id, room_version, lock, origin, pdu
             )
 
+    async def _get_next_nonspam_staged_event_for_room(
+        self, room_id: str, room_version: RoomVersion
+    ) -> Optional[Tuple[str, EventBase]]:
+        """Fetch the first non-spam event from staging queue.
+
+        Args:
+            room_id: the room to fetch the first non-spam event in.
+            room_version: the version of the room.
+
+        Returns:
+            The first non-spam event in that room.
+        """
+
+        while True:
+            # We need to do this check outside the lock to avoid a race between
+            # a new event being inserted by another instance and it attempting
+            # to acquire the lock.
+            next = await self.store.get_next_staged_event_for_room(
+                room_id, room_version
+            )
+
+            if next is None:
+                return None
+
+            origin, event = next
+
+            if await self._spam_checker.should_drop_federated_event(event):
+                logger.warning(
+                    "Staged federated event contains spam, dropping %s",
+                    event.event_id,
+                )
+                continue
+
+            return next
+
     @wrap_as_background_process("_process_incoming_pdus_in_room_inner")
     async def _process_incoming_pdus_in_room_inner(
         self,
@@ -1109,12 +1151,10 @@ class FederationServer(FederationBase):
                         (self._clock.time_msec() - received_ts) / 1000
                     )
 
-            # We need to do this check outside the lock to avoid a race between
-            # a new event being inserted by another instance and it attempting
-            # to acquire the lock.
-            next = await self.store.get_next_staged_event_for_room(
+            next = await self._get_next_nonspam_staged_event_for_room(
                 room_id, room_version
             )
+
             if not next:
                 break
 
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index d80f0ac5e8..8983b5a53d 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -223,7 +223,7 @@ class PerDestinationQueue:
         """Marks that the destination has new data to send, without starting a
         new transaction.
 
-        If a transaction loop is already in progress then a new transcation will
+        If a transaction loop is already in progress then a new transaction will
         be attempted when the current one finishes.
         """
 
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 9ce06dfa28..25df1905c6 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -49,11 +49,6 @@ from synapse.types import JsonDict
 
 logger = logging.getLogger(__name__)
 
-# Send join responses can be huge, so we set a separate limit here. The response
-# is parsed in a streaming manner, which helps alleviate the issue of memory
-# usage a bit.
-MAX_RESPONSE_SIZE_SEND_JOIN = 500 * 1024 * 1024
-
 
 class TransportLayerClient:
     """Sends federation HTTP requests to other servers"""
@@ -349,7 +344,6 @@ class TransportLayerClient:
             path=path,
             data=content,
             parser=SendJoinParser(room_version, v1_api=True),
-            max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
         )
 
     async def send_join_v2(
@@ -372,7 +366,6 @@ class TransportLayerClient:
             args=query_params,
             data=content,
             parser=SendJoinParser(room_version, v1_api=False),
-            max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
         )
 
     async def send_leave_v1(
@@ -1360,6 +1353,11 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
 
     CONTENT_TYPE = "application/json"
 
+    # /send_join responses can be huge, so we override the size limit here. The response
+    # is parsed in a streaming manner, which helps alleviate the issue of memory
+    # usage a bit.
+    MAX_RESPONSE_SIZE = 500 * 1024 * 1024
+
     def __init__(self, room_version: RoomVersion, v1_api: bool):
         self._response = SendJoinResponse([], [], event_dict={})
         self._room_version = room_version
@@ -1427,6 +1425,9 @@ class _StateParser(ByteParser[StateRequestResponse]):
 
     CONTENT_TYPE = "application/json"
 
+    # As with /send_join, /state responses can be huge.
+    MAX_RESPONSE_SIZE = 500 * 1024 * 1024
+
     def __init__(self, room_version: RoomVersion):
         self._response = StateRequestResponse([], [])
         self._room_version = room_version
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index 71b2f90eb9..50623cd385 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -27,10 +27,6 @@ from synapse.federation.transport.server.federation import (
     FederationAccountStatusServlet,
     FederationTimestampLookupServlet,
 )
-from synapse.federation.transport.server.groups_local import GROUP_LOCAL_SERVLET_CLASSES
-from synapse.federation.transport.server.groups_server import (
-    GROUP_SERVER_SERVLET_CLASSES,
-)
 from synapse.http.server import HttpServer, JsonResource
 from synapse.http.servlet import (
     parse_boolean_from_args,
@@ -199,38 +195,6 @@ class PublicRoomList(BaseFederationServlet):
         return 200, data
 
 
-class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
-    """A group or user's server renews their attestation"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"
-
-    def __init__(
-        self,
-        hs: "HomeServer",
-        authenticator: Authenticator,
-        ratelimiter: FederationRateLimiter,
-        server_name: str,
-    ):
-        super().__init__(hs, authenticator, ratelimiter, server_name)
-        self.handler = hs.get_groups_attestation_renewer()
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        # We don't need to check auth here as we check the attestation signatures
-
-        new_content = await self.handler.on_renew_attestation(
-            group_id, user_id, content
-        )
-
-        return 200, new_content
-
-
 class OpenIdUserInfo(BaseFederationServlet):
     """
     Exchange a bearer token for information about a user.
@@ -292,16 +256,9 @@ class OpenIdUserInfo(BaseFederationServlet):
 SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
     "federation": FEDERATION_SERVLET_CLASSES,
     "room_list": (PublicRoomList,),
-    "group_server": GROUP_SERVER_SERVLET_CLASSES,
-    "group_local": GROUP_LOCAL_SERVLET_CLASSES,
-    "group_attestation": (FederationGroupsRenewAttestaionServlet,),
     "openid": (OpenIdUserInfo,),
 }
 
-DEFAULT_SERVLET_GROUPS = ("federation", "room_list", "openid")
-
-GROUP_SERVLET_GROUPS = ("group_server", "group_local", "group_attestation")
-
 
 def register_servlets(
     hs: "HomeServer",
@@ -324,10 +281,7 @@ def register_servlets(
             Defaults to ``DEFAULT_SERVLET_GROUPS``.
     """
     if not servlet_groups:
-        servlet_groups = DEFAULT_SERVLET_GROUPS
-        # Only allow the groups servlets if the deprecated groups feature is enabled.
-        if hs.config.experimental.groups_enabled:
-            servlet_groups = servlet_groups + GROUP_SERVLET_GROUPS
+        servlet_groups = SERVLET_GROUPS.keys()
 
     for servlet_group in servlet_groups:
         # Skip unknown servlet groups.
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index 6fbc7b5f15..57e8fb21b0 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -650,10 +650,6 @@ class FederationRoomHierarchyServlet(BaseFederationServlet):
         )
 
 
-class FederationRoomHierarchyUnstableServlet(FederationRoomHierarchyServlet):
-    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
-
-
 class RoomComplexityServlet(BaseFederationServlet):
     """
     Indicates to other servers how complex (and therefore likely
@@ -752,7 +748,6 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
     FederationVersionServlet,
     RoomComplexityServlet,
     FederationRoomHierarchyServlet,
-    FederationRoomHierarchyUnstableServlet,
     FederationV1SendKnockServlet,
     FederationMakeKnockServlet,
     FederationAccountStatusServlet,
diff --git a/synapse/federation/transport/server/groups_local.py b/synapse/federation/transport/server/groups_local.py
deleted file mode 100644
index 496472e1dc..0000000000
--- a/synapse/federation/transport/server/groups_local.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#  Copyright 2021 The Matrix.org Foundation C.I.C.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-from typing import TYPE_CHECKING, Dict, List, Tuple, Type
-
-from synapse.api.errors import SynapseError
-from synapse.federation.transport.server._base import (
-    Authenticator,
-    BaseFederationServlet,
-)
-from synapse.handlers.groups_local import GroupsLocalHandler
-from synapse.types import JsonDict, get_domain_from_id
-from synapse.util.ratelimitutils import FederationRateLimiter
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-
-class BaseGroupsLocalServlet(BaseFederationServlet):
-    """Abstract base class for federation servlet classes which provides a groups local handler.
-
-    See BaseFederationServlet for more information.
-    """
-
-    def __init__(
-        self,
-        hs: "HomeServer",
-        authenticator: Authenticator,
-        ratelimiter: FederationRateLimiter,
-        server_name: str,
-    ):
-        super().__init__(hs, authenticator, ratelimiter, server_name)
-        self.handler = hs.get_groups_local_handler()
-
-
-class FederationGroupsLocalInviteServlet(BaseGroupsLocalServlet):
-    """A group server has invited a local user"""
-
-    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        if get_domain_from_id(group_id) != origin:
-            raise SynapseError(403, "group_id doesn't match origin")
-
-        assert isinstance(
-            self.handler, GroupsLocalHandler
-        ), "Workers cannot handle group invites."
-
-        new_content = await self.handler.on_invite(group_id, user_id, content)
-
-        return 200, new_content
-
-
-class FederationGroupsRemoveLocalUserServlet(BaseGroupsLocalServlet):
-    """A group server has removed a local user"""
-
-    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        user_id: str,
-    ) -> Tuple[int, None]:
-        if get_domain_from_id(group_id) != origin:
-            raise SynapseError(403, "user_id doesn't match origin")
-
-        assert isinstance(
-            self.handler, GroupsLocalHandler
-        ), "Workers cannot handle group removals."
-
-        await self.handler.user_removed_from_group(group_id, user_id, content)
-
-        return 200, None
-
-
-class FederationGroupsBulkPublicisedServlet(BaseGroupsLocalServlet):
-    """Get roles in a group"""
-
-    PATH = "/get_groups_publicised"
-
-    async def on_POST(
-        self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
-    ) -> Tuple[int, JsonDict]:
-        resp = await self.handler.bulk_get_publicised_groups(
-            content["user_ids"], proxy=False
-        )
-
-        return 200, resp
-
-
-GROUP_LOCAL_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
-    FederationGroupsLocalInviteServlet,
-    FederationGroupsRemoveLocalUserServlet,
-    FederationGroupsBulkPublicisedServlet,
-)
diff --git a/synapse/federation/transport/server/groups_server.py b/synapse/federation/transport/server/groups_server.py
deleted file mode 100644
index 851b50152e..0000000000
--- a/synapse/federation/transport/server/groups_server.py
+++ /dev/null
@@ -1,755 +0,0 @@
-#  Copyright 2021 The Matrix.org Foundation C.I.C.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-from typing import TYPE_CHECKING, Dict, List, Tuple, Type
-
-from typing_extensions import Literal
-
-from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH
-from synapse.api.errors import Codes, SynapseError
-from synapse.federation.transport.server._base import (
-    Authenticator,
-    BaseFederationServlet,
-)
-from synapse.http.servlet import parse_string_from_args
-from synapse.types import JsonDict, get_domain_from_id
-from synapse.util.ratelimitutils import FederationRateLimiter
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-
-class BaseGroupsServerServlet(BaseFederationServlet):
-    """Abstract base class for federation servlet classes which provides a groups server handler.
-
-    See BaseFederationServlet for more information.
-    """
-
-    def __init__(
-        self,
-        hs: "HomeServer",
-        authenticator: Authenticator,
-        ratelimiter: FederationRateLimiter,
-        server_name: str,
-    ):
-        super().__init__(hs, authenticator, ratelimiter, server_name)
-        self.handler = hs.get_groups_server_handler()
-
-
-class FederationGroupsProfileServlet(BaseGroupsServerServlet):
-    """Get/set the basic profile of a group on behalf of a user"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/profile"
-
-    async def on_GET(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        new_content = await self.handler.get_group_profile(group_id, requester_user_id)
-
-        return 200, new_content
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        new_content = await self.handler.update_group_profile(
-            group_id, requester_user_id, content
-        )
-
-        return 200, new_content
-
-
-class FederationGroupsSummaryServlet(BaseGroupsServerServlet):
-    PATH = "/groups/(?P<group_id>[^/]*)/summary"
-
-    async def on_GET(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        new_content = await self.handler.get_group_summary(group_id, requester_user_id)
-
-        return 200, new_content
-
-
-class FederationGroupsRoomsServlet(BaseGroupsServerServlet):
-    """Get the rooms in a group on behalf of a user"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/rooms"
-
-    async def on_GET(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        new_content = await self.handler.get_rooms_in_group(group_id, requester_user_id)
-
-        return 200, new_content
-
-
-class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet):
-    """Add/remove room from group"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        room_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        new_content = await self.handler.add_room_to_group(
-            group_id, requester_user_id, room_id, content
-        )
-
-        return 200, new_content
-
-    async def on_DELETE(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        room_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        new_content = await self.handler.remove_room_from_group(
-            group_id, requester_user_id, room_id
-        )
-
-        return 200, new_content
-
-
-class FederationGroupsAddRoomsConfigServlet(BaseGroupsServerServlet):
-    """Update room config in group"""
-
-    PATH = (
-        "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
-        "/config/(?P<config_key>[^/]*)"
-    )
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        room_id: str,
-        config_key: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        result = await self.handler.update_room_in_group(
-            group_id, requester_user_id, room_id, config_key, content
-        )
-
-        return 200, result
-
-
-class FederationGroupsUsersServlet(BaseGroupsServerServlet):
-    """Get the users in a group on behalf of a user"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/users"
-
-    async def on_GET(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        new_content = await self.handler.get_users_in_group(group_id, requester_user_id)
-
-        return 200, new_content
-
-
-class FederationGroupsInvitedUsersServlet(BaseGroupsServerServlet):
-    """Get the users that have been invited to a group"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/invited_users"
-
-    async def on_GET(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        new_content = await self.handler.get_invited_users_in_group(
-            group_id, requester_user_id
-        )
-
-        return 200, new_content
-
-
-class FederationGroupsInviteServlet(BaseGroupsServerServlet):
-    """Ask a group server to invite someone to the group"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        new_content = await self.handler.invite_to_group(
-            group_id, user_id, requester_user_id, content
-        )
-
-        return 200, new_content
-
-
-class FederationGroupsAcceptInviteServlet(BaseGroupsServerServlet):
-    """Accept an invitation from the group server"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        if get_domain_from_id(user_id) != origin:
-            raise SynapseError(403, "user_id doesn't match origin")
-
-        new_content = await self.handler.accept_invite(group_id, user_id, content)
-
-        return 200, new_content
-
-
-class FederationGroupsJoinServlet(BaseGroupsServerServlet):
-    """Attempt to join a group"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        if get_domain_from_id(user_id) != origin:
-            raise SynapseError(403, "user_id doesn't match origin")
-
-        new_content = await self.handler.join_group(group_id, user_id, content)
-
-        return 200, new_content
-
-
-class FederationGroupsRemoveUserServlet(BaseGroupsServerServlet):
-    """Leave or kick a user from the group"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        new_content = await self.handler.remove_user_from_group(
-            group_id, user_id, requester_user_id, content
-        )
-
-        return 200, new_content
-
-
-class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet):
-    """Add/remove a room from the group summary, with optional category.
-
-    Matches both:
-        - /groups/:group/summary/rooms/:room_id
-        - /groups/:group/summary/categories/:category/rooms/:room_id
-    """
-
-    PATH = (
-        "/groups/(?P<group_id>[^/]*)/summary"
-        "(/categories/(?P<category_id>[^/]+))?"
-        "/rooms/(?P<room_id>[^/]*)"
-    )
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        category_id: str,
-        room_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        if category_id == "":
-            raise SynapseError(
-                400, "category_id cannot be empty string", Codes.INVALID_PARAM
-            )
-
-        if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
-            raise SynapseError(
-                400,
-                "category_id may not be longer than %s characters"
-                % (MAX_GROUP_CATEGORYID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        resp = await self.handler.update_group_summary_room(
-            group_id,
-            requester_user_id,
-            room_id=room_id,
-            category_id=category_id,
-            content=content,
-        )
-
-        return 200, resp
-
-    async def on_DELETE(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        category_id: str,
-        room_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        if category_id == "":
-            raise SynapseError(400, "category_id cannot be empty string")
-
-        resp = await self.handler.delete_group_summary_room(
-            group_id, requester_user_id, room_id=room_id, category_id=category_id
-        )
-
-        return 200, resp
-
-
-class FederationGroupsCategoriesServlet(BaseGroupsServerServlet):
-    """Get all categories for a group"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/categories/?"
-
-    async def on_GET(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        resp = await self.handler.get_group_categories(group_id, requester_user_id)
-
-        return 200, resp
-
-
-class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
-    """Add/remove/get a category in a group"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"
-
-    async def on_GET(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        category_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        resp = await self.handler.get_group_category(
-            group_id, requester_user_id, category_id
-        )
-
-        return 200, resp
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        category_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        if category_id == "":
-            raise SynapseError(400, "category_id cannot be empty string")
-
-        if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
-            raise SynapseError(
-                400,
-                "category_id may not be longer than %s characters"
-                % (MAX_GROUP_CATEGORYID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        resp = await self.handler.upsert_group_category(
-            group_id, requester_user_id, category_id, content
-        )
-
-        return 200, resp
-
-    async def on_DELETE(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        category_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        if category_id == "":
-            raise SynapseError(400, "category_id cannot be empty string")
-
-        resp = await self.handler.delete_group_category(
-            group_id, requester_user_id, category_id
-        )
-
-        return 200, resp
-
-
-class FederationGroupsRolesServlet(BaseGroupsServerServlet):
-    """Get roles in a group"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/roles/?"
-
-    async def on_GET(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        resp = await self.handler.get_group_roles(group_id, requester_user_id)
-
-        return 200, resp
-
-
-class FederationGroupsRoleServlet(BaseGroupsServerServlet):
-    """Add/remove/get a role in a group"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"
-
-    async def on_GET(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        role_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        resp = await self.handler.get_group_role(group_id, requester_user_id, role_id)
-
-        return 200, resp
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        role_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        if role_id == "":
-            raise SynapseError(
-                400, "role_id cannot be empty string", Codes.INVALID_PARAM
-            )
-
-        if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
-            raise SynapseError(
-                400,
-                "role_id may not be longer than %s characters"
-                % (MAX_GROUP_ROLEID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        resp = await self.handler.update_group_role(
-            group_id, requester_user_id, role_id, content
-        )
-
-        return 200, resp
-
-    async def on_DELETE(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        role_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        if role_id == "":
-            raise SynapseError(400, "role_id cannot be empty string")
-
-        resp = await self.handler.delete_group_role(
-            group_id, requester_user_id, role_id
-        )
-
-        return 200, resp
-
-
-class FederationGroupsSummaryUsersServlet(BaseGroupsServerServlet):
-    """Add/remove a user from the group summary, with optional role.
-
-    Matches both:
-        - /groups/:group/summary/users/:user_id
-        - /groups/:group/summary/roles/:role/users/:user_id
-    """
-
-    PATH = (
-        "/groups/(?P<group_id>[^/]*)/summary"
-        "(/roles/(?P<role_id>[^/]+))?"
-        "/users/(?P<user_id>[^/]*)"
-    )
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        role_id: str,
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        if role_id == "":
-            raise SynapseError(400, "role_id cannot be empty string")
-
-        if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
-            raise SynapseError(
-                400,
-                "role_id may not be longer than %s characters"
-                % (MAX_GROUP_ROLEID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        resp = await self.handler.update_group_summary_user(
-            group_id,
-            requester_user_id,
-            user_id=user_id,
-            role_id=role_id,
-            content=content,
-        )
-
-        return 200, resp
-
-    async def on_DELETE(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        role_id: str,
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        if role_id == "":
-            raise SynapseError(400, "role_id cannot be empty string")
-
-        resp = await self.handler.delete_group_summary_user(
-            group_id, requester_user_id, user_id=user_id, role_id=role_id
-        )
-
-        return 200, resp
-
-
-class FederationGroupsSettingJoinPolicyServlet(BaseGroupsServerServlet):
-    """Sets whether a group is joinable without an invite or knock"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"
-
-    async def on_PUT(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester_user_id = parse_string_from_args(
-            query, "requester_user_id", required=True
-        )
-        if get_domain_from_id(requester_user_id) != origin:
-            raise SynapseError(403, "requester_user_id doesn't match origin")
-
-        new_content = await self.handler.set_group_join_policy(
-            group_id, requester_user_id, content
-        )
-
-        return 200, new_content
-
-
-GROUP_SERVER_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
-    FederationGroupsProfileServlet,
-    FederationGroupsSummaryServlet,
-    FederationGroupsRoomsServlet,
-    FederationGroupsUsersServlet,
-    FederationGroupsInvitedUsersServlet,
-    FederationGroupsInviteServlet,
-    FederationGroupsAcceptInviteServlet,
-    FederationGroupsJoinServlet,
-    FederationGroupsRemoveUserServlet,
-    FederationGroupsSummaryRoomsServlet,
-    FederationGroupsCategoriesServlet,
-    FederationGroupsCategoryServlet,
-    FederationGroupsRolesServlet,
-    FederationGroupsRoleServlet,
-    FederationGroupsSummaryUsersServlet,
-    FederationGroupsAddRoomsServlet,
-    FederationGroupsAddRoomsConfigServlet,
-    FederationGroupsSettingJoinPolicyServlet,
-)
diff --git a/synapse/groups/__init__.py b/synapse/groups/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/synapse/groups/__init__.py
+++ /dev/null
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
deleted file mode 100644
index ed26d6a6ce..0000000000
--- a/synapse/groups/attestations.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# Copyright 2017 Vector Creations Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Attestations ensure that users and groups can't lie about their memberships.
-
-When a user joins a group the HS and GS swap attestations, which allow them
-both to independently prove to third parties their membership.These
-attestations have a validity period so need to be periodically renewed.
-
-If a user leaves (or gets kicked out of) a group, either side can still use
-their attestation to "prove" their membership, until the attestation expires.
-Therefore attestations shouldn't be relied on to prove membership in important
-cases, but can for less important situations, e.g. showing a users membership
-of groups on their profile, showing flairs, etc.
-
-An attestation is a signed blob of json that looks like:
-
-    {
-        "user_id": "@foo:a.example.com",
-        "group_id": "+bar:b.example.com",
-        "valid_until_ms": 1507994728530,
-        "signatures":{"matrix.org":{"ed25519:auto":"..."}}
-    }
-"""
-
-import logging
-import random
-from typing import TYPE_CHECKING, Optional, Tuple
-
-from signedjson.sign import sign_json
-
-from twisted.internet.defer import Deferred
-
-from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
-from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import JsonDict, get_domain_from_id
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-# Default validity duration for new attestations we create
-DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000
-
-# We add some jitter to the validity duration of attestations so that if we
-# add lots of users at once we don't need to renew them all at once.
-# The jitter is a multiplier picked randomly between the first and second number
-DEFAULT_ATTESTATION_JITTER = (0.9, 1.3)
-
-# Start trying to update our attestations when they come this close to expiring
-UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000
-
-
-class GroupAttestationSigning:
-    """Creates and verifies group attestations."""
-
-    def __init__(self, hs: "HomeServer"):
-        self.keyring = hs.get_keyring()
-        self.clock = hs.get_clock()
-        self.server_name = hs.hostname
-        self.signing_key = hs.signing_key
-
-    async def verify_attestation(
-        self,
-        attestation: JsonDict,
-        group_id: str,
-        user_id: str,
-        server_name: Optional[str] = None,
-    ) -> None:
-        """Verifies that the given attestation matches the given parameters.
-
-        An optional server_name can be supplied to explicitly set which server's
-        signature is expected. Otherwise assumes that either the group_id or user_id
-        is local and uses the other's server as the one to check.
-        """
-
-        if not server_name:
-            if get_domain_from_id(group_id) == self.server_name:
-                server_name = get_domain_from_id(user_id)
-            elif get_domain_from_id(user_id) == self.server_name:
-                server_name = get_domain_from_id(group_id)
-            else:
-                raise Exception("Expected either group_id or user_id to be local")
-
-        if user_id != attestation["user_id"]:
-            raise SynapseError(400, "Attestation has incorrect user_id")
-
-        if group_id != attestation["group_id"]:
-            raise SynapseError(400, "Attestation has incorrect group_id")
-        valid_until_ms = attestation["valid_until_ms"]
-
-        # TODO: We also want to check that *new* attestations that people give
-        # us to store are valid for at least a little while.
-        now = self.clock.time_msec()
-        if valid_until_ms < now:
-            raise SynapseError(400, "Attestation expired")
-
-        assert server_name is not None
-        await self.keyring.verify_json_for_server(
-            server_name,
-            attestation,
-            now,
-        )
-
-    def create_attestation(self, group_id: str, user_id: str) -> JsonDict:
-        """Create an attestation for the group_id and user_id with default
-        validity length.
-        """
-        validity_period = DEFAULT_ATTESTATION_LENGTH_MS * random.uniform(
-            *DEFAULT_ATTESTATION_JITTER
-        )
-        valid_until_ms = int(self.clock.time_msec() + validity_period)
-
-        return sign_json(
-            {
-                "group_id": group_id,
-                "user_id": user_id,
-                "valid_until_ms": valid_until_ms,
-            },
-            self.server_name,
-            self.signing_key,
-        )
-
-
-class GroupAttestionRenewer:
-    """Responsible for sending and receiving attestation updates."""
-
-    def __init__(self, hs: "HomeServer"):
-        self.clock = hs.get_clock()
-        self.store = hs.get_datastores().main
-        self.assestations = hs.get_groups_attestation_signing()
-        self.transport_client = hs.get_federation_transport_client()
-        self.is_mine_id = hs.is_mine_id
-        self.attestations = hs.get_groups_attestation_signing()
-
-        if not hs.config.worker.worker_app:
-            self._renew_attestations_loop = self.clock.looping_call(
-                self._start_renew_attestations, 30 * 60 * 1000
-            )
-
-    async def on_renew_attestation(
-        self, group_id: str, user_id: str, content: JsonDict
-    ) -> JsonDict:
-        """When a remote updates an attestation"""
-        attestation = content["attestation"]
-
-        if not self.is_mine_id(group_id) and not self.is_mine_id(user_id):
-            raise SynapseError(400, "Neither user not group are on this server")
-
-        await self.attestations.verify_attestation(
-            attestation, user_id=user_id, group_id=group_id
-        )
-
-        await self.store.update_remote_attestion(group_id, user_id, attestation)
-
-        return {}
-
-    def _start_renew_attestations(self) -> "Deferred[None]":
-        return run_as_background_process("renew_attestations", self._renew_attestations)
-
-    async def _renew_attestations(self) -> None:
-        """Called periodically to check if we need to update any of our attestations"""
-
-        now = self.clock.time_msec()
-
-        rows = await self.store.get_attestations_need_renewals(
-            now + UPDATE_ATTESTATION_TIME_MS
-        )
-
-        async def _renew_attestation(group_user: Tuple[str, str]) -> None:
-            group_id, user_id = group_user
-            try:
-                if not self.is_mine_id(group_id):
-                    destination = get_domain_from_id(group_id)
-                elif not self.is_mine_id(user_id):
-                    destination = get_domain_from_id(user_id)
-                else:
-                    logger.warning(
-                        "Incorrectly trying to do attestations for user: %r in %r",
-                        user_id,
-                        group_id,
-                    )
-                    await self.store.remove_attestation_renewal(group_id, user_id)
-                    return
-
-                attestation = self.attestations.create_attestation(group_id, user_id)
-
-                await self.transport_client.renew_group_attestation(
-                    destination, group_id, user_id, content={"attestation": attestation}
-                )
-
-                await self.store.update_attestation_renewal(
-                    group_id, user_id, attestation
-                )
-            except (RequestSendFailed, HttpResponseException) as e:
-                logger.warning(
-                    "Failed to renew attestation of %r in %r: %s", user_id, group_id, e
-                )
-            except Exception:
-                logger.exception(
-                    "Error renewing attestation of %r in %r", user_id, group_id
-                )
-
-        for row in rows:
-            await _renew_attestation((row["group_id"], row["user_id"]))
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
deleted file mode 100644
index dfd24af695..0000000000
--- a/synapse/groups/groups_server.py
+++ /dev/null
@@ -1,1019 +0,0 @@
-# Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd
-# Copyright 2019 Michael Telatynski <7t3chguy@gmail.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import TYPE_CHECKING, Optional
-
-from synapse.api.errors import Codes, SynapseError
-from synapse.handlers.groups_local import GroupsLocalHandler
-from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
-from synapse.types import GroupID, JsonDict, RoomID, UserID, get_domain_from_id
-from synapse.util.async_helpers import concurrently_execute
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-# TODO: Allow users to "knock" or simply join depending on rules
-# TODO: Federation admin APIs
-# TODO: is_privileged flag to users and is_public to users and rooms
-# TODO: Audit log for admins (profile updates, membership changes, users who tried
-#       to join but were rejected, etc)
-# TODO: Flairs
-
-
-# Note that the maximum lengths are somewhat arbitrary.
-MAX_SHORT_DESC_LEN = 1000
-MAX_LONG_DESC_LEN = 10000
-
-
-class GroupsServerWorkerHandler:
-    def __init__(self, hs: "HomeServer"):
-        self.hs = hs
-        self.store = hs.get_datastores().main
-        self.room_list_handler = hs.get_room_list_handler()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.keyring = hs.get_keyring()
-        self.is_mine_id = hs.is_mine_id
-        self.signing_key = hs.signing_key
-        self.server_name = hs.hostname
-        self.attestations = hs.get_groups_attestation_signing()
-        self.transport_client = hs.get_federation_transport_client()
-        self.profile_handler = hs.get_profile_handler()
-
-    async def check_group_is_ours(
-        self,
-        group_id: str,
-        requester_user_id: str,
-        and_exists: bool = False,
-        and_is_admin: Optional[str] = None,
-    ) -> Optional[dict]:
-        """Check that the group is ours, and optionally if it exists.
-
-        If group does exist then return group.
-
-        Args:
-            group_id: The group ID to check.
-            requester_user_id: The user ID of the requester.
-            and_exists: whether to also check if group exists
-            and_is_admin: whether to also check if given str is a user_id
-                that is an admin
-        """
-        if not self.is_mine_id(group_id):
-            raise SynapseError(400, "Group not on this server")
-
-        group = await self.store.get_group(group_id)
-        if and_exists and not group:
-            raise SynapseError(404, "Unknown group")
-
-        is_user_in_group = await self.store.is_user_in_group(
-            requester_user_id, group_id
-        )
-        if group and not is_user_in_group and not group["is_public"]:
-            raise SynapseError(404, "Unknown group")
-
-        if and_is_admin:
-            is_admin = await self.store.is_user_admin_in_group(group_id, and_is_admin)
-            if not is_admin:
-                raise SynapseError(403, "User is not admin in group")
-
-        return group
-
-    async def get_group_summary(
-        self, group_id: str, requester_user_id: str
-    ) -> JsonDict:
-        """Get the summary for a group as seen by requester_user_id.
-
-        The group summary consists of the profile of the room, and a curated
-        list of users and rooms. These list *may* be organised by role/category.
-        The roles/categories are ordered, and so are the users/rooms within them.
-
-        A user/room may appear in multiple roles/categories.
-        """
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        is_user_in_group = await self.store.is_user_in_group(
-            requester_user_id, group_id
-        )
-
-        profile = await self.get_group_profile(group_id, requester_user_id)
-
-        users, roles = await self.store.get_users_for_summary_by_role(
-            group_id, include_private=is_user_in_group
-        )
-
-        # TODO: Add profiles to users
-
-        rooms, categories = await self.store.get_rooms_for_summary_by_category(
-            group_id, include_private=is_user_in_group
-        )
-
-        for room_entry in rooms:
-            room_id = room_entry["room_id"]
-            joined_users = await self.store.get_users_in_room(room_id)
-            entry = await self.room_list_handler.generate_room_entry(
-                room_id, len(joined_users), with_alias=False, allow_private=True
-            )
-            if entry is None:
-                continue
-            entry = dict(entry)  # so we don't change what's cached
-            entry.pop("room_id", None)
-
-            room_entry["profile"] = entry
-
-        rooms.sort(key=lambda e: e.get("order", 0))
-
-        for user in users:
-            user_id = user["user_id"]
-
-            if not self.is_mine_id(requester_user_id):
-                attestation = await self.store.get_remote_attestation(group_id, user_id)
-                if not attestation:
-                    continue
-
-                user["attestation"] = attestation
-            else:
-                user["attestation"] = self.attestations.create_attestation(
-                    group_id, user_id
-                )
-
-            user_profile = await self.profile_handler.get_profile_from_cache(user_id)
-            user.update(user_profile)
-
-        users.sort(key=lambda e: e.get("order", 0))
-
-        membership_info = await self.store.get_users_membership_info_in_group(
-            group_id, requester_user_id
-        )
-
-        return {
-            "profile": profile,
-            "users_section": {
-                "users": users,
-                "roles": roles,
-                "total_user_count_estimate": 0,  # TODO
-            },
-            "rooms_section": {
-                "rooms": rooms,
-                "categories": categories,
-                "total_room_count_estimate": 0,  # TODO
-            },
-            "user": membership_info,
-        }
-
-    async def get_group_categories(
-        self, group_id: str, requester_user_id: str
-    ) -> JsonDict:
-        """Get all categories in a group (as seen by user)"""
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        categories = await self.store.get_group_categories(group_id=group_id)
-        return {"categories": categories}
-
-    async def get_group_category(
-        self, group_id: str, requester_user_id: str, category_id: str
-    ) -> JsonDict:
-        """Get a specific category in a group (as seen by user)"""
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        return await self.store.get_group_category(
-            group_id=group_id, category_id=category_id
-        )
-
-    async def get_group_roles(self, group_id: str, requester_user_id: str) -> JsonDict:
-        """Get all roles in a group (as seen by user)"""
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        roles = await self.store.get_group_roles(group_id=group_id)
-        return {"roles": roles}
-
-    async def get_group_role(
-        self, group_id: str, requester_user_id: str, role_id: str
-    ) -> JsonDict:
-        """Get a specific role in a group (as seen by user)"""
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        return await self.store.get_group_role(group_id=group_id, role_id=role_id)
-
-    async def get_group_profile(
-        self, group_id: str, requester_user_id: str
-    ) -> JsonDict:
-        """Get the group profile as seen by requester_user_id"""
-
-        await self.check_group_is_ours(group_id, requester_user_id)
-
-        group = await self.store.get_group(group_id)
-
-        if group:
-            cols = [
-                "name",
-                "short_description",
-                "long_description",
-                "avatar_url",
-                "is_public",
-            ]
-            group_description = {key: group[key] for key in cols}
-            group_description["is_openly_joinable"] = group["join_policy"] == "open"
-
-            return group_description
-        else:
-            raise SynapseError(404, "Unknown group")
-
-    async def get_users_in_group(
-        self, group_id: str, requester_user_id: str
-    ) -> JsonDict:
-        """Get the users in group as seen by requester_user_id.
-
-        The ordering is arbitrary at the moment
-        """
-
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        is_user_in_group = await self.store.is_user_in_group(
-            requester_user_id, group_id
-        )
-
-        user_results = await self.store.get_users_in_group(
-            group_id, include_private=is_user_in_group
-        )
-
-        chunk = []
-        for user_result in user_results:
-            g_user_id = user_result["user_id"]
-            is_public = user_result["is_public"]
-            is_privileged = user_result["is_admin"]
-
-            entry = {"user_id": g_user_id}
-
-            profile = await self.profile_handler.get_profile_from_cache(g_user_id)
-            entry.update(profile)
-
-            entry["is_public"] = bool(is_public)
-            entry["is_privileged"] = bool(is_privileged)
-
-            if not self.is_mine_id(g_user_id):
-                attestation = await self.store.get_remote_attestation(
-                    group_id, g_user_id
-                )
-                if not attestation:
-                    continue
-
-                entry["attestation"] = attestation
-            else:
-                entry["attestation"] = self.attestations.create_attestation(
-                    group_id, g_user_id
-                )
-
-            chunk.append(entry)
-
-        # TODO: If admin add lists of users whose attestations have timed out
-
-        return {"chunk": chunk, "total_user_count_estimate": len(user_results)}
-
-    async def get_invited_users_in_group(
-        self, group_id: str, requester_user_id: str
-    ) -> JsonDict:
-        """Get the users that have been invited to a group as seen by requester_user_id.
-
-        The ordering is arbitrary at the moment
-        """
-
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        is_user_in_group = await self.store.is_user_in_group(
-            requester_user_id, group_id
-        )
-
-        if not is_user_in_group:
-            raise SynapseError(403, "User not in group")
-
-        invited_users = await self.store.get_invited_users_in_group(group_id)
-
-        user_profiles = []
-
-        for user_id in invited_users:
-            user_profile = {"user_id": user_id}
-            try:
-                profile = await self.profile_handler.get_profile_from_cache(user_id)
-                user_profile.update(profile)
-            except Exception as e:
-                logger.warning("Error getting profile for %s: %s", user_id, e)
-            user_profiles.append(user_profile)
-
-        return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
-
-    async def get_rooms_in_group(
-        self, group_id: str, requester_user_id: str
-    ) -> JsonDict:
-        """Get the rooms in group as seen by requester_user_id
-
-        This returns rooms in order of decreasing number of joined users
-        """
-
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        is_user_in_group = await self.store.is_user_in_group(
-            requester_user_id, group_id
-        )
-
-        # Note! room_results["is_public"] is about whether the room is considered
-        # public from the group's point of view. (i.e. whether non-group members
-        # should be able to see the room is in the group).
-        # This is not the same as whether the room itself is public (in the sense
-        # of being visible in the room directory).
-        # As such, room_results["is_public"] itself is not sufficient to determine
-        # whether any given user is permitted to see the room's metadata.
-        room_results = await self.store.get_rooms_in_group(
-            group_id, include_private=is_user_in_group
-        )
-
-        chunk = []
-        for room_result in room_results:
-            room_id = room_result["room_id"]
-
-            joined_users = await self.store.get_users_in_room(room_id)
-
-            # check the user is actually allowed to see the room before showing it to them
-            allow_private = requester_user_id in joined_users
-
-            entry = await self.room_list_handler.generate_room_entry(
-                room_id,
-                len(joined_users),
-                with_alias=False,
-                allow_private=allow_private,
-            )
-
-            if not entry:
-                continue
-
-            entry["is_public"] = bool(room_result["is_public"])
-
-            chunk.append(entry)
-
-        chunk.sort(key=lambda e: -e["num_joined_members"])
-
-        return {"chunk": chunk, "total_room_count_estimate": len(chunk)}
-
-
-class GroupsServerHandler(GroupsServerWorkerHandler):
-    def __init__(self, hs: "HomeServer"):
-        super().__init__(hs)
-
-        # Ensure attestations get renewed
-        hs.get_groups_attestation_renewer()
-
-    async def update_group_summary_room(
-        self,
-        group_id: str,
-        requester_user_id: str,
-        room_id: str,
-        category_id: str,
-        content: JsonDict,
-    ) -> JsonDict:
-        """Add/update a room to the group summary"""
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        RoomID.from_string(room_id)  # Ensure valid room id
-
-        order = content.get("order", None)
-
-        is_public = _parse_visibility_from_contents(content)
-
-        await self.store.add_room_to_summary(
-            group_id=group_id,
-            room_id=room_id,
-            category_id=category_id,
-            order=order,
-            is_public=is_public,
-        )
-
-        return {}
-
-    async def delete_group_summary_room(
-        self, group_id: str, requester_user_id: str, room_id: str, category_id: str
-    ) -> JsonDict:
-        """Remove a room from the summary"""
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        await self.store.remove_room_from_summary(
-            group_id=group_id, room_id=room_id, category_id=category_id
-        )
-
-        return {}
-
-    async def set_group_join_policy(
-        self, group_id: str, requester_user_id: str, content: JsonDict
-    ) -> JsonDict:
-        """Sets the group join policy.
-
-        Currently supported policies are:
-         - "invite": an invite must be received and accepted in order to join.
-         - "open": anyone can join.
-        """
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        join_policy = _parse_join_policy_from_contents(content)
-        if join_policy is None:
-            raise SynapseError(400, "No value specified for 'm.join_policy'")
-
-        await self.store.set_group_join_policy(group_id, join_policy=join_policy)
-
-        return {}
-
-    async def update_group_category(
-        self, group_id: str, requester_user_id: str, category_id: str, content: JsonDict
-    ) -> JsonDict:
-        """Add/Update a group category"""
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        is_public = _parse_visibility_from_contents(content)
-        profile = content.get("profile")
-
-        await self.store.upsert_group_category(
-            group_id=group_id,
-            category_id=category_id,
-            is_public=is_public,
-            profile=profile,
-        )
-
-        return {}
-
-    async def delete_group_category(
-        self, group_id: str, requester_user_id: str, category_id: str
-    ) -> JsonDict:
-        """Delete a group category"""
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        await self.store.remove_group_category(
-            group_id=group_id, category_id=category_id
-        )
-
-        return {}
-
-    async def update_group_role(
-        self, group_id: str, requester_user_id: str, role_id: str, content: JsonDict
-    ) -> JsonDict:
-        """Add/update a role in a group"""
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        is_public = _parse_visibility_from_contents(content)
-
-        profile = content.get("profile")
-
-        await self.store.upsert_group_role(
-            group_id=group_id, role_id=role_id, is_public=is_public, profile=profile
-        )
-
-        return {}
-
-    async def delete_group_role(
-        self, group_id: str, requester_user_id: str, role_id: str
-    ) -> JsonDict:
-        """Remove role from group"""
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        await self.store.remove_group_role(group_id=group_id, role_id=role_id)
-
-        return {}
-
-    async def update_group_summary_user(
-        self,
-        group_id: str,
-        requester_user_id: str,
-        user_id: str,
-        role_id: str,
-        content: JsonDict,
-    ) -> JsonDict:
-        """Add/update a users entry in the group summary"""
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        order = content.get("order", None)
-
-        is_public = _parse_visibility_from_contents(content)
-
-        await self.store.add_user_to_summary(
-            group_id=group_id,
-            user_id=user_id,
-            role_id=role_id,
-            order=order,
-            is_public=is_public,
-        )
-
-        return {}
-
-    async def delete_group_summary_user(
-        self, group_id: str, requester_user_id: str, user_id: str, role_id: str
-    ) -> JsonDict:
-        """Remove a user from the group summary"""
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        await self.store.remove_user_from_summary(
-            group_id=group_id, user_id=user_id, role_id=role_id
-        )
-
-        return {}
-
-    async def update_group_profile(
-        self, group_id: str, requester_user_id: str, content: JsonDict
-    ) -> None:
-        """Update the group profile"""
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        profile = {}
-        for keyname, max_length in (
-            ("name", MAX_DISPLAYNAME_LEN),
-            ("avatar_url", MAX_AVATAR_URL_LEN),
-            ("short_description", MAX_SHORT_DESC_LEN),
-            ("long_description", MAX_LONG_DESC_LEN),
-        ):
-            if keyname in content:
-                value = content[keyname]
-                if not isinstance(value, str):
-                    raise SynapseError(
-                        400,
-                        "%r value is not a string" % (keyname,),
-                        errcode=Codes.INVALID_PARAM,
-                    )
-                if len(value) > max_length:
-                    raise SynapseError(
-                        400,
-                        "Invalid %s parameter" % (keyname,),
-                        errcode=Codes.INVALID_PARAM,
-                    )
-                profile[keyname] = value
-
-        await self.store.update_group_profile(group_id, profile)
-
-    async def add_room_to_group(
-        self, group_id: str, requester_user_id: str, room_id: str, content: JsonDict
-    ) -> JsonDict:
-        """Add room to group"""
-        RoomID.from_string(room_id)  # Ensure valid room id
-
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        is_public = _parse_visibility_from_contents(content)
-
-        await self.store.add_room_to_group(group_id, room_id, is_public=is_public)
-
-        return {}
-
-    async def update_room_in_group(
-        self,
-        group_id: str,
-        requester_user_id: str,
-        room_id: str,
-        config_key: str,
-        content: JsonDict,
-    ) -> JsonDict:
-        """Update room in group"""
-        RoomID.from_string(room_id)  # Ensure valid room id
-
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        if config_key == "m.visibility":
-            is_public = _parse_visibility_dict(content)
-
-            await self.store.update_room_in_group_visibility(
-                group_id, room_id, is_public=is_public
-            )
-        else:
-            raise SynapseError(400, "Unknown config option")
-
-        return {}
-
-    async def remove_room_from_group(
-        self, group_id: str, requester_user_id: str, room_id: str
-    ) -> JsonDict:
-        """Remove room from group"""
-        await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        await self.store.remove_room_from_group(group_id, room_id)
-
-        return {}
-
-    async def invite_to_group(
-        self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict
-    ) -> JsonDict:
-        """Invite user to group"""
-
-        group = await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-        if not group:
-            raise SynapseError(400, "Group does not exist", errcode=Codes.BAD_STATE)
-
-        # TODO: Check if user knocked
-
-        invited_users = await self.store.get_invited_users_in_group(group_id)
-        if user_id in invited_users:
-            raise SynapseError(
-                400, "User already invited to group", errcode=Codes.BAD_STATE
-            )
-
-        user_results = await self.store.get_users_in_group(
-            group_id, include_private=True
-        )
-        if user_id in (user_result["user_id"] for user_result in user_results):
-            raise SynapseError(400, "User already in group")
-
-        content = {
-            "profile": {"name": group["name"], "avatar_url": group["avatar_url"]},
-            "inviter": requester_user_id,
-        }
-
-        if self.hs.is_mine_id(user_id):
-            groups_local = self.hs.get_groups_local_handler()
-            assert isinstance(
-                groups_local, GroupsLocalHandler
-            ), "Workers cannot invites users to groups."
-            res = await groups_local.on_invite(group_id, user_id, content)
-            local_attestation = None
-        else:
-            local_attestation = self.attestations.create_attestation(group_id, user_id)
-            content.update({"attestation": local_attestation})
-
-            res = await self.transport_client.invite_to_group_notification(
-                get_domain_from_id(user_id), group_id, user_id, content
-            )
-
-            user_profile = res.get("user_profile", {})
-            await self.store.add_remote_profile_cache(
-                user_id,
-                displayname=user_profile.get("displayname"),
-                avatar_url=user_profile.get("avatar_url"),
-            )
-
-        if res["state"] == "join":
-            if not self.hs.is_mine_id(user_id):
-                remote_attestation = res["attestation"]
-
-                await self.attestations.verify_attestation(
-                    remote_attestation, user_id=user_id, group_id=group_id
-                )
-            else:
-                remote_attestation = None
-
-            await self.store.add_user_to_group(
-                group_id,
-                user_id,
-                is_admin=False,
-                is_public=False,  # TODO
-                local_attestation=local_attestation,
-                remote_attestation=remote_attestation,
-            )
-            return {"state": "join"}
-        elif res["state"] == "invite":
-            await self.store.add_group_invite(group_id, user_id)
-            return {"state": "invite"}
-        elif res["state"] == "reject":
-            return {"state": "reject"}
-        else:
-            raise SynapseError(502, "Unknown state returned by HS")
-
-    async def _add_user(
-        self, group_id: str, user_id: str, content: JsonDict
-    ) -> Optional[JsonDict]:
-        """Add a user to a group based on a content dict.
-
-        See accept_invite, join_group.
-        """
-        if not self.hs.is_mine_id(user_id):
-            local_attestation: Optional[
-                JsonDict
-            ] = self.attestations.create_attestation(group_id, user_id)
-
-            remote_attestation = content["attestation"]
-
-            await self.attestations.verify_attestation(
-                remote_attestation, user_id=user_id, group_id=group_id
-            )
-        else:
-            local_attestation = None
-            remote_attestation = None
-
-        is_public = _parse_visibility_from_contents(content)
-
-        await self.store.add_user_to_group(
-            group_id,
-            user_id,
-            is_admin=False,
-            is_public=is_public,
-            local_attestation=local_attestation,
-            remote_attestation=remote_attestation,
-        )
-
-        return local_attestation
-
-    async def accept_invite(
-        self, group_id: str, requester_user_id: str, content: JsonDict
-    ) -> JsonDict:
-        """User tries to accept an invite to the group.
-
-        This is different from them asking to join, and so should error if no
-        invite exists (and they're not a member of the group)
-        """
-
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        is_invited = await self.store.is_user_invited_to_local_group(
-            group_id, requester_user_id
-        )
-        if not is_invited:
-            raise SynapseError(403, "User not invited to group")
-
-        local_attestation = await self._add_user(group_id, requester_user_id, content)
-
-        return {"state": "join", "attestation": local_attestation}
-
-    async def join_group(
-        self, group_id: str, requester_user_id: str, content: JsonDict
-    ) -> JsonDict:
-        """User tries to join the group.
-
-        This will error if the group requires an invite/knock to join
-        """
-
-        group_info = await self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True
-        )
-        if not group_info:
-            raise SynapseError(404, "Group does not exist", errcode=Codes.NOT_FOUND)
-        if group_info["join_policy"] != "open":
-            raise SynapseError(403, "Group is not publicly joinable")
-
-        local_attestation = await self._add_user(group_id, requester_user_id, content)
-
-        return {"state": "join", "attestation": local_attestation}
-
-    async def remove_user_from_group(
-        self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict
-    ) -> JsonDict:
-        """Remove a user from the group; either a user is leaving or an admin
-        kicked them.
-        """
-
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        is_kick = False
-        if requester_user_id != user_id:
-            is_admin = await self.store.is_user_admin_in_group(
-                group_id, requester_user_id
-            )
-            if not is_admin:
-                raise SynapseError(403, "User is not admin in group")
-
-            is_kick = True
-
-        await self.store.remove_user_from_group(group_id, user_id)
-
-        if is_kick:
-            if self.hs.is_mine_id(user_id):
-                groups_local = self.hs.get_groups_local_handler()
-                assert isinstance(
-                    groups_local, GroupsLocalHandler
-                ), "Workers cannot remove users from groups."
-                await groups_local.user_removed_from_group(group_id, user_id, {})
-            else:
-                await self.transport_client.remove_user_from_group_notification(
-                    get_domain_from_id(user_id), group_id, user_id, {}
-                )
-
-        if not self.hs.is_mine_id(user_id):
-            await self.store.maybe_delete_remote_profile_cache(user_id)
-
-        # Delete group if the last user has left
-        users = await self.store.get_users_in_group(group_id, include_private=True)
-        if not users:
-            await self.store.delete_group(group_id)
-
-        return {}
-
-    async def create_group(
-        self, group_id: str, requester_user_id: str, content: JsonDict
-    ) -> JsonDict:
-        logger.info("Attempting to create group with ID: %r", group_id)
-
-        # parsing the id into a GroupID validates it.
-        group_id_obj = GroupID.from_string(group_id)
-
-        group = await self.check_group_is_ours(group_id, requester_user_id)
-        if group:
-            raise SynapseError(400, "Group already exists")
-
-        is_admin = await self.auth.is_server_admin(
-            UserID.from_string(requester_user_id)
-        )
-        if not is_admin:
-            if not self.hs.config.groups.enable_group_creation:
-                raise SynapseError(
-                    403, "Only a server admin can create groups on this server"
-                )
-            localpart = group_id_obj.localpart
-            if not localpart.startswith(self.hs.config.groups.group_creation_prefix):
-                raise SynapseError(
-                    400,
-                    "Can only create groups with prefix %r on this server"
-                    % (self.hs.config.groups.group_creation_prefix,),
-                )
-
-        profile = content.get("profile", {})
-        name = profile.get("name")
-        avatar_url = profile.get("avatar_url")
-        short_description = profile.get("short_description")
-        long_description = profile.get("long_description")
-        user_profile = content.get("user_profile", {})
-
-        await self.store.create_group(
-            group_id,
-            requester_user_id,
-            name=name,
-            avatar_url=avatar_url,
-            short_description=short_description,
-            long_description=long_description,
-        )
-
-        if not self.hs.is_mine_id(requester_user_id):
-            remote_attestation = content["attestation"]
-
-            await self.attestations.verify_attestation(
-                remote_attestation, user_id=requester_user_id, group_id=group_id
-            )
-
-            local_attestation: Optional[
-                JsonDict
-            ] = self.attestations.create_attestation(group_id, requester_user_id)
-        else:
-            local_attestation = None
-            remote_attestation = None
-
-        await self.store.add_user_to_group(
-            group_id,
-            requester_user_id,
-            is_admin=True,
-            is_public=True,  # TODO
-            local_attestation=local_attestation,
-            remote_attestation=remote_attestation,
-        )
-
-        if not self.hs.is_mine_id(requester_user_id):
-            await self.store.add_remote_profile_cache(
-                requester_user_id,
-                displayname=user_profile.get("displayname"),
-                avatar_url=user_profile.get("avatar_url"),
-            )
-
-        return {"group_id": group_id}
-
-    async def delete_group(self, group_id: str, requester_user_id: str) -> None:
-        """Deletes a group, kicking out all current members.
-
-        Only group admins or server admins can call this request
-
-        Args:
-            group_id: The group ID to delete.
-            requester_user_id: The user requesting to delete the group.
-        """
-
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        # Only server admins or group admins can delete groups.
-
-        is_admin = await self.store.is_user_admin_in_group(group_id, requester_user_id)
-
-        if not is_admin:
-            is_admin = await self.auth.is_server_admin(
-                UserID.from_string(requester_user_id)
-            )
-
-        if not is_admin:
-            raise SynapseError(403, "User is not an admin")
-
-        # Before deleting the group lets kick everyone out of it
-        users = await self.store.get_users_in_group(group_id, include_private=True)
-
-        async def _kick_user_from_group(user_id: str) -> None:
-            if self.hs.is_mine_id(user_id):
-                groups_local = self.hs.get_groups_local_handler()
-                assert isinstance(
-                    groups_local, GroupsLocalHandler
-                ), "Workers cannot kick users from groups."
-                await groups_local.user_removed_from_group(group_id, user_id, {})
-            else:
-                await self.transport_client.remove_user_from_group_notification(
-                    get_domain_from_id(user_id), group_id, user_id, {}
-                )
-                await self.store.maybe_delete_remote_profile_cache(user_id)
-
-        # We kick users out in the order of:
-        #   1. Non-admins
-        #   2. Other admins
-        #   3. The requester
-        #
-        # This is so that if the deletion fails for some reason other admins or
-        # the requester still has auth to retry.
-        non_admins = []
-        admins = []
-        for u in users:
-            if u["user_id"] == requester_user_id:
-                continue
-            if u["is_admin"]:
-                admins.append(u["user_id"])
-            else:
-                non_admins.append(u["user_id"])
-
-        await concurrently_execute(_kick_user_from_group, non_admins, 10)
-        await concurrently_execute(_kick_user_from_group, admins, 10)
-        await _kick_user_from_group(requester_user_id)
-
-        await self.store.delete_group(group_id)
-
-
-def _parse_join_policy_from_contents(content: JsonDict) -> Optional[str]:
-    """Given a content for a request, return the specified join policy or None"""
-
-    join_policy_dict = content.get("m.join_policy")
-    if join_policy_dict:
-        return _parse_join_policy_dict(join_policy_dict)
-    else:
-        return None
-
-
-def _parse_join_policy_dict(join_policy_dict: JsonDict) -> str:
-    """Given a dict for the "m.join_policy" config return the join policy specified"""
-    join_policy_type = join_policy_dict.get("type")
-    if not join_policy_type:
-        return "invite"
-
-    if join_policy_type not in ("invite", "open"):
-        raise SynapseError(400, "Synapse only supports 'invite'/'open' join rule")
-    return join_policy_type
-
-
-def _parse_visibility_from_contents(content: JsonDict) -> bool:
-    """Given a content for a request parse out whether the entity should be
-    public or not
-    """
-
-    visibility = content.get("m.visibility")
-    if visibility:
-        return _parse_visibility_dict(visibility)
-    else:
-        is_public = True
-
-    return is_public
-
-
-def _parse_visibility_dict(visibility: JsonDict) -> bool:
-    """Given a dict for the "m.visibility" config return if the entity should
-    be public or not
-    """
-    vis_type = visibility.get("type")
-    if not vis_type:
-        return True
-
-    if vis_type not in ("public", "private"):
-        raise SynapseError(400, "Synapse only supports 'public'/'private' visibility")
-    return vis_type == "public"
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 96376963f2..50e34743b7 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -31,7 +31,7 @@ class AdminHandler:
     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastores().main
         self.storage = hs.get_storage()
-        self.state_store = self.storage.state
+        self.state_storage = self.storage.state
 
     async def get_whois(self, user: UserID) -> JsonDict:
         connections = []
@@ -233,7 +233,7 @@ class AdminHandler:
             for event_id in extremities:
                 if not event_to_unseen_prevs[event_id]:
                     continue
-                state = await self.state_store.get_state_for_event(event_id)
+                state = await self.state_storage.get_state_for_event(event_id)
                 writer.write_state(room_id, event_id, state)
 
         return writer.finished()
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 1d6d1f8a92..b21e469865 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -70,7 +70,7 @@ class DeviceWorkerHandler:
         self.store = hs.get_datastores().main
         self.notifier = hs.get_notifier()
         self.state = hs.get_state_handler()
-        self.state_store = hs.get_storage().state
+        self.state_storage = hs.get_storage().state
         self._auth_handler = hs.get_auth_handler()
         self.server_name = hs.hostname
 
@@ -203,7 +203,9 @@ class DeviceWorkerHandler:
                 continue
 
             # mapping from event_id -> state_dict
-            prev_state_ids = await self.state_store.get_state_ids_for_events(event_ids)
+            prev_state_ids = await self.state_storage.get_state_ids_for_events(
+                event_ids
+            )
 
             # Check if we've joined the room? If so we just blindly add all the users to
             # the "possibly changed" users.
@@ -763,6 +765,10 @@ class DeviceListUpdater:
         device_id = edu_content.pop("device_id")
         stream_id = str(edu_content.pop("stream_id"))  # They may come as ints
         prev_ids = edu_content.pop("prev_id", [])
+        if not isinstance(prev_ids, list):
+            raise SynapseError(
+                400, "Device list update had an invalid 'prev_ids' field"
+            )
         prev_ids = [str(p) for p in prev_ids]  # They may come as ints
 
         if get_domain_from_id(user_id) != origin:
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 0386d0a07b..c8233270d7 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -126,7 +126,7 @@ class FederationHandler:
 
         self.store = hs.get_datastores().main
         self.storage = hs.get_storage()
-        self.state_store = self.storage.state
+        self.state_storage = self.storage.state
         self.federation_client = hs.get_federation_client()
         self.state_handler = hs.get_state_handler()
         self.server_name = hs.hostname
@@ -1027,7 +1027,9 @@ class FederationHandler:
         if event.internal_metadata.outlier:
             raise NotFoundError("State not known at event %s" % (event_id,))
 
-        state_groups = await self.state_store.get_state_groups_ids(room_id, [event_id])
+        state_groups = await self.state_storage.get_state_groups_ids(
+            room_id, [event_id]
+        )
 
         # get_state_groups_ids should return exactly one result
         assert len(state_groups) == 1
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 05c122f224..a1361af272 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -99,7 +99,7 @@ class FederationEventHandler:
     def __init__(self, hs: "HomeServer"):
         self._store = hs.get_datastores().main
         self._storage = hs.get_storage()
-        self._state_store = self._storage.state
+        self._state_storage = self._storage.state
 
         self._state_handler = hs.get_state_handler()
         self._event_creation_handler = hs.get_event_creation_handler()
@@ -274,7 +274,7 @@ class FederationEventHandler:
                     affected=pdu.event_id,
                 )
 
-        await self._process_received_pdu(origin, pdu, state=None)
+        await self._process_received_pdu(origin, pdu, state_ids=None)
 
     async def on_send_membership_event(
         self, origin: str, event: EventBase
@@ -463,7 +463,9 @@ class FederationEventHandler:
         with nested_logging_context(suffix=event.event_id):
             context = await self._state_handler.compute_event_context(
                 event,
-                old_state=state,
+                state_ids_before_event={
+                    (e.type, e.state_key): e.event_id for e in state
+                },
                 partial_state=partial_state,
             )
 
@@ -477,7 +479,23 @@ class FederationEventHandler:
             # and discover that we do not have it.
             event.internal_metadata.proactively_send = False
 
-            return await self.persist_events_and_notify(room_id, [(event, context)])
+            stream_id_after_persist = await self.persist_events_and_notify(
+                room_id, [(event, context)]
+            )
+
+            # If we're joining the room again, check if there is new marker
+            # state indicating that there is new history imported somewhere in
+            # the DAG. Multiple markers can exist in the current state with
+            # unique state_keys.
+            #
+            # Do this after the state from the remote join was persisted (via
+            # `persist_events_and_notify`). Otherwise we can run into a
+            # situation where the create event doesn't exist yet in the
+            # `current_state_events`
+            for e in state:
+                await self._handle_marker_event(origin, e)
+
+            return stream_id_after_persist
 
     async def update_state_for_partial_state_event(
         self, destination: str, event: EventBase
@@ -496,12 +514,12 @@ class FederationEventHandler:
             #
             # This is the same operation as we do when we receive a regular event
             # over federation.
-            state = await self._resolve_state_at_missing_prevs(destination, event)
+            state_ids = await self._resolve_state_at_missing_prevs(destination, event)
 
             # build a new state group for it if need be
             context = await self._state_handler.compute_event_context(
                 event,
-                old_state=state,
+                state_ids_before_event=state_ids,
             )
             if context.partial_state:
                 # this can happen if some or all of the event's prev_events still have
@@ -517,7 +535,7 @@ class FederationEventHandler:
                 )
                 return
             await self._store.update_state_for_partial_state_event(event, context)
-            self._state_store.notify_event_un_partial_stated(event.event_id)
+            self._state_storage.notify_event_un_partial_stated(event.event_id)
 
     async def backfill(
         self, dest: str, room_id: str, limit: int, extremities: Collection[str]
@@ -751,11 +769,12 @@ class FederationEventHandler:
             return
 
         try:
-            state = await self._resolve_state_at_missing_prevs(origin, event)
+            state_ids = await self._resolve_state_at_missing_prevs(origin, event)
             # TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does
             #   not return partial state
+
             await self._process_received_pdu(
-                origin, event, state=state, backfilled=backfilled
+                origin, event, state_ids=state_ids, backfilled=backfilled
             )
         except FederationError as e:
             if e.code == 403:
@@ -765,7 +784,7 @@ class FederationEventHandler:
 
     async def _resolve_state_at_missing_prevs(
         self, dest: str, event: EventBase
-    ) -> Optional[Iterable[EventBase]]:
+    ) -> Optional[StateMap[str]]:
         """Calculate the state at an event with missing prev_events.
 
         This is used when we have pulled a batch of events from a remote server, and
@@ -792,8 +811,8 @@ class FederationEventHandler:
             event: an event to check for missing prevs.
 
         Returns:
-            if we already had all the prev events, `None`. Otherwise, returns a list of
-            the events in the state at `event`.
+            if we already had all the prev events, `None`. Otherwise, returns
+            the event ids of the state at `event`.
         """
         room_id = event.room_id
         event_id = event.event_id
@@ -813,10 +832,10 @@ class FederationEventHandler:
         )
         # Calculate the state after each of the previous events, and
         # resolve them to find the correct state at the current event.
-        event_map = {event_id: event}
+
         try:
             # Get the state of the events we know about
-            ours = await self._state_store.get_state_groups_ids(room_id, seen)
+            ours = await self._state_storage.get_state_groups_ids(room_id, seen)
 
             # state_maps is a list of mappings from (type, state_key) to event_id
             state_maps: List[StateMap[str]] = list(ours.values())
@@ -833,40 +852,23 @@ class FederationEventHandler:
                     # note that if any of the missing prevs share missing state or
                     # auth events, the requests to fetch those events are deduped
                     # by the get_pdu_cache in federation_client.
-                    remote_state = await self._get_state_after_missing_prev_event(
-                        dest, room_id, p
+                    remote_state_map = (
+                        await self._get_state_ids_after_missing_prev_event(
+                            dest, room_id, p
+                        )
                     )
 
-                    remote_state_map = {
-                        (x.type, x.state_key): x.event_id for x in remote_state
-                    }
                     state_maps.append(remote_state_map)
 
-                    for x in remote_state:
-                        event_map[x.event_id] = x
-
             room_version = await self._store.get_room_version_id(room_id)
             state_map = await self._state_resolution_handler.resolve_events_with_store(
                 room_id,
                 room_version,
                 state_maps,
-                event_map,
+                event_map={event_id: event},
                 state_res_store=StateResolutionStore(self._store),
             )
 
-            # We need to give _process_received_pdu the actual state events
-            # rather than event ids, so generate that now.
-
-            # First though we need to fetch all the events that are in
-            # state_map, so we can build up the state below.
-            evs = await self._store.get_events(
-                list(state_map.values()),
-                get_prev_content=False,
-                redact_behaviour=EventRedactBehaviour.as_is,
-            )
-            event_map.update(evs)
-
-            state = [event_map[e] for e in state_map.values()]
         except Exception:
             logger.warning(
                 "Error attempting to resolve state at missing prev_events",
@@ -878,14 +880,14 @@ class FederationEventHandler:
                 "We can't get valid state history.",
                 affected=event_id,
             )
-        return state
+        return state_map
 
-    async def _get_state_after_missing_prev_event(
+    async def _get_state_ids_after_missing_prev_event(
         self,
         destination: str,
         room_id: str,
         event_id: str,
-    ) -> List[EventBase]:
+    ) -> StateMap[str]:
         """Requests all of the room state at a given event from a remote homeserver.
 
         Args:
@@ -894,7 +896,7 @@ class FederationEventHandler:
             event_id: The id of the event we want the state at.
 
         Returns:
-            A list of events in the state, including the event itself
+            The event ids of the state *after* the given event.
         """
         (
             state_event_ids,
@@ -909,19 +911,17 @@ class FederationEventHandler:
             len(auth_event_ids),
         )
 
-        # start by just trying to fetch the events from the store
+        # Start by checking events we already have in the DB
         desired_events = set(state_event_ids)
         desired_events.add(event_id)
         logger.debug("Fetching %i events from cache/store", len(desired_events))
-        fetched_events = await self._store.get_events(
-            desired_events, allow_rejected=True
-        )
+        have_events = await self._store.have_seen_events(room_id, desired_events)
 
-        missing_desired_events = desired_events - fetched_events.keys()
+        missing_desired_events = desired_events - have_events
         logger.debug(
             "We are missing %i events (got %i)",
             len(missing_desired_events),
-            len(fetched_events),
+            len(have_events),
         )
 
         # We probably won't need most of the auth events, so let's just check which
@@ -932,7 +932,7 @@ class FederationEventHandler:
         #   already have a bunch of the state events. It would be nice if the
         #   federation api gave us a way of finding out which we actually need.
 
-        missing_auth_events = set(auth_event_ids) - fetched_events.keys()
+        missing_auth_events = set(auth_event_ids) - have_events
         missing_auth_events.difference_update(
             await self._store.have_seen_events(room_id, missing_auth_events)
         )
@@ -958,47 +958,51 @@ class FederationEventHandler:
                 destination=destination, room_id=room_id, event_ids=missing_events
             )
 
-        # we need to make sure we re-load from the database to get the rejected
-        # state correct.
-        fetched_events.update(
-            await self._store.get_events(missing_desired_events, allow_rejected=True)
-        )
+        # We now need to fill out the state map, which involves fetching the
+        # type and state key for each event ID in the state.
+        state_map = {}
 
-        # check for events which were in the wrong room.
-        #
-        # this can happen if a remote server claims that the state or
-        # auth_events at an event in room A are actually events in room B
-
-        bad_events = [
-            (event_id, event.room_id)
-            for event_id, event in fetched_events.items()
-            if event.room_id != room_id
-        ]
+        event_metadata = await self._store.get_metadata_for_events(state_event_ids)
+        for state_event_id, metadata in event_metadata.items():
+            if metadata.room_id != room_id:
+                # This is a bogus situation, but since we may only discover it a long time
+                # after it happened, we try our best to carry on, by just omitting the
+                # bad events from the returned state set.
+                #
+                # This can happen if a remote server claims that the state or
+                # auth_events at an event in room A are actually events in room B
+                logger.warning(
+                    "Remote server %s claims event %s in room %s is an auth/state "
+                    "event in room %s",
+                    destination,
+                    state_event_id,
+                    metadata.room_id,
+                    room_id,
+                )
+                continue
 
-        for bad_event_id, bad_room_id in bad_events:
-            # This is a bogus situation, but since we may only discover it a long time
-            # after it happened, we try our best to carry on, by just omitting the
-            # bad events from the returned state set.
-            logger.warning(
-                "Remote server %s claims event %s in room %s is an auth/state "
-                "event in room %s",
-                destination,
-                bad_event_id,
-                bad_room_id,
-                room_id,
-            )
+            if metadata.state_key is None:
+                logger.warning(
+                    "Remote server gave us non-state event in state: %s", state_event_id
+                )
+                continue
 
-            del fetched_events[bad_event_id]
+            state_map[(metadata.event_type, metadata.state_key)] = state_event_id
 
         # if we couldn't get the prev event in question, that's a problem.
-        remote_event = fetched_events.get(event_id)
+        remote_event = await self._store.get_event(
+            event_id,
+            allow_none=True,
+            allow_rejected=True,
+            redact_behaviour=EventRedactBehaviour.as_is,
+        )
         if not remote_event:
             raise Exception("Unable to get missing prev_event %s" % (event_id,))
 
         # missing state at that event is a warning, not a blocker
         # XXX: this doesn't sound right? it means that we'll end up with incomplete
         #   state.
-        failed_to_fetch = desired_events - fetched_events.keys()
+        failed_to_fetch = desired_events - event_metadata.keys()
         if failed_to_fetch:
             logger.warning(
                 "Failed to fetch missing state events for %s %s",
@@ -1006,14 +1010,12 @@ class FederationEventHandler:
                 failed_to_fetch,
             )
 
-        remote_state = [
-            fetched_events[e_id] for e_id in state_event_ids if e_id in fetched_events
-        ]
-
         if remote_event.is_state() and remote_event.rejected_reason is None:
-            remote_state.append(remote_event)
+            state_map[
+                (remote_event.type, remote_event.state_key)
+            ] = remote_event.event_id
 
-        return remote_state
+        return state_map
 
     async def _get_state_and_persist(
         self, destination: str, room_id: str, event_id: str
@@ -1040,7 +1042,7 @@ class FederationEventHandler:
         self,
         origin: str,
         event: EventBase,
-        state: Optional[Iterable[EventBase]],
+        state_ids: Optional[StateMap[str]],
         backfilled: bool = False,
     ) -> None:
         """Called when we have a new non-outlier event.
@@ -1062,7 +1064,7 @@ class FederationEventHandler:
 
             event: event to be persisted
 
-            state: Normally None, but if we are handling a gap in the graph
+            state_ids: Normally None, but if we are handling a gap in the graph
                 (ie, we are missing one or more prev_events), the resolved state at the
                 event
 
@@ -1074,7 +1076,8 @@ class FederationEventHandler:
 
         try:
             context = await self._state_handler.compute_event_context(
-                event, old_state=state
+                event,
+                state_ids_before_event=state_ids,
             )
             context = await self._check_event_auth(
                 origin,
@@ -1091,7 +1094,7 @@ class FederationEventHandler:
             # For new (non-backfilled and non-outlier) events we check if the event
             # passes auth based on the current state. If it doesn't then we
             # "soft-fail" the event.
-            await self._check_for_soft_fail(event, state, origin=origin)
+            await self._check_for_soft_fail(event, state_ids, origin=origin)
 
         await self._run_push_actions_and_persist_event(event, context, backfilled)
 
@@ -1230,6 +1233,14 @@ class FederationEventHandler:
             # Nothing to retrieve then (invalid marker)
             return
 
+        already_seen_insertion_event = await self._store.have_seen_event(
+            marker_event.room_id, insertion_event_id
+        )
+        if already_seen_insertion_event:
+            # No need to process a marker again if we have already seen the
+            # insertion event that it was pointing to
+            return
+
         logger.debug(
             "_handle_marker_event: backfilling insertion event %s", insertion_event_id
         )
@@ -1565,7 +1576,7 @@ class FederationEventHandler:
     async def _check_for_soft_fail(
         self,
         event: EventBase,
-        state: Optional[Iterable[EventBase]],
+        state_ids: Optional[StateMap[str]],
         origin: str,
     ) -> None:
         """Checks if we should soft fail the event; if so, marks the event as
@@ -1573,7 +1584,7 @@ class FederationEventHandler:
 
         Args:
             event
-            state: The state at the event if we don't have all the event's prev events
+            state_ids: The state at the event if we don't have all the event's prev events
             origin: The host the event originates from.
         """
         extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id)
@@ -1589,7 +1600,7 @@ class FederationEventHandler:
         room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
 
         # Calculate the "current state".
-        if state is not None:
+        if state_ids is not None:
             # If we're explicitly given the state then we won't have all the
             # prev events, and so we have a gap in the graph. In this case
             # we want to be a little careful as we might have been down for
@@ -1602,17 +1613,20 @@ class FederationEventHandler:
             # given state at the event. This should correctly handle cases
             # like bans, especially with state res v2.
 
-            state_sets_d = await self._state_store.get_state_groups(
+            state_sets_d = await self._state_storage.get_state_groups_ids(
                 event.room_id, extrem_ids
             )
-            state_sets: List[Iterable[EventBase]] = list(state_sets_d.values())
-            state_sets.append(state)
-            current_states = await self._state_handler.resolve_events(
-                room_version, state_sets, event
+            state_sets: List[StateMap[str]] = list(state_sets_d.values())
+            state_sets.append(state_ids)
+            current_state_ids = (
+                await self._state_resolution_handler.resolve_events_with_store(
+                    event.room_id,
+                    room_version,
+                    state_sets,
+                    event_map=None,
+                    state_res_store=StateResolutionStore(self._store),
+                )
             )
-            current_state_ids: StateMap[str] = {
-                k: e.event_id for k, e in current_states.items()
-            }
         else:
             current_state_ids = await self._state_handler.get_current_state_ids(
                 event.room_id, latest_event_ids=extrem_ids
@@ -1871,7 +1885,7 @@ class FederationEventHandler:
 
         # create a new state group as a delta from the existing one.
         prev_group = context.state_group
-        state_group = await self._state_store.store_state_group(
+        state_group = await self._state_storage.store_state_group(
             event.event_id,
             event.room_id,
             prev_group=prev_group,
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
deleted file mode 100644
index e7a399787b..0000000000
--- a/synapse/handlers/groups_local.py
+++ /dev/null
@@ -1,503 +0,0 @@
-# Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Iterable, List, Set
-
-from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
-from synapse.types import GroupID, JsonDict, get_domain_from_id
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-def _create_rerouter(func_name: str) -> Callable[..., Awaitable[JsonDict]]:
-    """Returns an async function that looks at the group id and calls the function
-    on federation or the local group server if the group is local
-    """
-
-    async def f(
-        self: "GroupsLocalWorkerHandler", group_id: str, *args: Any, **kwargs: Any
-    ) -> JsonDict:
-        if not GroupID.is_valid(group_id):
-            raise SynapseError(400, "%s is not a legal group ID" % (group_id,))
-
-        if self.is_mine_id(group_id):
-            return await getattr(self.groups_server_handler, func_name)(
-                group_id, *args, **kwargs
-            )
-        else:
-            destination = get_domain_from_id(group_id)
-
-            try:
-                return await getattr(self.transport_client, func_name)(
-                    destination, group_id, *args, **kwargs
-                )
-            except HttpResponseException as e:
-                # Capture errors returned by the remote homeserver and
-                # re-throw specific errors as SynapseErrors. This is so
-                # when the remote end responds with things like 403 Not
-                # In Group, we can communicate that to the client instead
-                # of a 500.
-                raise e.to_synapse_error()
-            except RequestSendFailed:
-                raise SynapseError(502, "Failed to contact group server")
-
-    return f
-
-
-class GroupsLocalWorkerHandler:
-    def __init__(self, hs: "HomeServer"):
-        self.hs = hs
-        self.store = hs.get_datastores().main
-        self.room_list_handler = hs.get_room_list_handler()
-        self.groups_server_handler = hs.get_groups_server_handler()
-        self.transport_client = hs.get_federation_transport_client()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.keyring = hs.get_keyring()
-        self.is_mine_id = hs.is_mine_id
-        self.signing_key = hs.signing_key
-        self.server_name = hs.hostname
-        self.notifier = hs.get_notifier()
-        self.attestations = hs.get_groups_attestation_signing()
-
-        self.profile_handler = hs.get_profile_handler()
-
-    # The following functions merely route the query to the local groups server
-    # or federation depending on if the group is local or remote
-
-    get_group_profile = _create_rerouter("get_group_profile")
-    get_rooms_in_group = _create_rerouter("get_rooms_in_group")
-    get_invited_users_in_group = _create_rerouter("get_invited_users_in_group")
-    get_group_category = _create_rerouter("get_group_category")
-    get_group_categories = _create_rerouter("get_group_categories")
-    get_group_role = _create_rerouter("get_group_role")
-    get_group_roles = _create_rerouter("get_group_roles")
-
-    async def get_group_summary(
-        self, group_id: str, requester_user_id: str
-    ) -> JsonDict:
-        """Get the group summary for a group.
-
-        If the group is remote we check that the users have valid attestations.
-        """
-        if self.is_mine_id(group_id):
-            res = await self.groups_server_handler.get_group_summary(
-                group_id, requester_user_id
-            )
-        else:
-            try:
-                res = await self.transport_client.get_group_summary(
-                    get_domain_from_id(group_id), group_id, requester_user_id
-                )
-            except HttpResponseException as e:
-                raise e.to_synapse_error()
-            except RequestSendFailed:
-                raise SynapseError(502, "Failed to contact group server")
-
-            group_server_name = get_domain_from_id(group_id)
-
-            # Loop through the users and validate the attestations.
-            chunk = res["users_section"]["users"]
-            valid_users = []
-            for entry in chunk:
-                g_user_id = entry["user_id"]
-                attestation = entry.pop("attestation", {})
-                try:
-                    if get_domain_from_id(g_user_id) != group_server_name:
-                        await self.attestations.verify_attestation(
-                            attestation,
-                            group_id=group_id,
-                            user_id=g_user_id,
-                            server_name=get_domain_from_id(g_user_id),
-                        )
-                    valid_users.append(entry)
-                except Exception as e:
-                    logger.info("Failed to verify user is in group: %s", e)
-
-            res["users_section"]["users"] = valid_users
-
-            res["users_section"]["users"].sort(key=lambda e: e.get("order", 0))
-            res["rooms_section"]["rooms"].sort(key=lambda e: e.get("order", 0))
-
-        # Add `is_publicised` flag to indicate whether the user has publicised their
-        # membership of the group on their profile
-        result = await self.store.get_publicised_groups_for_user(requester_user_id)
-        is_publicised = group_id in result
-
-        res.setdefault("user", {})["is_publicised"] = is_publicised
-
-        return res
-
-    async def get_users_in_group(
-        self, group_id: str, requester_user_id: str
-    ) -> JsonDict:
-        """Get users in a group"""
-        if self.is_mine_id(group_id):
-            return await self.groups_server_handler.get_users_in_group(
-                group_id, requester_user_id
-            )
-
-        group_server_name = get_domain_from_id(group_id)
-
-        try:
-            res = await self.transport_client.get_users_in_group(
-                get_domain_from_id(group_id), group_id, requester_user_id
-            )
-        except HttpResponseException as e:
-            raise e.to_synapse_error()
-        except RequestSendFailed:
-            raise SynapseError(502, "Failed to contact group server")
-
-        chunk = res["chunk"]
-        valid_entries = []
-        for entry in chunk:
-            g_user_id = entry["user_id"]
-            attestation = entry.pop("attestation", {})
-            try:
-                if get_domain_from_id(g_user_id) != group_server_name:
-                    await self.attestations.verify_attestation(
-                        attestation,
-                        group_id=group_id,
-                        user_id=g_user_id,
-                        server_name=get_domain_from_id(g_user_id),
-                    )
-                valid_entries.append(entry)
-            except Exception as e:
-                logger.info("Failed to verify user is in group: %s", e)
-
-        res["chunk"] = valid_entries
-
-        return res
-
-    async def get_joined_groups(self, user_id: str) -> JsonDict:
-        group_ids = await self.store.get_joined_groups(user_id)
-        return {"groups": group_ids}
-
-    async def get_publicised_groups_for_user(self, user_id: str) -> JsonDict:
-        if self.hs.is_mine_id(user_id):
-            result = await self.store.get_publicised_groups_for_user(user_id)
-
-            # Check AS associated groups for this user - this depends on the
-            # RegExps in the AS registration file (under `users`)
-            for app_service in self.store.get_app_services():
-                result.extend(app_service.get_groups_for_user(user_id))
-
-            return {"groups": result}
-        else:
-            try:
-                bulk_result = await self.transport_client.bulk_get_publicised_groups(
-                    get_domain_from_id(user_id), [user_id]
-                )
-            except HttpResponseException as e:
-                raise e.to_synapse_error()
-            except RequestSendFailed:
-                raise SynapseError(502, "Failed to contact group server")
-
-            result = bulk_result.get("users", {}).get(user_id)
-            # TODO: Verify attestations
-            return {"groups": result}
-
-    async def bulk_get_publicised_groups(
-        self, user_ids: Iterable[str], proxy: bool = True
-    ) -> JsonDict:
-        destinations: Dict[str, Set[str]] = {}
-        local_users = set()
-
-        for user_id in user_ids:
-            if self.hs.is_mine_id(user_id):
-                local_users.add(user_id)
-            else:
-                destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id)
-
-        if not proxy and destinations:
-            raise SynapseError(400, "Some user_ids are not local")
-
-        results = {}
-        failed_results: List[str] = []
-        for destination, dest_user_ids in destinations.items():
-            try:
-                r = await self.transport_client.bulk_get_publicised_groups(
-                    destination, list(dest_user_ids)
-                )
-                results.update(r["users"])
-            except Exception:
-                failed_results.extend(dest_user_ids)
-
-        for uid in local_users:
-            results[uid] = await self.store.get_publicised_groups_for_user(uid)
-
-            # Check AS associated groups for this user - this depends on the
-            # RegExps in the AS registration file (under `users`)
-            for app_service in self.store.get_app_services():
-                results[uid].extend(app_service.get_groups_for_user(uid))
-
-        return {"users": results}
-
-
-class GroupsLocalHandler(GroupsLocalWorkerHandler):
-    def __init__(self, hs: "HomeServer"):
-        super().__init__(hs)
-
-        # Ensure attestations get renewed
-        hs.get_groups_attestation_renewer()
-
-    # The following functions merely route the query to the local groups server
-    # or federation depending on if the group is local or remote
-
-    update_group_profile = _create_rerouter("update_group_profile")
-
-    add_room_to_group = _create_rerouter("add_room_to_group")
-    update_room_in_group = _create_rerouter("update_room_in_group")
-    remove_room_from_group = _create_rerouter("remove_room_from_group")
-
-    update_group_summary_room = _create_rerouter("update_group_summary_room")
-    delete_group_summary_room = _create_rerouter("delete_group_summary_room")
-
-    update_group_category = _create_rerouter("update_group_category")
-    delete_group_category = _create_rerouter("delete_group_category")
-
-    update_group_summary_user = _create_rerouter("update_group_summary_user")
-    delete_group_summary_user = _create_rerouter("delete_group_summary_user")
-
-    update_group_role = _create_rerouter("update_group_role")
-    delete_group_role = _create_rerouter("delete_group_role")
-
-    set_group_join_policy = _create_rerouter("set_group_join_policy")
-
-    async def create_group(
-        self, group_id: str, user_id: str, content: JsonDict
-    ) -> JsonDict:
-        """Create a group"""
-
-        logger.info("Asking to create group with ID: %r", group_id)
-
-        if self.is_mine_id(group_id):
-            res = await self.groups_server_handler.create_group(
-                group_id, user_id, content
-            )
-            local_attestation = None
-            remote_attestation = None
-        else:
-            raise SynapseError(400, "Unable to create remote groups")
-
-        is_publicised = content.get("publicise", False)
-        token = await self.store.register_user_group_membership(
-            group_id,
-            user_id,
-            membership="join",
-            is_admin=True,
-            local_attestation=local_attestation,
-            remote_attestation=remote_attestation,
-            is_publicised=is_publicised,
-        )
-        self.notifier.on_new_event("groups_key", token, users=[user_id])
-
-        return res
-
-    async def join_group(
-        self, group_id: str, user_id: str, content: JsonDict
-    ) -> JsonDict:
-        """Request to join a group"""
-        if self.is_mine_id(group_id):
-            await self.groups_server_handler.join_group(group_id, user_id, content)
-            local_attestation = None
-            remote_attestation = None
-        else:
-            local_attestation = self.attestations.create_attestation(group_id, user_id)
-            content["attestation"] = local_attestation
-
-            try:
-                res = await self.transport_client.join_group(
-                    get_domain_from_id(group_id), group_id, user_id, content
-                )
-            except HttpResponseException as e:
-                raise e.to_synapse_error()
-            except RequestSendFailed:
-                raise SynapseError(502, "Failed to contact group server")
-
-            remote_attestation = res["attestation"]
-
-            await self.attestations.verify_attestation(
-                remote_attestation,
-                group_id=group_id,
-                user_id=user_id,
-                server_name=get_domain_from_id(group_id),
-            )
-
-        # TODO: Check that the group is public and we're being added publicly
-        is_publicised = content.get("publicise", False)
-
-        token = await self.store.register_user_group_membership(
-            group_id,
-            user_id,
-            membership="join",
-            is_admin=False,
-            local_attestation=local_attestation,
-            remote_attestation=remote_attestation,
-            is_publicised=is_publicised,
-        )
-        self.notifier.on_new_event("groups_key", token, users=[user_id])
-
-        return {}
-
-    async def accept_invite(
-        self, group_id: str, user_id: str, content: JsonDict
-    ) -> JsonDict:
-        """Accept an invite to a group"""
-        if self.is_mine_id(group_id):
-            await self.groups_server_handler.accept_invite(group_id, user_id, content)
-            local_attestation = None
-            remote_attestation = None
-        else:
-            local_attestation = self.attestations.create_attestation(group_id, user_id)
-            content["attestation"] = local_attestation
-
-            try:
-                res = await self.transport_client.accept_group_invite(
-                    get_domain_from_id(group_id), group_id, user_id, content
-                )
-            except HttpResponseException as e:
-                raise e.to_synapse_error()
-            except RequestSendFailed:
-                raise SynapseError(502, "Failed to contact group server")
-
-            remote_attestation = res["attestation"]
-
-            await self.attestations.verify_attestation(
-                remote_attestation,
-                group_id=group_id,
-                user_id=user_id,
-                server_name=get_domain_from_id(group_id),
-            )
-
-        # TODO: Check that the group is public and we're being added publicly
-        is_publicised = content.get("publicise", False)
-
-        token = await self.store.register_user_group_membership(
-            group_id,
-            user_id,
-            membership="join",
-            is_admin=False,
-            local_attestation=local_attestation,
-            remote_attestation=remote_attestation,
-            is_publicised=is_publicised,
-        )
-        self.notifier.on_new_event("groups_key", token, users=[user_id])
-
-        return {}
-
-    async def invite(
-        self, group_id: str, user_id: str, requester_user_id: str, config: JsonDict
-    ) -> JsonDict:
-        """Invite a user to a group"""
-        content = {"requester_user_id": requester_user_id, "config": config}
-        if self.is_mine_id(group_id):
-            res = await self.groups_server_handler.invite_to_group(
-                group_id, user_id, requester_user_id, content
-            )
-        else:
-            try:
-                res = await self.transport_client.invite_to_group(
-                    get_domain_from_id(group_id),
-                    group_id,
-                    user_id,
-                    requester_user_id,
-                    content,
-                )
-            except HttpResponseException as e:
-                raise e.to_synapse_error()
-            except RequestSendFailed:
-                raise SynapseError(502, "Failed to contact group server")
-
-        return res
-
-    async def on_invite(
-        self, group_id: str, user_id: str, content: JsonDict
-    ) -> JsonDict:
-        """One of our users were invited to a group"""
-        # TODO: Support auto join and rejection
-
-        if not self.is_mine_id(user_id):
-            raise SynapseError(400, "User not on this server")
-
-        local_profile = {}
-        if "profile" in content:
-            if "name" in content["profile"]:
-                local_profile["name"] = content["profile"]["name"]
-            if "avatar_url" in content["profile"]:
-                local_profile["avatar_url"] = content["profile"]["avatar_url"]
-
-        token = await self.store.register_user_group_membership(
-            group_id,
-            user_id,
-            membership="invite",
-            content={"profile": local_profile, "inviter": content["inviter"]},
-        )
-        self.notifier.on_new_event("groups_key", token, users=[user_id])
-        try:
-            user_profile = await self.profile_handler.get_profile(user_id)
-        except Exception as e:
-            logger.warning("No profile for user %s: %s", user_id, e)
-            user_profile = {}
-
-        return {"state": "invite", "user_profile": user_profile}
-
-    async def remove_user_from_group(
-        self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict
-    ) -> JsonDict:
-        """Remove a user from a group"""
-        if user_id == requester_user_id:
-            token = await self.store.register_user_group_membership(
-                group_id, user_id, membership="leave"
-            )
-            self.notifier.on_new_event("groups_key", token, users=[user_id])
-
-            # TODO: Should probably remember that we tried to leave so that we can
-            # retry if the group server is currently down.
-
-        if self.is_mine_id(group_id):
-            res = await self.groups_server_handler.remove_user_from_group(
-                group_id, user_id, requester_user_id, content
-            )
-        else:
-            content["requester_user_id"] = requester_user_id
-            try:
-                res = await self.transport_client.remove_user_from_group(
-                    get_domain_from_id(group_id),
-                    group_id,
-                    requester_user_id,
-                    user_id,
-                    content,
-                )
-            except HttpResponseException as e:
-                raise e.to_synapse_error()
-            except RequestSendFailed:
-                raise SynapseError(502, "Failed to contact group server")
-
-        return res
-
-    async def user_removed_from_group(
-        self, group_id: str, user_id: str, content: JsonDict
-    ) -> None:
-        """One of our users was removed/kicked from a group"""
-        # TODO: Check if user in group
-        token = await self.store.register_user_group_membership(
-            group_id, user_id, membership="leave"
-        )
-        self.notifier.on_new_event("groups_key", token, users=[user_id])
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index d79248ad90..c06932a41a 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -68,7 +68,7 @@ class InitialSyncHandler:
         ] = ResponseCache(hs.get_clock(), "initial_sync_cache")
         self._event_serializer = hs.get_event_client_serializer()
         self.storage = hs.get_storage()
-        self.state_store = self.storage.state
+        self.state_storage = self.storage.state
 
     async def snapshot_all_rooms(
         self,
@@ -198,7 +198,7 @@ class InitialSyncHandler:
                         event.stream_ordering,
                     )
                     deferred_room_state = run_in_background(
-                        self.state_store.get_state_for_events, [event.event_id]
+                        self.state_storage.get_state_for_events, [event.event_id]
                     ).addCallback(
                         lambda states: cast(StateMap[EventBase], states[event.event_id])
                     )
@@ -355,7 +355,7 @@ class InitialSyncHandler:
         member_event_id: str,
         is_peeking: bool,
     ) -> JsonDict:
-        room_state = await self.state_store.get_state_for_event(member_event_id)
+        room_state = await self.state_storage.get_state_for_event(member_event_id)
 
         limit = pagin_config.limit if pagin_config else None
         if limit is None:
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index e566ff1f8e..7ca126dbd1 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -23,6 +23,7 @@ from canonicaljson import encode_canonical_json
 
 from twisted.internet.interfaces import IDelayedCall
 
+import synapse
 from synapse import event_auth
 from synapse.api.constants import (
     EventContentFields,
@@ -54,7 +55,14 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.send_event import ReplicationSendEventRestServlet
 from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.storage.state import StateFilter
-from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester
+from synapse.types import (
+    MutableStateMap,
+    Requester,
+    RoomAlias,
+    StreamToken,
+    UserID,
+    create_requester,
+)
 from synapse.util import json_decoder, json_encoder, log_failure, unwrapFirstError
 from synapse.util.async_helpers import Linearizer, gather_results
 from synapse.util.caches.expiringcache import ExpiringCache
@@ -77,7 +85,7 @@ class MessageHandler:
         self.state = hs.get_state_handler()
         self.store = hs.get_datastores().main
         self.storage = hs.get_storage()
-        self.state_store = self.storage.state
+        self.state_storage = self.storage.state
         self._event_serializer = hs.get_event_client_serializer()
         self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages
 
@@ -124,7 +132,7 @@ class MessageHandler:
             assert (
                 membership_event_id is not None
             ), "check_user_in_room_or_world_readable returned invalid data"
-            room_state = await self.state_store.get_state_for_events(
+            room_state = await self.state_storage.get_state_for_events(
                 [membership_event_id], StateFilter.from_types([key])
             )
             data = room_state[membership_event_id].get(key)
@@ -185,7 +193,7 @@ class MessageHandler:
 
             # check whether the user is in the room at that time to determine
             # whether they should be treated as peeking.
-            state_map = await self.state_store.get_state_for_event(
+            state_map = await self.state_storage.get_state_for_event(
                 last_event.event_id,
                 StateFilter.from_types([(EventTypes.Member, user_id)]),
             )
@@ -206,7 +214,7 @@ class MessageHandler:
             )
 
             if visible_events:
-                room_state_events = await self.state_store.get_state_for_events(
+                room_state_events = await self.state_storage.get_state_for_events(
                     [last_event.event_id], state_filter=state_filter
                 )
                 room_state: Mapping[Any, EventBase] = room_state_events[
@@ -236,7 +244,7 @@ class MessageHandler:
                 assert (
                     membership_event_id is not None
                 ), "check_user_in_room_or_world_readable returned invalid data"
-                room_state_events = await self.state_store.get_state_for_events(
+                room_state_events = await self.state_storage.get_state_for_events(
                     [membership_event_id], state_filter=state_filter
                 )
                 room_state = room_state_events[membership_event_id]
@@ -885,11 +893,11 @@ class EventCreationHandler:
                 event.sender,
             )
 
-            spam_error = await self.spam_checker.check_event_for_spam(event)
-            if spam_error:
-                if not isinstance(spam_error, str):
-                    spam_error = "Spam is not permitted here"
-                raise SynapseError(403, spam_error, Codes.FORBIDDEN)
+            spam_check = await self.spam_checker.check_event_for_spam(event)
+            if spam_check is not synapse.spam_checker_api.Allow.ALLOW:
+                raise SynapseError(
+                    403, "This message had been rejected as probable spam", spam_check
+                )
 
             ev = await self.handle_new_client_event(
                 requester=requester,
@@ -1021,8 +1029,35 @@ class EventCreationHandler:
             #
             # TODO(faster_joins): figure out how this works, and make sure that the
             #   old state is complete.
-            old_state = await self.store.get_events_as_list(state_event_ids)
-            context = await self.state.compute_event_context(event, old_state=old_state)
+            metadata = await self.store.get_metadata_for_events(state_event_ids)
+
+            state_map_for_event: MutableStateMap[str] = {}
+            for state_id in state_event_ids:
+                data = metadata.get(state_id)
+                if data is None:
+                    # We're trying to persist a new historical batch of events
+                    # with the given state, e.g. via
+                    # `RoomBatchSendEventRestServlet`. The state can be inferred
+                    # by Synapse or set directly by the client.
+                    #
+                    # Either way, we should have persisted all the state before
+                    # getting here.
+                    raise Exception(
+                        f"State event {state_id} not found in DB,"
+                        " Synapse should have persisted it before using it."
+                    )
+
+                if data.state_key is None:
+                    raise Exception(
+                        f"Trying to set non-state event {state_id} as state"
+                    )
+
+                state_map_for_event[(data.event_type, data.state_key)] = state_id
+
+            context = await self.state.compute_event_context(
+                event,
+                state_ids_before_event=state_map_for_event,
+            )
         else:
             context = await self.state.compute_event_context(event)
 
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 6ae88add95..6f4820c240 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -130,7 +130,7 @@ class PaginationHandler:
         self.auth = hs.get_auth()
         self.store = hs.get_datastores().main
         self.storage = hs.get_storage()
-        self.state_store = self.storage.state
+        self.state_storage = self.storage.state
         self.clock = hs.get_clock()
         self._server_name = hs.hostname
         self._room_shutdown_handler = hs.get_room_shutdown_handler()
@@ -239,7 +239,7 @@ class PaginationHandler:
             # defined in the server's configuration, we can safely assume that's the
             # case and use it for this room.
             max_lifetime = (
-                retention_policy["max_lifetime"] or self._retention_default_max_lifetime
+                retention_policy.max_lifetime or self._retention_default_max_lifetime
             )
 
             # Cap the effective max_lifetime to be within the range allowed in the
@@ -539,7 +539,7 @@ class PaginationHandler:
                 (EventTypes.Member, event.sender) for event in events
             )
 
-            state_ids = await self.state_store.get_state_ids_for_event(
+            state_ids = await self.state_storage.get_state_ids_for_event(
                 events[0].event_id, state_filter=state_filter
             )
 
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 92e1de0500..e2775b34f1 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -1193,7 +1193,7 @@ class RoomContextHandler:
         self.auth = hs.get_auth()
         self.store = hs.get_datastores().main
         self.storage = hs.get_storage()
-        self.state_store = self.storage.state
+        self.state_storage = self.storage.state
         self._relations_handler = hs.get_relations_handler()
 
     async def get_event_context(
@@ -1293,7 +1293,7 @@ class RoomContextHandler:
         # first? Shouldn't we be consistent with /sync?
         # https://github.com/matrix-org/matrix-doc/issues/687
 
-        state = await self.state_store.get_state_for_events(
+        state = await self.state_storage.get_state_for_events(
             [last_event_id], state_filter=state_filter
         )
 
diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py
index fbfd748406..7ce32f2e9c 100644
--- a/synapse/handlers/room_batch.py
+++ b/synapse/handlers/room_batch.py
@@ -17,7 +17,7 @@ class RoomBatchHandler:
     def __init__(self, hs: "HomeServer"):
         self.hs = hs
         self.store = hs.get_datastores().main
-        self.state_store = hs.get_storage().state
+        self.state_storage = hs.get_storage().state
         self.event_creation_handler = hs.get_event_creation_handler()
         self.room_member_handler = hs.get_room_member_handler()
         self.auth = hs.get_auth()
@@ -141,7 +141,7 @@ class RoomBatchHandler:
         ) = await self.store.get_max_depth_of(event_ids)
         # mapping from (type, state_key) -> state_event_id
         assert most_recent_event_id is not None
-        prev_state_map = await self.state_store.get_state_ids_for_event(
+        prev_state_map = await self.state_storage.get_state_ids_for_event(
             most_recent_event_id
         )
         # List of state event ID's
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index ea876c168d..00662dc961 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -1081,17 +1081,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         # Transfer alias mappings in the room directory
         await self.store.update_aliases_for_room(old_room_id, room_id)
 
-        # Check if any groups we own contain the predecessor room
-        local_group_ids = await self.store.get_local_groups_for_room(old_room_id)
-        for group_id in local_group_ids:
-            # Add new the new room to those groups
-            await self.store.add_room_to_group(
-                group_id, room_id, old_room is not None and old_room["is_public"]
-            )
-
-            # Remove the old room from those groups
-            await self.store.remove_room_from_group(group_id, old_room_id)
-
     async def copy_user_state_on_room_upgrade(
         self, old_room_id: str, new_room_id: str, user_ids: Iterable[str]
     ) -> None:
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index af83de3193..75aee6a111 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -662,7 +662,8 @@ class RoomSummaryHandler:
         # The API doesn't return the room version so assume that a
         # join rule of knock is valid.
         if (
-            room.get("join_rules") in (JoinRules.PUBLIC, JoinRules.KNOCK)
+            room.get("join_rule")
+            in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED)
             or room.get("world_readable") is True
         ):
             return True
@@ -713,9 +714,6 @@ class RoomSummaryHandler:
             "canonical_alias": stats["canonical_alias"],
             "num_joined_members": stats["joined_members"],
             "avatar_url": stats["avatar"],
-            # plural join_rules is a documentation error but kept for historical
-            # purposes. Should match /publicRooms.
-            "join_rules": stats["join_rules"],
             "join_rule": stats["join_rules"],
             "world_readable": (
                 stats["history_visibility"] == HistoryVisibility.WORLD_READABLE
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index cd1c47dae8..e02c915248 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -56,7 +56,7 @@ class SearchHandler:
         self._event_serializer = hs.get_event_client_serializer()
         self._relations_handler = hs.get_relations_handler()
         self.storage = hs.get_storage()
-        self.state_store = self.storage.state
+        self.state_storage = self.storage.state
         self.auth = hs.get_auth()
 
     async def get_old_rooms_from_upgraded_room(self, room_id: str) -> Iterable[str]:
@@ -677,7 +677,7 @@ class SearchHandler:
                     [(EventTypes.Member, sender) for sender in senders]
                 )
 
-                state = await self.state_store.get_state_for_event(
+                state = await self.state_storage.get_state_for_event(
                     last_event_id, state_filter
                 )
 
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 59b5d497be..c5c538e0c3 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -166,16 +166,6 @@ class KnockedSyncResult:
         return True
 
 
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class GroupsSyncResult:
-    join: JsonDict
-    invite: JsonDict
-    leave: JsonDict
-
-    def __bool__(self) -> bool:
-        return bool(self.join or self.invite or self.leave)
-
-
 @attr.s(slots=True, auto_attribs=True)
 class _RoomChanges:
     """The set of room entries to include in the sync, plus the set of joined
@@ -206,7 +196,6 @@ class SyncResult:
             for this device
         device_unused_fallback_key_types: List of key types that have an unused fallback
             key
-        groups: Group updates, if any
     """
 
     next_batch: StreamToken
@@ -220,7 +209,6 @@ class SyncResult:
     device_lists: DeviceListUpdates
     device_one_time_keys_count: JsonDict
     device_unused_fallback_key_types: List[str]
-    groups: Optional[GroupsSyncResult]
 
     def __bool__(self) -> bool:
         """Make the result appear empty if there are no updates. This is used
@@ -236,7 +224,6 @@ class SyncResult:
             or self.account_data
             or self.to_device
             or self.device_lists
-            or self.groups
         )
 
 
@@ -252,7 +239,7 @@ class SyncHandler:
         self.state = hs.get_state_handler()
         self.auth = hs.get_auth()
         self.storage = hs.get_storage()
-        self.state_store = self.storage.state
+        self.state_storage = self.storage.state
 
         # TODO: flush cache entries on subsequent sync request.
         #    Once we get the next /sync request (ie, one with the same access token
@@ -643,7 +630,7 @@ class SyncHandler:
             event: event of interest
             state_filter: The state filter used to fetch state from the database.
         """
-        state_ids = await self.state_store.get_state_ids_for_event(
+        state_ids = await self.state_storage.get_state_ids_for_event(
             event.event_id, state_filter=state_filter or StateFilter.all()
         )
         if event.is_state():
@@ -723,7 +710,7 @@ class SyncHandler:
             return None
 
         last_event = last_events[-1]
-        state_ids = await self.state_store.get_state_ids_for_event(
+        state_ids = await self.state_storage.get_state_ids_for_event(
             last_event.event_id,
             state_filter=StateFilter.from_types(
                 [(EventTypes.Name, ""), (EventTypes.CanonicalAlias, "")]
@@ -901,11 +888,13 @@ class SyncHandler:
 
             if full_state:
                 if batch:
-                    current_state_ids = await self.state_store.get_state_ids_for_event(
-                        batch.events[-1].event_id, state_filter=state_filter
+                    current_state_ids = (
+                        await self.state_storage.get_state_ids_for_event(
+                            batch.events[-1].event_id, state_filter=state_filter
+                        )
                     )
 
-                    state_ids = await self.state_store.get_state_ids_for_event(
+                    state_ids = await self.state_storage.get_state_ids_for_event(
                         batch.events[0].event_id, state_filter=state_filter
                     )
 
@@ -926,7 +915,7 @@ class SyncHandler:
             elif batch.limited:
                 if batch:
                     state_at_timeline_start = (
-                        await self.state_store.get_state_ids_for_event(
+                        await self.state_storage.get_state_ids_for_event(
                             batch.events[0].event_id, state_filter=state_filter
                         )
                     )
@@ -960,8 +949,10 @@ class SyncHandler:
                 )
 
                 if batch:
-                    current_state_ids = await self.state_store.get_state_ids_for_event(
-                        batch.events[-1].event_id, state_filter=state_filter
+                    current_state_ids = (
+                        await self.state_storage.get_state_ids_for_event(
+                            batch.events[-1].event_id, state_filter=state_filter
+                        )
                     )
                 else:
                     # Its not clear how we get here, but empirically we do
@@ -991,7 +982,7 @@ class SyncHandler:
                         # So we fish out all the member events corresponding to the
                         # timeline here, and then dedupe any redundant ones below.
 
-                        state_ids = await self.state_store.get_state_ids_for_event(
+                        state_ids = await self.state_storage.get_state_ids_for_event(
                             batch.events[0].event_id,
                             # we only want members!
                             state_filter=StateFilter.from_types(
@@ -1157,10 +1148,6 @@ class SyncHandler:
                 await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
             )
 
-        if self.hs_config.experimental.groups_enabled:
-            logger.debug("Fetching group data")
-            await self._generate_sync_entry_for_groups(sync_result_builder)
-
         num_events = 0
 
         # debug for https://github.com/matrix-org/synapse/issues/9424
@@ -1184,57 +1171,11 @@ class SyncHandler:
             archived=sync_result_builder.archived,
             to_device=sync_result_builder.to_device,
             device_lists=device_lists,
-            groups=sync_result_builder.groups,
             device_one_time_keys_count=one_time_key_counts,
             device_unused_fallback_key_types=unused_fallback_key_types,
             next_batch=sync_result_builder.now_token,
         )
 
-    @measure_func("_generate_sync_entry_for_groups")
-    async def _generate_sync_entry_for_groups(
-        self, sync_result_builder: "SyncResultBuilder"
-    ) -> None:
-        user_id = sync_result_builder.sync_config.user.to_string()
-        since_token = sync_result_builder.since_token
-        now_token = sync_result_builder.now_token
-
-        if since_token and since_token.groups_key:
-            results = await self.store.get_groups_changes_for_user(
-                user_id, since_token.groups_key, now_token.groups_key
-            )
-        else:
-            results = await self.store.get_all_groups_for_user(
-                user_id, now_token.groups_key
-            )
-
-        invited = {}
-        joined = {}
-        left = {}
-        for result in results:
-            membership = result["membership"]
-            group_id = result["group_id"]
-            gtype = result["type"]
-            content = result["content"]
-
-            if membership == "join":
-                if gtype == "membership":
-                    # TODO: Add profile
-                    content.pop("membership", None)
-                    joined[group_id] = content["content"]
-                else:
-                    joined.setdefault(group_id, {})[gtype] = content
-            elif membership == "invite":
-                if gtype == "membership":
-                    content.pop("membership", None)
-                    invited[group_id] = content["content"]
-            else:
-                if gtype == "membership":
-                    left[group_id] = content["content"]
-
-        sync_result_builder.groups = GroupsSyncResult(
-            join=joined, invite=invited, leave=left
-        )
-
     @measure_func("_generate_sync_entry_for_device_list")
     async def _generate_sync_entry_for_device_list(
         self,
@@ -2333,7 +2274,6 @@ class SyncResultBuilder:
         invited
         knocked
         archived
-        groups
         to_device
     """
 
@@ -2349,7 +2289,6 @@ class SyncResultBuilder:
     invited: List[InvitedSyncResult] = attr.Factory(list)
     knocked: List[KnockedSyncResult] = attr.Factory(list)
     archived: List[ArchivedSyncResult] = attr.Factory(list)
-    groups: Optional[GroupsSyncResult] = None
     to_device: List[JsonDict] = attr.Factory(list)
 
     def calculate_user_changes(self) -> Tuple[Set[str], Set[str]]:
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 0b9475debd..db44721ef5 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -92,9 +92,6 @@ incoming_responses_counter = Counter(
     "synapse_http_matrixfederationclient_responses", "", ["method", "code"]
 )
 
-# a federation response can be rather large (eg a big state_ids is 50M or so), so we
-# need a generous limit here.
-MAX_RESPONSE_SIZE = 100 * 1024 * 1024
 
 MAX_LONG_RETRIES = 10
 MAX_SHORT_RETRIES = 3
@@ -116,6 +113,11 @@ class ByteParser(ByteWriteable, Generic[T], abc.ABC):
     the content type doesn't match we fail the request.
     """
 
+    # a federation response can be rather large (eg a big state_ids is 50M or so), so we
+    # need a generous limit here.
+    MAX_RESPONSE_SIZE: int = 100 * 1024 * 1024
+    """The largest response this parser will accept."""
+
     @abc.abstractmethod
     def finish(self) -> T:
         """Called when response has finished streaming and the parser should
@@ -203,7 +205,6 @@ async def _handle_response(
     response: IResponse,
     start_ms: int,
     parser: ByteParser[T],
-    max_response_size: Optional[int] = None,
 ) -> T:
     """
     Reads the body of a response with a timeout and sends it to a parser
@@ -215,15 +216,12 @@ async def _handle_response(
         response: response to the request
         start_ms: Timestamp when request was made
         parser: The parser for the response
-        max_response_size: The maximum size to read from the response, if None
-            uses the default.
 
     Returns:
         The parsed response
     """
 
-    if max_response_size is None:
-        max_response_size = MAX_RESPONSE_SIZE
+    max_response_size = parser.MAX_RESPONSE_SIZE
 
     try:
         check_content_type_is(response.headers, parser.CONTENT_TYPE)
@@ -240,7 +238,7 @@ async def _handle_response(
             "{%s} [%s] JSON response exceeded max size %i - %s %s",
             request.txn_id,
             request.destination,
-            MAX_RESPONSE_SIZE,
+            max_response_size,
             request.method,
             request.uri.decode("ascii"),
         )
@@ -772,7 +770,6 @@ class MatrixFederationHttpClient:
         backoff_on_404: bool = False,
         try_trailing_slash_on_400: bool = False,
         parser: Literal[None] = None,
-        max_response_size: Optional[int] = None,
     ) -> Union[JsonDict, list]:
         ...
 
@@ -790,7 +787,6 @@ class MatrixFederationHttpClient:
         backoff_on_404: bool = False,
         try_trailing_slash_on_400: bool = False,
         parser: Optional[ByteParser[T]] = None,
-        max_response_size: Optional[int] = None,
     ) -> T:
         ...
 
@@ -807,7 +803,6 @@ class MatrixFederationHttpClient:
         backoff_on_404: bool = False,
         try_trailing_slash_on_400: bool = False,
         parser: Optional[ByteParser] = None,
-        max_response_size: Optional[int] = None,
     ):
         """Sends the specified json data using PUT
 
@@ -843,8 +838,6 @@ class MatrixFederationHttpClient:
                 enabled.
             parser: The parser to use to decode the response. Defaults to
                 parsing as JSON.
-            max_response_size: The maximum size to read from the response, if None
-                uses the default.
 
         Returns:
             Succeeds when we get a 2xx HTTP response. The
@@ -895,7 +888,6 @@ class MatrixFederationHttpClient:
             response,
             start_ms,
             parser=parser,
-            max_response_size=max_response_size,
         )
 
         return body
@@ -984,7 +976,6 @@ class MatrixFederationHttpClient:
         ignore_backoff: bool = False,
         try_trailing_slash_on_400: bool = False,
         parser: Literal[None] = None,
-        max_response_size: Optional[int] = None,
     ) -> Union[JsonDict, list]:
         ...
 
@@ -999,7 +990,6 @@ class MatrixFederationHttpClient:
         ignore_backoff: bool = ...,
         try_trailing_slash_on_400: bool = ...,
         parser: ByteParser[T] = ...,
-        max_response_size: Optional[int] = ...,
     ) -> T:
         ...
 
@@ -1013,7 +1003,6 @@ class MatrixFederationHttpClient:
         ignore_backoff: bool = False,
         try_trailing_slash_on_400: bool = False,
         parser: Optional[ByteParser] = None,
-        max_response_size: Optional[int] = None,
     ):
         """GETs some json from the given host homeserver and path
 
@@ -1043,9 +1032,6 @@ class MatrixFederationHttpClient:
             parser: The parser to use to decode the response. Defaults to
                 parsing as JSON.
 
-            max_response_size: The maximum size to read from the response. If None,
-                uses the default.
-
         Returns:
             Succeeds when we get a 2xx HTTP response. The
             result will be the decoded JSON body.
@@ -1090,7 +1076,6 @@ class MatrixFederationHttpClient:
             response,
             start_ms,
             parser=parser,
-            max_response_size=max_response_size,
         )
 
         return body
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 73f92d2df8..95f3b27927 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -35,6 +35,7 @@ from typing_extensions import ParamSpec
 from twisted.internet import defer
 from twisted.web.resource import Resource
 
+from synapse import spam_checker_api
 from synapse.api.errors import SynapseError
 from synapse.events import EventBase
 from synapse.events.presence_router import (
@@ -47,6 +48,7 @@ from synapse.events.spamcheck import (
     CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK,
     CHECK_REGISTRATION_FOR_SPAM_CALLBACK,
     CHECK_USERNAME_FOR_SPAM_CALLBACK,
+    SHOULD_DROP_FEDERATED_EVENT_CALLBACK,
     USER_MAY_CREATE_ROOM_ALIAS_CALLBACK,
     USER_MAY_CREATE_ROOM_CALLBACK,
     USER_MAY_INVITE_CALLBACK,
@@ -139,6 +141,9 @@ are loaded into Synapse.
 
 PRESENCE_ALL_USERS = PresenceRouter.ALL_USERS
 
+ALLOW = spam_checker_api.Allow.ALLOW
+# Singleton value used to mark a message as permitted.
+
 __all__ = [
     "errors",
     "make_deferred_yieldable",
@@ -146,6 +151,7 @@ __all__ = [
     "respond_with_html",
     "run_in_background",
     "cached",
+    "Allow",
     "UserID",
     "DatabasePool",
     "LoggingTransaction",
@@ -234,6 +240,9 @@ class ModuleApi:
         self,
         *,
         check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
+        should_drop_federated_event: Optional[
+            SHOULD_DROP_FEDERATED_EVENT_CALLBACK
+        ] = None,
         user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None,
         user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
         user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None,
@@ -254,6 +263,7 @@ class ModuleApi:
         """
         return self._spam_checker.register_callbacks(
             check_event_for_spam=check_event_for_spam,
+            should_drop_federated_event=should_drop_federated_event,
             user_may_join_room=user_may_join_room,
             user_may_invite=user_may_invite,
             user_may_send_3pid_invite=user_may_send_3pid_invite,
diff --git a/synapse/module_api/errors.py b/synapse/module_api/errors.py
index e58e0e60fe..bedd045d6f 100644
--- a/synapse/module_api/errors.py
+++ b/synapse/module_api/errors.py
@@ -15,6 +15,7 @@
 """Exception types which are exposed as part of the stable module API"""
 
 from synapse.api.errors import (
+    Codes,
     InvalidClientCredentialsError,
     RedirectException,
     SynapseError,
@@ -24,6 +25,7 @@ from synapse.handlers.push_rules import InvalidRuleException
 from synapse.storage.push_rule import RuleNotFoundException
 
 __all__ = [
+    "Codes",
     "InvalidClientCredentialsError",
     "RedirectException",
     "SynapseError",
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index a17b35a605..819bc9e9b6 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -139,6 +139,7 @@ BASE_APPEND_CONTENT_RULES: List[Dict[str, Any]] = [
             {
                 "kind": "event_match",
                 "key": "content.body",
+                # Match the localpart of the requester's MXID.
                 "pattern_type": "user_localpart",
             }
         ],
@@ -191,6 +192,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "pattern": "invite",
                 "_cache_key": "_invite_member",
             },
+            # Match the requester's MXID.
             {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"},
         ],
         "actions": [
@@ -290,7 +292,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "_cache_key": "_room_server_acl",
             }
         ],
-        "actions": ["dont_notify"],
+        "actions": [],
     },
 ]
 
@@ -351,6 +353,18 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
         ],
     },
     {
+        "rule_id": "global/underride/.org.matrix.msc3772.thread_reply",
+        "conditions": [
+            {
+                "kind": "org.matrix.msc3772.relation_match",
+                "rel_type": "m.thread",
+                # Match the requester's MXID.
+                "sender_type": "user_id",
+            }
+        ],
+        "actions": ["notify", {"set_tweak": "highlight", "value": False}],
+    },
+    {
         "rule_id": "global/underride/.m.rule.message",
         "conditions": [
             {
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 4cc8a2ecca..1a8e7ef3dc 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -13,8 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import itertools
 import logging
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Union
 
 import attr
 from prometheus_client import Counter
@@ -121,6 +122,9 @@ class BulkPushRuleEvaluator:
             resizable=False,
         )
 
+        # Whether MSC3772 support is enabled.
+        self._relations_match_enabled = self.hs.config.experimental.msc3772_enabled
+
     async def _get_rules_for_event(
         self, event: EventBase, context: EventContext
     ) -> Dict[str, List[Dict[str, Any]]]:
@@ -192,6 +196,60 @@ class BulkPushRuleEvaluator:
 
         return pl_event.content if pl_event else {}, sender_level
 
+    async def _get_mutual_relations(
+        self, event: EventBase, rules: Iterable[Dict[str, Any]]
+    ) -> Dict[str, Set[Tuple[str, str]]]:
+        """
+        Fetch event metadata for events which relate to the same event as the given event.
+
+        If the given event has no relation information, returns an empty dictionary.
+
+        Args:
+            event: The event which is targeted by relations.
+            rules: The push rules which will be processed for this event.
+
+        Returns:
+            A dictionary of relation type to:
+                A set of tuples of:
+                    The sender
+                    The event type
+        """
+
+        # If the experimental feature is not enabled, skip fetching relations.
+        if not self._relations_match_enabled:
+            return {}
+
+        # If the event does not have a relation, then cannot have any mutual
+        # relations.
+        relation = relation_from_event(event)
+        if not relation:
+            return {}
+
+        # Pre-filter to figure out which relation types are interesting.
+        rel_types = set()
+        for rule in rules:
+            # Skip disabled rules.
+            if "enabled" in rule and not rule["enabled"]:
+                continue
+
+            for condition in rule["conditions"]:
+                if condition["kind"] != "org.matrix.msc3772.relation_match":
+                    continue
+
+                # rel_type is required.
+                rel_type = condition.get("rel_type")
+                if rel_type:
+                    rel_types.add(rel_type)
+
+        # If no valid rules were found, no mutual relations.
+        if not rel_types:
+            return {}
+
+        # If any valid rules were found, fetch the mutual relations.
+        return await self.store.get_mutual_event_relations(
+            relation.parent_id, rel_types
+        )
+
     @measure_func("action_for_event_by_user")
     async def action_for_event_by_user(
         self, event: EventBase, context: EventContext
@@ -216,8 +274,17 @@ class BulkPushRuleEvaluator:
             sender_power_level,
         ) = await self._get_power_levels_and_sender_level(event, context)
 
+        relations = await self._get_mutual_relations(
+            event, itertools.chain(*rules_by_user.values())
+        )
+
         evaluator = PushRuleEvaluatorForEvent(
-            event, len(room_members), sender_power_level, power_levels
+            event,
+            len(room_members),
+            sender_power_level,
+            power_levels,
+            relations,
+            self._relations_match_enabled,
         )
 
         # If the event is not a state event check if any users ignore the sender.
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index 63b22d50ae..5117ef6854 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -48,6 +48,10 @@ def format_push_rules_for_user(
             elif pattern_type == "user_localpart":
                 c["pattern"] = user.localpart
 
+            sender_type = c.pop("sender_type", None)
+            if sender_type == "user_id":
+                c["sender"] = user.to_string()
+
         rulearray = rules["global"][template_name]
 
         template_rule = _rule_to_template(r)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 5ccdd88364..84124af965 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -114,7 +114,7 @@ class Mailer:
 
         self.send_email_handler = hs.get_send_email_handler()
         self.store = self.hs.get_datastores().main
-        self.state_store = self.hs.get_storage().state
+        self.state_storage = self.hs.get_storage().state
         self.macaroon_gen = self.hs.get_macaroon_generator()
         self.state_handler = self.hs.get_state_handler()
         self.storage = hs.get_storage()
@@ -494,7 +494,7 @@ class Mailer:
             )
         else:
             # Attempt to check the historical state for the room.
-            historical_state = await self.state_store.get_state_for_event(
+            historical_state = await self.state_storage.get_state_for_event(
                 event.event_id, StateFilter.from_types((type_state_key,))
             )
             sender_state_event = historical_state.get(type_state_key)
@@ -767,7 +767,7 @@ class Mailer:
                 member_event_ids.append(sender_state_event_id)
             else:
                 # Attempt to check the historical state for the room.
-                historical_state = await self.state_store.get_state_for_event(
+                historical_state = await self.state_storage.get_state_for_event(
                     event_id, StateFilter.from_types((type_state_key,))
                 )
                 sender_state_event = historical_state.get(type_state_key)
diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py
index 54db6b5612..2e8a017add 100644
--- a/synapse/push/push_rule_evaluator.py
+++ b/synapse/push/push_rule_evaluator.py
@@ -15,7 +15,7 @@
 
 import logging
 import re
-from typing import Any, Dict, List, Mapping, Optional, Pattern, Tuple, Union
+from typing import Any, Dict, List, Mapping, Optional, Pattern, Set, Tuple, Union
 
 from matrix_common.regex import glob_to_regex, to_word_pattern
 
@@ -120,11 +120,15 @@ class PushRuleEvaluatorForEvent:
         room_member_count: int,
         sender_power_level: int,
         power_levels: Dict[str, Union[int, Dict[str, int]]],
+        relations: Dict[str, Set[Tuple[str, str]]],
+        relations_match_enabled: bool,
     ):
         self._event = event
         self._room_member_count = room_member_count
         self._sender_power_level = sender_power_level
         self._power_levels = power_levels
+        self._relations = relations
+        self._relations_match_enabled = relations_match_enabled
 
         # Maps strings of e.g. 'content.body' -> event["content"]["body"]
         self._value_cache = _flatten_dict(event)
@@ -188,7 +192,16 @@ class PushRuleEvaluatorForEvent:
             return _sender_notification_permission(
                 self._event, condition, self._sender_power_level, self._power_levels
             )
+        elif (
+            condition["kind"] == "org.matrix.msc3772.relation_match"
+            and self._relations_match_enabled
+        ):
+            return self._relation_match(condition, user_id)
         else:
+            # XXX This looks incorrect -- we have reached an unknown condition
+            #     kind and are unconditionally returning that it matches. Note
+            #     that it seems possible to provide a condition to the /pushrules
+            #     endpoint with an unknown kind, see _rule_tuple_from_request_object.
             return True
 
     def _event_match(self, condition: dict, user_id: str) -> bool:
@@ -256,6 +269,41 @@ class PushRuleEvaluatorForEvent:
 
         return bool(r.search(body))
 
+    def _relation_match(self, condition: dict, user_id: str) -> bool:
+        """
+        Check a "relation_match" push rule condition.
+
+        Args:
+            condition: The "relation_match" push rule condition to match.
+            user_id: The user's MXID.
+
+        Returns:
+             True if the condition matches the event, False otherwise.
+        """
+        rel_type = condition.get("rel_type")
+        if not rel_type:
+            logger.warning("relation_match condition missing rel_type")
+            return False
+
+        sender_pattern = condition.get("sender")
+        if sender_pattern is None:
+            sender_type = condition.get("sender_type")
+            if sender_type == "user_id":
+                sender_pattern = user_id
+        type_pattern = condition.get("type")
+
+        # If any other relation matches, return True.
+        for sender, event_type in self._relations.get(rel_type, ()):
+            if sender_pattern and not _glob_matches(sender_pattern, sender):
+                continue
+            if type_pattern and not _glob_matches(type_pattern, event_type):
+                continue
+            # All values must have matched.
+            return True
+
+        # No relations matched.
+        return False
+
 
 # Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches
 regex_cache: LruCache[Tuple[str, bool, bool], Pattern] = LruCache(
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index fe34948168..32f52e54d8 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -58,6 +58,15 @@ class Command(metaclass=abc.ABCMeta):
         # by default, we just use the command name.
         return self.NAME
 
+    def redis_channel_name(self, prefix: str) -> str:
+        """
+        Returns the Redis channel name upon which to publish this command.
+
+        Args:
+            prefix: The prefix for the channel.
+        """
+        return prefix
+
 
 SC = TypeVar("SC", bound="_SimpleCommand")
 
@@ -395,6 +404,9 @@ class UserIpCommand(Command):
             f"{self.user_agent!r}, {self.device_id!r}, {self.last_seen})"
         )
 
+    def redis_channel_name(self, prefix: str) -> str:
+        return f"{prefix}/USER_IP"
+
 
 class RemoteServerUpCommand(_SimpleCommand):
     """Sent when a worker has detected that a remote server is no longer
diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py
index 73294654ef..fd1c0ec6af 100644
--- a/synapse/replication/tcp/redis.py
+++ b/synapse/replication/tcp/redis.py
@@ -221,10 +221,10 @@ class RedisSubscriber(txredisapi.SubscriberProtocol):
         # remote instances.
         tcp_outbound_commands_counter.labels(cmd.NAME, "redis").inc()
 
+        channel_name = cmd.redis_channel_name(self.synapse_stream_prefix)
+
         await make_deferred_yieldable(
-            self.synapse_outbound_redis_connection.publish(
-                self.synapse_stream_prefix, encoded_string
-            )
+            self.synapse_outbound_redis_connection.publish(channel_name, encoded_string)
         )
 
 
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 57c4773edc..b712215112 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -26,7 +26,6 @@ from synapse.rest.client import (
     directory,
     events,
     filter,
-    groups,
     initial_sync,
     keys,
     knock,
@@ -118,8 +117,6 @@ class ClientRestResource(JsonResource):
         thirdparty.register_servlets(hs, client_resource)
         sendtodevice.register_servlets(hs, client_resource)
         user_directory.register_servlets(hs, client_resource)
-        if hs.config.experimental.groups_enabled:
-            groups.register_servlets(hs, client_resource)
         room_upgrade_rest_servlet.register_servlets(hs, client_resource)
         room_batch.register_servlets(hs, client_resource)
         capabilities.register_servlets(hs, client_resource)
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index cb4d55c89d..1aa08f8d95 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -47,7 +47,6 @@ from synapse.rest.admin.federation import (
     DestinationRestServlet,
     ListDestinationsRestServlet,
 )
-from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
 from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
 from synapse.rest.admin.registration_tokens import (
     ListRegistrationTokensRestServlet,
@@ -293,8 +292,6 @@ def register_servlets_for_client_rest_resource(
     ResetPasswordRestServlet(hs).register(http_server)
     SearchUsersRestServlet(hs).register(http_server)
     UserRegisterServlet(hs).register(http_server)
-    if hs.config.experimental.groups_enabled:
-        DeleteGroupAdminRestServlet(hs).register(http_server)
     AccountValidityRenewServlet(hs).register(http_server)
 
     # Load the media repo ones if we're using them. Otherwise load the servlets which
diff --git a/synapse/rest/admin/groups.py b/synapse/rest/admin/groups.py
deleted file mode 100644
index cd697e180e..0000000000
--- a/synapse/rest/admin/groups.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2019 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import logging
-from http import HTTPStatus
-from typing import TYPE_CHECKING, Tuple
-
-from synapse.api.errors import SynapseError
-from synapse.http.servlet import RestServlet
-from synapse.http.site import SynapseRequest
-from synapse.rest.admin._base import admin_patterns, assert_user_is_admin
-from synapse.types import JsonDict
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-class DeleteGroupAdminRestServlet(RestServlet):
-    """Allows deleting of local groups"""
-
-    PATTERNS = admin_patterns("/delete_group/(?P<group_id>[^/]*)$")
-
-    def __init__(self, hs: "HomeServer"):
-        self.group_server = hs.get_groups_server_handler()
-        self.is_mine_id = hs.is_mine_id
-        self.auth = hs.get_auth()
-
-    async def on_POST(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        await assert_user_is_admin(self.auth, requester.user)
-
-        if not self.is_mine_id(group_id):
-            raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only delete local groups")
-
-        await self.group_server.delete_group(group_id, requester.user.to_string())
-        return HTTPStatus.OK, {}
diff --git a/synapse/rest/client/groups.py b/synapse/rest/client/groups.py
deleted file mode 100644
index 7e1149c7f4..0000000000
--- a/synapse/rest/client/groups.py
+++ /dev/null
@@ -1,962 +0,0 @@
-# Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from functools import wraps
-from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple
-
-from twisted.web.server import Request
-
-from synapse.api.constants import (
-    MAX_GROUP_CATEGORYID_LENGTH,
-    MAX_GROUP_ROLEID_LENGTH,
-    MAX_GROUPID_LENGTH,
-)
-from synapse.api.errors import Codes, SynapseError
-from synapse.handlers.groups_local import GroupsLocalHandler
-from synapse.http.server import HttpServer
-from synapse.http.servlet import (
-    RestServlet,
-    assert_params_in_dict,
-    parse_json_object_from_request,
-)
-from synapse.http.site import SynapseRequest
-from synapse.types import GroupID, JsonDict
-
-from ._base import client_patterns
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-def _validate_group_id(
-    f: Callable[..., Awaitable[Tuple[int, JsonDict]]]
-) -> Callable[..., Awaitable[Tuple[int, JsonDict]]]:
-    """Wrapper to validate the form of the group ID.
-
-    Can be applied to any on_FOO methods that accepts a group ID as a URL parameter.
-    """
-
-    @wraps(f)
-    def wrapper(
-        self: RestServlet, request: Request, group_id: str, *args: Any, **kwargs: Any
-    ) -> Awaitable[Tuple[int, JsonDict]]:
-        if not GroupID.is_valid(group_id):
-            raise SynapseError(400, "%s is not a legal group ID" % (group_id,))
-
-        return f(self, request, group_id, *args, **kwargs)
-
-    return wrapper
-
-
-class GroupServlet(RestServlet):
-    """Get the group profile"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/profile$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        group_description = await self.groups_handler.get_group_profile(
-            group_id, requester_user_id
-        )
-
-        return 200, group_description
-
-    @_validate_group_id
-    async def on_POST(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert_params_in_dict(
-            content, ("name", "avatar_url", "short_description", "long_description")
-        )
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot create group profiles."
-        await self.groups_handler.update_group_profile(
-            group_id, requester_user_id, content
-        )
-
-        return 200, {}
-
-
-class GroupSummaryServlet(RestServlet):
-    """Get the full group summary"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/summary$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        get_group_summary = await self.groups_handler.get_group_summary(
-            group_id, requester_user_id
-        )
-
-        return 200, get_group_summary
-
-
-class GroupSummaryRoomsCatServlet(RestServlet):
-    """Update/delete a rooms entry in the summary.
-
-    Matches both:
-        - /groups/:group/summary/rooms/:room_id
-        - /groups/:group/summary/categories/:category/rooms/:room_id
-    """
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/summary"
-        "(/categories/(?P<category_id>[^/]+))?"
-        "/rooms/(?P<room_id>[^/]*)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self,
-        request: SynapseRequest,
-        group_id: str,
-        category_id: Optional[str],
-        room_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        if category_id == "":
-            raise SynapseError(400, "category_id cannot be empty", Codes.INVALID_PARAM)
-
-        if category_id and len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
-            raise SynapseError(
-                400,
-                "category_id may not be longer than %s characters"
-                % (MAX_GROUP_CATEGORYID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group summaries."
-        resp = await self.groups_handler.update_group_summary_room(
-            group_id,
-            requester_user_id,
-            room_id=room_id,
-            category_id=category_id,
-            content=content,
-        )
-
-        return 200, resp
-
-    @_validate_group_id
-    async def on_DELETE(
-        self, request: SynapseRequest, group_id: str, category_id: str, room_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group profiles."
-        resp = await self.groups_handler.delete_group_summary_room(
-            group_id, requester_user_id, room_id=room_id, category_id=category_id
-        )
-
-        return 200, resp
-
-
-class GroupCategoryServlet(RestServlet):
-    """Get/add/update/delete a group category"""
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str, category_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        category = await self.groups_handler.get_group_category(
-            group_id, requester_user_id, category_id=category_id
-        )
-
-        return 200, category
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str, category_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        if not category_id:
-            raise SynapseError(400, "category_id cannot be empty", Codes.INVALID_PARAM)
-
-        if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
-            raise SynapseError(
-                400,
-                "category_id may not be longer than %s characters"
-                % (MAX_GROUP_CATEGORYID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group categories."
-        resp = await self.groups_handler.update_group_category(
-            group_id, requester_user_id, category_id=category_id, content=content
-        )
-
-        return 200, resp
-
-    @_validate_group_id
-    async def on_DELETE(
-        self, request: SynapseRequest, group_id: str, category_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group categories."
-        resp = await self.groups_handler.delete_group_category(
-            group_id, requester_user_id, category_id=category_id
-        )
-
-        return 200, resp
-
-
-class GroupCategoriesServlet(RestServlet):
-    """Get all group categories"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/categories/$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        category = await self.groups_handler.get_group_categories(
-            group_id, requester_user_id
-        )
-
-        return 200, category
-
-
-class GroupRoleServlet(RestServlet):
-    """Get/add/update/delete a group role"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str, role_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        category = await self.groups_handler.get_group_role(
-            group_id, requester_user_id, role_id=role_id
-        )
-
-        return 200, category
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str, role_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        if not role_id:
-            raise SynapseError(400, "role_id cannot be empty", Codes.INVALID_PARAM)
-
-        if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
-            raise SynapseError(
-                400,
-                "role_id may not be longer than %s characters"
-                % (MAX_GROUP_ROLEID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group roles."
-        resp = await self.groups_handler.update_group_role(
-            group_id, requester_user_id, role_id=role_id, content=content
-        )
-
-        return 200, resp
-
-    @_validate_group_id
-    async def on_DELETE(
-        self, request: SynapseRequest, group_id: str, role_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group roles."
-        resp = await self.groups_handler.delete_group_role(
-            group_id, requester_user_id, role_id=role_id
-        )
-
-        return 200, resp
-
-
-class GroupRolesServlet(RestServlet):
-    """Get all group roles"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/roles/$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        category = await self.groups_handler.get_group_roles(
-            group_id, requester_user_id
-        )
-
-        return 200, category
-
-
-class GroupSummaryUsersRoleServlet(RestServlet):
-    """Update/delete a user's entry in the summary.
-
-    Matches both:
-        - /groups/:group/summary/users/:room_id
-        - /groups/:group/summary/roles/:role/users/:user_id
-    """
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/summary"
-        "(/roles/(?P<role_id>[^/]+))?"
-        "/users/(?P<user_id>[^/]*)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self,
-        request: SynapseRequest,
-        group_id: str,
-        role_id: Optional[str],
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        if role_id == "":
-            raise SynapseError(400, "role_id cannot be empty", Codes.INVALID_PARAM)
-
-        if role_id and len(role_id) > MAX_GROUP_ROLEID_LENGTH:
-            raise SynapseError(
-                400,
-                "role_id may not be longer than %s characters"
-                % (MAX_GROUP_ROLEID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group summaries."
-        resp = await self.groups_handler.update_group_summary_user(
-            group_id,
-            requester_user_id,
-            user_id=user_id,
-            role_id=role_id,
-            content=content,
-        )
-
-        return 200, resp
-
-    @_validate_group_id
-    async def on_DELETE(
-        self, request: SynapseRequest, group_id: str, role_id: str, user_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group summaries."
-        resp = await self.groups_handler.delete_group_summary_user(
-            group_id, requester_user_id, user_id=user_id, role_id=role_id
-        )
-
-        return 200, resp
-
-
-class GroupRoomServlet(RestServlet):
-    """Get all rooms in a group"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/rooms$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        result = await self.groups_handler.get_rooms_in_group(
-            group_id, requester_user_id
-        )
-
-        return 200, result
-
-
-class GroupUsersServlet(RestServlet):
-    """Get all users in a group"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/users$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        result = await self.groups_handler.get_users_in_group(
-            group_id, requester_user_id
-        )
-
-        return 200, result
-
-
-class GroupInvitedUsersServlet(RestServlet):
-    """Get users invited to a group"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/invited_users$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        result = await self.groups_handler.get_invited_users_in_group(
-            group_id, requester_user_id
-        )
-
-        return 200, result
-
-
-class GroupSettingJoinPolicyServlet(RestServlet):
-    """Set group join policy"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/settings/m.join_policy$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group join policy."
-        result = await self.groups_handler.set_group_join_policy(
-            group_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupCreateServlet(RestServlet):
-    """Create a group"""
-
-    PATTERNS = client_patterns("/create_group$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-        self.server_name = hs.hostname
-
-    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        # TODO: Create group on remote server
-        content = parse_json_object_from_request(request)
-        localpart = content.pop("localpart")
-        group_id = GroupID(localpart, self.server_name).to_string()
-
-        if not localpart:
-            raise SynapseError(400, "Group ID cannot be empty", Codes.INVALID_PARAM)
-
-        if len(group_id) > MAX_GROUPID_LENGTH:
-            raise SynapseError(
-                400,
-                "Group ID may not be longer than %s characters" % (MAX_GROUPID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot create groups."
-        result = await self.groups_handler.create_group(
-            group_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupAdminRoomsServlet(RestServlet):
-    """Add a room to the group"""
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/admin/rooms/(?P<room_id>[^/]*)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str, room_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify rooms in a group."
-        result = await self.groups_handler.add_room_to_group(
-            group_id, requester_user_id, room_id, content
-        )
-
-        return 200, result
-
-    @_validate_group_id
-    async def on_DELETE(
-        self, request: SynapseRequest, group_id: str, room_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group categories."
-        result = await self.groups_handler.remove_room_from_group(
-            group_id, requester_user_id, room_id
-        )
-
-        return 200, result
-
-
-class GroupAdminRoomsConfigServlet(RestServlet):
-    """Update the config of a room in a group"""
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/admin/rooms/(?P<room_id>[^/]*)"
-        "/config/(?P<config_key>[^/]*)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str, room_id: str, config_key: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group categories."
-        result = await self.groups_handler.update_room_in_group(
-            group_id, requester_user_id, room_id, config_key, content
-        )
-
-        return 200, result
-
-
-class GroupAdminUsersInviteServlet(RestServlet):
-    """Invite a user to the group"""
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/admin/users/invite/(?P<user_id>[^/]*)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-        self.store = hs.get_datastores().main
-        self.is_mine_id = hs.is_mine_id
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str, user_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        config = content.get("config", {})
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot invite users to a group."
-        result = await self.groups_handler.invite(
-            group_id, user_id, requester_user_id, config
-        )
-
-        return 200, result
-
-
-class GroupAdminUsersKickServlet(RestServlet):
-    """Kick a user from the group"""
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/admin/users/remove/(?P<user_id>[^/]*)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str, user_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot kick users from a group."
-        result = await self.groups_handler.remove_user_from_group(
-            group_id, user_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupSelfLeaveServlet(RestServlet):
-    """Leave a joined group"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/leave$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot leave a group for a users."
-        result = await self.groups_handler.remove_user_from_group(
-            group_id, requester_user_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupSelfJoinServlet(RestServlet):
-    """Attempt to join a group, or knock"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/join$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot join a user to a group."
-        result = await self.groups_handler.join_group(
-            group_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupSelfAcceptInviteServlet(RestServlet):
-    """Accept a group invite"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/accept_invite$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot accept an invite to a group."
-        result = await self.groups_handler.accept_invite(
-            group_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupSelfUpdatePublicityServlet(RestServlet):
-    """Update whether we publicise a users membership of a group"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/update_publicity$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.store = hs.get_datastores().main
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        publicise = content["publicise"]
-        await self.store.update_group_publicity(group_id, requester_user_id, publicise)
-
-        return 200, {}
-
-
-class PublicisedGroupsForUserServlet(RestServlet):
-    """Get the list of groups a user is advertising"""
-
-    PATTERNS = client_patterns("/publicised_groups/(?P<user_id>[^/]*)$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.store = hs.get_datastores().main
-        self.groups_handler = hs.get_groups_local_handler()
-
-    async def on_GET(
-        self, request: SynapseRequest, user_id: str
-    ) -> Tuple[int, JsonDict]:
-        await self.auth.get_user_by_req(request, allow_guest=True)
-
-        result = await self.groups_handler.get_publicised_groups_for_user(user_id)
-
-        return 200, result
-
-
-class PublicisedGroupsForUsersServlet(RestServlet):
-    """Get the list of groups a user is advertising"""
-
-    PATTERNS = client_patterns("/publicised_groups$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.store = hs.get_datastores().main
-        self.groups_handler = hs.get_groups_local_handler()
-
-    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        await self.auth.get_user_by_req(request, allow_guest=True)
-
-        content = parse_json_object_from_request(request)
-        user_ids = content["user_ids"]
-
-        result = await self.groups_handler.bulk_get_publicised_groups(user_ids)
-
-        return 200, result
-
-
-class GroupsForUserServlet(RestServlet):
-    """Get all groups the logged in user is joined to"""
-
-    PATTERNS = client_patterns("/joined_groups$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        result = await self.groups_handler.get_joined_groups(requester_user_id)
-
-        return 200, result
-
-
-def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
-    GroupServlet(hs).register(http_server)
-    GroupSummaryServlet(hs).register(http_server)
-    GroupInvitedUsersServlet(hs).register(http_server)
-    GroupUsersServlet(hs).register(http_server)
-    GroupRoomServlet(hs).register(http_server)
-    GroupSettingJoinPolicyServlet(hs).register(http_server)
-    GroupCreateServlet(hs).register(http_server)
-    GroupAdminRoomsServlet(hs).register(http_server)
-    GroupAdminRoomsConfigServlet(hs).register(http_server)
-    GroupAdminUsersInviteServlet(hs).register(http_server)
-    GroupAdminUsersKickServlet(hs).register(http_server)
-    GroupSelfLeaveServlet(hs).register(http_server)
-    GroupSelfJoinServlet(hs).register(http_server)
-    GroupSelfAcceptInviteServlet(hs).register(http_server)
-    GroupsForUserServlet(hs).register(http_server)
-    GroupCategoryServlet(hs).register(http_server)
-    GroupCategoriesServlet(hs).register(http_server)
-    GroupSummaryRoomsCatServlet(hs).register(http_server)
-    GroupRoleServlet(hs).register(http_server)
-    GroupRolesServlet(hs).register(http_server)
-    GroupSelfUpdatePublicityServlet(hs).register(http_server)
-    GroupSummaryUsersRoleServlet(hs).register(http_server)
-    PublicisedGroupsForUserServlet(hs).register(http_server)
-    PublicisedGroupsForUsersServlet(hs).register(http_server)
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 5a2361a2e6..7a5ce8ad0e 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -1193,12 +1193,7 @@ class TimestampLookupRestServlet(RestServlet):
 
 
 class RoomHierarchyRestServlet(RestServlet):
-    PATTERNS = (
-        re.compile(
-            "^/_matrix/client/(v1|unstable/org.matrix.msc2946)"
-            "/rooms/(?P<room_id>[^/]*)/hierarchy$"
-        ),
-    )
+    PATTERNS = (re.compile("^/_matrix/client/v1/rooms/(?P<room_id>[^/]*)/hierarchy$"),)
 
     def __init__(self, hs: "HomeServer"):
         super().__init__()
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index e8772f86e7..f596b792fa 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -298,14 +298,6 @@ class SyncRestServlet(RestServlet):
         if archived:
             response["rooms"][Membership.LEAVE] = archived
 
-        if sync_result.groups is not None:
-            if sync_result.groups.join:
-                response["groups"][Membership.JOIN] = sync_result.groups.join
-            if sync_result.groups.invite:
-                response["groups"][Membership.INVITE] = sync_result.groups.invite
-            if sync_result.groups.leave:
-                response["groups"][Membership.LEAVE] = sync_result.groups.leave
-
         return response
 
     @staticmethod
diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py
index ca73965fc2..e72c8987cc 100644
--- a/synapse/rest/media/v1/preview_html.py
+++ b/synapse/rest/media/v1/preview_html.py
@@ -281,7 +281,7 @@ def parse_html_description(tree: "etree.Element") -> Optional[str]:
 
 
 def _iterate_over_text(
-    tree: "etree.Element", *tags_to_ignore: Iterable[Union[str, "etree.Comment"]]
+    tree: "etree.Element", *tags_to_ignore: Union[str, "etree.Comment"]
 ) -> Generator[str, None, None]:
     """Iterate over the tree returning text nodes in a depth first fashion,
     skipping text nodes inside certain tags.
diff --git a/synapse/server.py b/synapse/server.py
index ee60cce8eb..3fd23aaf52 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -21,17 +21,7 @@
 import abc
 import functools
 import logging
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Dict,
-    List,
-    Optional,
-    TypeVar,
-    Union,
-    cast,
-)
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypeVar, cast
 
 from twisted.internet.interfaces import IOpenSSLContextFactory
 from twisted.internet.tcp import Port
@@ -60,8 +50,6 @@ from synapse.federation.federation_server import (
 from synapse.federation.send_queue import FederationRemoteSendQueue
 from synapse.federation.sender import AbstractFederationSender, FederationSender
 from synapse.federation.transport.client import TransportLayerClient
-from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer
-from synapse.groups.groups_server import GroupsServerHandler, GroupsServerWorkerHandler
 from synapse.handlers.account import AccountHandler
 from synapse.handlers.account_data import AccountDataHandler
 from synapse.handlers.account_validity import AccountValidityHandler
@@ -79,7 +67,6 @@ from synapse.handlers.event_auth import EventAuthHandler
 from synapse.handlers.events import EventHandler, EventStreamHandler
 from synapse.handlers.federation import FederationHandler
 from synapse.handlers.federation_event import FederationEventHandler
-from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler
 from synapse.handlers.identity import IdentityHandler
 from synapse.handlers.initial_sync import InitialSyncHandler
 from synapse.handlers.message import EventCreationHandler, MessageHandler
@@ -652,30 +639,6 @@ class HomeServer(metaclass=abc.ABCMeta):
         return UserDirectoryHandler(self)
 
     @cache_in_self
-    def get_groups_local_handler(
-        self,
-    ) -> Union[GroupsLocalWorkerHandler, GroupsLocalHandler]:
-        if self.config.worker.worker_app:
-            return GroupsLocalWorkerHandler(self)
-        else:
-            return GroupsLocalHandler(self)
-
-    @cache_in_self
-    def get_groups_server_handler(self):
-        if self.config.worker.worker_app:
-            return GroupsServerWorkerHandler(self)
-        else:
-            return GroupsServerHandler(self)
-
-    @cache_in_self
-    def get_groups_attestation_signing(self) -> GroupAttestationSigning:
-        return GroupAttestationSigning(self)
-
-    @cache_in_self
-    def get_groups_attestation_renewer(self) -> GroupAttestionRenewer:
-        return GroupAttestionRenewer(self)
-
-    @cache_in_self
     def get_stats_handler(self) -> StatsHandler:
         return StatsHandler(self)
 
diff --git a/synapse/spam_checker_api/__init__.py b/synapse/spam_checker_api/__init__.py
index 73018f2d00..95132c80b7 100644
--- a/synapse/spam_checker_api/__init__.py
+++ b/synapse/spam_checker_api/__init__.py
@@ -12,13 +12,38 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from enum import Enum
+from typing import Union
+
+from synapse.api.errors import Codes
 
 
 class RegistrationBehaviour(Enum):
     """
-    Enum to define whether a registration request should allowed, denied, or shadow-banned.
+    Enum to define whether a registration request should be allowed, denied, or shadow-banned.
     """
 
     ALLOW = "allow"
     SHADOW_BAN = "shadow_ban"
     DENY = "deny"
+
+
+# We define the following singleton enum rather than a string to be able to
+# write `Union[Allow, ..., str]` in some of the callbacks for the spam-checker
+# API, where the `str` is required to maintain backwards compatibility with
+# previous versions of the API.
+class Allow(Enum):
+    """
+    Singleton to allow events to pass through in SpamChecker APIs.
+    """
+
+    ALLOW = "allow"
+
+
+Decision = Union[Allow, Codes]
+"""
+Union to define whether a request should be allowed or rejected.
+
+To accept a request, return `ALLOW`.
+
+To reject a request without any specific information, use `Codes.FORBIDDEN`.
+"""
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 4b4ed42cff..9c9d946f38 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -127,7 +127,7 @@ class StateHandler:
     def __init__(self, hs: "HomeServer"):
         self.clock = hs.get_clock()
         self.store = hs.get_datastores().main
-        self.state_store = hs.get_storage().state
+        self.state_storage = hs.get_storage().state
         self.hs = hs
         self._state_resolution_handler = hs.get_state_resolution_handler()
         self._storage = hs.get_storage()
@@ -261,7 +261,7 @@ class StateHandler:
     async def compute_event_context(
         self,
         event: EventBase,
-        old_state: Optional[Iterable[EventBase]] = None,
+        state_ids_before_event: Optional[StateMap[str]] = None,
         partial_state: bool = False,
     ) -> EventContext:
         """Build an EventContext structure for a non-outlier event.
@@ -273,12 +273,12 @@ class StateHandler:
 
         Args:
             event:
-            old_state: The state at the event if it can't be
-                calculated from existing events. This is normally only specified
-                when receiving an event from federation where we don't have the
-                prev events for, e.g. when backfilling.
-            partial_state: True if `old_state` is partial and omits non-critical
-                membership events
+            state_ids_before_event: The event ids of the state before the event if
+                it can't be calculated from existing events. This is normally
+                only specified when receiving an event from federation where we
+                don't have the prev events, e.g. when backfilling.
+            partial_state: True if `state_ids_before_event` is partial and omits
+                non-critical membership events
         Returns:
             The event context.
         """
@@ -286,13 +286,11 @@ class StateHandler:
         assert not event.internal_metadata.is_outlier()
 
         #
-        # first of all, figure out the state before the event
+        # first of all, figure out the state before the event, unless we
+        # already have it.
         #
-        if old_state:
+        if state_ids_before_event:
             # if we're given the state before the event, then we use that
-            state_ids_before_event: StateMap[str] = {
-                (s.type, s.state_key): s.event_id for s in old_state
-            }
             state_group_before_event = None
             state_group_before_event_prev_group = None
             deltas_to_state_group_before_event = None
@@ -339,7 +337,7 @@ class StateHandler:
         #
 
         if not state_group_before_event:
-            state_group_before_event = await self.state_store.store_state_group(
+            state_group_before_event = await self.state_storage.store_state_group(
                 event.event_id,
                 event.room_id,
                 prev_group=state_group_before_event_prev_group,
@@ -384,7 +382,7 @@ class StateHandler:
         state_ids_after_event[key] = event.event_id
         delta_ids = {key: event.event_id}
 
-        state_group_after_event = await self.state_store.store_state_group(
+        state_group_after_event = await self.state_storage.store_state_group(
             event.event_id,
             event.room_id,
             prev_group=state_group_before_event,
@@ -418,7 +416,7 @@ class StateHandler:
         """
         logger.debug("resolve_state_groups event_ids %s", event_ids)
 
-        state_groups = await self.state_store.get_state_group_for_events(event_ids)
+        state_groups = await self.state_storage.get_state_group_for_events(event_ids)
 
         state_group_ids = state_groups.values()
 
@@ -426,8 +424,8 @@ class StateHandler:
         state_group_ids_set = set(state_group_ids)
         if len(state_group_ids_set) == 1:
             (state_group_id,) = state_group_ids_set
-            state = await self.state_store.get_state_for_groups(state_group_ids_set)
-            prev_group, delta_ids = await self.state_store.get_state_group_delta(
+            state = await self.state_storage.get_state_for_groups(state_group_ids_set)
+            prev_group, delta_ids = await self.state_storage.get_state_group_delta(
                 state_group_id
             )
             return _StateCacheEntry(
@@ -441,7 +439,7 @@ class StateHandler:
 
         room_version = await self.store.get_room_version_id(room_id)
 
-        state_to_resolve = await self.state_store.get_state_for_groups(
+        state_to_resolve = await self.state_storage.get_state_for_groups(
             state_group_ids_set
         )
 
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 5ddb58a8a2..a78d68a9d7 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -90,6 +90,8 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
     "device_lists_remote_extremeties": "device_lists_remote_extremeties_unique_idx",
     "device_lists_remote_cache": "device_lists_remote_cache_unique_idx",
     "event_search": "event_search_event_id_idx",
+    "local_media_repository_thumbnails": "local_media_repository_thumbnails_method_idx",
+    "remote_media_cache_thumbnails": "remote_media_repository_thumbnails_method_idx",
 }
 
 
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index dcfe8caf47..562dcbe94d 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -1057,7 +1057,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
             INNER JOIN batch_events AS c
             ON i.next_batch_id = c.batch_id
             /* Get the depth of the batch start event from the events table */
-            INNER JOIN events AS e USING (event_id)
+            INNER JOIN events AS e ON c.event_id = e.event_id
             /* Find an insertion event which matches the given event_id */
             WHERE i.event_id = ?
             LIMIT ?
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index b7c4c62222..b019979350 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -938,7 +938,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
         users can still get a list of recent highlights.
 
         Args:
-            txn: The transcation
+            txn: The transaction
             room_id: Room ID to delete from
             user_id: user ID to delete for
             stream_ordering: The lowest stream ordering which will
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 0df8ff5395..17e35cf63e 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1828,6 +1828,10 @@ class PersistEventsStore:
             self.store.get_aggregation_groups_for_event.invalidate,
             (relation.parent_id,),
         )
+        txn.call_after(
+            self.store.get_mutual_event_relations_for_rel_type.invalidate,
+            (relation.parent_id,),
+        )
 
         if relation.rel_type == RelationTypes.REPLACE:
             txn.call_after(
@@ -2004,6 +2008,11 @@ class PersistEventsStore:
             self.store._invalidate_cache_and_stream(
                 txn, self.store.get_thread_participated, (redacted_relates_to,)
             )
+            self.store._invalidate_cache_and_stream(
+                txn,
+                self.store.get_mutual_event_relations_for_rel_type,
+                (redacted_relates_to,),
+            )
 
         self.db_pool.simple_delete_txn(
             txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index ad67901cc1..d5aefe02b6 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -61,6 +61,11 @@ def _is_experimental_rule_enabled(
         and not experimental_config.msc3786_enabled
     ):
         return False
+    if (
+        rule_id == "global/underride/.org.matrix.msc3772.thread_reply"
+        and not experimental_config.msc3772_enabled
+    ):
+        return False
     return True
 
 
@@ -169,7 +174,7 @@ class PushRulesWorkerStore(
                 "conditions",
                 "actions",
             ),
-            desc="get_push_rules_enabled_for_user",
+            desc="get_push_rules_for_user",
         )
 
         rows.sort(key=lambda row: (-int(row["priority_class"]), -int(row["priority"])))
@@ -183,10 +188,10 @@ class PushRulesWorkerStore(
         results = await self.db_pool.simple_select_list(
             table="push_rules_enable",
             keyvalues={"user_name": user_id},
-            retcols=("user_name", "rule_id", "enabled"),
+            retcols=("rule_id", "enabled"),
             desc="get_push_rules_enabled_for_user",
         )
-        return {r["rule_id"]: False if r["enabled"] == 0 else True for r in results}
+        return {r["rule_id"]: bool(r["enabled"]) for r in results}
 
     async def have_push_rules_changed_for_user(
         self, user_id: str, last_id: int
@@ -208,11 +213,7 @@ class PushRulesWorkerStore(
                 "have_push_rules_changed", have_push_rules_changed_txn
             )
 
-    @cachedList(
-        cached_method_name="get_push_rules_for_user",
-        list_name="user_ids",
-        num_args=1,
-    )
+    @cachedList(cached_method_name="get_push_rules_for_user", list_name="user_ids")
     async def bulk_get_push_rules(
         self, user_ids: Collection[str]
     ) -> Dict[str, List[JsonDict]]:
@@ -244,9 +245,7 @@ class PushRulesWorkerStore(
         return results
 
     @cachedList(
-        cached_method_name="get_push_rules_enabled_for_user",
-        list_name="user_ids",
-        num_args=1,
+        cached_method_name="get_push_rules_enabled_for_user", list_name="user_ids"
     )
     async def bulk_get_push_rules_enabled(
         self, user_ids: Collection[str]
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index d035969a31..cfa4d4924d 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -673,8 +673,11 @@ class ReceiptsWorkerStore(SQLBaseStore):
             lock=False,
         )
 
+        # When updating a local user's read receipt, remove any push actions
+        # which resulted from the receipt's event and all earlier events.
         if (
-            receipt_type in (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE)
+            self.hs.is_mine_id(user_id)
+            and receipt_type in (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE)
             and stream_ordering is not None
         ):
             self._remove_old_push_actions_before_txn(  # type: ignore[attr-defined]
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index fe8fded88b..b457bc189e 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -767,6 +767,59 @@ class RelationsWorkerStore(SQLBaseStore):
             "get_if_user_has_annotated_event", _get_if_user_has_annotated_event
         )
 
+    @cached(iterable=True)
+    async def get_mutual_event_relations_for_rel_type(
+        self, event_id: str, relation_type: str
+    ) -> Set[Tuple[str, str]]:
+        raise NotImplementedError()
+
+    @cachedList(
+        cached_method_name="get_mutual_event_relations_for_rel_type",
+        list_name="relation_types",
+    )
+    async def get_mutual_event_relations(
+        self, event_id: str, relation_types: Collection[str]
+    ) -> Dict[str, Set[Tuple[str, str]]]:
+        """
+        Fetch event metadata for events which relate to the same event as the given event.
+
+        If the given event has no relation information, returns an empty dictionary.
+
+        Args:
+            event_id: The event ID which is targeted by relations.
+            relation_types: The relation types to check for mutual relations.
+
+        Returns:
+            A dictionary of relation type to:
+                A set of tuples of:
+                    The sender
+                    The event type
+        """
+        rel_type_sql, rel_type_args = make_in_list_sql_clause(
+            self.database_engine, "relation_type", relation_types
+        )
+
+        sql = f"""
+            SELECT DISTINCT relation_type, sender, type FROM event_relations
+            INNER JOIN events USING (event_id)
+            WHERE relates_to_id = ? AND {rel_type_sql}
+        """
+
+        def _get_event_relations(
+            txn: LoggingTransaction,
+        ) -> Dict[str, Set[Tuple[str, str]]]:
+            txn.execute(sql, [event_id] + rel_type_args)
+            result: Dict[str, Set[Tuple[str, str]]] = {
+                rel_type: set() for rel_type in relation_types
+            }
+            for rel_type, sender, type in txn.fetchall():
+                result[rel_type].add((sender, type))
+            return result
+
+        return await self.db_pool.runInteraction(
+            "get_event_relations", _get_event_relations
+        )
+
 
 class RelationsStore(RelationsWorkerStore):
     pass
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 87e9482c60..10f2ceb50b 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -45,7 +45,7 @@ from synapse.storage.database import (
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.types import Cursor
 from synapse.storage.util.id_generators import IdGenerator
-from synapse.types import JsonDict, ThirdPartyInstanceID
+from synapse.types import JsonDict, RetentionPolicy, ThirdPartyInstanceID
 from synapse.util import json_encoder
 from synapse.util.caches.descriptors import cached
 from synapse.util.stringutils import MXC_REGEX
@@ -233,24 +233,23 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
                     UNION SELECT room_id from appservice_room_list
             """
 
-            sql = """
+            sql = f"""
                 SELECT
                     COUNT(*)
                 FROM (
-                    %(published_sql)s
+                    {published_sql}
                 ) published
                 INNER JOIN room_stats_state USING (room_id)
                 INNER JOIN room_stats_current USING (room_id)
                 WHERE
                     (
-                        join_rules = 'public' OR join_rules = '%(knock_join_rule)s'
+                        join_rules = '{JoinRules.PUBLIC}'
+                        OR join_rules = '{JoinRules.KNOCK}'
+                        OR join_rules = '{JoinRules.KNOCK_RESTRICTED}'
                         OR history_visibility = 'world_readable'
                     )
                     AND joined_members > 0
-            """ % {
-                "published_sql": published_sql,
-                "knock_join_rule": JoinRules.KNOCK,
-            }
+            """
 
             txn.execute(sql, query_args)
             return cast(Tuple[int], txn.fetchone())[0]
@@ -369,29 +368,29 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         if where_clauses:
             where_clause = " AND " + " AND ".join(where_clauses)
 
-        sql = """
+        dir = "DESC" if forwards else "ASC"
+        sql = f"""
             SELECT
                 room_id, name, topic, canonical_alias, joined_members,
                 avatar, history_visibility, guest_access, join_rules
             FROM (
-                %(published_sql)s
+                {published_sql}
             ) published
             INNER JOIN room_stats_state USING (room_id)
             INNER JOIN room_stats_current USING (room_id)
             WHERE
                 (
-                    join_rules = 'public' OR join_rules = '%(knock_join_rule)s'
+                    join_rules = '{JoinRules.PUBLIC}'
+                    OR join_rules = '{JoinRules.KNOCK}'
+                    OR join_rules = '{JoinRules.KNOCK_RESTRICTED}'
                     OR history_visibility = 'world_readable'
                 )
                 AND joined_members > 0
-                %(where_clause)s
-            ORDER BY joined_members %(dir)s, room_id %(dir)s
-        """ % {
-            "published_sql": published_sql,
-            "where_clause": where_clause,
-            "dir": "DESC" if forwards else "ASC",
-            "knock_join_rule": JoinRules.KNOCK,
-        }
+                {where_clause}
+            ORDER BY
+                joined_members {dir},
+                room_id {dir}
+        """
 
         if limit is not None:
             query_args.append(limit)
@@ -699,7 +698,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         await self.db_pool.runInteraction("delete_ratelimit", delete_ratelimit_txn)
 
     @cached()
-    async def get_retention_policy_for_room(self, room_id: str) -> Dict[str, int]:
+    async def get_retention_policy_for_room(self, room_id: str) -> RetentionPolicy:
         """Get the retention policy for a given room.
 
         If no retention policy has been found for this room, returns a policy defined
@@ -707,12 +706,20 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         the 'max_lifetime' if no default policy has been defined in the server's
         configuration).
 
+        If support for retention policies is disabled, a policy with a 'min_lifetime' and
+        'max_lifetime' of None is returned.
+
         Args:
             room_id: The ID of the room to get the retention policy of.
 
         Returns:
             A dict containing "min_lifetime" and "max_lifetime" for this room.
         """
+        # If the room retention feature is disabled, return a policy with no minimum nor
+        # maximum. This prevents incorrectly filtering out events when sending to
+        # the client.
+        if not self.config.retention.retention_enabled:
+            return RetentionPolicy()
 
         def get_retention_policy_for_room_txn(
             txn: LoggingTransaction,
@@ -736,10 +743,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         # If we don't know this room ID, ret will be None, in this case return the default
         # policy.
         if not ret:
-            return {
-                "min_lifetime": self.config.retention.retention_default_min_lifetime,
-                "max_lifetime": self.config.retention.retention_default_max_lifetime,
-            }
+            return RetentionPolicy(
+                min_lifetime=self.config.retention.retention_default_min_lifetime,
+                max_lifetime=self.config.retention.retention_default_max_lifetime,
+            )
 
         min_lifetime = ret[0]["min_lifetime"]
         max_lifetime = ret[0]["max_lifetime"]
@@ -754,10 +761,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         if max_lifetime is None:
             max_lifetime = self.config.retention.retention_default_max_lifetime
 
-        return {
-            "min_lifetime": min_lifetime,
-            "max_lifetime": max_lifetime,
-        }
+        return RetentionPolicy(
+            min_lifetime=min_lifetime,
+            max_lifetime=max_lifetime,
+        )
 
     async def get_media_mxcs_in_room(self, room_id: str) -> Tuple[List[str], List[str]]:
         """Retrieves all the local and remote media MXC URIs in a given room
@@ -994,7 +1001,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
 
     async def get_rooms_for_retention_period_in_range(
         self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False
-    ) -> Dict[str, Dict[str, Optional[int]]]:
+    ) -> Dict[str, RetentionPolicy]:
         """Retrieves all of the rooms within the given retention range.
 
         Optionally includes the rooms which don't have a retention policy.
@@ -1016,7 +1023,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
 
         def get_rooms_for_retention_period_in_range_txn(
             txn: LoggingTransaction,
-        ) -> Dict[str, Dict[str, Optional[int]]]:
+        ) -> Dict[str, RetentionPolicy]:
             range_conditions = []
             args = []
 
@@ -1047,10 +1054,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
             rooms_dict = {}
 
             for row in rows:
-                rooms_dict[row["room_id"]] = {
-                    "min_lifetime": row["min_lifetime"],
-                    "max_lifetime": row["max_lifetime"],
-                }
+                rooms_dict[row["room_id"]] = RetentionPolicy(
+                    min_lifetime=row["min_lifetime"],
+                    max_lifetime=row["max_lifetime"],
+                )
 
             if include_null:
                 # If required, do a second query that retrieves all of the rooms we know
@@ -1065,10 +1072,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
                 # policy in its state), add it with a null policy.
                 for row in rows:
                     if row["room_id"] not in rooms_dict:
-                        rooms_dict[row["room_id"]] = {
-                            "min_lifetime": None,
-                            "max_lifetime": None,
-                        }
+                        rooms_dict[row["room_id"]] = RetentionPolicy()
 
             return rooms_dict
 
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index 18ae8aee29..ea5cbdac08 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -16,6 +16,8 @@ import collections.abc
 import logging
 from typing import TYPE_CHECKING, Collection, Dict, Iterable, Optional, Set, Tuple
 
+import attr
+
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
@@ -26,6 +28,7 @@ from synapse.storage.database import (
     DatabasePool,
     LoggingDatabaseConnection,
     LoggingTransaction,
+    make_in_list_sql_clause,
 )
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
@@ -33,6 +36,7 @@ from synapse.storage.state import StateFilter
 from synapse.types import JsonDict, JsonMapping, StateMap
 from synapse.util.caches import intern_string
 from synapse.util.caches.descriptors import cached, cachedList
+from synapse.util.iterutils import batch_iter
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -43,6 +47,15 @@ logger = logging.getLogger(__name__)
 MAX_STATE_DELTA_HOPS = 100
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class EventMetadata:
+    """Returned by `get_metadata_for_events`"""
+
+    room_id: str
+    event_type: str
+    state_key: Optional[str]
+
+
 def _retrieve_and_check_room_version(room_id: str, room_version_id: str) -> RoomVersion:
     v = KNOWN_ROOM_VERSIONS.get(room_version_id)
     if not v:
@@ -133,6 +146,52 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         return room_version
 
+    async def get_metadata_for_events(
+        self, event_ids: Collection[str]
+    ) -> Dict[str, EventMetadata]:
+        """Get some metadata (room_id, type, state_key) for the given events.
+
+        This method is a faster alternative to fetching the full events from
+        the DB, and should be used when the full event is not needed.
+
+        Returns metadata for rejected and redacted events. Events that have not
+        been persisted are omitted from the returned dict.
+        """
+
+        def get_metadata_for_events_txn(
+            txn: LoggingTransaction,
+            batch_ids: Collection[str],
+        ) -> Dict[str, EventMetadata]:
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "e.event_id", batch_ids
+            )
+
+            sql = f"""
+                SELECT e.event_id, e.room_id, e.type, e.state_key FROM events AS e
+                LEFT JOIN state_events USING (event_id)
+                WHERE {clause}
+            """
+
+            txn.execute(sql, args)
+            return {
+                event_id: EventMetadata(
+                    room_id=room_id, event_type=event_type, state_key=state_key
+                )
+                for event_id, room_id, event_type, state_key in txn
+            }
+
+        result_map: Dict[str, EventMetadata] = {}
+        for batch_ids in batch_iter(event_ids, 1000):
+            result_map.update(
+                await self.db_pool.runInteraction(
+                    "get_metadata_for_events",
+                    get_metadata_for_events_txn,
+                    batch_ids=batch_ids,
+                )
+            )
+
+        return result_map
+
     async def get_room_predecessor(self, room_id: str) -> Optional[JsonMapping]:
         """Get the predecessor of an upgraded room if it exists.
         Otherwise return None.
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index 0fc282866b..a21dea91c8 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -313,7 +313,7 @@ class EventsPersistenceStorage:
             List of events persisted, the current position room stream position.
             The list of events persisted may not be the same as those passed in
             if they were deduplicated due to an event already existing that
-            matched the transcation ID; the existing event is returned in such
+            matched the transaction ID; the existing event is returned in such
             a case.
         """
         partitioned: Dict[str, List[Tuple[EventBase, EventContext]]] = {}
diff --git a/synapse/types.py b/synapse/types.py
index bd8071d51d..091cc611ab 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -320,29 +320,6 @@ class EventID(DomainSpecificString):
     SIGIL = "$"
 
 
-@attr.s(slots=True, frozen=True, repr=False)
-class GroupID(DomainSpecificString):
-    """Structure representing a group ID."""
-
-    SIGIL = "+"
-
-    @classmethod
-    def from_string(cls: Type[DS], s: str) -> DS:
-        group_id: DS = super().from_string(s)  # type: ignore
-
-        if not group_id.localpart:
-            raise SynapseError(400, "Group ID cannot be empty", Codes.INVALID_PARAM)
-
-        if contains_invalid_mxid_characters(group_id.localpart):
-            raise SynapseError(
-                400,
-                "Group ID can only contain characters a-z, 0-9, or '=_-./'",
-                Codes.INVALID_PARAM,
-            )
-
-        return group_id
-
-
 mxid_localpart_allowed_characters = set(
     "_-./=" + string.ascii_lowercase + string.digits
 )
@@ -932,3 +909,9 @@ class UserProfile(TypedDict):
     user_id: str
     display_name: Optional[str]
     avatar_url: Optional[str]
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class RetentionPolicy:
+    min_lifetime: Optional[int] = None
+    max_lifetime: Optional[int] = None
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index eda92d864d..867f315b2a 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -595,13 +595,14 @@ def cached(
 def cachedList(
     *, cached_method_name: str, list_name: str, num_args: Optional[int] = None
 ) -> Callable[[F], _CachedFunction[F]]:
-    """Creates a descriptor that wraps a function in a `CacheListDescriptor`.
+    """Creates a descriptor that wraps a function in a `DeferredCacheListDescriptor`.
 
-    Used to do batch lookups for an already created cache. A single argument
+    Used to do batch lookups for an already created cache. One of the arguments
     is specified as a list that is iterated through to lookup keys in the
     original cache. A new tuple consisting of the (deduplicated) keys that weren't in
-    the cache gets passed to the original function, the result of which is stored in the
-    cache.
+    the cache gets passed to the original function, which is expected to result
+    in a map of key to value for each passed value. The new results are stored in the
+    original cache. Note that any missing values are cached as None.
 
     Args:
         cached_method_name: The name of the single-item lookup method.
@@ -614,11 +615,11 @@ def cachedList(
     Example:
 
         class Example:
-            @cached(num_args=2)
-            def do_something(self, first_arg):
+            @cached()
+            def do_something(self, first_arg, second_arg):
                 ...
 
-            @cachedList(do_something.cache, list_name="second_args", num_args=2)
+            @cachedList(cached_method_name="do_something", list_name="second_args")
             def batch_do_something(self, first_arg, second_args):
                 ...
     """
diff --git a/synapse/visibility.py b/synapse/visibility.py
index de6d2ffc52..da4af02796 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -22,7 +22,7 @@ from synapse.events import EventBase
 from synapse.events.utils import prune_event
 from synapse.storage import Storage
 from synapse.storage.state import StateFilter
-from synapse.types import StateMap, get_domain_from_id
+from synapse.types import RetentionPolicy, StateMap, get_domain_from_id
 
 logger = logging.getLogger(__name__)
 
@@ -94,7 +94,7 @@ async def filter_events_for_client(
 
     if filter_send_to_client:
         room_ids = {e.room_id for e in events}
-        retention_policies = {}
+        retention_policies: Dict[str, RetentionPolicy] = {}
 
         for room_id in room_ids:
             retention_policies[
@@ -137,7 +137,7 @@ async def filter_events_for_client(
             # events.
             if not event.is_state():
                 retention_policy = retention_policies[event.room_id]
-                max_lifetime = retention_policy.get("max_lifetime")
+                max_lifetime = retention_policy.max_lifetime
 
                 if max_lifetime is not None:
                     oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime
diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py
new file mode 100644
index 0000000000..3e0db4dd98
--- /dev/null
+++ b/tests/appservice/test_api.py
@@ -0,0 +1,102 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, List, Mapping
+from unittest.mock import Mock
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.appservice import ApplicationService
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock
+
+from tests import unittest
+
+PROTOCOL = "myproto"
+TOKEN = "myastoken"
+URL = "http://mytestservice"
+
+
+class ApplicationServiceApiTestCase(unittest.HomeserverTestCase):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer):
+        self.api = hs.get_application_service_api()
+        self.service = ApplicationService(
+            id="unique_identifier",
+            sender="@as:test",
+            url=URL,
+            token="unused",
+            hs_token=TOKEN,
+            hostname="myserver",
+        )
+
+    def test_query_3pe_authenticates_token(self):
+        """
+        Tests that 3pe queries to the appservice are authenticated
+        with the appservice's token.
+        """
+
+        SUCCESS_RESULT_USER = [
+            {
+                "protocol": PROTOCOL,
+                "userid": "@a:user",
+                "fields": {
+                    "more": "fields",
+                },
+            }
+        ]
+        SUCCESS_RESULT_LOCATION = [
+            {
+                "protocol": PROTOCOL,
+                "alias": "#a:room",
+                "fields": {
+                    "more": "fields",
+                },
+            }
+        ]
+
+        URL_USER = f"{URL}/_matrix/app/unstable/thirdparty/user/{PROTOCOL}"
+        URL_LOCATION = f"{URL}/_matrix/app/unstable/thirdparty/location/{PROTOCOL}"
+
+        self.request_url = None
+
+        async def get_json(url: str, args: Mapping[Any, Any]) -> List[JsonDict]:
+            if not args.get(b"access_token"):
+                raise RuntimeError("Access token not provided")
+
+            self.assertEqual(args.get(b"access_token"), TOKEN)
+            self.request_url = url
+            if url == URL_USER:
+                return SUCCESS_RESULT_USER
+            elif url == URL_LOCATION:
+                return SUCCESS_RESULT_LOCATION
+            else:
+                raise RuntimeError(
+                    "URL provided was invalid. This should never be seen."
+                )
+
+        # We assign to a method, which mypy doesn't like.
+        self.api.get_json = Mock(side_effect=get_json)  # type: ignore[assignment]
+
+        result = self.get_success(
+            self.api.query_3pe(self.service, "user", PROTOCOL, {b"some": [b"field"]})
+        )
+        self.assertEqual(self.request_url, URL_USER)
+        self.assertEqual(result, SUCCESS_RESULT_USER)
+        result = self.get_success(
+            self.api.query_3pe(
+                self.service, "location", PROTOCOL, {b"some": [b"field"]}
+            )
+        )
+        self.assertEqual(self.request_url, URL_LOCATION)
+        self.assertEqual(result, SUCCESS_RESULT_LOCATION)
diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py
index edc584d0cf..7135362f76 100644
--- a/tests/appservice/test_appservice.py
+++ b/tests/appservice/test_appservice.py
@@ -23,7 +23,7 @@ from tests.test_utils import simple_async_mock
 
 
 def _regex(regex: str, exclusive: bool = True) -> Namespace:
-    return Namespace(exclusive, None, re.compile(regex))
+    return Namespace(exclusive, re.compile(regex))
 
 
 class ApplicationServiceTestCase(unittest.TestCase):
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index e95dfdce20..ec00900621 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -50,7 +50,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
         hs = self.setup_test_homeserver(federation_http_client=None)
         self.handler = hs.get_federation_handler()
         self.store = hs.get_datastores().main
-        self.state_store = hs.get_storage().state
+        self.state_storage = hs.get_storage().state
         self._event_auth_handler = hs.get_event_auth_handler()
         return hs
 
@@ -276,7 +276,11 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
             # federation handler wanting to backfill the fake event.
             self.get_success(
                 federation_event_handler._process_received_pdu(
-                    self.OTHER_SERVER_NAME, event, state=current_state
+                    self.OTHER_SERVER_NAME,
+                    event,
+                    state_ids={
+                        (e.type, e.state_key): e.event_id for e in current_state
+                    },
                 )
             )
 
@@ -334,7 +338,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
         # mapping from (type, state_key) -> state_event_id
         assert most_recent_prev_event_id is not None
         prev_state_map = self.get_success(
-            self.state_store.get_state_ids_for_event(most_recent_prev_event_id)
+            self.state_storage.get_state_ids_for_event(most_recent_prev_event_id)
         )
         # List of state event ID's
         prev_state_ids = list(prev_state_map.values())
diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py
index e74eb71774..0546655690 100644
--- a/tests/handlers/test_room_summary.py
+++ b/tests/handlers/test_room_summary.py
@@ -179,7 +179,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
             result_children_ids.append(
                 [
                     (cs["room_id"], cs["state_key"])
-                    for cs in result_room.get("children_state")
+                    for cs in result_room["children_state"]
                 ]
             )
 
@@ -772,7 +772,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
                 {
                     "room_id": public_room,
                     "world_readable": False,
-                    "join_rules": JoinRules.PUBLIC,
+                    "join_rule": JoinRules.PUBLIC,
                 },
             ),
             (
@@ -780,7 +780,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
                 {
                     "room_id": knock_room,
                     "world_readable": False,
-                    "join_rules": JoinRules.KNOCK,
+                    "join_rule": JoinRules.KNOCK,
                 },
             ),
             (
@@ -788,7 +788,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
                 {
                     "room_id": not_invited_room,
                     "world_readable": False,
-                    "join_rules": JoinRules.INVITE,
+                    "join_rule": JoinRules.INVITE,
                 },
             ),
             (
@@ -796,7 +796,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
                 {
                     "room_id": invited_room,
                     "world_readable": False,
-                    "join_rules": JoinRules.INVITE,
+                    "join_rule": JoinRules.INVITE,
                 },
             ),
             (
@@ -804,7 +804,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
                 {
                     "room_id": restricted_room,
                     "world_readable": False,
-                    "join_rules": JoinRules.RESTRICTED,
+                    "join_rule": JoinRules.RESTRICTED,
                     "allowed_room_ids": [],
                 },
             ),
@@ -813,7 +813,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
                 {
                     "room_id": restricted_accessible_room,
                     "world_readable": False,
-                    "join_rules": JoinRules.RESTRICTED,
+                    "join_rule": JoinRules.RESTRICTED,
                     "allowed_room_ids": [self.room],
                 },
             ),
@@ -822,7 +822,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
                 {
                     "room_id": world_readable_room,
                     "world_readable": True,
-                    "join_rules": JoinRules.INVITE,
+                    "join_rule": JoinRules.INVITE,
                 },
             ),
             (
@@ -830,7 +830,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
                 {
                     "room_id": joined_room,
                     "world_readable": False,
-                    "join_rules": JoinRules.INVITE,
+                    "join_rule": JoinRules.INVITE,
                 },
             ),
         )
@@ -911,7 +911,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
             {
                 "room_id": fed_room,
                 "world_readable": False,
-                "join_rules": JoinRules.INVITE,
+                "join_rule": JoinRules.INVITE,
             },
         )
 
diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py
index 638babae69..006dbab093 100644
--- a/tests/http/test_fedclient.py
+++ b/tests/http/test_fedclient.py
@@ -26,7 +26,7 @@ from twisted.web.http import HTTPChannel
 
 from synapse.api.errors import RequestSendFailed
 from synapse.http.matrixfederationclient import (
-    MAX_RESPONSE_SIZE,
+    JsonParser,
     MatrixFederationHttpClient,
     MatrixFederationRequest,
 )
@@ -609,9 +609,9 @@ class FederationClientTests(HomeserverTestCase):
         while not test_d.called:
             protocol.dataReceived(b"a" * chunk_size)
             sent += chunk_size
-            self.assertLessEqual(sent, MAX_RESPONSE_SIZE)
+            self.assertLessEqual(sent, JsonParser.MAX_RESPONSE_SIZE)
 
-        self.assertEqual(sent, MAX_RESPONSE_SIZE)
+        self.assertEqual(sent, JsonParser.MAX_RESPONSE_SIZE)
 
         f = self.failureResultOf(test_d)
         self.assertIsInstance(f.value, RequestSendFailed)
diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py
index ad521525cf..b3655d7b44 100644
--- a/tests/http/test_servlet.py
+++ b/tests/http/test_servlet.py
@@ -49,19 +49,21 @@ class TestServletUtils(unittest.TestCase):
         """Basic tests for parse_json_value_from_request."""
         # Test round-tripping.
         obj = {"foo": 1}
-        result = parse_json_value_from_request(make_request(obj))
-        self.assertEqual(result, obj)
+        result1 = parse_json_value_from_request(make_request(obj))
+        self.assertEqual(result1, obj)
 
         # Results don't have to be objects.
-        result = parse_json_value_from_request(make_request(b'["foo"]'))
-        self.assertEqual(result, ["foo"])
+        result2 = parse_json_value_from_request(make_request(b'["foo"]'))
+        self.assertEqual(result2, ["foo"])
 
         # Test empty.
         with self.assertRaises(SynapseError):
             parse_json_value_from_request(make_request(b""))
 
-        result = parse_json_value_from_request(make_request(b""), allow_empty_body=True)
-        self.assertIsNone(result)
+        result3 = parse_json_value_from_request(
+            make_request(b""), allow_empty_body=True
+        )
+        self.assertIsNone(result3)
 
         # Invalid UTF-8.
         with self.assertRaises(SynapseError):
diff --git a/tests/http/test_site.py b/tests/http/test_site.py
index 8c13b4f693..b2dbf76d33 100644
--- a/tests/http/test_site.py
+++ b/tests/http/test_site.py
@@ -36,7 +36,7 @@ class SynapseRequestTestCase(HomeserverTestCase):
         # as a control case, first send a regular request.
 
         # complete the connection and wire it up to a fake transport
-        client_address = IPv6Address("TCP", "::1", "2345")
+        client_address = IPv6Address("TCP", "::1", 2345)
         protocol = factory.buildProtocol(client_address)
         transport = StringTransport()
         protocol.makeConnection(transport)
diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py
index 5dba187076..9b623d0033 100644
--- a/tests/push/test_push_rule_evaluator.py
+++ b/tests/push/test_push_rule_evaluator.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict, Optional, Union
+from typing import Dict, Optional, Set, Tuple, Union
 
 import frozendict
 
@@ -26,7 +26,12 @@ from tests import unittest
 
 
 class PushRuleEvaluatorTestCase(unittest.TestCase):
-    def _get_evaluator(self, content: JsonDict) -> PushRuleEvaluatorForEvent:
+    def _get_evaluator(
+        self,
+        content: JsonDict,
+        relations: Optional[Dict[str, Set[Tuple[str, str]]]] = None,
+        relations_match_enabled: bool = False,
+    ) -> PushRuleEvaluatorForEvent:
         event = FrozenEvent(
             {
                 "event_id": "$event_id",
@@ -42,7 +47,12 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
         sender_power_level = 0
         power_levels: Dict[str, Union[int, Dict[str, int]]] = {}
         return PushRuleEvaluatorForEvent(
-            event, room_member_count, sender_power_level, power_levels
+            event,
+            room_member_count,
+            sender_power_level,
+            power_levels,
+            relations or {},
+            relations_match_enabled,
         )
 
     def test_display_name(self) -> None:
@@ -276,3 +286,71 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
             push_rule_evaluator.tweaks_for_actions(actions),
             {"sound": "default", "highlight": True},
         )
+
+    def test_relation_match(self) -> None:
+        """Test the relation_match push rule kind."""
+
+        # Check if the experimental feature is disabled.
+        evaluator = self._get_evaluator(
+            {}, {"m.annotation": {("@user:test", "m.reaction")}}
+        )
+        condition = {"kind": "relation_match"}
+        # Oddly, an unknown condition always matches.
+        self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
+
+        # A push rule evaluator with the experimental rule enabled.
+        evaluator = self._get_evaluator(
+            {}, {"m.annotation": {("@user:test", "m.reaction")}}, True
+        )
+
+        # Check just relation type.
+        condition = {
+            "kind": "org.matrix.msc3772.relation_match",
+            "rel_type": "m.annotation",
+        }
+        self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
+
+        # Check relation type and sender.
+        condition = {
+            "kind": "org.matrix.msc3772.relation_match",
+            "rel_type": "m.annotation",
+            "sender": "@user:test",
+        }
+        self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
+        condition = {
+            "kind": "org.matrix.msc3772.relation_match",
+            "rel_type": "m.annotation",
+            "sender": "@other:test",
+        }
+        self.assertFalse(evaluator.matches(condition, "@user:test", "foo"))
+
+        # Check relation type and event type.
+        condition = {
+            "kind": "org.matrix.msc3772.relation_match",
+            "rel_type": "m.annotation",
+            "type": "m.reaction",
+        }
+        self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
+
+        # Check just sender, this fails since rel_type is required.
+        condition = {
+            "kind": "org.matrix.msc3772.relation_match",
+            "sender": "@user:test",
+        }
+        self.assertFalse(evaluator.matches(condition, "@user:test", "foo"))
+
+        # Check sender glob.
+        condition = {
+            "kind": "org.matrix.msc3772.relation_match",
+            "rel_type": "m.annotation",
+            "sender": "@*:test",
+        }
+        self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
+
+        # Check event type glob.
+        condition = {
+            "kind": "org.matrix.msc3772.relation_match",
+            "rel_type": "m.annotation",
+            "event_type": "*.reaction",
+        }
+        self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 40571b753a..82ac5991e6 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -14,7 +14,6 @@
 
 import urllib.parse
 from http import HTTPStatus
-from typing import List
 
 from parameterized import parameterized
 
@@ -23,7 +22,7 @@ from twisted.test.proto_helpers import MemoryReactor
 import synapse.rest.admin
 from synapse.http.server import JsonResource
 from synapse.rest.admin import VersionServlet
-from synapse.rest.client import groups, login, room
+from synapse.rest.client import login, room
 from synapse.server import HomeServer
 from synapse.util import Clock
 
@@ -49,93 +48,6 @@ class VersionTestCase(unittest.HomeserverTestCase):
         )
 
 
-class DeleteGroupTestCase(unittest.HomeserverTestCase):
-    servlets = [
-        synapse.rest.admin.register_servlets_for_client_rest_resource,
-        login.register_servlets,
-        groups.register_servlets,
-    ]
-
-    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-        self.admin_user = self.register_user("admin", "pass", admin=True)
-        self.admin_user_tok = self.login("admin", "pass")
-
-        self.other_user = self.register_user("user", "pass")
-        self.other_user_token = self.login("user", "pass")
-
-    @unittest.override_config({"experimental_features": {"groups_enabled": True}})
-    def test_delete_group(self) -> None:
-        # Create a new group
-        channel = self.make_request(
-            "POST",
-            b"/create_group",
-            access_token=self.admin_user_tok,
-            content={"localpart": "test"},
-        )
-
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
-
-        group_id = channel.json_body["group_id"]
-
-        self._check_group(group_id, expect_code=HTTPStatus.OK)
-
-        # Invite/join another user
-
-        url = "/groups/%s/admin/users/invite/%s" % (group_id, self.other_user)
-        channel = self.make_request(
-            "PUT", url.encode("ascii"), access_token=self.admin_user_tok, content={}
-        )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
-
-        url = "/groups/%s/self/accept_invite" % (group_id,)
-        channel = self.make_request(
-            "PUT", url.encode("ascii"), access_token=self.other_user_token, content={}
-        )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
-
-        # Check other user knows they're in the group
-        self.assertIn(group_id, self._get_groups_user_is_in(self.admin_user_tok))
-        self.assertIn(group_id, self._get_groups_user_is_in(self.other_user_token))
-
-        # Now delete the group
-        url = "/_synapse/admin/v1/delete_group/" + group_id
-        channel = self.make_request(
-            "POST",
-            url.encode("ascii"),
-            access_token=self.admin_user_tok,
-            content={"localpart": "test"},
-        )
-
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
-
-        # Check group returns HTTPStatus.NOT_FOUND
-        self._check_group(group_id, expect_code=HTTPStatus.NOT_FOUND)
-
-        # Check users don't think they're in the group
-        self.assertNotIn(group_id, self._get_groups_user_is_in(self.admin_user_tok))
-        self.assertNotIn(group_id, self._get_groups_user_is_in(self.other_user_token))
-
-    def _check_group(self, group_id: str, expect_code: int) -> None:
-        """Assert that trying to fetch the given group results in the given
-        HTTP status code
-        """
-
-        url = "/groups/%s/profile" % (group_id,)
-        channel = self.make_request(
-            "GET", url.encode("ascii"), access_token=self.admin_user_tok
-        )
-
-        self.assertEqual(expect_code, channel.code, msg=channel.json_body)
-
-    def _get_groups_user_is_in(self, access_token: str) -> List[str]:
-        """Returns the list of groups the user is in (given their access token)"""
-        channel = self.make_request("GET", b"/joined_groups", access_token=access_token)
-
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
-
-        return channel.json_body["groups"]
-
-
 class QuarantineMediaTestCase(unittest.HomeserverTestCase):
     """Test /quarantine_media admin API."""
 
diff --git a/tests/rest/client/test_groups.py b/tests/rest/client/test_groups.py
deleted file mode 100644
index e067cf825c..0000000000
--- a/tests/rest/client/test_groups.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2021 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from synapse.rest.client import groups, room
-
-from tests import unittest
-from tests.unittest import override_config
-
-
-class GroupsTestCase(unittest.HomeserverTestCase):
-    user_id = "@alice:test"
-    room_creator_user_id = "@bob:test"
-
-    servlets = [room.register_servlets, groups.register_servlets]
-
-    @override_config({"enable_group_creation": True})
-    def test_rooms_limited_by_visibility(self) -> None:
-        group_id = "+spqr:test"
-
-        # Alice creates a group
-        channel = self.make_request("POST", "/create_group", {"localpart": "spqr"})
-        self.assertEqual(channel.code, 200, msg=channel.text_body)
-        self.assertEqual(channel.json_body, {"group_id": group_id})
-
-        # Bob creates a private room
-        room_id = self.helper.create_room_as(self.room_creator_user_id, is_public=False)
-        self.helper.auth_user_id = self.room_creator_user_id
-        self.helper.send_state(
-            room_id, "m.room.name", {"name": "bob's secret room"}, tok=None
-        )
-        self.helper.auth_user_id = self.user_id
-
-        # Alice adds the room to her group.
-        channel = self.make_request(
-            "PUT", f"/groups/{group_id}/admin/rooms/{room_id}", {}
-        )
-        self.assertEqual(channel.code, 200, msg=channel.text_body)
-        self.assertEqual(channel.json_body, {})
-
-        # Alice now tries to retrieve the room list of the space.
-        channel = self.make_request("GET", f"/groups/{group_id}/rooms")
-        self.assertEqual(channel.code, 200, msg=channel.text_body)
-        self.assertEqual(
-            channel.json_body, {"chunk": [], "total_room_count_estimate": 0}
-        )
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index 27dee8f697..bc9cc51b92 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -995,7 +995,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
                 bundled_aggregations,
             )
 
-        self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7)
+        self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 6)
 
     def test_annotation_to_annotation(self) -> None:
         """Any relation to an annotation should be ignored."""
@@ -1031,7 +1031,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
                 bundled_aggregations,
             )
 
-        self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7)
+        self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 6)
 
     def test_thread(self) -> None:
         """
@@ -1060,7 +1060,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
                 bundled_aggregations.get("latest_event"),
             )
 
-        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 10)
+        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 9)
 
     def test_thread_with_bundled_aggregations_for_latest(self) -> None:
         """
@@ -1106,7 +1106,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
                 bundled_aggregations["latest_event"].get("unsigned"),
             )
 
-        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 10)
+        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 9)
 
     def test_nested_thread(self) -> None:
         """
diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
index 7b8fe6d025..2cd7a9e6c5 100644
--- a/tests/rest/client/test_retention.py
+++ b/tests/rest/client/test_retention.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Any, Dict
 from unittest.mock import Mock
 
 from twisted.test.proto_helpers import MemoryReactor
@@ -252,16 +253,24 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase):
         room.register_servlets,
     ]
 
-    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-        config = self.default_config()
-        config["retention"] = {
+    def default_config(self) -> Dict[str, Any]:
+        config = super().default_config()
+
+        retention_config = {
             "enabled": True,
         }
 
+        # Update this config with what's in the default config so that
+        # override_config works as expected.
+        retention_config.update(config.get("retention", {}))
+        config["retention"] = retention_config
+
+        return config
+
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         mock_federation_client = Mock(spec=["backfill"])
 
         self.hs = self.setup_test_homeserver(
-            config=config,
             federation_client=mock_federation_client,
         )
         return self.hs
@@ -295,6 +304,24 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase):
 
         self._test_retention(room_id, expected_code_for_first_event=404)
 
+    @unittest.override_config({"retention": {"enabled": False}})
+    def test_visibility_when_disabled(self) -> None:
+        """Retention policies should be ignored when the retention feature is disabled."""
+        room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+        self.helper.send_state(
+            room_id=room_id,
+            event_type=EventTypes.Retention,
+            body={"max_lifetime": one_day_ms},
+            tok=self.token,
+        )
+
+        resp = self.helper.send(room_id=room_id, body="test", tok=self.token)
+
+        self.reactor.advance(one_day_ms * 2 / 1000)
+
+        self.get_event(room_id, resp["event_id"])
+
     def _test_retention(
         self, room_id: str, expected_code_for_first_event: int = 200
     ) -> None:
diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py
index 19a145eeb6..22f99c6ab1 100644
--- a/tests/scripts/test_new_matrix_user.py
+++ b/tests/scripts/test_new_matrix_user.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import List
 from unittest.mock import Mock, patch
 
 from synapse._scripts.register_new_matrix_user import request_registration
@@ -49,8 +50,8 @@ class RegisterTestCase(TestCase):
         requests.post = post
 
         # The fake stdout will be written here
-        out = []
-        err_code = []
+        out: List[str] = []
+        err_code: List[int] = []
 
         with patch("synapse._scripts.register_new_matrix_user.requests", requests):
             request_registration(
@@ -85,8 +86,8 @@ class RegisterTestCase(TestCase):
         requests.get = get
 
         # The fake stdout will be written here
-        out = []
-        err_code = []
+        out: List[str] = []
+        err_code: List[int] = []
 
         with patch("synapse._scripts.register_new_matrix_user.requests", requests):
             request_registration(
@@ -137,8 +138,8 @@ class RegisterTestCase(TestCase):
         requests.post = post
 
         # The fake stdout will be written here
-        out = []
-        err_code = []
+        out: List[str] = []
+        err_code: List[int] = []
 
         with patch("synapse._scripts.register_new_matrix_user.requests", requests):
             request_registration(
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index a8ffb52c05..cce8e75c74 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -60,7 +60,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine)
         db._db_pool = self.db_pool
 
-        self.datastore = SQLBaseStore(db, None, hs)
+        self.datastore = SQLBaseStore(db, None, hs)  # type: ignore[arg-type]
 
     @defer.inlineCallbacks
     def test_insert_1col(self):
diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py
index ef5e25873c..aaa3189b16 100644
--- a/tests/storage/test_events.py
+++ b/tests/storage/test_events.py
@@ -69,7 +69,7 @@ class ExtremPruneTestCase(HomeserverTestCase):
     def persist_event(self, event, state=None):
         """Persist the event, with optional state"""
         context = self.get_success(
-            self.state.compute_event_context(event, old_state=state)
+            self.state.compute_event_context(event, state_ids_before_event=state)
         )
         self.get_success(self.persistence.persist_event(event, context))
 
@@ -103,9 +103,11 @@ class ExtremPruneTestCase(HomeserverTestCase):
             RoomVersions.V6,
         )
 
-        state_before_gap = self.get_success(self.state.get_current_state(self.room_id))
+        state_before_gap = self.get_success(
+            self.state.get_current_state_ids(self.room_id)
+        )
 
-        self.persist_event(remote_event_2, state=state_before_gap.values())
+        self.persist_event(remote_event_2, state=state_before_gap)
 
         # Check the new extremity is just the new remote event.
         self.assert_extremities([remote_event_2.event_id])
@@ -135,13 +137,14 @@ class ExtremPruneTestCase(HomeserverTestCase):
         # setting. The state resolution across the old and new event will then
         # include it, and so the resolved state won't match the new state.
         state_before_gap = dict(
-            self.get_success(self.state.get_current_state(self.room_id))
+            self.get_success(self.state.get_current_state_ids(self.room_id))
         )
         state_before_gap.pop(("m.room.history_visibility", ""))
 
         context = self.get_success(
             self.state.compute_event_context(
-                remote_event_2, old_state=state_before_gap.values()
+                remote_event_2,
+                state_ids_before_event=state_before_gap,
             )
         )
 
@@ -177,9 +180,11 @@ class ExtremPruneTestCase(HomeserverTestCase):
             RoomVersions.V6,
         )
 
-        state_before_gap = self.get_success(self.state.get_current_state(self.room_id))
+        state_before_gap = self.get_success(
+            self.state.get_current_state_ids(self.room_id)
+        )
 
-        self.persist_event(remote_event_2, state=state_before_gap.values())
+        self.persist_event(remote_event_2, state=state_before_gap)
 
         # Check the new extremity is just the new remote event.
         self.assert_extremities([remote_event_2.event_id])
@@ -207,9 +212,11 @@ class ExtremPruneTestCase(HomeserverTestCase):
             RoomVersions.V6,
         )
 
-        state_before_gap = self.get_success(self.state.get_current_state(self.room_id))
+        state_before_gap = self.get_success(
+            self.state.get_current_state_ids(self.room_id)
+        )
 
-        self.persist_event(remote_event_2, state=state_before_gap.values())
+        self.persist_event(remote_event_2, state=state_before_gap)
 
         # Check the new extremity is just the new remote event.
         self.assert_extremities([self.remote_event_1.event_id, remote_event_2.event_id])
@@ -247,9 +254,11 @@ class ExtremPruneTestCase(HomeserverTestCase):
             RoomVersions.V6,
         )
 
-        state_before_gap = self.get_success(self.state.get_current_state(self.room_id))
+        state_before_gap = self.get_success(
+            self.state.get_current_state_ids(self.room_id)
+        )
 
-        self.persist_event(remote_event_2, state=state_before_gap.values())
+        self.persist_event(remote_event_2, state=state_before_gap)
 
         # Check the new extremity is just the new remote event.
         self.assert_extremities([remote_event_2.event_id])
@@ -289,9 +298,11 @@ class ExtremPruneTestCase(HomeserverTestCase):
             RoomVersions.V6,
         )
 
-        state_before_gap = self.get_success(self.state.get_current_state(self.room_id))
+        state_before_gap = self.get_success(
+            self.state.get_current_state_ids(self.room_id)
+        )
 
-        self.persist_event(remote_event_2, state=state_before_gap.values())
+        self.persist_event(remote_event_2, state=state_before_gap)
 
         # Check the new extremity is just the new remote event.
         self.assert_extremities([remote_event_2.event_id, local_message_event_id])
@@ -323,9 +334,11 @@ class ExtremPruneTestCase(HomeserverTestCase):
             RoomVersions.V6,
         )
 
-        state_before_gap = self.get_success(self.state.get_current_state(self.room_id))
+        state_before_gap = self.get_success(
+            self.state.get_current_state_ids(self.room_id)
+        )
 
-        self.persist_event(remote_event_2, state=state_before_gap.values())
+        self.persist_event(remote_event_2, state=state_before_gap)
 
         # Check the new extremity is just the new remote event.
         self.assert_extremities([local_message_event_id, remote_event_2.event_id])
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index a2a9c05f24..1218786d79 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -34,7 +34,7 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
         room.register_servlets,
     ]
 
-    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: TestHomeServer) -> None:
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: TestHomeServer) -> None:  # type: ignore[override]
 
         # We can't test the RoomMemberStore on its own without the other event
         # storage logic
diff --git a/tests/test_state.py b/tests/test_state.py
index c6baea3d76..84694d368d 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -442,7 +442,12 @@ class StateTestCase(unittest.TestCase):
         ]
 
         context = yield defer.ensureDeferred(
-            self.state.compute_event_context(event, old_state=old_state)
+            self.state.compute_event_context(
+                event,
+                state_ids_before_event={
+                    (e.type, e.state_key): e.event_id for e in old_state
+                },
+            )
         )
 
         prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids())
@@ -467,7 +472,12 @@ class StateTestCase(unittest.TestCase):
         ]
 
         context = yield defer.ensureDeferred(
-            self.state.compute_event_context(event, old_state=old_state)
+            self.state.compute_event_context(
+                event,
+                state_ids_before_event={
+                    (e.type, e.state_key): e.event_id for e in old_state
+                },
+            )
         )
 
         prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids())
diff --git a/tests/test_types.py b/tests/test_types.py
index 80888a744d..0b10dae848 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from synapse.api.errors import SynapseError
-from synapse.types import GroupID, RoomAlias, UserID, map_username_to_mxid_localpart
+from synapse.types import RoomAlias, UserID, map_username_to_mxid_localpart
 
 from tests import unittest
 
@@ -62,25 +62,6 @@ class RoomAliasTestCase(unittest.HomeserverTestCase):
         self.assertFalse(RoomAlias.is_valid(id_string))
 
 
-class GroupIDTestCase(unittest.TestCase):
-    def test_parse(self):
-        group_id = GroupID.from_string("+group/=_-.123:my.domain")
-        self.assertEqual("group/=_-.123", group_id.localpart)
-        self.assertEqual("my.domain", group_id.domain)
-
-    def test_validate(self):
-        bad_ids = ["$badsigil:domain", "+:empty"] + [
-            "+group" + c + ":domain" for c in "A%?æ£"
-        ]
-        for id_string in bad_ids:
-            try:
-                GroupID.from_string(id_string)
-                self.fail("Parsing '%s' should raise exception" % id_string)
-            except SynapseError as exc:
-                self.assertEqual(400, exc.code)
-                self.assertEqual("M_INVALID_PARAM", exc.errcode)
-
-
 class MapUsernameTestCase(unittest.TestCase):
     def testPassThrough(self):
         self.assertEqual(map_username_to_mxid_localpart("test1234"), "test1234")