author     Erik Johnston <erik@matrix.org>  2024-07-08 10:35:57 +0100
committer  Erik Johnston <erik@matrix.org>  2024-07-08 10:35:57 +0100
commit     97795d84371ee8d623f9ceae0934c5240e6fbedc (patch)
tree       9a204673a984b43c65e59414821ad7ab42cd075a
parent     Merge remote-tracking branch 'origin/release-v1.110' into matrix-org-hotfixes (diff)
parent     Bump certifi from 2023.7.22 to 2024.7.4 (#17404) (diff)
download   synapse-97795d84371ee8d623f9ceae0934c5240e6fbedc.tar.xz
Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes
-rw-r--r--  .github/workflows/tests.yml  22
-rw-r--r--  CHANGES.md  23
-rw-r--r--  Cargo.lock  8
-rw-r--r--  README.rst  6
-rw-r--r--  changelog.d/17318.misc  1
-rw-r--r--  changelog.d/17320.feature  1
-rw-r--r--  changelog.d/17337.feature  1
-rw-r--r--  changelog.d/17342.feature  1
-rw-r--r--  changelog.d/17356.doc  1
-rw-r--r--  changelog.d/17362.bugfix  1
-rw-r--r--  changelog.d/17363.misc  1
-rw-r--r--  changelog.d/17365.feature  1
-rw-r--r--  changelog.d/17367.misc  1
-rw-r--r--  changelog.d/17371.misc  1
-rw-r--r--  changelog.d/17379.doc  1
-rw-r--r--  changelog.d/17381.misc  1
-rw-r--r--  changelog.d/17386.bugfix  1
-rw-r--r--  changelog.d/17388.feature  3
-rw-r--r--  changelog.d/17389.misc  1
-rw-r--r--  changelog.d/17390.misc  1
-rw-r--r--  changelog.d/17391.bugfix  1
-rw-r--r--  changelog.d/17392.misc  1
-rw-r--r--  changelog.d/17393.misc  1
-rw-r--r--  changelog.d/17399.doc  1
-rw-r--r--  changelog.d/17400.feature  1
-rw-r--r--  debian/changelog  12
-rw-r--r--  docker/build_debian.sh  3
-rwxr-xr-x  docker/configure_workers_and_start.py  3
-rw-r--r--  docs/admin_api/experimental_features.md  18
-rw-r--r--  docs/development/contributing_guide.md  6
-rw-r--r--  docs/upgrade.md  13
-rw-r--r--  docs/usage/configuration/config_documentation.md  21
-rw-r--r--  docs/workers.md  1
-rw-r--r--  mypy.ini  3
-rw-r--r--  poetry.lock  133
-rw-r--r--  pyproject.toml  7
-rwxr-xr-x  scripts-dev/lint.sh  2
-rwxr-xr-x  scripts-dev/release.py  25
-rwxr-xr-x  synapse/_scripts/generate_workers_map.py  2
-rw-r--r--  synapse/_scripts/update_synapse_database.py  2
-rw-r--r--  synapse/api/auth/__init__.py  18
-rw-r--r--  synapse/api/auth/internal.py  29
-rw-r--r--  synapse/api/auth/msc3861_delegated.py  28
-rw-r--r--  synapse/api/ratelimiting.py  3
-rw-r--r--  synapse/app/admin_cmd.py  2
-rw-r--r--  synapse/app/generic_worker.py  2
-rw-r--r--  synapse/app/homeserver.py  2
-rw-r--r--  synapse/config/experimental.py  4
-rw-r--r--  synapse/events/utils.py  18
-rw-r--r--  synapse/federation/federation_client.py  46
-rw-r--r--  synapse/federation/sender/per_destination_queue.py  2
-rw-r--r--  synapse/federation/transport/client.py  25
-rw-r--r--  synapse/federation/transport/server/__init__.py  13
-rw-r--r--  synapse/federation/transport/server/_base.py  8
-rw-r--r--  synapse/federation/transport/server/federation.py  61
-rw-r--r--  synapse/handlers/deactivate_account.py  4
-rw-r--r--  synapse/handlers/sliding_sync.py  1011
-rw-r--r--  synapse/handlers/sync.py  22
-rw-r--r--  synapse/http/client.py  152
-rw-r--r--  synapse/http/matrixfederationclient.py  192
-rw-r--r--  synapse/media/_base.py  28
-rw-r--r--  synapse/media/media_repository.py  160
-rw-r--r--  synapse/media/media_storage.py  27
-rw-r--r--  synapse/media/thumbnailer.py  82
-rw-r--r--  synapse/rest/__init__.py  4
-rw-r--r--  synapse/rest/admin/experimental_features.py  14
-rw-r--r--  synapse/rest/client/media.py  112
-rw-r--r--  synapse/rest/client/pusher.py  29
-rw-r--r--  synapse/rest/client/sync.py  127
-rw-r--r--  synapse/rest/client/versions.py  20
-rw-r--r--  synapse/rest/media/download_resource.py  1
-rw-r--r--  synapse/rest/media/thumbnail_resource.py  19
-rw-r--r--  synapse/server.py  14
-rw-r--r--  synapse/storage/controllers/state.py  29
-rw-r--r--  synapse/storage/databases/main/deviceinbox.py  5
-rw-r--r--  synapse/storage/databases/main/events_worker.py  12
-rw-r--r--  synapse/storage/databases/main/experimental_features.py  64
-rw-r--r--  synapse/storage/databases/main/stream.py  282
-rw-r--r--  synapse/storage/schema/main/delta/42/current_state_delta.sql  5
-rw-r--r--  synapse/types/__init__.py  3
-rw-r--r--  synapse/types/handlers/__init__.py  57
-rw-r--r--  synapse/types/rest/client/__init__.py  13
-rw-r--r--  tests/federation/test_federation_media.py  123
-rw-r--r--  tests/handlers/test_deactivate_account.py  22
-rw-r--r--  tests/handlers/test_sliding_sync.py  1522
-rw-r--r--  tests/http/test_client.py  143
-rw-r--r--  tests/media/test_media_storage.py  34
-rw-r--r--  tests/push/test_http.py  82
-rw-r--r--  tests/replication/test_multi_media_repo.py  234
-rw-r--r--  tests/rest/admin/test_admin.py  14
-rw-r--r--  tests/rest/client/test_media.py  957
-rw-r--r--  tests/rest/client/test_sync.py  1792
-rw-r--r--  tests/rest/client/utils.py  4
-rw-r--r--  tests/server.py  2
-rw-r--r--  tests/storage/test_stream.py  874
-rw-r--r--  tests/test_utils/event_injection.py  12
96 files changed, 8249 insertions, 648 deletions
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index cdd881fbe1..767495101b 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -21,6 +21,7 @@ jobs:
       trial: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.trial }}
       integration: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.integration }}
       linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
+      linting_readme: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting_readme }}
     steps:
     - uses: dorny/paths-filter@v3
       id: filter
@@ -73,6 +74,9 @@ jobs:
             - 'poetry.lock'
             - '.github/workflows/tests.yml'
 
+          linting_readme:
+            - 'README.rst'
+
   check-sampleconfig:
     runs-on: ubuntu-latest
     needs: changes
@@ -135,7 +139,7 @@ jobs:
 
       - name: Semantic checks (ruff)
         # --quiet suppresses the update check.
-        run: poetry run ruff --quiet .
+        run: poetry run ruff check --quiet .
 
   lint-mypy:
     runs-on: ubuntu-latest
@@ -269,6 +273,20 @@ jobs:
 
       - run: cargo fmt --check
 
+  # This is to detect issues with the RST file, which can otherwise cause problems
+  # when uploading packages to PyPI.
+  lint-readme:
+    runs-on: ubuntu-latest
+    needs: changes
+    if: ${{ needs.changes.outputs.linting_readme == 'true' }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.x"
+      - run: "pip install rstcheck"
+      - run: "rstcheck --report-level=WARNING README.rst"
+
   # Dummy step to gate other tests on without repeating the whole list
   linting-done:
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
@@ -284,6 +302,7 @@ jobs:
       - lint-clippy
       - lint-clippy-nightly
       - lint-rustfmt
+      - lint-readme
     runs-on: ubuntu-latest
     steps:
       - uses: matrix-org/done-action@v2
@@ -301,6 +320,7 @@ jobs:
             lint-clippy
             lint-clippy-nightly
             lint-rustfmt
+            lint-readme
 
 
   calculate-test-jobs:
diff --git a/CHANGES.md b/CHANGES.md
index 94f187b951..a40aa26d78 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,24 @@
+# Synapse 1.110.0 (2024-07-03)
+
+No significant changes since 1.110.0rc3.
+
+
+
+
+# Synapse 1.110.0rc3 (2024-07-02)
+
+### Bugfixes
+
+- Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0. ([\#17386](https://github.com/element-hq/synapse/issues/17386), [\#17391](https://github.com/element-hq/synapse/issues/17391))
+
+### Internal Changes
+
+- Limit size of presence EDUs to 50 entries. ([\#17371](https://github.com/element-hq/synapse/issues/17371))
+- Fix building debian package for debian sid. ([\#17389](https://github.com/element-hq/synapse/issues/17389))
+
+
+
+
 # Synapse 1.110.0rc2 (2024-06-26)
 
 ### Internal Changes
@@ -27,7 +48,7 @@
   This is useful for scripts that bootstrap user accounts with initial passwords. ([\#17304](https://github.com/element-hq/synapse/issues/17304))
 - Add support for via query parameter from [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156). ([\#17322](https://github.com/element-hq/synapse/issues/17322))
 - Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17335](https://github.com/element-hq/synapse/issues/17335))
-- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))
 
 ### Bugfixes
 
diff --git a/Cargo.lock b/Cargo.lock
index 1955c1a4e7..4353e55977 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -234,9 +234,9 @@ dependencies = [
 
 [[package]]
 name = "log"
-version = "0.4.21"
+version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 
 [[package]]
 name = "memchr"
@@ -505,9 +505,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.117"
+version = "1.0.119"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3"
+checksum = "e8eddb61f0697cc3989c5d64b452f5488e2b8a60fd7d5076a3045076ffef8cb0"
 dependencies = [
  "itoa",
  "ryu",
diff --git a/README.rst b/README.rst
index 9ecb6b5816..a52e0c193d 100644
--- a/README.rst
+++ b/README.rst
@@ -179,10 +179,10 @@ desired ``localpart`` in the 'User name' box.
 -----------------------
 
 Enterprise quality support for Synapse including SLAs is available as part of an
-`Element Server Suite (ESS) <https://element.io/pricing>` subscription.
+`Element Server Suite (ESS) <https://element.io/pricing>`_ subscription.
 
-If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`
-and access the `knowledge base <https://ems-docs.element.io>`.
+If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`_
+and access the `knowledge base <https://ems-docs.element.io>`_.
 
 🤝 Community support
 --------------------
diff --git a/changelog.d/17318.misc b/changelog.d/17318.misc
new file mode 100644
index 0000000000..b0b21da23b
--- /dev/null
+++ b/changelog.d/17318.misc
@@ -0,0 +1 @@
+Make the release script create a release branch for Complement as well.
diff --git a/changelog.d/17320.feature b/changelog.d/17320.feature
new file mode 100644
index 0000000000..1e524f3eca
--- /dev/null
+++ b/changelog.d/17320.feature
@@ -0,0 +1 @@
+Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17337.feature b/changelog.d/17337.feature
new file mode 100644
index 0000000000..bc8f437dbe
--- /dev/null
+++ b/changelog.d/17337.feature
@@ -0,0 +1 @@
+Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17342.feature b/changelog.d/17342.feature
new file mode 100644
index 0000000000..b2671ea14a
--- /dev/null
+++ b/changelog.d/17342.feature
@@ -0,0 +1 @@
+Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17356.doc b/changelog.d/17356.doc
new file mode 100644
index 0000000000..b393d8d147
--- /dev/null
+++ b/changelog.d/17356.doc
@@ -0,0 +1 @@
+Clarify `url_preview_url_blacklist` is a usability feature.
diff --git a/changelog.d/17362.bugfix b/changelog.d/17362.bugfix
new file mode 100644
index 0000000000..a91ce9fc06
--- /dev/null
+++ b/changelog.d/17362.bugfix
@@ -0,0 +1 @@
+Fix a rare race that could prevent new to-device messages from being received from a remote server.
diff --git a/changelog.d/17363.misc b/changelog.d/17363.misc
new file mode 100644
index 0000000000..555e2225ba
--- /dev/null
+++ b/changelog.d/17363.misc
@@ -0,0 +1 @@
+Fix uploading packages to PyPI.
\ No newline at end of file
diff --git a/changelog.d/17365.feature b/changelog.d/17365.feature
new file mode 100644
index 0000000000..61acc32f32
--- /dev/null
+++ b/changelog.d/17365.feature
@@ -0,0 +1 @@
+Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding _matrix/client/v1/media/download endpoint.
\ No newline at end of file
diff --git a/changelog.d/17367.misc b/changelog.d/17367.misc
new file mode 100644
index 0000000000..361731b8ae
--- /dev/null
+++ b/changelog.d/17367.misc
@@ -0,0 +1 @@
+Add CI check for the README.
\ No newline at end of file
diff --git a/changelog.d/17371.misc b/changelog.d/17371.misc
deleted file mode 100644
index 0fbf19f4fb..0000000000
--- a/changelog.d/17371.misc
+++ /dev/null
@@ -1 +0,0 @@
-Limit size of presence EDUs to 50 entries.
diff --git a/changelog.d/17379.doc b/changelog.d/17379.doc
new file mode 100644
index 0000000000..08c2544426
--- /dev/null
+++ b/changelog.d/17379.doc
@@ -0,0 +1 @@
+Fix broken links in README.
diff --git a/changelog.d/17381.misc b/changelog.d/17381.misc
new file mode 100644
index 0000000000..ca9830c136
--- /dev/null
+++ b/changelog.d/17381.misc
@@ -0,0 +1 @@
+Fix linting errors from new `ruff` version.
diff --git a/changelog.d/17386.bugfix b/changelog.d/17386.bugfix
deleted file mode 100644
index 9686b5c276..0000000000
--- a/changelog.d/17386.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0.
diff --git a/changelog.d/17388.feature b/changelog.d/17388.feature
new file mode 100644
index 0000000000..f04f49f085
--- /dev/null
+++ b/changelog.d/17388.feature
@@ -0,0 +1,3 @@
+Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
+by adding `_matrix/client/v1/media/thumbnail`, `_matrix/federation/v1/media/thumbnail` endpoints and stabilizing the
+remaining `_matrix/client/v1/media` endpoints.
\ No newline at end of file
diff --git a/changelog.d/17389.misc b/changelog.d/17389.misc
deleted file mode 100644
index 7022ed93d9..0000000000
--- a/changelog.d/17389.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix building debian package for debian sid.
diff --git a/changelog.d/17390.misc b/changelog.d/17390.misc
new file mode 100644
index 0000000000..6a4e344c5c
--- /dev/null
+++ b/changelog.d/17390.misc
@@ -0,0 +1 @@
+Fix building debian packages on non-clean checkouts.
diff --git a/changelog.d/17391.bugfix b/changelog.d/17391.bugfix
deleted file mode 100644
index 9686b5c276..0000000000
--- a/changelog.d/17391.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0.
diff --git a/changelog.d/17392.misc b/changelog.d/17392.misc
new file mode 100644
index 0000000000..76e3976e28
--- /dev/null
+++ b/changelog.d/17392.misc
@@ -0,0 +1 @@
+Finish up work to allow per-user feature flags.
diff --git a/changelog.d/17393.misc b/changelog.d/17393.misc
new file mode 100644
index 0000000000..e131225276
--- /dev/null
+++ b/changelog.d/17393.misc
@@ -0,0 +1 @@
+Allow enabling sliding sync per-user.
diff --git a/changelog.d/17399.doc b/changelog.d/17399.doc
new file mode 100644
index 0000000000..7a3fcf24c0
--- /dev/null
+++ b/changelog.d/17399.doc
@@ -0,0 +1 @@
+Clarify that changelog content *and file extension* need to match in order for entries to merge.
diff --git a/changelog.d/17400.feature b/changelog.d/17400.feature
new file mode 100644
index 0000000000..4dca90890c
--- /dev/null
+++ b/changelog.d/17400.feature
@@ -0,0 +1 @@
+Forget all of a user's rooms upon deactivation, enabling future purges.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index 59aa841650..c285cc79eb 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,15 @@
+matrix-synapse-py3 (1.110.0) stable; urgency=medium
+
+  * New Synapse release 1.110.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 03 Jul 2024 09:08:59 -0600
+
+matrix-synapse-py3 (1.110.0~rc3) stable; urgency=medium
+
+  * New Synapse release 1.110.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 02 Jul 2024 08:28:56 -0600
+
 matrix-synapse-py3 (1.110.0~rc2) stable; urgency=medium
 
   * New Synapse release 1.110.0rc2.
diff --git a/docker/build_debian.sh b/docker/build_debian.sh
index 9eae38af91..00e0856c7d 100644
--- a/docker/build_debian.sh
+++ b/docker/build_debian.sh
@@ -11,6 +11,9 @@ DIST=$(cut -d ':' -f2 <<< "${distro:?}")
 cp -aT /synapse/source /synapse/build
 cd /synapse/build
 
+# Delete any existing `.so` files to ensure a clean build.
+rm -f /synapse/build/synapse/*.so
+
 # if this is a prerelease, set the Section accordingly.
 #
 # When the package is later added to the package repo, reprepro will use the
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 063f3727f9..b6690f3404 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -117,7 +117,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     },
     "media_repository": {
         "app": "synapse.app.generic_worker",
-        "listener_resources": ["media"],
+        "listener_resources": ["media", "client"],
         "endpoint_patterns": [
             "^/_matrix/media/",
             "^/_synapse/admin/v1/purge_media_cache$",
@@ -125,6 +125,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_synapse/admin/v1/user/.*/media.*$",
             "^/_synapse/admin/v1/media/.*$",
             "^/_synapse/admin/v1/quarantine_media/.*$",
+            "^/_matrix/client/v1/media/.*$",
         ],
         # The first configured media worker will run the media background jobs
         "shared_extra_conf": {
diff --git a/docs/admin_api/experimental_features.md b/docs/admin_api/experimental_features.md
index 07b630915d..ef1b58c9ba 100644
--- a/docs/admin_api/experimental_features.md
+++ b/docs/admin_api/experimental_features.md
@@ -1,21 +1,17 @@
 # Experimental Features API
 
 This API allows a server administrator to enable or disable some experimental features on a per-user
-basis. The currently supported features are: 
-- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy 
-presence state enabled
-- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications 
-for another client 
-- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
-UIA when first uploading cross-signing keys. 
-
+basis. The currently supported features are:
+- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
+for another client
+- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
 
 To use it, you will need to authenticate by providing an `access_token`
 for a server admin: see [Admin API](../usage/administration/admin_api/).
 
 ## Enabling/Disabling Features
 
-This API allows a server administrator to enable experimental features for a given user. The request must 
+This API allows a server administrator to enable experimental features for a given user. The request must
 provide a body containing the user id and listing the features to enable/disable in the following format:
 ```json
 {
@@ -35,7 +31,7 @@ PUT /_synapse/admin/v1/experimental_features/<user_id>
 ```
 
 ## Listing Enabled Features
- 
+
 To list which features are enabled/disabled for a given user send a request to the following API:
 
 ```
@@ -52,4 +48,4 @@ user like so:
       "msc3967": false
    }
 }
-```
\ No newline at end of file
+```
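For reference, here is a minimal sketch of driving this admin API from Python with `requests`. The homeserver URL, admin token, and user ID are placeholders, and the body shape is assumed to follow the `features` map format documented above:

```python
import requests

# Placeholder values: substitute your homeserver URL, an admin access
# token, and the target user's Matrix ID.
HOMESERVER = "https://synapse.example.com"
ADMIN_TOKEN = "syt_example_admin_token"
USER_ID = "@alice:example.com"

headers = {"Authorization": f"Bearer {ADMIN_TOKEN}"}

# Enable sliding sync (MSC3575) and disable MSC3881 for this user.
resp = requests.put(
    f"{HOMESERVER}/_synapse/admin/v1/experimental_features/{USER_ID}",
    headers=headers,
    json={"features": {"msc3575": True, "msc3881": False}},
)
resp.raise_for_status()

# List which features are now enabled/disabled for the user.
resp = requests.get(
    f"{HOMESERVER}/_synapse/admin/v1/experimental_features/{USER_ID}",
    headers=headers,
)
print(resp.json())
```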
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 76c3e790cd..f079f61b48 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -449,9 +449,9 @@ For example, a fix in PR #1234 would have its changelog entry in
 > The security levels of Florbs are now validated when received
 > via the `/federation/florb` endpoint. Contributed by Jane Matrix.
 
-If there are multiple pull requests involved in a single bugfix/feature/etc,
-then the content for each `changelog.d` file should be the same. Towncrier will
-merge the matching files together into a single changelog entry when we come to
+If there are multiple pull requests involved in a single bugfix/feature/etc, then the
+content for each `changelog.d` file and file extension should be the same. Towncrier
+will merge the matching files together into a single changelog entry when we come to
 release.
 
 ### How do I know what to call the changelog file before I create the PR?
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 99be4122bb..cf53f56b06 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -117,6 +117,19 @@ each upgrade are complete before moving on to the next upgrade, to avoid
 stacking them up. You can monitor the currently running background updates with
 [the Admin API](usage/administration/admin_api/background_updates.html#status).
 
+# Upgrading to v1.111.0
+
+## New worker endpoints for authenticated client media
+
+[Media repository workers](./workers.md#synapseappmedia_repository) that handle
+Media APIs can now also handle the following endpoint pattern:
+
+```
+^/_matrix/client/v1/media/.*$
+```
+
+Please update your reverse proxy configuration.
+
 # Upgrading to v1.106.0
 
 ## Minimum supported Rust version
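As a quick sanity check of the routing change (a sketch; the server name and media IDs are illustrative), the new pattern can be exercised directly with Python's `re` module:

```python
import re

# The endpoint pattern that media repository workers can now handle.
CLIENT_MEDIA_PATTERN = re.compile(r"^/_matrix/client/v1/media/.*$")

paths = [
    "/_matrix/client/v1/media/download/example.org/abcDEF123",   # new path
    "/_matrix/client/v1/media/thumbnail/example.org/abcDEF123",  # new path
    "/_matrix/media/v3/download/example.org/abcDEF123",          # legacy path,
    # matched by the existing ^/_matrix/media/ rule instead
]

for path in paths:
    if CLIENT_MEDIA_PATTERN.match(path):
        print(f"{path} -> media worker (new rule)")
    else:
        print(f"{path} -> not matched by the new rule")
```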
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 80a7bf9d21..65b03ad0f8 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -1976,9 +1976,10 @@ This will not prevent the listed domains from accessing media themselves.
 It simply prevents users on this server from downloading media originating
 from the listed servers.
 
-This will have no effect on media originating from the local server.
-This only affects media downloaded from other Matrix servers, to
-block domains from URL previews see [`url_preview_url_blacklist`](#url_preview_url_blacklist).
+This will have no effect on media originating from the local server. This only
+affects media downloaded from other Matrix servers. To control URL previews, see
+[`url_preview_ip_range_blacklist`](#url_preview_ip_range_blacklist) or
+[`url_preview_url_blacklist`](#url_preview_url_blacklist).
 
 Defaults to an empty list (nothing blocked).
 
@@ -2130,12 +2131,14 @@ url_preview_ip_range_whitelist:
 ---
 ### `url_preview_url_blacklist`
 
-Optional list of URL matches that the URL preview spider is
-denied from accessing.  You should use `url_preview_ip_range_blacklist`
-in preference to this, otherwise someone could define a public DNS
-entry that points to a private IP address and circumvent the blacklist.
-This is more useful if you know there is an entire shape of URL that
-you know that will never want synapse to try to spider.
+Optional list of URL matches that the URL preview spider is denied from
+accessing.  This is a usability feature, not a security one. You should use
+`url_preview_ip_range_blacklist` in preference to this, otherwise someone could
+define a public DNS entry that points to a private IP address and circumvent
+the blacklist. Applications that perform redirects or serve different content
+when detecting that Synapse is accessing them can also bypass the blacklist.
+This is more useful if you know there is an entire shape of URL that you do
+not want Synapse to preview.
 
 Each list entry is a dictionary of url component attributes as returned
 by urlparse.urlsplit as applied to the absolute form of the URL.  See
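To see which component attributes such an entry can match against, `urlsplit` can be inspected directly. This is a simplified sketch using exact equality; the URL and entry are illustrative:

```python
from urllib.parse import urlsplit

# An illustrative URL; blacklist entries match against these components.
parts = urlsplit("https://preview-me.example.com/some/path?q=1")
print(parts.scheme)  # "https"
print(parts.netloc)  # "preview-me.example.com"
print(parts.path)    # "/some/path"

# A blacklist entry is a dictionary of such attribute names. Here a
# whole host is denied (a usability control, not a security boundary):
entry = {"netloc": "preview-me.example.com"}
matches = all(getattr(parts, key) == value for key, value in entry.items())
print(matches)  # True -> Synapse would decline to preview this URL
```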
diff --git a/docs/workers.md b/docs/workers.md
index 1f6bfd9e7f..22fde488a9 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -739,6 +739,7 @@ An example for a federation sender instance:
 Handles the media repository. It can handle all endpoints starting with:
 
     /_matrix/media/
+    /_matrix/client/v1/media/
 
 ... and the following regular expressions matching media-specific administration APIs:
 
diff --git a/mypy.ini b/mypy.ini
index 1a2b9ea410..3fca15c01b 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -96,3 +96,6 @@ ignore_missing_imports = True
 # https://github.com/twisted/treq/pull/366
 [mypy-treq.*]
 ignore_missing_imports = True
+
+[mypy-multipart.*]
+ignore_missing_imports = True
diff --git a/poetry.lock b/poetry.lock
index 1bae0ea388..7838c3d308 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
 
 [[package]]
 name = "annotated-types"
@@ -182,13 +182,13 @@ files = [
 
 [[package]]
 name = "certifi"
-version = "2023.7.22"
+version = "2024.7.4"
 description = "Python package for providing Mozilla's CA Bundle."
 optional = false
 python-versions = ">=3.6"
 files = [
-    {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
-    {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+    {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
+    {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
 ]
 
 [[package]]
@@ -403,43 +403,43 @@ files = [
 
 [[package]]
 name = "cryptography"
-version = "42.0.7"
+version = "42.0.8"
 description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a987f840718078212fdf4504d0fd4c6effe34a7e4740378e59d47696e8dfb477"},
-    {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd13b5e9b543532453de08bcdc3cc7cebec6f9883e886fd20a92f26940fd3e7a"},
-    {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a79165431551042cc9d1d90e6145d5d0d3ab0f2d66326c201d9b0e7f5bf43604"},
-    {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a47787a5e3649008a1102d3df55424e86606c9bae6fb77ac59afe06d234605f8"},
-    {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:02c0eee2d7133bdbbc5e24441258d5d2244beb31da5ed19fbb80315f4bbbff55"},
-    {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5e44507bf8d14b36b8389b226665d597bc0f18ea035d75b4e53c7b1ea84583cc"},
-    {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7f8b25fa616d8b846aef64b15c606bb0828dbc35faf90566eb139aa9cff67af2"},
-    {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:93a3209f6bb2b33e725ed08ee0991b92976dfdcf4e8b38646540674fc7508e13"},
-    {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e6b8f1881dac458c34778d0a424ae5769de30544fc678eac51c1c8bb2183e9da"},
-    {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3de9a45d3b2b7d8088c3fbf1ed4395dfeff79d07842217b38df14ef09ce1d8d7"},
-    {file = "cryptography-42.0.7-cp37-abi3-win32.whl", hash = "sha256:789caea816c6704f63f6241a519bfa347f72fbd67ba28d04636b7c6b7da94b0b"},
-    {file = "cryptography-42.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:8cb8ce7c3347fcf9446f201dc30e2d5a3c898d009126010cbd1f443f28b52678"},
-    {file = "cryptography-42.0.7-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:a3a5ac8b56fe37f3125e5b72b61dcde43283e5370827f5233893d461b7360cd4"},
-    {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:779245e13b9a6638df14641d029add5dc17edbef6ec915688f3acb9e720a5858"},
-    {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d563795db98b4cd57742a78a288cdbdc9daedac29f2239793071fe114f13785"},
-    {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:31adb7d06fe4383226c3e963471f6837742889b3c4caa55aac20ad951bc8ffda"},
-    {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:efd0bf5205240182e0f13bcaea41be4fdf5c22c5129fc7ced4a0282ac86998c9"},
-    {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a9bc127cdc4ecf87a5ea22a2556cab6c7eda2923f84e4f3cc588e8470ce4e42e"},
-    {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3577d029bc3f4827dd5bf8bf7710cac13527b470bbf1820a3f394adb38ed7d5f"},
-    {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2e47577f9b18723fa294b0ea9a17d5e53a227867a0a4904a1a076d1646d45ca1"},
-    {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1a58839984d9cb34c855197043eaae2c187d930ca6d644612843b4fe8513c886"},
-    {file = "cryptography-42.0.7-cp39-abi3-win32.whl", hash = "sha256:e6b79d0adb01aae87e8a44c2b64bc3f3fe59515280e00fb6d57a7267a2583cda"},
-    {file = "cryptography-42.0.7-cp39-abi3-win_amd64.whl", hash = "sha256:16268d46086bb8ad5bf0a2b5544d8a9ed87a0e33f5e77dd3c3301e63d941a83b"},
-    {file = "cryptography-42.0.7-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2954fccea107026512b15afb4aa664a5640cd0af630e2ee3962f2602693f0c82"},
-    {file = "cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:362e7197754c231797ec45ee081f3088a27a47c6c01eff2ac83f60f85a50fe60"},
-    {file = "cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f698edacf9c9e0371112792558d2f705b5645076cc0aaae02f816a0171770fd"},
-    {file = "cryptography-42.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5482e789294854c28237bba77c4c83be698be740e31a3ae5e879ee5444166582"},
-    {file = "cryptography-42.0.7-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e9b2a6309f14c0497f348d08a065d52f3020656f675819fc405fb63bbcd26562"},
-    {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d8e3098721b84392ee45af2dd554c947c32cc52f862b6a3ae982dbb90f577f14"},
-    {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c65f96dad14f8528a447414125e1fc8feb2ad5a272b8f68477abbcc1ea7d94b9"},
-    {file = "cryptography-42.0.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36017400817987670037fbb0324d71489b6ead6231c9604f8fc1f7d008087c68"},
-    {file = "cryptography-42.0.7.tar.gz", hash = "sha256:ecbfbc00bf55888edda9868a4cf927205de8499e7fabe6c050322298382953f2"},
+    {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e"},
+    {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7"},
+    {file = "cryptography-42.0.8-cp37-abi3-win32.whl", hash = "sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2"},
+    {file = "cryptography-42.0.8-cp37-abi3-win_amd64.whl", hash = "sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba"},
+    {file = "cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14"},
+    {file = "cryptography-42.0.8-cp39-abi3-win32.whl", hash = "sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c"},
+    {file = "cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = "sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad"},
+    {file = "cryptography-42.0.8.tar.gz", hash = "sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2"},
 ]
 
 [package.dependencies]
@@ -2040,6 +2040,20 @@ files = [
 six = ">=1.5"
 
 [[package]]
+name = "python-multipart"
+version = "0.0.9"
+description = "A streaming multipart parser for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"},
+    {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"},
+]
+
+[package.extras]
+dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"]
+
+[[package]]
 name = "pytz"
 version = "2022.7.1"
 description = "World timezone definitions, modern and historical"
@@ -2331,28 +2345,29 @@ files = [
 
 [[package]]
 name = "ruff"
-version = "0.3.7"
+version = "0.5.0"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "ruff-0.3.7-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0e8377cccb2f07abd25e84fc5b2cbe48eeb0fea9f1719cad7caedb061d70e5ce"},
-    {file = "ruff-0.3.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:15a4d1cc1e64e556fa0d67bfd388fed416b7f3b26d5d1c3e7d192c897e39ba4b"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d28bdf3d7dc71dd46929fafeec98ba89b7c3550c3f0978e36389b5631b793663"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:379b67d4f49774ba679593b232dcd90d9e10f04d96e3c8ce4a28037ae473f7bb"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c060aea8ad5ef21cdfbbe05475ab5104ce7827b639a78dd55383a6e9895b7c51"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:ebf8f615dde968272d70502c083ebf963b6781aacd3079081e03b32adfe4d58a"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d48098bd8f5c38897b03604f5428901b65e3c97d40b3952e38637b5404b739a2"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da8a4fda219bf9024692b1bc68c9cff4b80507879ada8769dc7e985755d662ea"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c44e0149f1d8b48c4d5c33d88c677a4aa22fd09b1683d6a7ff55b816b5d074f"},
-    {file = "ruff-0.3.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3050ec0af72b709a62ecc2aca941b9cd479a7bf2b36cc4562f0033d688e44fa1"},
-    {file = "ruff-0.3.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a29cc38e4c1ab00da18a3f6777f8b50099d73326981bb7d182e54a9a21bb4ff7"},
-    {file = "ruff-0.3.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5b15cc59c19edca917f51b1956637db47e200b0fc5e6e1878233d3a938384b0b"},
-    {file = "ruff-0.3.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e491045781b1e38b72c91247cf4634f040f8d0cb3e6d3d64d38dcf43616650b4"},
-    {file = "ruff-0.3.7-py3-none-win32.whl", hash = "sha256:bc931de87593d64fad3a22e201e55ad76271f1d5bfc44e1a1887edd0903c7d9f"},
-    {file = "ruff-0.3.7-py3-none-win_amd64.whl", hash = "sha256:5ef0e501e1e39f35e03c2acb1d1238c595b8bb36cf7a170e7c1df1b73da00e74"},
-    {file = "ruff-0.3.7-py3-none-win_arm64.whl", hash = "sha256:789e144f6dc7019d1f92a812891c645274ed08af6037d11fc65fcbc183b7d59f"},
-    {file = "ruff-0.3.7.tar.gz", hash = "sha256:d5c1aebee5162c2226784800ae031f660c350e7a3402c4d1f8ea4e97e232e3ba"},
+    {file = "ruff-0.5.0-py3-none-linux_armv6l.whl", hash = "sha256:ee770ea8ab38918f34e7560a597cc0a8c9a193aaa01bfbd879ef43cb06bd9c4c"},
+    {file = "ruff-0.5.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38f3b8327b3cb43474559d435f5fa65dacf723351c159ed0dc567f7ab735d1b6"},
+    {file = "ruff-0.5.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7594f8df5404a5c5c8f64b8311169879f6cf42142da644c7e0ba3c3f14130370"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:adc7012d6ec85032bc4e9065110df205752d64010bed5f958d25dbee9ce35de3"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d505fb93b0fabef974b168d9b27c3960714d2ecda24b6ffa6a87ac432905ea38"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dc5cfd3558f14513ed0d5b70ce531e28ea81a8a3b1b07f0f48421a3d9e7d80a"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:db3ca35265de239a1176d56a464b51557fce41095c37d6c406e658cf80bbb362"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1a321c4f68809fddd9b282fab6a8d8db796b270fff44722589a8b946925a2a8"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c4dfcd8d34b143916994b3876b63d53f56724c03f8c1a33a253b7b1e6bf2a7d"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81e5facfc9f4a674c6a78c64d38becfbd5e4f739c31fcd9ce44c849f1fad9e4c"},
+    {file = "ruff-0.5.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e589e27971c2a3efff3fadafb16e5aef7ff93250f0134ec4b52052b673cf988d"},
+    {file = "ruff-0.5.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2ffbc3715a52b037bcb0f6ff524a9367f642cdc5817944f6af5479bbb2eb50e"},
+    {file = "ruff-0.5.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cd096e23c6a4f9c819525a437fa0a99d1c67a1b6bb30948d46f33afbc53596cf"},
+    {file = "ruff-0.5.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:46e193b36f2255729ad34a49c9a997d506e58f08555366b2108783b3064a0e1e"},
+    {file = "ruff-0.5.0-py3-none-win32.whl", hash = "sha256:49141d267100f5ceff541b4e06552e98527870eafa1acc9dec9139c9ec5af64c"},
+    {file = "ruff-0.5.0-py3-none-win_amd64.whl", hash = "sha256:e9118f60091047444c1b90952736ee7b1792910cab56e9b9a9ac20af94cd0440"},
+    {file = "ruff-0.5.0-py3-none-win_arm64.whl", hash = "sha256:ed5c4df5c1fb4518abcb57725b576659542bdbe93366f4f329e8f398c4b71178"},
+    {file = "ruff-0.5.0.tar.gz", hash = "sha256:eb641b5873492cf9bd45bc9c5ae5320648218e04386a5f0c264ad6ccce8226a1"},
 ]
 
 [[package]]
@@ -2906,13 +2921,13 @@ urllib3 = ">=2"
 
 [[package]]
 name = "types-setuptools"
-version = "69.5.0.20240423"
+version = "70.1.0.20240627"
 description = "Typing stubs for setuptools"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-setuptools-69.5.0.20240423.tar.gz", hash = "sha256:a7ba908f1746c4337d13f027fa0f4a5bcad6d1d92048219ba792b3295c58586d"},
-    {file = "types_setuptools-69.5.0.20240423-py3-none-any.whl", hash = "sha256:a4381e041510755a6c9210e26ad55b1629bc10237aeb9cb8b6bd24996b73db48"},
+    {file = "types-setuptools-70.1.0.20240627.tar.gz", hash = "sha256:385907a47b5cf302b928ce07953cd91147d5de6f3da604c31905fdf0ec309e83"},
+    {file = "types_setuptools-70.1.0.20240627-py3-none-any.whl", hash = "sha256:c7bdf05cd0a8b66868b4774c7b3c079d01ae025d8c9562bfc8bf2ff44d263c9c"},
 ]
 
 [[package]]
@@ -3187,4 +3202,4 @@ user-search = ["pyicu"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.8.0"
-content-hash = "107c8fb5c67360340854fbdba3c085fc5f9c7be24bcb592596a914eea621faea"
+content-hash = "3372a97db99050a34f8eddad2ddf8efe8b7b704b6123df4a3e36ddc171e8f34d"
diff --git a/pyproject.toml b/pyproject.toml
index bbf9c78420..2d1481f263 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -43,6 +43,7 @@ target-version = ['py38', 'py39', 'py310', 'py311']
 [tool.ruff]
 line-length = 88
 
+[tool.ruff.lint]
 # See https://beta.ruff.rs/docs/rules/#error-e
 # for error codes. The ones we ignore are:
 #  E501: Line too long (black enforces this for us)
@@ -96,7 +97,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.110.0rc2"
+version = "1.110.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
@@ -224,6 +225,8 @@ pydantic = ">=1.7.4, <3"
 # needed.
 setuptools_rust = ">=1.3"
 
+# This is used for parsing multipart responses
+python-multipart = ">=0.0.9"
 
 # Optional Dependencies
 # ---------------------
@@ -319,7 +322,7 @@ all = [
 # This helps prevents merge conflicts when running a batch of dependabot updates.
 isort = ">=5.10.1"
 black = ">=22.7.0"
-ruff = "0.3.7"
+ruff = "0.5.0"
 # Type checking only works with the pydantic.v1 compat module from pydantic v2
 pydantic = "^2"
 
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index 9e4ed3246e..8acf0a6fb8 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -112,7 +112,7 @@ python3 -m black "${files[@]}"
 
 # Catch any common programming mistakes in Python code.
 # --quiet suppresses the update check.
-ruff --quiet --fix "${files[@]}"
+ruff check --quiet --fix "${files[@]}"
 
 # Catch any common programming mistakes in Rust code.
 #
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 9ea9b41505..5e519bb758 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -70,6 +70,7 @@ def cli() -> None:
             pip install -e .[dev]
 
       - A checkout of the sytest repository at ../sytest
+      - A checkout of the complement repository at ../complement
 
     Then to use:
 
@@ -112,10 +113,12 @@ def _prepare() -> None:
     # Make sure we're in a git repo.
     synapse_repo = get_repo_and_check_clean_checkout()
     sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
+    complement_repo = get_repo_and_check_clean_checkout("../complement", "complement")
 
     click.secho("Updating Synapse and Sytest git repos...")
     synapse_repo.remote().fetch()
     sytest_repo.remote().fetch()
+    complement_repo.remote().fetch()
 
     # Get the current version and AST from root Synapse module.
     current_version = get_package_version()
@@ -208,7 +211,15 @@ def _prepare() -> None:
             "Which branch should the release be based on?", default=default
         )
 
-        for repo_name, repo in {"synapse": synapse_repo, "sytest": sytest_repo}.items():
+        for repo_name, repo in {
+            "synapse": synapse_repo,
+            "sytest": sytest_repo,
+            "complement": complement_repo,
+        }.items():
+            # Special case for Complement: `develop` maps to `main`
+            if repo_name == "complement" and branch_name == "develop":
+                branch_name = "main"
+
             base_branch = find_ref(repo, branch_name)
             if not base_branch:
                 print(f"Could not find base branch {branch_name} for {repo_name}!")
@@ -231,6 +242,12 @@ def _prepare() -> None:
         if click.confirm("Push new SyTest branch?", default=True):
             sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name)
 
+        # Same for Complement
+        if click.confirm("Push new Complement branch?", default=True):
+            complement_repo.git.push(
+                "-u", complement_repo.remote().name, release_branch_name
+            )
+
     # Switch to the release branch and ensure it's up to date.
     synapse_repo.git.checkout(release_branch_name)
     update_branch(synapse_repo)
@@ -630,6 +647,9 @@ def _merge_back() -> None:
     else:
         # Full release
         sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
+        complement_repo = get_repo_and_check_clean_checkout(
+            "../complement", "complement"
+        )
 
         if click.confirm(f"Merge {branch_name} → master?", default=True):
             _merge_into(synapse_repo, branch_name, "master")
@@ -643,6 +663,9 @@ def _merge_back() -> None:
         if click.confirm("On SyTest, merge master → develop?", default=True):
             _merge_into(sytest_repo, "master", "develop")
 
+        if click.confirm(f"On Complement, merge {branch_name} → main?", default=True):
+            _merge_into(complement_repo, branch_name, "main")
+
 
 @cli.command()
 def announce() -> None:
diff --git a/synapse/_scripts/generate_workers_map.py b/synapse/_scripts/generate_workers_map.py
index 5b6c8f6837..715c7ddc17 100755
--- a/synapse/_scripts/generate_workers_map.py
+++ b/synapse/_scripts/generate_workers_map.py
@@ -44,7 +44,7 @@ logger = logging.getLogger("generate_workers_map")
 
 
 class MockHomeserver(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore
+    DATASTORE_CLASS = DataStore
 
     def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None:
         super().__init__(config.server.server_name, config=config)
diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py
index 8d22bf53d4..d8b4dbd6c6 100644
--- a/synapse/_scripts/update_synapse_database.py
+++ b/synapse/_scripts/update_synapse_database.py
@@ -41,7 +41,7 @@ logger = logging.getLogger("update_database")
 
 
 class MockHomeserver(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore [assignment]
+    DATASTORE_CLASS = DataStore
 
     def __init__(self, config: HomeServerConfig):
         super().__init__(
diff --git a/synapse/api/auth/__init__.py b/synapse/api/auth/__init__.py
index 234dcf1ca4..d5241afe73 100644
--- a/synapse/api/auth/__init__.py
+++ b/synapse/api/auth/__init__.py
@@ -18,7 +18,7 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-from typing import Optional, Tuple
+from typing import TYPE_CHECKING, Optional, Tuple
 
 from typing_extensions import Protocol
 
@@ -28,6 +28,9 @@ from synapse.appservice import ApplicationService
 from synapse.http.site import SynapseRequest
 from synapse.types import Requester
 
+if TYPE_CHECKING:
+    from synapse.rest.admin.experimental_features import ExperimentalFeature
+
 # guests always get this device id.
 GUEST_DEVICE_ID = "guest_device"
 
@@ -87,6 +90,19 @@ class Auth(Protocol):
             AuthError if access is denied for the user in the access token
         """
 
+    async def get_user_by_req_experimental_feature(
+        self,
+        request: SynapseRequest,
+        feature: "ExperimentalFeature",
+        allow_guest: bool = False,
+        allow_expired: bool = False,
+        allow_locked: bool = False,
+    ) -> Requester:
+        """Like `get_user_by_req`, except also checks if the user has access to
+        the experimental feature. If they don't, a 404 "unrecognized request"
+        error is raised.
+        """
+
     async def validate_appservice_can_control_user_id(
         self, app_service: ApplicationService, user_id: str
     ) -> None:
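For context, a hedged sketch of how a REST servlet might use this method to gate an endpoint behind a per-user feature. The servlet class is hypothetical and registration details are omitted; `ExperimentalFeature.MSC3881` is used as an illustrative feature name:

```python
from synapse.http.servlet import RestServlet
from synapse.rest.admin.experimental_features import ExperimentalFeature


class ExampleFeatureGatedServlet(RestServlet):
    """Hypothetical servlet showing the intended call pattern."""

    def __init__(self, hs):
        super().__init__()
        self.auth = hs.get_auth()

    async def on_GET(self, request):
        # Raises a 404 "unrecognized request" error if the feature is not
        # enabled for this user (unless the feature is globally enabled,
        # in which case ordinary auth errors propagate instead).
        requester = await self.auth.get_user_by_req_experimental_feature(
            request, ExperimentalFeature.MSC3881
        )
        return 200, {"user_id": requester.user.to_string()}
```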
diff --git a/synapse/api/auth/internal.py b/synapse/api/auth/internal.py
index 2878f3e6e9..9fd4db68e1 100644
--- a/synapse/api/auth/internal.py
+++ b/synapse/api/auth/internal.py
@@ -28,6 +28,7 @@ from synapse.api.errors import (
     Codes,
     InvalidClientTokenError,
     MissingClientTokenError,
+    UnrecognizedRequestError,
 )
 from synapse.http.site import SynapseRequest
 from synapse.logging.opentracing import active_span, force_tracing, start_active_span
@@ -38,8 +39,10 @@ from . import GUEST_DEVICE_ID
 from .base import BaseAuth
 
 if TYPE_CHECKING:
+    from synapse.rest.admin.experimental_features import ExperimentalFeature
     from synapse.server import HomeServer
 
+
 logger = logging.getLogger(__name__)
 
 
@@ -106,6 +109,32 @@ class InternalAuth(BaseAuth):
                     parent_span.set_tag("appservice_id", requester.app_service.id)
             return requester
 
+    async def get_user_by_req_experimental_feature(
+        self,
+        request: SynapseRequest,
+        feature: "ExperimentalFeature",
+        allow_guest: bool = False,
+        allow_expired: bool = False,
+        allow_locked: bool = False,
+    ) -> Requester:
+        try:
+            requester = await self.get_user_by_req(
+                request,
+                allow_guest=allow_guest,
+                allow_expired=allow_expired,
+                allow_locked=allow_locked,
+            )
+            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
+                return requester
+
+            raise UnrecognizedRequestError(code=404)
+        except (AuthError, InvalidClientTokenError):
+            if feature.is_globally_enabled(self.hs.config):
+                # If it's globally enabled then raise the auth error
+                raise
+
+            raise UnrecognizedRequestError(code=404)
+
     @cancellable
     async def _wrapped_get_user_by_req(
         self,
diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py
index 3146e1577c..f61b39ded7 100644
--- a/synapse/api/auth/msc3861_delegated.py
+++ b/synapse/api/auth/msc3861_delegated.py
@@ -40,6 +40,7 @@ from synapse.api.errors import (
     OAuthInsufficientScopeError,
     StoreError,
     SynapseError,
+    UnrecognizedRequestError,
 )
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import make_deferred_yieldable
@@ -48,6 +49,7 @@ from synapse.util import json_decoder
 from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
 
 if TYPE_CHECKING:
+    from synapse.rest.admin.experimental_features import ExperimentalFeature
     from synapse.server import HomeServer
 
 logger = logging.getLogger(__name__)
@@ -245,6 +247,32 @@ class MSC3861DelegatedAuth(BaseAuth):
 
         return requester
 
+    async def get_user_by_req_experimental_feature(
+        self,
+        request: SynapseRequest,
+        feature: "ExperimentalFeature",
+        allow_guest: bool = False,
+        allow_expired: bool = False,
+        allow_locked: bool = False,
+    ) -> Requester:
+        try:
+            requester = await self.get_user_by_req(
+                request,
+                allow_guest=allow_guest,
+                allow_expired=allow_expired,
+                allow_locked=allow_locked,
+            )
+            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
+                return requester
+
+            raise UnrecognizedRequestError(code=404)
+        except (AuthError, InvalidClientTokenError):
+            if feature.is_globally_enabled(self.hs.config):
+                # If it's globally enabled then raise the auth error
+                raise
+
+            raise UnrecognizedRequestError(code=404)
+
     async def get_user_by_access_token(
         self,
         token: str,
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index a99a9e09fc..26b8711851 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -130,7 +130,8 @@ class Ratelimiter:
                 Overrides the value set during instantiation if set.
             burst_count: How many actions that can be performed before being limited.
                 Overrides the value set during instantiation if set.
-            update: Whether to count this check as performing the action
+            update: Whether to count this check as performing the action. If the action
+                cannot be performed, the user's action count is not incremented at all.
             n_actions: The number of times the user wants to do this action. If the user
                 cannot do all of the actions, the user's action count is not incremented
                 at all.
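The `update`/`n_actions` semantics are all-or-nothing: if the requested actions cannot all be performed, the stored count is left untouched. Below is a stand-alone toy illustration of just that behaviour (not Synapse's actual `Ratelimiter`, which also tracks rates over time):

```python
class ToyRatelimiter:
    """Simplified illustration of the documented semantics."""

    def __init__(self, burst_count: int) -> None:
        self.burst_count = burst_count
        self.count = 0

    def can_do_action(self, update: bool = True, n_actions: int = 1) -> bool:
        # If the user cannot perform *all* n_actions, allow nothing and
        # do not increment the count at all.
        if self.count + n_actions > self.burst_count:
            return False
        # update=False checks the limit without counting the action(s).
        if update:
            self.count += n_actions
        return True


limiter = ToyRatelimiter(burst_count=3)
print(limiter.can_do_action(n_actions=2))   # True; count -> 2
print(limiter.can_do_action(update=False))  # True; count stays 2
print(limiter.can_do_action(n_actions=5))   # False; count stays 2
```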
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 3990eda0fa..7c9b94c65c 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -110,7 +110,7 @@ class AdminCmdStore(
 
 
 class AdminCmdServer(HomeServer):
-    DATASTORE_CLASS = AdminCmdStore  # type: ignore
+    DATASTORE_CLASS = AdminCmdStore
 
 
 async def export_data_command(hs: HomeServer, args: argparse.Namespace) -> None:
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 8c2a74a723..599f95466b 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -163,7 +163,7 @@ class GenericWorkerStore(
 
 
 class GenericWorkerServer(HomeServer):
-    DATASTORE_CLASS = GenericWorkerStore  # type: ignore
+    DATASTORE_CLASS = GenericWorkerStore
 
     def _listen_http(self, listener_config: ListenerConfig) -> None:
         assert listener_config.http_options is not None
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 8a545a86c1..2b111847b7 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -81,7 +81,7 @@ def gz_wrap(r: Resource) -> Resource:
 
 
 class SynapseHomeServer(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore
+    DATASTORE_CLASS = DataStore
 
     def _listener_http(
         self,
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 1b72727b75..c21b7eb37e 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -437,10 +437,6 @@ class ExperimentalConfig(Config):
             "msc3823_account_suspension", False
         )
 
-        self.msc3916_authenticated_media_enabled = experimental.get(
-            "msc3916_authenticated_media_enabled", False
-        )
-
         # MSC4151: Report room API (Client-Server API)
         self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)
 
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index b997d82d71..f937fd4698 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -836,3 +836,21 @@ def maybe_upsert_event_field(
             del container[key]
 
     return upsert_okay
+
+
+def strip_event(event: EventBase) -> JsonDict:
+    """
+    Used for "stripped state" events which provide a simplified view of the state of a
+    room, intended to help a potential joiner identify the room (relevant when the
+    user is invited to, or has knocked on, the room).
+
+    Stripped state events can only have the `sender`, `type`, `state_key` and `content`
+    properties present.
+    """
+
+    return {
+        "type": event.type,
+        "state_key": event.state_key,
+        "content": event.content,
+        "sender": event.sender,
+    }
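For illustration, the stripped form of an `m.room.name` event, using a stand-in object (constructing a real `EventBase` needs a room version and signing machinery):

    from types import SimpleNamespace

    # Stand-in for an EventBase with the fields strip_event reads.
    event = SimpleNamespace(
        type="m.room.name",
        state_key="",
        content={"name": "Matrix HQ"},
        sender="@alice:example.org",
        event_id="$abc123",            # dropped by strip_event
        origin_server_ts=1719000000,   # dropped by strip_event
    )

    stripped = {
        "type": event.type,
        "state_key": event.state_key,
        "content": event.content,
        "sender": event.sender,
    }
    assert "event_id" not in stripped and "origin_server_ts" not in stripped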
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index f0f5a37a57..7d80ff6998 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -1871,6 +1871,52 @@ class FederationClient(FederationBase):
 
         return filtered_statuses, filtered_failures
 
+    async def federation_download_media(
+        self,
+        destination: str,
+        media_id: str,
+        output_stream: BinaryIO,
+        max_size: int,
+        max_timeout_ms: int,
+        download_ratelimiter: Ratelimiter,
+        ip_address: str,
+    ) -> Union[
+        Tuple[int, Dict[bytes, List[bytes]], bytes],
+        Tuple[int, Dict[bytes, List[bytes]]],
+    ]:
+        try:
+            return await self.transport_layer.federation_download_media(
+                destination,
+                media_id,
+                output_stream=output_stream,
+                max_size=max_size,
+                max_timeout_ms=max_timeout_ms,
+                download_ratelimiter=download_ratelimiter,
+                ip_address=ip_address,
+            )
+        except HttpResponseException as e:
+            # If the error is due to an unrecognised endpoint, fall back to the
+            # _matrix/media/v3/download endpoint. Otherwise, treat it as a
+            # legitimate error and raise.
+            if not is_unknown_endpoint(e):
+                raise
+
+        logger.debug(
+            "Couldn't download media %s/%s over _matrix/federation/v1/media/download, falling back to _matrix/media/v3/download path",
+            destination,
+            media_id,
+        )
+
+        return await self.transport_layer.download_media_v3(
+            destination,
+            media_id,
+            output_stream=output_stream,
+            max_size=max_size,
+            max_timeout_ms=max_timeout_ms,
+            download_ratelimiter=download_ratelimiter,
+            ip_address=ip_address,
+        )
+
     async def download_media(
         self,
         destination: str,
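A hedged sketch of consuming the union return type above: by this reading of the transport layer, the new federation endpoint yields a third element (the JSON metadata part of the multipart body) while the v3 fallback yields a plain two-tuple. The helper name, sizes, and timeout are illustrative.

    async def fetch_media(client, destination, media_id, out, limiter, ip):
        # `client` is a FederationClient.
        result = await client.federation_download_media(
            destination,
            media_id,
            output_stream=out,
            max_size=50 * 1024 * 1024,
            max_timeout_ms=20_000,
            download_ratelimiter=limiter,
            ip_address=ip,
        )
        if len(result) == 3:
            length, headers, json_bytes = result  # new federation endpoint
        else:
            length, headers = result              # _matrix/media/v3 fallback
        return length, headers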
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index 9f1c2fe22a..b435588da0 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -322,7 +322,6 @@ class PerDestinationQueue:
         )
 
     async def _transaction_transmission_loop(self) -> None:
-        pending_pdus: List[EventBase] = []
         try:
             self.transmission_loop_running = True
 
@@ -338,7 +337,6 @@ class PerDestinationQueue:
                     # not caught up yet
                     return
 
-            pending_pdus = []
             while True:
                 self._new_data_to_send = False
 
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 0d6cb0c691..87d1799590 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -824,7 +824,6 @@ class TransportLayerClient:
         ip_address: str,
     ) -> Tuple[int, Dict[bytes, List[bytes]]]:
         path = f"/_matrix/media/r0/download/{destination}/{media_id}"
-
         return await self.client.get_file(
             destination,
             path,
@@ -852,7 +851,6 @@ class TransportLayerClient:
         ip_address: str,
     ) -> Tuple[int, Dict[bytes, List[bytes]]]:
         path = f"/_matrix/media/v3/download/{destination}/{media_id}"
-
         return await self.client.get_file(
             destination,
             path,
@@ -873,6 +871,29 @@ class TransportLayerClient:
             ip_address=ip_address,
         )
 
+    async def federation_download_media(
+        self,
+        destination: str,
+        media_id: str,
+        output_stream: BinaryIO,
+        max_size: int,
+        max_timeout_ms: int,
+        download_ratelimiter: Ratelimiter,
+        ip_address: str,
+    ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
+        path = f"/_matrix/federation/v1/media/download/{media_id}"
+        return await self.client.federation_get_file(
+            destination,
+            path,
+            output_stream=output_stream,
+            max_size=max_size,
+            args={
+                "timeout_ms": str(max_timeout_ms),
+            },
+            download_ratelimiter=download_ratelimiter,
+            ip_address=ip_address,
+        )
+
 
 def _create_path(federation_prefix: str, path: str, *args: str) -> str:
     """
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index edaf0196d6..5f997040d0 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -32,8 +32,9 @@ from synapse.federation.transport.server._base import (
 from synapse.federation.transport.server.federation import (
     FEDERATION_SERVLET_CLASSES,
     FederationAccountStatusServlet,
+    FederationMediaDownloadServlet,
+    FederationMediaThumbnailServlet,
     FederationUnstableClientKeysClaimServlet,
-    FederationUnstableMediaDownloadServlet,
 )
 from synapse.http.server import HttpServer, JsonResource
 from synapse.http.servlet import (
@@ -316,11 +317,11 @@ def register_servlets(
             ):
                 continue
 
-            if servletclass == FederationUnstableMediaDownloadServlet:
-                if (
-                    not hs.config.server.enable_media_repo
-                    or not hs.config.experimental.msc3916_authenticated_media_enabled
-                ):
+            if (
+                servletclass == FederationMediaDownloadServlet
+                or servletclass == FederationMediaThumbnailServlet
+            ):
+                if not hs.config.server.enable_media_repo:
                     continue
 
             servletclass(
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index 4e2717b565..9094201da0 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -362,7 +362,9 @@ class BaseFederationServlet:
                                 return None
                             if (
                                 func.__self__.__class__.__name__  # type: ignore
-                                == "FederationUnstableMediaDownloadServlet"
+                                == "FederationMediaDownloadServlet"
+                                or func.__self__.__class__.__name__  # type: ignore
+                                == "FederationMediaThumbnailServlet"
                             ):
                                 response = await func(
                                     origin, content, request, *args, **kwargs
@@ -374,7 +376,9 @@ class BaseFederationServlet:
                     else:
                         if (
                             func.__self__.__class__.__name__  # type: ignore
-                            == "FederationUnstableMediaDownloadServlet"
+                            == "FederationMediaDownloadServlet"
+                            or func.__self__.__class__.__name__  # type: ignore
+                            == "FederationMediaThumbnailServlet"
                         ):
                             response = await func(
                                 origin, content, request, *args, **kwargs
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index 67bb907050..b075a86f68 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -46,11 +46,13 @@ from synapse.http.servlet import (
     parse_boolean_from_args,
     parse_integer,
     parse_integer_from_args,
+    parse_string,
     parse_string_from_args,
     parse_strings_from_args,
 )
 from synapse.http.site import SynapseRequest
 from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
+from synapse.media.thumbnailer import ThumbnailProvider
 from synapse.types import JsonDict
 from synapse.util import SYNAPSE_VERSION
 from synapse.util.ratelimitutils import FederationRateLimiter
@@ -790,7 +792,7 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
         return 200, {"account_statuses": statuses, "failures": failures}
 
 
-class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
+class FederationMediaDownloadServlet(BaseFederationServerServlet):
     """
     Implementation of new federation media `/download` endpoint outlined in MSC3916. Returns
     a multipart/mixed response consisting of a JSON object and the requested media
@@ -798,7 +800,6 @@ class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
     """
 
     PATH = "/media/download/(?P<media_id>[^/]*)"
-    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3916"
     RATELIMIT = True
 
     def __init__(
@@ -827,6 +828,59 @@ class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
         )
 
 
+class FederationMediaThumbnailServlet(BaseFederationServerServlet):
+    """
+    Implementation of the new federation media `/thumbnail` endpoint outlined in MSC3916. Returns
+    a multipart/mixed response consisting of a JSON object and the requested media
+    item. This endpoint only returns local media.
+    """
+
+    PATH = "/media/thumbnail/(?P<media_id>[^/]*)"
+    RATELIMIT = True
+
+    def __init__(
+        self,
+        hs: "HomeServer",
+        ratelimiter: FederationRateLimiter,
+        authenticator: Authenticator,
+        server_name: str,
+    ):
+        super().__init__(hs, authenticator, ratelimiter, server_name)
+        self.media_repo = self.hs.get_media_repository()
+        self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
+        self.thumbnail_provider = ThumbnailProvider(
+            hs, self.media_repo, self.media_repo.media_storage
+        )
+
+    async def on_GET(
+        self,
+        origin: Optional[str],
+        content: Literal[None],
+        request: SynapseRequest,
+        media_id: str,
+    ) -> None:
+        width = parse_integer(request, "width", required=True)
+        height = parse_integer(request, "height", required=True)
+        method = parse_string(request, "method", "scale")
+        # TODO Parse the Accept header to get a prioritised list of thumbnail types.
+        m_type = "image/png"
+        max_timeout_ms = parse_integer(
+            request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
+        )
+        max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
+
+        if self.dynamic_thumbnails:
+            await self.thumbnail_provider.select_or_generate_local_thumbnail(
+                request, media_id, width, height, method, m_type, max_timeout_ms, True
+            )
+        else:
+            await self.thumbnail_provider.respond_local_thumbnail(
+                request, media_id, width, height, method, m_type, max_timeout_ms, True
+            )
+        self.media_repo.mark_recently_accessed(None, media_id)
+
+
 FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
     FederationSendServlet,
     FederationEventServlet,
@@ -858,5 +912,6 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
     FederationV1SendKnockServlet,
     FederationMakeKnockServlet,
     FederationAccountStatusServlet,
-    FederationUnstableMediaDownloadServlet,
+    FederationMediaDownloadServlet,
+    FederationMediaThumbnailServlet,
 )
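An assumed request shape for the new thumbnail servlet, derived from its `PATH` (under the standard `/_matrix/federation/v1` prefix) and the query parameters parsed above; the server name and media ID are illustrative:

    from urllib.parse import urlencode

    params = {
        "width": 96,
        "height": 96,
        "method": "scale",      # or "crop"
        "timeout_ms": 20_000,   # clamped to MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
    }
    url = (
        "https://remote.example.org/_matrix/federation/v1"
        "/media/thumbnail/abcdef123456?" + urlencode(params)
    )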
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 11ac377680..12a7cace55 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -283,6 +283,10 @@ class DeactivateAccountHandler:
                     ratelimit=False,
                     require_consent=False,
                 )
+
+                # Mark the room as forgotten too, because the user won't be able to
+                # do this themselves. This may eventually lead to the room being purged.
+                await self._room_member_handler.forget(user, room_id)
             except Exception:
                 logger.exception(
                     "Failed to part user %r from room %r: ignoring and continuing",
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index 847a638bba..a1ddac903e 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -18,22 +18,35 @@
 #
 #
 import logging
-from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Dict, Final, List, Optional, Set, Tuple
 
+import attr
 from immutabledict import immutabledict
 
-from synapse.api.constants import AccountDataTypes, EventTypes, Membership
+from synapse.api.constants import (
+    AccountDataTypes,
+    Direction,
+    EventContentFields,
+    EventTypes,
+    Membership,
+)
 from synapse.events import EventBase
-from synapse.storage.roommember import RoomsForUser
+from synapse.events.utils import strip_event
+from synapse.handlers.relations import BundledAggregations
+from synapse.storage.databases.main.stream import CurrentStateDeltaMembership
 from synapse.types import (
+    JsonDict,
     PersistedEventPosition,
     Requester,
     RoomStreamToken,
+    StateMap,
+    StreamKeyType,
     StreamToken,
     UserID,
 )
 from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
 from synapse.types.state import StateFilter
+from synapse.visibility import filter_events_for_client
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -41,28 +54,9 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
-def convert_event_to_rooms_for_user(event: EventBase) -> RoomsForUser:
-    """
-    Quick helper to convert an event to a `RoomsForUser` object.
-    """
-    # These fields should be present for all persisted events
-    assert event.internal_metadata.stream_ordering is not None
-    assert event.internal_metadata.instance_name is not None
-
-    return RoomsForUser(
-        room_id=event.room_id,
-        sender=event.sender,
-        membership=event.membership,
-        event_id=event.event_id,
-        event_pos=PersistedEventPosition(
-            event.internal_metadata.instance_name,
-            event.internal_metadata.stream_ordering,
-        ),
-        room_version_id=event.room_version.identifier,
-    )
-
-
-def filter_membership_for_sync(*, membership: str, user_id: str, sender: str) -> bool:
+def filter_membership_for_sync(
+    *, membership: str, user_id: str, sender: Optional[str]
+) -> bool:
     """
     Returns True if the membership event should be included in the sync response,
     otherwise False.
@@ -79,7 +73,226 @@ def filter_membership_for_sync(*, membership: str, user_id: str, sender: str) ->
     #
     # This logic includes kicks (leave events where the sender is not the same user) and
     # can be read as "anything that isn't a leave or a leave with a different sender".
-    return membership != Membership.LEAVE or sender != user_id
+    #
+    # When `sender=None` and `membership=Membership.LEAVE`, it means that a state reset
+    # happened that removed the user from the room, or the user was the last person
+    # locally to leave the room which caused the server to leave the room. In both
+    # cases, we can just remove the rooms since they are no longer relevant to the user.
+    # They could still be added back later if they are `newly_left`.
+    return membership != Membership.LEAVE or sender not in (user_id, None)
+
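The rule reads more clearly as a truth table; a standalone sketch (`Membership.LEAVE` is the string "leave" in Synapse's constants):

    LEAVE = "leave"  # value of Membership.LEAVE

    def included(membership: str, user_id: str, sender) -> bool:
        return membership != LEAVE or sender not in (user_id, None)

    assert included("join", "@a:x", "@a:x")       # joins are always included
    assert included(LEAVE, "@a:x", "@mod:x")      # kick: keep it
    assert not included(LEAVE, "@a:x", "@a:x")    # own leave: drop it
    assert not included(LEAVE, "@a:x", None)      # state reset: drop it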
+
+# We can't freeze this class because we want to update it in place with the
+# de-duplicated data.
+@attr.s(slots=True, auto_attribs=True)
+class RoomSyncConfig:
+    """
+    Holds the config for what data we should fetch for a room in the sync response.
+
+    Attributes:
+        timeline_limit: The maximum number of events to return in the timeline.
+
+        required_state_map: Map from state event type to state_keys requested for the
+            room. The values are close to `StateKey` but actually use a syntax where you
+            can provide `*` wildcard and `$LAZY` for lazy-loading room members.
+    """
+
+    timeline_limit: int
+    required_state_map: Dict[str, Set[str]]
+
+    @classmethod
+    def from_room_config(
+        cls,
+        room_params: SlidingSyncConfig.CommonRoomParameters,
+    ) -> "RoomSyncConfig":
+        """
+        Create a `RoomSyncConfig` from a `SlidingSyncList`/`RoomSubscription` config.
+
+        Args:
+            room_params: `SlidingSyncConfig.SlidingSyncList` or `SlidingSyncConfig.RoomSubscription`
+        """
+        required_state_map: Dict[str, Set[str]] = {}
+        for (
+            state_type,
+            state_key,
+        ) in room_params.required_state:
+            # If we already have a wildcard for this specific `state_key`, we don't need
+            # to add it since the wildcard already covers it.
+            if state_key in required_state_map.get(StateValues.WILDCARD, set()):
+                continue
+
+            # If we already have a wildcard `state_key` for this `state_type`, we don't need
+            # to add anything else
+            if StateValues.WILDCARD in required_state_map.get(state_type, set()):
+                continue
+
+            # If we're getting wildcards for the `state_type` and `state_key`, that's
+            # all that matters so get rid of any other entries
+            if state_type == StateValues.WILDCARD and state_key == StateValues.WILDCARD:
+                required_state_map = {StateValues.WILDCARD: {StateValues.WILDCARD}}
+                # We can break, since we don't need to add anything else
+                break
+
+            # If we're getting a wildcard for the `state_type`, get rid of any other
+            # entries with the same `state_key`, since the wildcard will cover it already.
+            elif state_type == StateValues.WILDCARD:
+                # Get rid of any entries that match the `state_key`
+                #
+                # Make a copy so we don't run into an error: `dictionary changed size
+                # during iteration`, when we remove items
+                for (
+                    existing_state_type,
+                    existing_state_key_set,
+                ) in list(required_state_map.items()):
+                    # Make a copy so we don't run into an error: `Set changed size during
+                    # iteration`, when we filter out and remove items
+                    for existing_state_key in existing_state_key_set.copy():
+                        if existing_state_key == state_key:
+                            existing_state_key_set.remove(state_key)
+
+                    # If we've left the `set()` empty, remove it from the map
+                    if existing_state_key_set == set():
+                        required_state_map.pop(existing_state_type, None)
+
+            # If we're getting a wildcard `state_key`, get rid of any other state_keys
+            # for this `state_type` since the wildcard will cover it already.
+            if state_key == StateValues.WILDCARD:
+                required_state_map[state_type] = {state_key}
+            # Otherwise, just add it to the set
+            else:
+                if required_state_map.get(state_type) is None:
+                    required_state_map[state_type] = {state_key}
+                else:
+                    required_state_map[state_type].add(state_key)
+
+        return cls(
+            timeline_limit=room_params.timeline_limit,
+            required_state_map=required_state_map,
+        )
+
+    def deep_copy(self) -> "RoomSyncConfig":
+        required_state_map: Dict[str, Set[str]] = {
+            state_type: state_key_set.copy()
+            for state_type, state_key_set in self.required_state_map.items()
+        }
+
+        return RoomSyncConfig(
+            timeline_limit=self.timeline_limit,
+            required_state_map=required_state_map,
+        )
+
+    def combine_room_sync_config(
+        self, other_room_sync_config: "RoomSyncConfig"
+    ) -> None:
+        """
+        Combine this `RoomSyncConfig` with another `RoomSyncConfig` and take the
+        superset union of the two.
+        """
+        # Take the highest timeline limit
+        if self.timeline_limit < other_room_sync_config.timeline_limit:
+            self.timeline_limit = other_room_sync_config.timeline_limit
+
+        # Union the required state
+        for (
+            state_type,
+            state_key_set,
+        ) in other_room_sync_config.required_state_map.items():
+            # If we already have a wildcard for everything, we don't need to add
+            # anything else
+            if StateValues.WILDCARD in self.required_state_map.get(
+                StateValues.WILDCARD, set()
+            ):
+                break
+
+            # If we already have a wildcard `state_key` for this `state_type`, we don't need
+            # to add anything else
+            if StateValues.WILDCARD in self.required_state_map.get(state_type, set()):
+                continue
+
+            # If we're getting wildcards for the `state_type` and `state_key`, that's
+            # all that matters so get rid of any other entries
+            if (
+                state_type == StateValues.WILDCARD
+                and StateValues.WILDCARD in state_key_set
+            ):
+                self.required_state_map = {state_type: {StateValues.WILDCARD}}
+                # We can break, since we don't need to add anything else
+                break
+
+            for state_key in state_key_set:
+                # If we already have a wildcard for this specific `state_key`, we don't need
+                # to add it since the wildcard already covers it.
+                if state_key in self.required_state_map.get(
+                    StateValues.WILDCARD, set()
+                ):
+                    continue
+
+                # If we're getting a wildcard for the `state_type`, get rid of any other
+                # entries with the same `state_key`, since the wildcard will cover it already.
+                if state_type == StateValues.WILDCARD:
+                    # Get rid of any entries that match the `state_key`
+                    #
+                    # Make a copy so we don't run into an error: `dictionary changed size
+                    # during iteration`, when we remove items
+                    for existing_state_type, existing_state_key_set in list(
+                        self.required_state_map.items()
+                    ):
+                        # Make a copy so we don't run into an error: `Set changed size during
+                        # iteration`, when we filter out and remove items
+                        for existing_state_key in existing_state_key_set.copy():
+                            if existing_state_key == state_key:
+                                existing_state_key_set.remove(state_key)
+
+                        # If we've left the `set()` empty, remove it from the map
+                        if existing_state_key_set == set():
+                            self.required_state_map.pop(existing_state_type, None)
+
+                # If we're getting a wildcard `state_key`, get rid of any other state_keys
+                # for this `state_type` since the wildcard will cover it already.
+                if state_key == StateValues.WILDCARD:
+                    self.required_state_map[state_type] = {state_key}
+                    break
+                # Otherwise, just add it to the set
+                else:
+                    if self.required_state_map.get(state_type) is None:
+                        self.required_state_map[state_type] = {state_key}
+                    else:
+                        self.required_state_map[state_type].add(state_key)
+
+
+class StateValues:
+    """
+    Understood values of the (type, state_key) tuple in `required_state`.
+    """
+
+    # Include all state events of the given type
+    WILDCARD: Final = "*"
+    # Lazy-load room membership events (include room membership events for any event
+    # `sender` in the timeline). We only give special meaning to this value when it's a
+    # `state_key`.
+    LAZY: Final = "$LAZY"
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _RoomMembershipForUser:
+    """
+    Attributes:
+        event_id: The event ID of the membership event
+        event_pos: The stream position of the membership event
+        membership: The membership state of the user in the room
+        sender: The person who sent the membership event
+        newly_joined: Whether the user newly joined the room during the given token
+            range
+    """
+
+    event_id: Optional[str]
+    event_pos: PersistedEventPosition
+    membership: str
+    sender: Optional[str]
+    newly_joined: bool
+
+    def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser":
+        return attr.evolve(self, **kwds)
 
 
 class SlidingSyncHandler:
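A standalone sketch of the wildcard de-duplication rules that `from_room_config` applies, operating on a plain `required_state` list of `(type, state_key)` pairs (only the collapsing logic; validation and `$LAZY` handling are omitted):

    from typing import Dict, List, Set, Tuple

    WILDCARD = "*"

    def build_required_state_map(pairs: List[Tuple[str, str]]) -> Dict[str, Set[str]]:
        m: Dict[str, Set[str]] = {}
        for state_type, state_key in pairs:
            # Already covered by a ("*", state_key) entry.
            if state_key in m.get(WILDCARD, set()):
                continue
            # Already covered by a (state_type, "*") entry.
            if WILDCARD in m.get(state_type, set()):
                continue
            # ("*", "*") swallows everything else.
            if state_type == WILDCARD and state_key == WILDCARD:
                return {WILDCARD: {WILDCARD}}
            # A wildcard type removes matching specific entries.
            if state_type == WILDCARD:
                for t, keys in list(m.items()):
                    keys.discard(state_key)
                    if not keys:
                        m.pop(t)
            # A wildcard state_key replaces all keys for that type.
            if state_key == WILDCARD:
                m[state_type] = {state_key}
            else:
                m.setdefault(state_type, set()).add(state_key)
        return m

    # ("*", "@alice:x") swallows the specific ("m.room.member", "@alice:x")
    assert build_required_state_map(
        [("m.room.member", "@alice:x"), ("*", "@alice:x")]
    ) == {"*": {"@alice:x"}}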
@@ -90,6 +303,7 @@ class SlidingSyncHandler:
         self.auth_blocking = hs.get_auth_blocking()
         self.notifier = hs.get_notifier()
         self.event_sources = hs.get_event_sources()
+        self.relations_handler = hs.get_relations_handler()
         self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
 
     async def wait_for_sync_for_user(
@@ -201,6 +415,9 @@ class SlidingSyncHandler:
 
         # Assemble sliding window lists
         lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
+        # Keep track of the rooms that we're going to display and need to fetch more
+        # info about
+        relevant_room_map: Dict[str, RoomSyncConfig] = {}
         if sync_config.lists:
             # Get all of the room IDs that the user should be able to see in the sync
             # response
@@ -218,23 +435,73 @@ class SlidingSyncHandler:
                         sync_config.user, sync_room_map, list_config.filters, to_token
                     )
 
+                # Sort the list
                 sorted_room_info = await self.sort_rooms(
                     filtered_sync_room_map, to_token
                 )
 
+                # Find which rooms are partially stated and may need to be filtered out
+                # depending on the `required_state` requested (see below).
+                partial_state_room_map = await self.store.is_partial_state_room_batched(
+                    filtered_sync_room_map.keys()
+                )
+
+                # Since creating the `RoomSyncConfig` takes some work, let's just do it
+                # once and make a copy whenever we need it.
+                room_sync_config = RoomSyncConfig.from_room_config(list_config)
+                membership_state_keys = room_sync_config.required_state_map.get(
+                    EventTypes.Member
+                )
+                lazy_loading = (
+                    membership_state_keys is not None
+                    and len(membership_state_keys) == 1
+                    and StateValues.LAZY in membership_state_keys
+                )
+
                 ops: List[SlidingSyncResult.SlidingWindowList.Operation] = []
                 if list_config.ranges:
                     for range in list_config.ranges:
+                        room_ids_in_list: List[str] = []
+
+                        # We're going to loop through the sorted list of rooms starting
+                        # at the range start index and keep adding rooms until we fill
+                        # up the range or run out of rooms.
+                        #
+                        # Both sides of range are inclusive so we `+ 1`
+                        max_num_rooms = range[1] - range[0] + 1
+                        for room_id, _ in sorted_room_info[range[0] :]:
+                            if len(room_ids_in_list) >= max_num_rooms:
+                                break
+
+                            # Exclude partially-stated rooms unless the `required_state`
+                            # only has `["m.room.member", "$LAZY"]` for membership
+                            # (lazy-loading room members).
+                            if partial_state_room_map.get(room_id) and not lazy_loading:
+                                continue
+
+                            # Take the superset of the `RoomSyncConfig` for each room.
+                            #
+                            # Update our `relevant_room_map` with the room we're going
+                            # to display and need to fetch more info about.
+                            existing_room_sync_config = relevant_room_map.get(room_id)
+                            if existing_room_sync_config is not None:
+                                existing_room_sync_config.combine_room_sync_config(
+                                    room_sync_config
+                                )
+                            else:
+                                # Make a copy so if we modify it later, it doesn't
+                                # affect all references.
+                                relevant_room_map[room_id] = (
+                                    room_sync_config.deep_copy()
+                                )
+
+                            room_ids_in_list.append(room_id)
+
                         ops.append(
                             SlidingSyncResult.SlidingWindowList.Operation(
                                 op=OperationType.SYNC,
                                 range=range,
-                                room_ids=[
-                                    room_id
-                                    for room_id, _ in sorted_room_info[
-                                        range[0] : range[1]
-                                    ]
-                                ],
+                                room_ids=room_ids_in_list,
                             )
                         )
 
@@ -243,11 +510,26 @@ class SlidingSyncHandler:
                     ops=ops,
                 )
 
+        # TODO: if (sync_config.room_subscriptions):
+
+        # Fetch room data
+        rooms: Dict[str, SlidingSyncResult.RoomResult] = {}
+        for room_id, room_sync_config in relevant_room_map.items():
+            room_sync_result = await self.get_room_sync_data(
+                user=sync_config.user,
+                room_id=room_id,
+                room_sync_config=room_sync_config,
+                rooms_membership_for_user_at_to_token=sync_room_map[room_id],
+                from_token=from_token,
+                to_token=to_token,
+            )
+
+            rooms[room_id] = room_sync_result
+
         return SlidingSyncResult(
             next_pos=to_token,
             lists=lists,
-            # TODO: Gather room data for rooms in lists and `sync_config.room_subscriptions`
-            rooms={},
+            rooms=rooms,
             extensions={},
         )
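Both ends of a sliding-window range are inclusive, hence the `+ 1` when sizing the list above; a quick standalone check of the arithmetic:

    rng = (0, 9)                        # both ends inclusive
    max_num_rooms = rng[1] - rng[0] + 1
    assert max_num_rooms == 10

    sorted_room_ids = [f"!room{i}:x" for i in range(25)]
    room_ids_in_list = []
    for room_id in sorted_room_ids[rng[0]:]:
        if len(room_ids_in_list) >= max_num_rooms:
            break
        # (the real loop may also skip partially-stated rooms here)
        room_ids_in_list.append(room_id)
    assert len(room_ids_in_list) == 10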
 
@@ -256,7 +538,7 @@ class SlidingSyncHandler:
         user: UserID,
         to_token: StreamToken,
         from_token: Optional[StreamToken] = None,
-    ) -> Dict[str, RoomsForUser]:
+    ) -> Dict[str, _RoomMembershipForUser]:
         """
         Fetch room IDs that should be listed for this user in the sync response (the
         full room list that will be filtered, sorted, and sliced).
@@ -305,13 +587,17 @@ class SlidingSyncHandler:
 
         # Our working list of rooms that can show up in the sync response
         sync_room_id_set = {
-            room_for_user.room_id: room_for_user
-            for room_for_user in room_for_user_list
-            if filter_membership_for_sync(
+            # Note: The `room_for_user` we're assigning here will need to be fixed
+            # up (below) because it potentially reflects the current snapshot time
+            # instead of the time of the `to_token`.
+            room_for_user.room_id: _RoomMembershipForUser(
+                event_id=room_for_user.event_id,
+                event_pos=room_for_user.event_pos,
                 membership=room_for_user.membership,
-                user_id=user_id,
                 sender=room_for_user.sender,
+                newly_joined=False,
             )
+            for room_for_user in room_for_user_list
         }
 
         # Get the `RoomStreamToken` that represents the spot we queried up to when we got
@@ -346,14 +632,9 @@ class SlidingSyncHandler:
         #
         # - 1a) Remove rooms that the user joined after the `to_token`
         # - 1b) Add back rooms that the user left after the `to_token`
+        # - 1c) Update room membership events to the point in time of the `to_token`
         # - 2) Add back newly_left rooms (> `from_token` and <= `to_token`)
-        #
-        # Below, we're doing two separate lookups for membership changes. We could
-        # request everything for both fixups in one range, [`from_token.room_key`,
-        # `membership_snapshot_token`), but we want to avoid raw `stream_ordering`
-        # comparison without `instance_name` (which is flawed). We could refactor
-        # `event.internal_metadata` to include `instance_name` but it might turn out a
-        # little difficult and a bigger, broader Synapse change than we want to make.
+        # - 3) Figure out which rooms are `newly_joined`
 
         # 1) -----------------------------------------------------
 
@@ -363,159 +644,198 @@ class SlidingSyncHandler:
         # If our `to_token` is already the same or ahead of the latest room membership
         # for the user, we don't need to do any "2)" fix-ups and can just straight-up
         # use the room list from the snapshot as a base (nothing has changed)
-        membership_change_events_after_to_token = []
+        current_state_delta_membership_changes_after_to_token = []
         if not membership_snapshot_token.is_before_or_eq(to_token.room_key):
-            membership_change_events_after_to_token = (
-                await self.store.get_membership_changes_for_user(
+            current_state_delta_membership_changes_after_to_token = (
+                await self.store.get_current_state_delta_membership_changes_for_user(
                     user_id,
                     from_key=to_token.room_key,
                     to_key=membership_snapshot_token,
-                    excluded_rooms=self.rooms_to_exclude_globally,
+                    excluded_room_ids=self.rooms_to_exclude_globally,
                 )
             )
 
-        # 1) Assemble a list of the last membership events in some given ranges. Someone
-        # could have left and joined multiple times during the given range but we only
-        # care about end-result so we grab the last one.
-        last_membership_change_by_room_id_after_to_token: Dict[str, EventBase] = {}
-        # We also need the first membership event after the `to_token` so we can step
-        # backward to the previous membership that would apply to the from/to range.
-        first_membership_change_by_room_id_after_to_token: Dict[str, EventBase] = {}
-        for event in membership_change_events_after_to_token:
-            last_membership_change_by_room_id_after_to_token[event.room_id] = event
+        # 1) Assemble a list of the first membership event after the `to_token` so we can
+        # step backward to the previous membership that would apply to the from/to
+        # range.
+        first_membership_change_by_room_id_after_to_token: Dict[
+            str, CurrentStateDeltaMembership
+        ] = {}
+        for membership_change in current_state_delta_membership_changes_after_to_token:
             # Only set if we haven't already set it
             first_membership_change_by_room_id_after_to_token.setdefault(
-                event.room_id, event
+                membership_change.room_id, membership_change
             )
 
         # 1) Fixup
+        #
+        # Since we fetched a snapshot of the user's room list at some point in time after
+        # the from/to tokens, we need to revert/rewind some membership changes to match
+        # the point in time of the `to_token`.
         for (
-            last_membership_change_after_to_token
-        ) in last_membership_change_by_room_id_after_to_token.values():
-            room_id = last_membership_change_after_to_token.room_id
-
-            # We want to find the first membership change after the `to_token` then step
-            # backward to know the membership in the from/to range.
-            first_membership_change_after_to_token = (
-                first_membership_change_by_room_id_after_to_token.get(room_id)
-            )
-            assert first_membership_change_after_to_token is not None, (
-                "If there was a `last_membership_change_after_to_token` that we're iterating over, "
-                + "then there should be corresponding a first change. For example, even if there "
-                + "is only one event after the `to_token`, the first and last event will be same event. "
-                + "This is probably a mistake in assembling the `last_membership_change_by_room_id_after_to_token`"
-                + "/`first_membership_change_by_room_id_after_to_token` dicts above."
-            )
-            # TODO: Instead of reading from `unsigned`, refactor this to use the
-            # `current_state_delta_stream` table in the future. Probably a new
-            # `get_membership_changes_for_user()` function that uses
-            # `current_state_delta_stream` with a join to `room_memberships`. This would
-            # help in state reset scenarios since `prev_content` is looking at the
-            # current branch vs the current room state. This is all just data given to
-            # the client so no real harm to data integrity, but we'd like to be nice to
-            # the client. Since the `current_state_delta_stream` table is new, it
-            # doesn't have all events in it. Since this is Sliding Sync, if we ever need
-            # to, we can signal the client to throw all of their state away by sending
-            # "operation: RESET".
-            prev_content = first_membership_change_after_to_token.unsigned.get(
-                "prev_content", {}
-            )
-            prev_membership = prev_content.get("membership", None)
-            prev_sender = first_membership_change_after_to_token.unsigned.get(
-                "prev_sender", None
-            )
-
-            # Check if the previous membership (membership that applies to the from/to
-            # range) should be included in our `sync_room_id_set`
-            should_prev_membership_be_included = (
-                prev_membership is not None
-                and prev_sender is not None
-                and filter_membership_for_sync(
-                    membership=prev_membership,
-                    user_id=user_id,
-                    sender=prev_sender,
-                )
-            )
-
-            # Check if the last membership (membership that applies to our snapshot) was
-            # already included in our `sync_room_id_set`
-            was_last_membership_already_included = filter_membership_for_sync(
-                membership=last_membership_change_after_to_token.membership,
+            room_id,
+            first_membership_change_after_to_token,
+        ) in first_membership_change_by_room_id_after_to_token.items():
+            # 1a) Remove rooms that the user joined after the `to_token`
+            if first_membership_change_after_to_token.prev_event_id is None:
+                sync_room_id_set.pop(room_id, None)
+            # 1b) 1c) From the first membership event after the `to_token`, step backward to the
+            # previous membership that would apply to the from/to range.
+            else:
+                # We don't expect these fields to be `None` if we have a `prev_event_id`
+                # but we're being defensive since it's possible that the prev event was
+                # culled from the database.
+                if (
+                    first_membership_change_after_to_token.prev_event_pos is not None
+                    and first_membership_change_after_to_token.prev_membership
+                    is not None
+                ):
+                    sync_room_id_set[room_id] = _RoomMembershipForUser(
+                        event_id=first_membership_change_after_to_token.prev_event_id,
+                        event_pos=first_membership_change_after_to_token.prev_event_pos,
+                        membership=first_membership_change_after_to_token.prev_membership,
+                        sender=first_membership_change_after_to_token.prev_sender,
+                        newly_joined=False,
+                    )
+                else:
+                    # If we can't find the previous membership event, we shouldn't
+                    # include the room in the sync response since we can't determine the
+                    # exact membership state and shouldn't rely on the current snapshot.
+                    sync_room_id_set.pop(room_id, None)
+
+        # Filter the rooms now that we have updated the room membership events to
+        # the point in time of the `to_token` (from the "1)" fixups)
+        filtered_sync_room_id_set = {
+            room_id: room_membership_for_user
+            for room_id, room_membership_for_user in sync_room_id_set.items()
+            if filter_membership_for_sync(
+                membership=room_membership_for_user.membership,
                 user_id=user_id,
-                sender=last_membership_change_after_to_token.sender,
+                sender=room_membership_for_user.sender,
             )
-
-            # 1a) Add back rooms that the user left after the `to_token`
-            #
-            # For example, if the last membership event after the `to_token` is a leave
-            # event, then the room was excluded from `sync_room_id_set` when we first
-            # crafted it above. We should add these rooms back as long as the user also
-            # was part of the room before the `to_token`.
-            if (
-                not was_last_membership_already_included
-                and should_prev_membership_be_included
-            ):
-                sync_room_id_set[room_id] = convert_event_to_rooms_for_user(
-                    last_membership_change_after_to_token
-                )
-            # 1b) Remove rooms that the user joined (hasn't left) after the `to_token`
-            #
-            # For example, if the last membership event after the `to_token` is a "join"
-            # event, then the room was included `sync_room_id_set` when we first crafted
-            # it above. We should remove these rooms as long as the user also wasn't
-            # part of the room before the `to_token`.
-            elif (
-                was_last_membership_already_included
-                and not should_prev_membership_be_included
-            ):
-                del sync_room_id_set[room_id]
+        }
 
         # 2) -----------------------------------------------------
         # We fix-up newly_left rooms after the first fixup because it may have removed
-        # some left rooms that we can figure out our newly_left in the following code
+        # some left rooms that we can figure out are newly_left in the following code
 
         # 2) Fetch membership changes that fall in the range from `from_token` up to `to_token`
-        membership_change_events_in_from_to_range = []
+        current_state_delta_membership_changes_in_from_to_range = []
         if from_token:
-            membership_change_events_in_from_to_range = (
-                await self.store.get_membership_changes_for_user(
+            current_state_delta_membership_changes_in_from_to_range = (
+                await self.store.get_current_state_delta_membership_changes_for_user(
                     user_id,
                     from_key=from_token.room_key,
                     to_key=to_token.room_key,
-                    excluded_rooms=self.rooms_to_exclude_globally,
+                    excluded_room_ids=self.rooms_to_exclude_globally,
                 )
             )
 
         # 2) Assemble a list of the last membership events in some given ranges. Someone
         # could have left and joined multiple times during the given range but we only
         # care about end-result so we grab the last one.
-        last_membership_change_by_room_id_in_from_to_range: Dict[str, EventBase] = {}
-        for event in membership_change_events_in_from_to_range:
-            last_membership_change_by_room_id_in_from_to_range[event.room_id] = event
+        last_membership_change_by_room_id_in_from_to_range: Dict[
+            str, CurrentStateDeltaMembership
+        ] = {}
+        # We also want to assemble a list of the first membership events during the
+        # token range so we can step backward to the previous membership that applied
+        # just before the token range, to see whether we have `newly_joined` the room.
+        first_membership_change_by_room_id_in_from_to_range: Dict[
+            str, CurrentStateDeltaMembership
+        ] = {}
+        # Keep track of whether the room has a non-join event in the token range so
+        # we can later tell if it was a `newly_joined` room. If the last membership
+        # event in the token range is a join and there is also some non-join in the
+        # range, we know they `newly_joined`.
+        has_non_join_event_by_room_id_in_from_to_range: Dict[str, bool] = {}
+        for (
+            membership_change
+        ) in current_state_delta_membership_changes_in_from_to_range:
+            room_id = membership_change.room_id
+
+            last_membership_change_by_room_id_in_from_to_range[room_id] = (
+                membership_change
+            )
+            # Only set if we haven't already set it
+            first_membership_change_by_room_id_in_from_to_range.setdefault(
+                room_id, membership_change
+            )
+
+            if membership_change.membership != Membership.JOIN:
+                has_non_join_event_by_room_id_in_from_to_range[room_id] = True
 
         # 2) Fixup
+        #
+        # 3) We also want to assemble a list of possibly newly joined rooms. Someone
+        # could have left and joined multiple times during the given range but we only
+        # care about whether they are joined at the end of the token range, so we are
+        # working with the last membership event in the token range.
+        possibly_newly_joined_room_ids = set()
         for (
             last_membership_change_in_from_to_range
         ) in last_membership_change_by_room_id_in_from_to_range.values():
             room_id = last_membership_change_in_from_to_range.room_id
 
+            # 3)
+            if last_membership_change_in_from_to_range.membership == Membership.JOIN:
+                possibly_newly_joined_room_ids.add(room_id)
+
             # 2) Add back newly_left rooms (> `from_token` and <= `to_token`). We
             # include newly_left rooms because the last event that the user should see
             # is their own leave event
             if last_membership_change_in_from_to_range.membership == Membership.LEAVE:
-                sync_room_id_set[room_id] = convert_event_to_rooms_for_user(
-                    last_membership_change_in_from_to_range
+                filtered_sync_room_id_set[room_id] = _RoomMembershipForUser(
+                    event_id=last_membership_change_in_from_to_range.event_id,
+                    event_pos=last_membership_change_in_from_to_range.event_pos,
+                    membership=last_membership_change_in_from_to_range.membership,
+                    sender=last_membership_change_in_from_to_range.sender,
+                    newly_joined=False,
                 )
 
-        return sync_room_id_set
+        # 3) Figure out `newly_joined`
+        for room_id in possibly_newly_joined_room_ids:
+            has_non_join_in_from_to_range = (
+                has_non_join_event_by_room_id_in_from_to_range.get(room_id, False)
+            )
+            # If the last membership event in the token range is a join and there is
+            # also some non-join in the range, we know they `newly_joined`.
+            if has_non_join_in_from_to_range:
+                # We found a `newly_joined` room (we left and joined within the token range)
+                filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[
+                    room_id
+                ].copy_and_replace(newly_joined=True)
+            else:
+                prev_event_id = first_membership_change_by_room_id_in_from_to_range[
+                    room_id
+                ].prev_event_id
+                prev_membership = first_membership_change_by_room_id_in_from_to_range[
+                    room_id
+                ].prev_membership
+
+                if prev_event_id is None:
+                    # We found a `newly_joined` room (we are joining the room for the
+                    # first time within the token range)
+                    filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[
+                        room_id
+                    ].copy_and_replace(newly_joined=True)
+                # Last resort, we need to step back to the previous membership event
+                # just before the token range to see if we're joined then or not.
+                elif prev_membership != Membership.JOIN:
+                    # We found a `newly_joined` room (we left before the token range
+                    # and joined within the token range)
+                    filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[
+                        room_id
+                    ].copy_and_replace(newly_joined=True)
+
+        return filtered_sync_room_id_set
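The three `newly_joined` cases reduce to a small predicate over what was seen in the token range; a standalone sketch:

    JOIN = "join"  # value of Membership.JOIN

    def is_newly_joined(
        last_membership: str,
        has_non_join_in_range: bool,
        prev_event_id,
        prev_membership,
    ) -> bool:
        if last_membership != JOIN:
            return False                # not joined at the end of the range
        if has_non_join_in_range:
            return True                 # left and re-joined within the range
        if prev_event_id is None:
            return True                 # first-ever join happened in the range
        return prev_membership != JOIN  # not joined just before the range

    assert is_newly_joined(JOIN, False, None, None)
    assert is_newly_joined(JOIN, True, "$e1", JOIN)
    assert not is_newly_joined(JOIN, False, "$e0", JOIN)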
 
     async def filter_rooms(
         self,
         user: UserID,
-        sync_room_map: Dict[str, RoomsForUser],
+        sync_room_map: Dict[str, _RoomMembershipForUser],
         filters: SlidingSyncConfig.SlidingSyncList.Filters,
         to_token: StreamToken,
-    ) -> Dict[str, RoomsForUser]:
+    ) -> Dict[str, _RoomMembershipForUser]:
         """
         Filter rooms based on the sync request.
 
@@ -533,9 +853,6 @@ class SlidingSyncHandler:
         user_id = user.to_string()
 
         # TODO: Apply filters
-        #
-        # TODO: Exclude partially stated rooms unless the `required_state` has
-        # `["m.room.member", "$LAZY"]`
 
         filtered_room_id_set = set(sync_room_map.keys())
 
@@ -576,13 +893,19 @@ class SlidingSyncHandler:
         if filters.is_encrypted is not None:
             # Make a copy so we don't run into an error: `Set changed size during
             # iteration`, when we filter out and remove items
-            for room_id in list(filtered_room_id_set):
+            for room_id in filtered_room_id_set.copy():
                 state_at_to_token = await self.storage_controllers.state.get_state_at(
                     room_id,
                     to_token,
                     state_filter=StateFilter.from_types(
                         [(EventTypes.RoomEncryption, "")]
                     ),
+                    # Partially-stated rooms should have all state events except for
+                    # the membership events, so we don't need to wait: we only care
+                    # about retrieving the `EventTypes.RoomEncryption` state event
+                    # here. Besides, we don't want to block the whole sync waiting
+                    # for this one room.
+                    await_full_state=False,
                 )
                 is_encrypted = state_at_to_token.get((EventTypes.RoomEncryption, ""))
 
@@ -597,7 +920,7 @@ class SlidingSyncHandler:
         if filters.is_invite is not None:
             # Make a copy so we don't run into an error: `Set changed size during
             # iteration`, when we filter out and remove items
-            for room_id in list(filtered_room_id_set):
+            for room_id in filtered_room_id_set.copy():
                 room_for_user = sync_room_map[room_id]
                 # If we're looking for invite rooms, filter out rooms that the user is
                 # not invited to and vice versa
@@ -609,11 +932,26 @@ class SlidingSyncHandler:
                 ):
                     filtered_room_id_set.remove(room_id)
 
-        if filters.room_types:
-            raise NotImplementedError()
+        # Filter by room type (space vs room, etc). A room must match one of the types
+        # provided in the list. `None` is a valid type for rooms which do not have a
+        # room type.
+        if filters.room_types is not None or filters.not_room_types is not None:
+            # Make a copy so we don't run into an error: `Set changed size during
+            # iteration`, when we filter out and remove items
+            for room_id in filtered_room_id_set.copy():
+                create_event = await self.store.get_create_event_for_room(room_id)
+                room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
+                if (
+                    filters.room_types is not None
+                    and room_type not in filters.room_types
+                ):
+                    filtered_room_id_set.remove(room_id)
 
-        if filters.not_room_types:
-            raise NotImplementedError()
+                if (
+                    filters.not_room_types is not None
+                    and room_type in filters.not_room_types
+                ):
+                    filtered_room_id_set.remove(room_id)
 
         if filters.room_name_like:
             raise NotImplementedError()
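A standalone sketch of the room-type filter semantics implemented above: the room's type comes from the `m.room.create` event's content (`None` when absent) and is matched against both filter lists:

    from typing import List, Optional

    def matches_room_type_filters(
        room_type: Optional[str],
        room_types: Optional[List[Optional[str]]],
        not_room_types: Optional[List[Optional[str]]],
    ) -> bool:
        if room_types is not None and room_type not in room_types:
            return False
        if not_room_types is not None and room_type in not_room_types:
            return False
        return True

    assert matches_room_type_filters("m.space", ["m.space"], None)
    assert matches_room_type_filters(None, [None], None)          # plain rooms
    assert not matches_room_type_filters("m.space", None, ["m.space"])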
@@ -629,9 +967,9 @@ class SlidingSyncHandler:
 
     async def sort_rooms(
         self,
-        sync_room_map: Dict[str, RoomsForUser],
+        sync_room_map: Dict[str, _RoomMembershipForUser],
         to_token: StreamToken,
-    ) -> List[Tuple[str, RoomsForUser]]:
+    ) -> List[Tuple[str, _RoomMembershipForUser]]:
         """
         Sort by `stream_ordering` of the last event that the user should see in the
         room. `stream_ordering` is unique so we get a stable sort.
@@ -678,3 +1016,354 @@ class SlidingSyncHandler:
             # We want descending order
             reverse=True,
         )
+
+    async def get_room_sync_data(
+        self,
+        user: UserID,
+        room_id: str,
+        room_sync_config: RoomSyncConfig,
+        rooms_membership_for_user_at_to_token: _RoomMembershipForUser,
+        from_token: Optional[StreamToken],
+        to_token: StreamToken,
+    ) -> SlidingSyncResult.RoomResult:
+        """
+        Fetch room data for the sync response.
+
+        We fetch data according to the token range (> `from_token` and <= `to_token`).
+
+        Args:
+            user: User to fetch data for
+            room_id: The room ID to fetch data for
+            room_sync_config: Config for what data we should fetch for a room in the
+                sync response.
+            rooms_membership_for_user_at_to_token: Membership information for the user
+                in the room at the time of `to_token`.
+            from_token: The point in the stream to sync from.
+            to_token: The point in the stream to sync up to.
+        """
+
+        # Assemble the list of timeline events
+        #
+        # FIXME: It would be nice to make the `rooms` response more uniform regardless of
+        # membership. Currently, we have to make all of these optional because
+        # `invite`/`knock` rooms only have `stripped_state`. See
+        # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932
+        timeline_events: Optional[List[EventBase]] = None
+        bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None
+        limited: Optional[bool] = None
+        prev_batch_token: Optional[StreamToken] = None
+        num_live: Optional[int] = None
+        if (
+            room_sync_config.timeline_limit > 0
+            # No timeline for invite/knock rooms (just `stripped_state`)
+            and rooms_membership_for_user_at_to_token.membership
+            not in (Membership.INVITE, Membership.KNOCK)
+        ):
+            limited = False
+            # We want to start off using the `to_token` (vs `from_token`) because we look
+            # backwards from the `to_token` up to the `timeline_limit` and we might not
+            # reach the `from_token` before we hit the limit. We will update the room stream
+            # position once we've fetched the events to point to the earliest event fetched.
+            prev_batch_token = to_token
+
+            # We're going to paginate backwards from the `to_token`
+            from_bound = to_token.room_key
+            # People shouldn't see past their leave/ban event
+            if rooms_membership_for_user_at_to_token.membership in (
+                Membership.LEAVE,
+                Membership.BAN,
+            ):
+                from_bound = (
+                    rooms_membership_for_user_at_to_token.event_pos.to_room_stream_token()
+                )
+
+            # Determine whether we should limit the timeline to the token range.
+            #
+            # We should return historical messages (before token range) in the
+            # following cases because we want clients to be able to show a basic
+            # screen of information:
+            #  - Initial sync (because no `from_token` to limit us anyway)
+            #  - When users `newly_joined`
+            #  - TODO: For an incremental sync where we haven't sent it down this
+            #    connection before
+            to_bound = (
+                from_token.room_key
+                if from_token is not None
+                and not rooms_membership_for_user_at_to_token.newly_joined
+                else None
+            )
+
+            timeline_events, new_room_key = await self.store.paginate_room_events(
+                room_id=room_id,
+                from_key=from_bound,
+                to_key=to_bound,
+                direction=Direction.BACKWARDS,
+                # We add one so we can determine if there are enough events to saturate
+                # the limit or not (see `limited`)
+                limit=room_sync_config.timeline_limit + 1,
+                event_filter=None,
+            )
+
+            # We want to return the events in ascending order (the last event is the
+            # most recent).
+            timeline_events.reverse()
+
+            # Determine our `limited` status based on the timeline. We do this before
+            # filtering the events so we can accurately determine if there is more to
+            # paginate even if we filter out some/all events.
+            if len(timeline_events) > room_sync_config.timeline_limit:
+                limited = True
+                # Get rid of that extra "+ 1" event because we only used it to determine
+                # if we hit the limit or not
+                timeline_events = timeline_events[-room_sync_config.timeline_limit :]
+                assert timeline_events[0].internal_metadata.stream_ordering
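+                # Rewind the room key to just before the oldest event we are
+                # returning, so that `prev_batch` pagination resumes exactly
+                # where this timeline chunk leaves off.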
+                new_room_key = RoomStreamToken(
+                    stream=timeline_events[0].internal_metadata.stream_ordering - 1
+                )
+
+            # Make sure we don't expose any events that the client shouldn't see
+            timeline_events = await filter_events_for_client(
+                self.storage_controllers,
+                user.to_string(),
+                timeline_events,
+                is_peeking=rooms_membership_for_user_at_to_token.membership
+                != Membership.JOIN,
+                filter_send_to_client=True,
+            )
+            # TODO: Filter out `EventTypes.CallInvite` in public rooms,
+            # see https://github.com/element-hq/synapse/issues/17359
+
+            # TODO: Handle timeline gaps (`get_timeline_gaps()`)
+
+            # Determine how many "live" events we have (events within the given token range).
+            #
+            # This is mostly useful to determine whether a given @mention event should
+            # make a noise or not. Clients cannot rely solely on the absence of
+            # `initial: true` to determine live events because if a room not in the
+            # sliding window bumps into the window because of an @mention it will have
+            # `initial: true` yet contain a single live event (with potentially other
+            # old events in the timeline).
+            num_live = 0
+            if from_token is not None:
+                for timeline_event in reversed(timeline_events):
+                    # These fields should be present for all persisted events
+                    assert timeline_event.internal_metadata.stream_ordering is not None
+                    assert timeline_event.internal_metadata.instance_name is not None
+
+                    persisted_position = PersistedEventPosition(
+                        instance_name=timeline_event.internal_metadata.instance_name,
+                        stream=timeline_event.internal_metadata.stream_ordering,
+                    )
+                    if persisted_position.persisted_after(from_token.room_key):
+                        num_live += 1
+                    else:
+                        # Since we're iterating over the timeline events in
+                        # reverse-chronological order, we can break once we hit an event
+                        # that's not live. In the future, we could potentially optimize
+                        # this more with a binary search (bisect).
+                        break
+
+            # If the timeline is `limited=True`, the client does not have all events
+            # necessary to calculate aggregations themselves.
+            if limited:
+                bundled_aggregations = (
+                    await self.relations_handler.get_bundled_aggregations(
+                        timeline_events, user.to_string()
+                    )
+                )
+
+            # Update the `prev_batch_token` to point to the position that allows us to
+            # keep paginating backwards from the oldest event we return in the timeline.
+            prev_batch_token = prev_batch_token.copy_and_replace(
+                StreamKeyType.ROOM, new_room_key
+            )
+
+        # Figure out any stripped state events for invites/knocks. This allows the
+        # potential joiner to identify the room.
+        stripped_state: List[JsonDict] = []
+        if rooms_membership_for_user_at_to_token.membership in (
+            Membership.INVITE,
+            Membership.KNOCK,
+        ):
+            # This should never happen. If someone is invited to or has knocked on a
+            # room, then there should be an event for it.
+            assert rooms_membership_for_user_at_to_token.event_id is not None
+
+            invite_or_knock_event = await self.store.get_event(
+                rooms_membership_for_user_at_to_token.event_id
+            )
+
+            stripped_state = []
+            if invite_or_knock_event.membership == Membership.INVITE:
+                stripped_state.extend(
+                    invite_or_knock_event.unsigned.get("invite_room_state", [])
+                )
+            elif invite_or_knock_event.membership == Membership.KNOCK:
+                stripped_state.extend(
+                    invite_or_knock_event.unsigned.get("knock_room_state", [])
+                )
+
+            stripped_state.append(strip_event(invite_or_knock_event))
+
+        # TODO: Handle state resets. For example, if we see
+        # `rooms_membership_for_user_at_to_token.membership = Membership.LEAVE` but
+        # `required_state` doesn't include it, we should indicate to the client that a
+        # state reset happened. Perhaps we should indicate this by setting `initial:
+        # True` and empty `required_state`.
+
+        # TODO: Since we can't determine whether we've already sent a room down this
+        # Sliding Sync connection before (we plan to add this optimization in the
+        # future), we're always returning the requested room state instead of
+        # updates.
+        initial = True
+
+        # Fetch the required state for the room
+        #
+        # No `required_state` for invite/knock rooms (just `stripped_state`)
+        #
+        # FIXME: It would be nice to make the `rooms` response more uniform regardless
+        # of membership. Currently, we have to make this optional because
+        # `invite`/`knock` rooms only have `stripped_state`. See
+        # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932
+        room_state: Optional[StateMap[EventBase]] = None
+        if rooms_membership_for_user_at_to_token.membership not in (
+            Membership.INVITE,
+            Membership.KNOCK,
+        ):
+            # Calculate the `StateFilter` based on the `required_state` for the room
+            state_filter: Optional[StateFilter] = StateFilter.none()
+            # If we have a double wildcard ("*", "*") in the `required_state`, we need
+            # to fetch all state for the room
+            #
+            # Note: MSC3575 describes different behavior to how we're handling things
+            # here but since it's not wrong to return more state than requested
+            # (`required_state` is just the minimum requested), it doesn't matter if we
+            # include more than the client wanted. This complexity is also under scrutiny,
+            # see
+            # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1185109050
+            #
+            # > One unique exception is when you request all state events via ["*", "*"]. When used,
+            # > all state events are returned by default, and additional entries FILTER OUT the returned set
+            # > of state events. These additional entries cannot use '*' themselves.
+            # > For example, ["*", "*"], ["m.room.member", "@alice:example.com"] will _exclude_ every m.room.member
+            # > event _except_ for @alice:example.com, and include every other state event.
+            # > In addition, ["*", "*"], ["m.space.child", "*"] is an error, the m.space.child filter is not
+            # > required as it would have been returned anyway.
+            # >
+            # > -- MSC3575 (https://github.com/matrix-org/matrix-spec-proposals/pull/3575)
+            if StateValues.WILDCARD in room_sync_config.required_state_map.get(
+                StateValues.WILDCARD, set()
+            ):
+                state_filter = StateFilter.all()
+            # TODO: `StateFilter` currently doesn't support wildcard event types. We're
+            # currently working around this by returning all state to the client but it
+            # would be nice to fetch less from the database and return just what the
+            # client wanted.
+            elif (
+                room_sync_config.required_state_map.get(StateValues.WILDCARD)
+                is not None
+            ):
+                state_filter = StateFilter.all()
+            else:
+                required_state_types: List[Tuple[str, Optional[str]]] = []
+                for (
+                    state_type,
+                    state_key_set,
+                ) in room_sync_config.required_state_map.items():
+                    for state_key in state_key_set:
+                        if state_key == StateValues.WILDCARD:
+                            # `None` is a wildcard in the `StateFilter`
+                            required_state_types.append((state_type, None))
+                        # We need to fetch all relevant people when we're lazy-loading membership
+                        elif (
+                            state_type == EventTypes.Member
+                            and state_key == StateValues.LAZY
+                        ):
+                            # Everyone in the timeline is relevant
+                            timeline_membership: Set[str] = set()
+                            if timeline_events is not None:
+                                for timeline_event in timeline_events:
+                                    timeline_membership.add(timeline_event.sender)
+
+                            for user_id in timeline_membership:
+                                required_state_types.append(
+                                    (EventTypes.Member, user_id)
+                                )
+
+                            # FIXME: We probably also care about invite, ban, kick, targets,
+                            # etc., but the spec only mentions "senders".
+                        else:
+                            required_state_types.append((state_type, state_key))
+
+                state_filter = StateFilter.from_types(required_state_types)
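+                # For example (illustrative values): a `required_state_map` of
+                # {"m.room.name": {""}, "m.room.member": {"*"}} becomes
+                # StateFilter.from_types([("m.room.name", ""), ("m.room.member", None)])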
+
+            # We can skip fetching state if we don't need any
+            if state_filter != StateFilter.none():
+                # We can return all of the state that was requested if we're doing an
+                # initial sync
+                if initial:
+                    # People shouldn't see past their leave/ban event
+                    if rooms_membership_for_user_at_to_token.membership in (
+                        Membership.LEAVE,
+                        Membership.BAN,
+                    ):
+                        room_state = await self.storage_controllers.state.get_state_at(
+                            room_id,
+                            stream_position=to_token.copy_and_replace(
+                                StreamKeyType.ROOM,
+                                rooms_membership_for_user_at_to_token.event_pos.to_room_stream_token(),
+                            ),
+                            state_filter=state_filter,
+                            # Partially-stated rooms should have all state events except for
+                            # the membership events and since we've already excluded
+                            # partially-stated rooms unless `required_state` only has
+                            # `["m.room.member", "$LAZY"]` for membership, we should be able
+                            # to retrieve everything requested. Plus we don't want to block
+                            # the whole sync waiting for this one room.
+                            await_full_state=False,
+                        )
+                    # Otherwise, we can get the latest current state in the room
+                    else:
+                        room_state = await self.storage_controllers.state.get_current_state(
+                            room_id,
+                            state_filter,
+                            # Partially-stated rooms should have all state events except for
+                            # the membership events and since we've already excluded
+                            # partially-stated rooms unless `required_state` only has
+                            # `["m.room.member", "$LAZY"]` for membership, we should be able
+                            # to retrieve everything requested. Plus we don't want to block
+                            # the whole sync waiting for this one room.
+                            await_full_state=False,
+                        )
+                        # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token`
+                else:
+                    # TODO: Once we can figure out if we've sent a room down this connection before,
+                    # we can return updates instead of the full required state.
+                    raise NotImplementedError()
+
+        return SlidingSyncResult.RoomResult(
+            # TODO: Dummy value
+            name=None,
+            # TODO: Dummy value
+            avatar=None,
+            # TODO: Dummy value
+            heroes=None,
+            # TODO: Dummy value
+            is_dm=False,
+            initial=initial,
+            required_state=list(room_state.values()) if room_state else None,
+            timeline_events=timeline_events,
+            bundled_aggregations=bundled_aggregations,
+            stripped_state=stripped_state,
+            prev_batch=prev_batch_token,
+            limited=limited,
+            num_live=num_live,
+            # TODO: Dummy values
+            joined_count=0,
+            invited_count=0,
+            # TODO: These are just dummy values. We could potentially just remove these
+            # since notifications can only really be done correctly on the client anyway
+            # (encrypted rooms).
+            notification_count=0,
+            highlight_count=0,
+        )
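The timeline block in `get_room_sync_data` above relies on the fetch-one-extra trick: request `timeline_limit + 1` events and, if that many come back, mark the response `limited` and drop the probe event. A standalone sketch of that pattern (not Synapse code):

```python
from typing import List, Tuple

def paginate_backwards(events: List[int], limit: int) -> Tuple[List[int], bool]:
    # Fetch one event beyond the limit; receiving it proves there is more
    # history to paginate, which is what `limited` reports to the client.
    page = events[-(limit + 1):]
    limited = len(page) > limit
    if limited:
        # Drop the probe event; it was only fetched to detect the limit.
        page = page[-limit:]
    return page, limited

events = list(range(10))  # stand-ins for events in stream_ordering order
page, limited = paginate_backwards(events, 3)
assert page == [7, 8, 9] and limited
```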
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index e2563428d2..de227faec3 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1352,7 +1352,7 @@ class SyncHandler:
             await_full_state = True
             lazy_load_members = False
 
-        state_at_timeline_end = await self._state_storage_controller.get_state_at(
+        state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=end_token,
             state_filter=state_filter,
@@ -1480,11 +1480,13 @@ class SyncHandler:
         else:
             # We can get here if the user has ignored the senders of all
             # the recent events.
-            state_at_timeline_start = await self._state_storage_controller.get_state_at(
-                room_id,
-                stream_position=end_token,
-                state_filter=state_filter,
-                await_full_state=await_full_state,
+            state_at_timeline_start = (
+                await self._state_storage_controller.get_state_ids_at(
+                    room_id,
+                    stream_position=end_token,
+                    state_filter=state_filter,
+                    await_full_state=await_full_state,
+                )
             )
 
         if batch.limited:
@@ -1502,14 +1504,14 @@ class SyncHandler:
             # about them).
             state_filter = StateFilter.all()
 
-        state_at_previous_sync = await self._state_storage_controller.get_state_at(
+        state_at_previous_sync = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=since_token,
             state_filter=state_filter,
             await_full_state=await_full_state,
         )
 
-        state_at_timeline_end = await self._state_storage_controller.get_state_at(
+        state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=end_token,
             state_filter=state_filter,
@@ -2508,7 +2510,7 @@ class SyncHandler:
                 continue
 
             if room_id in sync_result_builder.joined_room_ids or has_join:
-                old_state_ids = await self._state_storage_controller.get_state_at(
+                old_state_ids = await self._state_storage_controller.get_state_ids_at(
                     room_id,
                     since_token,
                     state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]),
@@ -2539,7 +2541,7 @@ class SyncHandler:
                 else:
                     if not old_state_ids:
                         old_state_ids = (
-                            await self._state_storage_controller.get_state_at(
+                            await self._state_storage_controller.get_state_ids_at(
                                 room_id,
                                 since_token,
                                 state_filter=StateFilter.from_types(
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 4718517c97..56ad28eabf 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -35,6 +35,8 @@ from typing import (
     Union,
 )
 
+import attr
+import multipart
 import treq
 from canonicaljson import encode_canonical_json
 from netaddr import AddrFormatError, IPAddress, IPSet
@@ -1006,6 +1008,130 @@ class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
         self._maybe_fail()
 
 
+@attr.s(auto_attribs=True, slots=True)
+class MultipartResponse:
+    """
+    A small class to hold parsed values of a multipart response.
+    """
+
+    json: bytes = b"{}"
+    length: Optional[int] = None
+    content_type: Optional[bytes] = None
+    disposition: Optional[bytes] = None
+    url: Optional[bytes] = None
+
+
+class _MultipartParserProtocol(protocol.Protocol):
+    """
+    Protocol to read and parse a MSC3916 multipart/mixed response
+    """
+
+    transport: Optional[ITCPTransport] = None
+
+    def __init__(
+        self,
+        stream: ByteWriteable,
+        deferred: defer.Deferred,
+        boundary: str,
+        max_length: Optional[int],
+    ) -> None:
+        self.stream = stream
+        self.deferred = deferred
+        self.boundary = boundary
+        self.max_length = max_length
+        self.parser = None
+        self.multipart_response = MultipartResponse()
+        self.has_redirect = False
+        self.in_json = False
+        self.json_done = False
+        self.file_length = 0
+        self.total_length = 0
+        self.in_disposition = False
+        self.in_content_type = False
+
+    def dataReceived(self, incoming_data: bytes) -> None:
+        if self.deferred.called:
+            return
+
+        # we don't have a parser yet, instantiate it
+        if not self.parser:
+
+            def on_header_field(data: bytes, start: int, end: int) -> None:
+                if data[start:end] == b"Location":
+                    self.has_redirect = True
+                if data[start:end] == b"Content-Disposition":
+                    self.in_disposition = True
+                if data[start:end] == b"Content-Type":
+                    self.in_content_type = True
+
+            def on_header_value(data: bytes, start: int, end: int) -> None:
+                # the first header should be content-type for application/json
+                if not self.in_json and not self.json_done:
+                    assert data[start:end] == b"application/json"
+                    self.in_json = True
+                elif self.has_redirect:
+                    self.multipart_response.url = data[start:end]
+                elif self.in_content_type:
+                    self.multipart_response.content_type = data[start:end]
+                    self.in_content_type = False
+                elif self.in_disposition:
+                    self.multipart_response.disposition = data[start:end]
+                    self.in_disposition = False
+
+            def on_part_data(data: bytes, start: int, end: int) -> None:
+                # we've seen json header but haven't written the json data
+                if self.in_json and not self.json_done:
+                    self.multipart_response.json = data[start:end]
+                    self.json_done = True
+                # we have a redirect header rather than a file, and have already captured it
+                elif self.has_redirect:
+                    return
+                # otherwise we are in the file part
+                else:
+                    logger.info("Writing multipart file data to stream")
+                    try:
+                        self.stream.write(data[start:end])
+                    except Exception as e:
+                        logger.warning(
+                            f"Exception encountered writing file data to stream: {e}"
+                        )
+                        self.deferred.errback()
+                    self.file_length += end - start
+
+            callbacks = {
+                "on_header_field": on_header_field,
+                "on_header_value": on_header_value,
+                "on_part_data": on_part_data,
+            }
+            self.parser = multipart.MultipartParser(self.boundary, callbacks)
+
+        self.total_length += len(incoming_data)
+        if self.max_length is not None and self.total_length >= self.max_length:
+            self.deferred.errback(BodyExceededMaxSize())
+            # Close the connection (forcefully) since all the data will get
+            # discarded anyway.
+            assert self.transport is not None
+            self.transport.abortConnection()
+
+        try:
+            self.parser.write(incoming_data)  # type: ignore[attr-defined]
+        except Exception as e:
+            logger.warning(f"Exception writing to multipart parser: {e}")
+            self.deferred.errback()
+            return
+
+    def connectionLost(self, reason: Failure = connectionDone) -> None:
+        # If the maximum size was already exceeded, there's nothing to do.
+        if self.deferred.called:
+            return
+
+        if reason.check(ResponseDone):
+            self.multipart_response.length = self.file_length
+            self.deferred.callback(self.multipart_response)
+        else:
+            self.deferred.errback(reason)
+
+
 class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
     """A protocol which reads body to a stream, erroring if the body exceeds a maximum size."""
 
@@ -1091,6 +1217,32 @@ def read_body_with_max_size(
     return d
 
 
+def read_multipart_response(
+    response: IResponse, stream: ByteWriteable, boundary: str, max_length: Optional[int]
+) -> "defer.Deferred[MultipartResponse]":
+    """
+    Reads a MSC3916 multipart/mixed response and parses it, reading the file part (if it contains one) into
+    the stream passed in and returning a deferred resolving to a MultipartResponse
+
+    Args:
+        response: The HTTP response to read from.
+        stream: The file-object to write to.
+        boundary: the multipart/mixed boundary string
+        max_length: maximum allowable length of the response
+    """
+    d: defer.Deferred[MultipartResponse] = defer.Deferred()
+
+    # If the Content-Length header gives a size larger than the maximum allowed
+    # size, do not bother downloading the body.
+    if max_length is not None and response.length != UNKNOWN_LENGTH:
+        if response.length > max_length:
+            response.deliverBody(_DiscardBodyWithMaxSizeProtocol(d))
+            return d
+
+    response.deliverBody(_MultipartParserProtocol(stream, d, boundary, max_length))
+    return d
+
+
 def encode_query_args(args: Optional[QueryParams]) -> bytes:
     """
     Encodes a map of query arguments to bytes which can be appended to a URL.
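To make the callback flow in `_MultipartParserProtocol` concrete, here is a minimal, self-contained parse using the same `multipart.MultipartParser(boundary, callbacks)` API instantiated above; the body bytes and boundary are made up for illustration.

```python
import multipart

body = (
    b"--abc\r\n"
    b"Content-Type: application/json\r\n"
    b"\r\n"
    b"{}\r\n"
    b"--abc\r\n"
    b"Content-Type: text/plain\r\n"
    b"Content-Disposition: inline; filename=hello.txt\r\n"
    b"\r\n"
    b"hello\r\n"
    b"--abc--\r\n"
)

parts = []

def on_part_data(data: bytes, start: int, end: int) -> None:
    # The parser hands back (buffer, start, end) slices rather than copies,
    # which is why the protocol above writes `data[start:end]`.
    parts.append(data[start:end])

parser = multipart.MultipartParser("abc", {"on_part_data": on_part_data})
parser.write(body)
assert b"".join(parts) == b"{}hello"
```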
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 104b803b0f..749b01dd0e 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -75,9 +75,11 @@ from synapse.http.client import (
     BlocklistingAgentWrapper,
     BodyExceededMaxSize,
     ByteWriteable,
+    SimpleHttpClient,
     _make_scheduler,
     encode_query_args,
     read_body_with_max_size,
+    read_multipart_response,
 )
 from synapse.http.connectproxyclient import BearerProxyCredentials
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
@@ -466,6 +468,13 @@ class MatrixFederationHttpClient:
 
         self._sleeper = AwakenableSleeper(self.reactor)
 
+        self._simple_http_client = SimpleHttpClient(
+            hs,
+            ip_blocklist=hs.config.server.federation_ip_range_blocklist,
+            ip_allowlist=hs.config.server.federation_ip_range_allowlist,
+            use_proxy=True,
+        )
+
     def wake_destination(self, destination: str) -> None:
         """Called when the remote server may have come back online."""
 
@@ -1553,6 +1562,189 @@ class MatrixFederationHttpClient:
         )
         return length, headers
 
+    async def federation_get_file(
+        self,
+        destination: str,
+        path: str,
+        output_stream: BinaryIO,
+        download_ratelimiter: Ratelimiter,
+        ip_address: str,
+        max_size: int,
+        args: Optional[QueryParams] = None,
+        retry_on_dns_fail: bool = True,
+        ignore_backoff: bool = False,
+    ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
+        """GETs a file from a given homeserver over the federation /download endpoint
+        Args:
+            destination: The remote server to send the HTTP request to.
+            path: The HTTP path to GET.
+            output_stream: File to write the response body to.
+            download_ratelimiter: a ratelimiter to limit remote media downloads, keyed to
+                requester IP
+            ip_address: IP address of the requester
+            max_size: maximum allowable size in bytes of the file
+            args: Optional dictionary used to create the query string.
+            ignore_backoff: true to ignore the historical backoff data
+                and try the request anyway.
+
+        Returns:
+            Resolves to an (int, dict, bytes) tuple of
+            the file length, a dict of the response headers, and the file json
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts etc.
+            SynapseError: If the requested file exceeds ratelimits or the response from the
+                remote server is not a multipart response
+            AssertionError: if the resolved multipart response's length is None
+        """
+        request = MatrixFederationRequest(
+            method="GET", destination=destination, path=path, query=args
+        )
+
+        # check for a minimum balance of 1MiB in ratelimiter before initiating request
+        send_req, _ = await download_ratelimiter.can_do_action(
+            requester=None, key=ip_address, n_actions=1048576, update=False
+        )
+
+        if not send_req:
+            msg = "Requested file size exceeds ratelimits"
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+
+        response = await self._send_request(
+            request,
+            retry_on_dns_fail=retry_on_dns_fail,
+            ignore_backoff=ignore_backoff,
+        )
+
+        headers = dict(response.headers.getAllRawHeaders())
+
+        expected_size = response.length
+        # if we don't get an expected length then use the max length
+        if expected_size == UNKNOWN_LENGTH:
+            expected_size = max_size
+            logger.debug(
+                f"File size unknown, assuming file is max allowable size: {max_size}"
+            )
+
+        read_body, _ = await download_ratelimiter.can_do_action(
+            requester=None,
+            key=ip_address,
+            n_actions=expected_size,
+        )
+        if not read_body:
+            msg = "Requested file size exceeds ratelimits"
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+
+        # this should be a multipart/mixed response with the boundary string in the header
+        try:
+            raw_content_type = headers.get(b"Content-Type")
+            assert raw_content_type is not None
+            content_type = raw_content_type[0].decode("UTF-8")
+            content_type_parts = content_type.split("boundary=")
+            boundary = content_type_parts[1]
+        except Exception:
+            msg = "Remote response is malformed: expected Content-Type of multipart/mixed with a boundary present."
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg)
+
+        try:
+            # add a byte of headroom to the max size, as
+            # `_MultipartParserProtocol.dataReceived` errors once the received
+            # length is >= max_length
+            deferred = read_multipart_response(
+                response, output_stream, boundary, expected_size + 1
+            )
+            deferred.addTimeout(self.default_timeout_seconds, self.reactor)
+        except BodyExceededMaxSize:
+            msg = "Requested file is too large > %r bytes" % (expected_size,)
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
+        except defer.TimeoutError as e:
+            logger.warning(
+                "{%s} [%s] Timed out reading response - %s %s",
+                request.txn_id,
+                request.destination,
+                request.method,
+                request.uri.decode("ascii"),
+            )
+            raise RequestSendFailed(e, can_retry=True) from e
+        except ResponseFailed as e:
+            logger.warning(
+                "{%s} [%s] Failed to read response - %s %s",
+                request.txn_id,
+                request.destination,
+                request.method,
+                request.uri.decode("ascii"),
+            )
+            raise RequestSendFailed(e, can_retry=True) from e
+        except Exception as e:
+            logger.warning(
+                "{%s} [%s] Error reading response: %s",
+                request.txn_id,
+                request.destination,
+                e,
+            )
+            raise
+
+        multipart_response = await make_deferred_yieldable(deferred)
+        if not multipart_response.url:
+            assert multipart_response.length is not None
+            length = multipart_response.length
+            headers[b"Content-Type"] = [multipart_response.content_type]
+            headers[b"Content-Disposition"] = [multipart_response.disposition]
+
+        # the response contained a redirect url to download the file from
+        else:
+            str_url = multipart_response.url.decode("utf-8")
+            logger.info(
+                "{%s} [%s] File download redirected, now downloading from: %s",
+                request.txn_id,
+                request.destination,
+                str_url,
+            )
+            length, headers, _, _ = await self._simple_http_client.get_file(
+                str_url, output_stream, expected_size
+            )
+
+        logger.info(
+            "{%s} [%s] Completed: %d %s [%d bytes] %s %s",
+            request.txn_id,
+            request.destination,
+            response.code,
+            response.phrase.decode("ascii", errors="replace"),
+            length,
+            request.method,
+            request.uri.decode("ascii"),
+        )
+        return length, headers, multipart_response.json
+
 
 def _flatten_response_never_received(e: BaseException) -> str:
     if hasattr(e, "reasons"):
diff --git a/synapse/media/_base.py b/synapse/media/_base.py
index 7ad0b7c3cf..1b268ce4d4 100644
--- a/synapse/media/_base.py
+++ b/synapse/media/_base.py
@@ -221,6 +221,7 @@ def add_file_headers(
     # select private. don't bother setting Expires as all our
     # clients are smart enough to be happy with Cache-Control
     request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
+
     if file_size is not None:
         request.setHeader(b"Content-Length", b"%d" % (file_size,))
 
@@ -302,12 +303,37 @@ async def respond_with_multipart_responder(
             )
             return
 
+        if media_info.media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES:
+            disposition = "inline"
+        else:
+            disposition = "attachment"
+
+        def _quote(x: str) -> str:
+            return urllib.parse.quote(x.encode("utf-8"))
+
+        if media_info.upload_name:
+            if _can_encode_filename_as_token(media_info.upload_name):
+                disposition = "%s; filename=%s" % (
+                    disposition,
+                    media_info.upload_name,
+                )
+            else:
+                disposition = "%s; filename*=utf-8''%s" % (
+                    disposition,
+                    _quote(media_info.upload_name),
+                )
+
         from synapse.media.media_storage import MultipartFileConsumer
 
         # note that currently the json_object is just {}, this will change when linked media
         # is implemented
         multipart_consumer = MultipartFileConsumer(
-            clock, request, media_info.media_type, {}, media_info.media_length
+            clock,
+            request,
+            media_info.media_type,
+            {},
+            disposition,
+            media_info.media_length,
         )
 
         logger.debug("Responding to media request with responder %s", responder)
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index 1436329fad..87c929eb20 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -480,6 +480,7 @@ class MediaRepository:
         name: Optional[str],
         max_timeout_ms: int,
         ip_address: str,
+        use_federation_endpoint: bool,
     ) -> None:
         """Respond to requests for remote media.
 
@@ -492,6 +493,8 @@ class MediaRepository:
             max_timeout_ms: the maximum number of milliseconds to wait for the
                 media to be uploaded.
             ip_address: the IP address of the requester
+            use_federation_endpoint: whether to request the remote media over the new
+                federation `/download` endpoint
 
         Returns:
             Resolves once a response has successfully been written to request
@@ -522,6 +525,7 @@ class MediaRepository:
                 max_timeout_ms,
                 self.download_ratelimiter,
                 ip_address,
+                use_federation_endpoint,
             )
 
         # We deliberately stream the file outside the lock
@@ -538,7 +542,12 @@ class MediaRepository:
             respond_404(request)
 
     async def get_remote_media_info(
-        self, server_name: str, media_id: str, max_timeout_ms: int, ip_address: str
+        self,
+        server_name: str,
+        media_id: str,
+        max_timeout_ms: int,
+        ip_address: str,
+        use_federation: bool,
     ) -> RemoteMedia:
         """Gets the media info associated with the remote file, downloading
         if necessary.
@@ -549,6 +558,8 @@ class MediaRepository:
             max_timeout_ms: the maximum number of milliseconds to wait for the
                 media to be uploaded.
             ip_address: IP address of the requester
+            use_federation: if a download is necessary, whether to request the remote file
+                over the federation `/download` endpoint
 
         Returns:
             The media info of the file
@@ -569,6 +580,7 @@ class MediaRepository:
                 max_timeout_ms,
                 self.download_ratelimiter,
                 ip_address,
+                use_federation,
             )
 
         # Ensure we actually use the responder so that it releases resources
@@ -585,6 +597,7 @@ class MediaRepository:
         max_timeout_ms: int,
         download_ratelimiter: Ratelimiter,
         ip_address: str,
+        use_federation_endpoint: bool,
     ) -> Tuple[Optional[Responder], RemoteMedia]:
         """Looks for media in local cache, if not there then attempt to
         download from remote server.
@@ -598,6 +611,8 @@ class MediaRepository:
             download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
                 requester IP.
             ip_address: the IP address of the requester
+            use_federation_endpoint: whether to request the remote media over the new
+                federation /download endpoint
 
         Returns:
             A tuple of responder and the media info of the file.
@@ -629,9 +644,23 @@ class MediaRepository:
         # Failed to find the file anywhere, lets download it.
 
         try:
-            media_info = await self._download_remote_file(
-                server_name, media_id, max_timeout_ms, download_ratelimiter, ip_address
-            )
+            if not use_federation_endpoint:
+                media_info = await self._download_remote_file(
+                    server_name,
+                    media_id,
+                    max_timeout_ms,
+                    download_ratelimiter,
+                    ip_address,
+                )
+            else:
+                media_info = await self._federation_download_remote_file(
+                    server_name,
+                    media_id,
+                    max_timeout_ms,
+                    download_ratelimiter,
+                    ip_address,
+                )
+
         except SynapseError:
             raise
         except Exception as e:
@@ -775,6 +804,129 @@ class MediaRepository:
             quarantined_by=None,
         )
 
+    async def _federation_download_remote_file(
+        self,
+        server_name: str,
+        media_id: str,
+        max_timeout_ms: int,
+        download_ratelimiter: Ratelimiter,
+        ip_address: str,
+    ) -> RemoteMedia:
+        """Attempt to download the remote file from the given server name.
+        Uses the given file_id as the local id and downloads the file over the federation
+        v1 download endpoint
+
+        Args:
+            server_name: Originating server
+            media_id: The media ID of the content (as defined by the
+                remote server). This is different than the file_id, which is
+                locally generated.
+            max_timeout_ms: the maximum number of milliseconds to wait for the
+                media to be uploaded.
+            download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
+                requester IP
+            ip_address: the IP address of the requester
+
+        Returns:
+            The media info of the file.
+        """
+
+        file_id = random_string(24)
+
+        file_info = FileInfo(server_name=server_name, file_id=file_id)
+
+        async with self.media_storage.store_into_file(file_info) as (f, fname):
+            try:
+                res = await self.client.federation_download_media(
+                    server_name,
+                    media_id,
+                    output_stream=f,
+                    max_size=self.max_upload_size,
+                    max_timeout_ms=max_timeout_ms,
+                    download_ratelimiter=download_ratelimiter,
+                    ip_address=ip_address,
+                )
+                # if we had to fall back to the _matrix/media endpoint it will only return
+                # the length and headers; check the length of the tuple before unpacking
+                if len(res) == 3:
+                    length, headers, json = res
+                else:
+                    length, headers = res
+            except RequestSendFailed as e:
+                logger.warning(
+                    "Request failed fetching remote media %s/%s: %r",
+                    server_name,
+                    media_id,
+                    e,
+                )
+                raise SynapseError(502, "Failed to fetch remote media")
+
+            except HttpResponseException as e:
+                logger.warning(
+                    "HTTP error fetching remote media %s/%s: %s",
+                    server_name,
+                    media_id,
+                    e.response,
+                )
+                if e.code == twisted.web.http.NOT_FOUND:
+                    raise e.to_synapse_error()
+                raise SynapseError(502, "Failed to fetch remote media")
+
+            except SynapseError:
+                logger.warning(
+                    "Failed to fetch remote media %s/%s", server_name, media_id
+                )
+                raise
+            except NotRetryingDestination:
+                logger.warning("Not retrying destination %r", server_name)
+                raise SynapseError(502, "Failed to fetch remote media")
+            except Exception:
+                logger.exception(
+                    "Failed to fetch remote media %s/%s", server_name, media_id
+                )
+                raise SynapseError(502, "Failed to fetch remote media")
+
+            if b"Content-Type" in headers:
+                media_type = headers[b"Content-Type"][0].decode("ascii")
+            else:
+                media_type = "application/octet-stream"
+            upload_name = get_filename_from_headers(headers)
+            time_now_ms = self.clock.time_msec()
+
+            # Multiple remote media download requests can race (when using
+            # multiple media repos), so this may throw a constraint violation
+            # exception. If it does we'll delete the newly downloaded file from
+            # disk (as we're in the ctx manager).
+            #
+            # However: we've already called `finish()` so we may have also
+            # written to the storage providers. This is preferable to the
+            # alternative where we call `finish()` *after* this, where we could
+            # end up having an entry in the DB but fail to write the files to
+            # the storage providers.
+            await self.store.store_cached_remote_media(
+                origin=server_name,
+                media_id=media_id,
+                media_type=media_type,
+                time_now_ms=time_now_ms,
+                upload_name=upload_name,
+                media_length=length,
+                filesystem_id=file_id,
+            )
+
+        logger.debug("Stored remote media in file %r", fname)
+
+        return RemoteMedia(
+            media_origin=server_name,
+            media_id=media_id,
+            media_type=media_type,
+            media_length=length,
+            upload_name=upload_name,
+            created_ts=time_now_ms,
+            filesystem_id=file_id,
+            last_access_ts=time_now_ms,
+            quarantined_by=None,
+        )
+
     def _get_thumbnail_requirements(
         self, media_type: str
     ) -> Tuple[ThumbnailRequirement, ...]:
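`_federation_download_remote_file` above dispatches on the arity of the client's return value: the federation endpoint yields `(length, headers, json)` while a legacy `_matrix/media` fallback yields only `(length, headers)`. A minimal illustration of that unpacking (values made up):

```python
res = (1024, {b"Content-Type": [b"image/png"]}, b"{}")

if len(res) == 3:
    length, headers, json_bytes = res
else:
    length, headers = res
    json_bytes = b"{}"  # no json part on the legacy path

assert (length, json_bytes) == (1024, b"{}")
```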
diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py
index 1be2c9b5f5..2a106bb0eb 100644
--- a/synapse/media/media_storage.py
+++ b/synapse/media/media_storage.py
@@ -401,13 +401,14 @@ class MultipartFileConsumer:
         wrapped_consumer: interfaces.IConsumer,
         file_content_type: str,
         json_object: JsonDict,
-        content_length: Optional[int] = None,
+        disposition: str,
+        content_length: Optional[int],
     ) -> None:
         self.clock = clock
         self.wrapped_consumer = wrapped_consumer
         self.json_field = json_object
         self.json_field_written = False
-        self.content_type_written = False
+        self.file_headers_written = False
         self.file_content_type = file_content_type
         self.boundary = uuid4().hex.encode("ascii")
 
@@ -420,6 +421,7 @@ class MultipartFileConsumer:
         self.paused = False
 
         self.length = content_length
+        self.disposition = disposition
 
     ### IConsumer APIs ###
 
@@ -488,11 +490,13 @@ class MultipartFileConsumer:
             self.json_field_written = True
 
         # if we haven't written the content type yet, do so
-        if not self.content_type_written:
+        if not self.file_headers_written:
             type = self.file_content_type.encode("utf-8")
             content_type = Header(b"Content-Type", type)
-            self.wrapped_consumer.write(bytes(content_type) + CRLF + CRLF)
-            self.content_type_written = True
+            self.wrapped_consumer.write(bytes(content_type) + CRLF)
+            disp_header = Header(b"Content-Disposition", self.disposition)
+            self.wrapped_consumer.write(bytes(disp_header) + CRLF + CRLF)
+            self.file_headers_written = True
 
         self.wrapped_consumer.write(data)
 
@@ -506,7 +510,6 @@ class MultipartFileConsumer:
         producing data for good.
         """
         assert self.producer is not None
-
         self.paused = True
         self.producer.stopProducing()
 
@@ -518,7 +521,6 @@ class MultipartFileConsumer:
         the time being, and to stop until C{resumeProducing()} is called.
         """
         assert self.producer is not None
-
         self.paused = True
 
         if self.streaming:
@@ -549,7 +551,7 @@ class MultipartFileConsumer:
         """
         if not self.length:
             return None
-        # calculate length of json field and content-type header
+        # calculate length of json field and content-type, disposition headers
         json_field = json.dumps(self.json_field)
         json_bytes = json_field.encode("utf-8")
         json_length = len(json_bytes)
@@ -558,9 +560,13 @@ class MultipartFileConsumer:
         content_type = Header(b"Content-Type", type)
         type_length = len(bytes(content_type))
 
-        # 154 is the length of the elements that aren't variable, ie
+        disp = self.disposition.encode("utf-8")
+        disp_header = Header(b"Content-Disposition", disp)
+        disp_length = len(bytes(disp_header))
+
+        # 156 is the length of the elements that aren't variable, ie
         # CRLFs and boundary strings, etc
-        self.length += json_length + type_length + 154
+        self.length += json_length + type_length + disp_length + 156
 
         return self.length
 
@@ -569,7 +575,6 @@ class MultipartFileConsumer:
     async def _resumeProducingRepeatedly(self) -> None:
         assert self.producer is not None
         assert not self.streaming
-
         producer = cast("interfaces.IPullProducer", self.producer)
 
         self.paused = False
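`content_length` in `MultipartFileConsumer` precomputes the full multipart body size: the file bytes plus the JSON part, the two file headers, and a fixed 156 bytes of boundary lines and CRLFs. A rough sketch of that arithmetic (the header rendering below approximates Synapse's internal `Header` class):

```python
import json
from typing import Optional

FIXED_FRAMING_OVERHEAD = 156  # boundary lines and CRLFs, per the code above

def multipart_content_length(
    json_object: dict,
    file_content_type: str,
    disposition: str,
    file_length: Optional[int],
) -> Optional[int]:
    if not file_length:
        return None
    json_length = len(json.dumps(json_object).encode("utf-8"))
    # Approximate rendered header lengths: b"Name: value"
    type_length = len(b"Content-Type: " + file_content_type.encode("utf-8"))
    disp_length = len(b"Content-Disposition: " + disposition.encode("utf-8"))
    return file_length + json_length + type_length + disp_length + FIXED_FRAMING_OVERHEAD

print(multipart_content_length({}, "image/png", "inline; filename=cat.png", 1024))
```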
diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py
index f8a9560784..413a720e40 100644
--- a/synapse/media/thumbnailer.py
+++ b/synapse/media/thumbnailer.py
@@ -36,9 +36,11 @@ from synapse.media._base import (
     ThumbnailInfo,
     respond_404,
     respond_with_file,
+    respond_with_multipart_responder,
     respond_with_responder,
 )
-from synapse.media.media_storage import MediaStorage
+from synapse.media.media_storage import FileResponder, MediaStorage
+from synapse.storage.databases.main.media_repository import LocalMedia
 
 if TYPE_CHECKING:
     from synapse.media.media_repository import MediaRepository
@@ -271,6 +273,7 @@ class ThumbnailProvider:
         method: str,
         m_type: str,
         max_timeout_ms: int,
+        for_federation: bool,
     ) -> None:
         media_info = await self.media_repo.get_local_media_info(
             request, media_id, max_timeout_ms
@@ -290,6 +293,8 @@ class ThumbnailProvider:
             media_id,
             url_cache=bool(media_info.url_cache),
             server_name=None,
+            for_federation=for_federation,
+            media_info=media_info,
         )
 
     async def select_or_generate_local_thumbnail(
@@ -301,6 +306,7 @@ class ThumbnailProvider:
         desired_method: str,
         desired_type: str,
         max_timeout_ms: int,
+        for_federation: bool,
     ) -> None:
         media_info = await self.media_repo.get_local_media_info(
             request, media_id, max_timeout_ms
@@ -326,10 +332,16 @@ class ThumbnailProvider:
 
                 responder = await self.media_storage.fetch_media(file_info)
                 if responder:
-                    await respond_with_responder(
-                        request, responder, info.type, info.length
-                    )
-                    return
+                    if for_federation:
+                        await respond_with_multipart_responder(
+                            self.hs.get_clock(), request, responder, media_info
+                        )
+                        return
+                    else:
+                        await respond_with_responder(
+                            request, responder, info.type, info.length
+                        )
+                        return
 
         logger.debug("We don't have a thumbnail of that size. Generating")
 
@@ -344,7 +356,15 @@ class ThumbnailProvider:
         )
 
         if file_path:
-            await respond_with_file(request, desired_type, file_path)
+            if for_federation:
+                await respond_with_multipart_responder(
+                    self.hs.get_clock(),
+                    request,
+                    FileResponder(open(file_path, "rb")),
+                    media_info,
+                )
+            else:
+                await respond_with_file(request, desired_type, file_path)
         else:
             logger.warning("Failed to generate thumbnail")
             raise SynapseError(400, "Failed to generate thumbnail.")
@@ -360,9 +380,10 @@ class ThumbnailProvider:
         desired_type: str,
         max_timeout_ms: int,
         ip_address: str,
+        use_federation: bool,
     ) -> None:
         media_info = await self.media_repo.get_remote_media_info(
-            server_name, media_id, max_timeout_ms, ip_address
+            server_name, media_id, max_timeout_ms, ip_address, use_federation
         )
         if not media_info:
             respond_404(request)
@@ -424,12 +445,13 @@ class ThumbnailProvider:
         m_type: str,
         max_timeout_ms: int,
         ip_address: str,
+        use_federation: bool,
     ) -> None:
         # TODO: Don't download the whole remote file
         # We should proxy the thumbnail from the remote server instead of
         # downloading the remote file and generating our own thumbnails.
         media_info = await self.media_repo.get_remote_media_info(
-            server_name, media_id, max_timeout_ms, ip_address
+            server_name, media_id, max_timeout_ms, ip_address, use_federation
         )
         if not media_info:
             return
@@ -448,6 +470,7 @@ class ThumbnailProvider:
             media_info.filesystem_id,
             url_cache=False,
             server_name=server_name,
+            for_federation=False,
         )
 
     async def _select_and_respond_with_thumbnail(
@@ -461,7 +484,9 @@ class ThumbnailProvider:
         media_id: str,
         file_id: str,
         url_cache: bool,
+        for_federation: bool,
         server_name: Optional[str] = None,
+        media_info: Optional[LocalMedia] = None,
     ) -> None:
         """
         Respond to a request with an appropriate thumbnail from the previously generated thumbnails.
@@ -476,6 +501,8 @@ class ThumbnailProvider:
             file_id: The ID of the media that a thumbnail is being requested for.
             url_cache: True if this is from a URL cache.
             server_name: The server name, if this is a remote thumbnail.
+            for_federation: whether this is a federation /thumbnail request
+            media_info: metadata about the media being requested.
         """
         logger.debug(
             "_select_and_respond_with_thumbnail: media_id=%s desired=%sx%s (%s) thumbnail_infos=%s",
@@ -511,13 +538,20 @@ class ThumbnailProvider:
 
             responder = await self.media_storage.fetch_media(file_info)
             if responder:
-                await respond_with_responder(
-                    request,
-                    responder,
-                    file_info.thumbnail.type,
-                    file_info.thumbnail.length,
-                )
-                return
+                if for_federation:
+                    assert media_info is not None
+                    await respond_with_multipart_responder(
+                        self.hs.get_clock(), request, responder, media_info
+                    )
+                    return
+                else:
+                    await respond_with_responder(
+                        request,
+                        responder,
+                        file_info.thumbnail.type,
+                        file_info.thumbnail.length,
+                    )
+                    return
 
             # If we can't find the thumbnail we regenerate it. This can happen
             # if e.g. we've deleted the thumbnails but still have the original
@@ -558,12 +592,18 @@ class ThumbnailProvider:
                 )
 
             responder = await self.media_storage.fetch_media(file_info)
-            await respond_with_responder(
-                request,
-                responder,
-                file_info.thumbnail.type,
-                file_info.thumbnail.length,
-            )
+            if for_federation:
+                assert media_info is not None
+                await respond_with_multipart_responder(
+                    self.hs.get_clock(), request, responder, media_info
+                )
+            else:
+                await respond_with_responder(
+                    request,
+                    responder,
+                    file_info.thumbnail.type,
+                    file_info.thumbnail.length,
+                )
         else:
             # This might be because:
             # 1. We can't create thumbnails for the given media (corrupted or
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 0024ccf708..c94d454a28 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -145,6 +145,10 @@ class ClientRestResource(JsonResource):
         password_policy.register_servlets(hs, client_resource)
         knock.register_servlets(hs, client_resource)
         appservice_ping.register_servlets(hs, client_resource)
+        if hs.config.server.enable_media_repo:
+            from synapse.rest.client import media
+
+            media.register_servlets(hs, client_resource)
 
         # moving to /_synapse/admin
         if is_main_process:
diff --git a/synapse/rest/admin/experimental_features.py b/synapse/rest/admin/experimental_features.py
index c5a00c490c..d7913896d9 100644
--- a/synapse/rest/admin/experimental_features.py
+++ b/synapse/rest/admin/experimental_features.py
@@ -31,7 +31,9 @@ from synapse.rest.admin import admin_patterns, assert_requester_is_admin
 from synapse.types import JsonDict, UserID
 
 if TYPE_CHECKING:
-    from synapse.server import HomeServer
+    from typing_extensions import assert_never
+
+    from synapse.server import HomeServer, HomeServerConfig
 
 
 class ExperimentalFeature(str, Enum):
@@ -39,8 +41,16 @@ class ExperimentalFeature(str, Enum):
     Currently supported per-user features
     """
 
-    MSC3026 = "msc3026"
     MSC3881 = "msc3881"
+    MSC3575 = "msc3575"
+
+    def is_globally_enabled(self, config: "HomeServerConfig") -> bool:
+        if self is ExperimentalFeature.MSC3881:
+            return config.experimental.msc3881_enabled
+        if self is ExperimentalFeature.MSC3575:
+            return config.experimental.msc3575_enabled
+
+        assert_never(self)
 
 
 class ExperimentalFeaturesRestServlet(RestServlet):
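
# A minimal standalone sketch of the exhaustiveness-checking pattern used by
# `is_globally_enabled` above: with `assert_never`, a type checker flags any
# enum member that is not handled by an explicit branch. Names here are
# illustrative, not Synapse code.
from enum import Enum
from typing_extensions import assert_never

class Feature(str, Enum):
    A = "a"
    B = "b"

def is_enabled(feature: Feature) -> bool:
    if feature is Feature.A:
        return True
    if feature is Feature.B:
        return False
    # If a new member is added to Feature without a branch above, mypy reports
    # an error here because `feature` is no longer narrowed to `Never`.
    assert_never(feature)
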
diff --git a/synapse/rest/client/media.py b/synapse/rest/client/media.py
index 0c089163c1..c30e3022de 100644
--- a/synapse/rest/client/media.py
+++ b/synapse/rest/client/media.py
@@ -22,6 +22,7 @@
 
 import logging
 import re
+from typing import Optional
 
 from synapse.http.server import (
     HttpServer,
@@ -46,7 +47,7 @@ from synapse.util.stringutils import parse_and_validate_server_name
 logger = logging.getLogger(__name__)
 
 
-class UnstablePreviewURLServlet(RestServlet):
+class PreviewURLServlet(RestServlet):
     """
     Same as `GET /_matrix/media/r0/preview_url`, this endpoint provides a generic preview API
     for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix
@@ -64,9 +65,7 @@ class UnstablePreviewURLServlet(RestServlet):
       * Matrix cannot be used to distribute the metadata between homeservers.
     """
 
-    PATTERNS = [
-        re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/preview_url$")
-    ]
+    PATTERNS = [re.compile(r"^/_matrix/client/v1/media/preview_url$")]
 
     def __init__(
         self,
@@ -94,10 +93,8 @@ class UnstablePreviewURLServlet(RestServlet):
         respond_with_json_bytes(request, 200, og, send_cors=True)
 
 
-class UnstableMediaConfigResource(RestServlet):
-    PATTERNS = [
-        re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/config$")
-    ]
+class MediaConfigResource(RestServlet):
+    PATTERNS = [re.compile(r"^/_matrix/client/v1/media/config$")]
 
     def __init__(self, hs: "HomeServer"):
         super().__init__()
@@ -111,10 +108,10 @@ class UnstableMediaConfigResource(RestServlet):
         respond_with_json(request, 200, self.limits_dict, send_cors=True)
 
 
-class UnstableThumbnailResource(RestServlet):
+class ThumbnailResource(RestServlet):
     PATTERNS = [
         re.compile(
-            "/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$"
+            "/_matrix/client/v1/media/thumbnail/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$"
         )
     ]
 
@@ -158,11 +155,25 @@ class UnstableThumbnailResource(RestServlet):
         if self._is_mine_server_name(server_name):
             if self.dynamic_thumbnails:
                 await self.thumbnailer.select_or_generate_local_thumbnail(
-                    request, media_id, width, height, method, m_type, max_timeout_ms
+                    request,
+                    media_id,
+                    width,
+                    height,
+                    method,
+                    m_type,
+                    max_timeout_ms,
+                    False,
                 )
             else:
                 await self.thumbnailer.respond_local_thumbnail(
-                    request, media_id, width, height, method, m_type, max_timeout_ms
+                    request,
+                    media_id,
+                    width,
+                    height,
+                    method,
+                    m_type,
+                    max_timeout_ms,
+                    False,
                 )
             self.media_repo.mark_recently_accessed(None, media_id)
         else:
@@ -190,18 +201,79 @@ class UnstableThumbnailResource(RestServlet):
                 m_type,
                 max_timeout_ms,
                 ip_address,
+                True,
             )
             self.media_repo.mark_recently_accessed(server_name, media_id)
 
 
+class DownloadResource(RestServlet):
+    PATTERNS = [
+        re.compile(
+            "/_matrix/client/v1/media/download/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)(/(?P<file_name>[^/]*))?$"
+        )
+    ]
+
+    def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"):
+        super().__init__()
+        self.media_repo = media_repo
+        self._is_mine_server_name = hs.is_mine_server_name
+        self.auth = hs.get_auth()
+
+    async def on_GET(
+        self,
+        request: SynapseRequest,
+        server_name: str,
+        media_id: str,
+        file_name: Optional[str] = None,
+    ) -> None:
+        # Validate the server name, raising if invalid
+        parse_and_validate_server_name(server_name)
+
+        await self.auth.get_user_by_req(request)
+
+        set_cors_headers(request)
+        set_corp_headers(request)
+        request.setHeader(
+            b"Content-Security-Policy",
+            b"sandbox;"
+            b" default-src 'none';"
+            b" script-src 'none';"
+            b" plugin-types application/pdf;"
+            b" style-src 'unsafe-inline';"
+            b" media-src 'self';"
+            b" object-src 'self';",
+        )
+        # Limited non-standard form of CSP for IE11
+        request.setHeader(b"X-Content-Security-Policy", b"sandbox;")
+        request.setHeader(b"Referrer-Policy", b"no-referrer")
+        max_timeout_ms = parse_integer(
+            request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
+        )
+        max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
+
+        if self._is_mine_server_name(server_name):
+            await self.media_repo.get_local_media(
+                request, media_id, file_name, max_timeout_ms
+            )
+        else:
+            ip_address = request.getClientAddress().host
+            await self.media_repo.get_remote_media(
+                request,
+                server_name,
+                media_id,
+                file_name,
+                max_timeout_ms,
+                ip_address,
+                True,
+            )
+
+
 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
-    if hs.config.experimental.msc3916_authenticated_media_enabled:
-        media_repo = hs.get_media_repository()
-        if hs.config.media.url_preview_enabled:
-            UnstablePreviewURLServlet(
-                hs, media_repo, media_repo.media_storage
-            ).register(http_server)
-        UnstableMediaConfigResource(hs).register(http_server)
-        UnstableThumbnailResource(hs, media_repo, media_repo.media_storage).register(
+    media_repo = hs.get_media_repository()
+    if hs.config.media.url_preview_enabled:
+        PreviewURLServlet(hs, media_repo, media_repo.media_storage).register(
             http_server
         )
+    MediaConfigResource(hs).register(http_server)
+    ThumbnailResource(hs, media_repo, media_repo.media_storage).register(http_server)
+    DownloadResource(hs, media_repo).register(http_server)
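
# A hedged client-side sketch of the new authenticated media endpoint
# registered above: unlike the legacy `/_matrix/media/v3` paths, the
# `/_matrix/client/v1/media/*` routes call `get_user_by_req` and so require an
# access token. The homeserver URL, media ID, and token are placeholders.
import requests

def download_media(base_url: str, server_name: str, media_id: str, token: str) -> bytes:
    resp = requests.get(
        f"{base_url}/_matrix/client/v1/media/download/{server_name}/{media_id}",
        headers={"Authorization": f"Bearer {token}"},
        # Matches the `timeout_ms` query parameter parsed by DownloadResource.
        params={"timeout_ms": 20000},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.content
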
diff --git a/synapse/rest/client/pusher.py b/synapse/rest/client/pusher.py
index 9957d2fcbe..a455f95a26 100644
--- a/synapse/rest/client/pusher.py
+++ b/synapse/rest/client/pusher.py
@@ -32,6 +32,7 @@ from synapse.http.servlet import (
 )
 from synapse.http.site import SynapseRequest
 from synapse.push import PusherConfigException
+from synapse.rest.admin.experimental_features import ExperimentalFeature
 from synapse.rest.client._base import client_patterns
 from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource
 from synapse.types import JsonDict
@@ -49,20 +50,22 @@ class PushersRestServlet(RestServlet):
         super().__init__()
         self.hs = hs
         self.auth = hs.get_auth()
-        self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled
+        self._store = hs.get_datastores().main
 
     async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
         requester = await self.auth.get_user_by_req(request)
-        user = requester.user
+        user_id = requester.user.to_string()
 
-        pushers = await self.hs.get_datastores().main.get_pushers_by_user_id(
-            user.to_string()
+        msc3881_enabled = await self._store.is_feature_enabled(
+            user_id, ExperimentalFeature.MSC3881
         )
 
+        pushers = await self.hs.get_datastores().main.get_pushers_by_user_id(user_id)
+
         pusher_dicts = [p.as_dict() for p in pushers]
 
         for pusher in pusher_dicts:
-            if self._msc3881_enabled:
+            if msc3881_enabled:
                 pusher["org.matrix.msc3881.enabled"] = pusher["enabled"]
                 pusher["org.matrix.msc3881.device_id"] = pusher["device_id"]
             del pusher["enabled"]
@@ -80,11 +83,15 @@ class PushersSetRestServlet(RestServlet):
         self.auth = hs.get_auth()
         self.notifier = hs.get_notifier()
         self.pusher_pool = self.hs.get_pusherpool()
-        self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled
+        self._store = hs.get_datastores().main
 
     async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
         requester = await self.auth.get_user_by_req(request)
-        user = requester.user
+        user_id = requester.user.to_string()
+
+        msc3881_enabled = await self._store.is_feature_enabled(
+            user_id, ExperimentalFeature.MSC3881
+        )
 
         content = parse_json_object_from_request(request)
 
@@ -95,7 +102,7 @@ class PushersSetRestServlet(RestServlet):
             and content["kind"] is None
         ):
             await self.pusher_pool.remove_pusher(
-                content["app_id"], content["pushkey"], user_id=user.to_string()
+                content["app_id"], content["pushkey"], user_id=user_id
             )
             return 200, {}
 
@@ -120,19 +127,19 @@ class PushersSetRestServlet(RestServlet):
             append = content["append"]
 
         enabled = True
-        if self._msc3881_enabled and "org.matrix.msc3881.enabled" in content:
+        if msc3881_enabled and "org.matrix.msc3881.enabled" in content:
             enabled = content["org.matrix.msc3881.enabled"]
 
         if not append:
             await self.pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user(
                 app_id=content["app_id"],
                 pushkey=content["pushkey"],
-                not_user_id=user.to_string(),
+                not_user_id=user_id,
             )
 
         try:
             await self.pusher_pool.add_or_update_pusher(
-                user_id=user.to_string(),
+                user_id=user_id,
                 kind=content["kind"],
                 app_id=content["app_id"],
                 app_display_name=content["app_display_name"],
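
# A small standalone sketch of the per-user gating now applied in
# PushersRestServlet above: the unstable `org.matrix.msc3881.*` fields are
# only exposed when the feature is enabled for that user, and the stable name
# is stripped either way. Shapes are illustrative.
from typing import Any, Dict

def shape_pusher(pusher: Dict[str, Any], msc3881_enabled: bool) -> Dict[str, Any]:
    out = dict(pusher)
    if msc3881_enabled:
        out["org.matrix.msc3881.enabled"] = out["enabled"]
        out["org.matrix.msc3881.device_id"] = out["device_id"]
    del out["enabled"]
    return out

assert "enabled" not in shape_pusher({"enabled": True, "device_id": "D"}, False)
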
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index b5ab0d8534..2a22bc14ec 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -53,6 +53,7 @@ from synapse.http.servlet import (
 )
 from synapse.http.site import SynapseRequest
 from synapse.logging.opentracing import trace_with_opname
+from synapse.rest.admin.experimental_features import ExperimentalFeature
 from synapse.types import JsonDict, Requester, StreamToken
 from synapse.types.rest.client import SlidingSyncBody
 from synapse.util import json_decoder
@@ -673,7 +674,9 @@ class SlidingSyncE2eeRestServlet(RestServlet):
         )
 
     async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
+        requester = await self.auth.get_user_by_req_experimental_feature(
+            request, allow_guest=True, feature=ExperimentalFeature.MSC3575
+        )
         user = requester.user
         device_id = requester.device_id
 
@@ -761,7 +764,6 @@ class SlidingSyncRestServlet(RestServlet):
             "lists": {
                 "foo-list": {
                     "ranges": [ [0, 99] ],
-                    "sort": [ "by_notification_level", "by_recency", "by_name" ],
                     "required_state": [
                         ["m.room.join_rules", ""],
                         ["m.room.history_visibility", ""],
@@ -771,7 +773,6 @@ class SlidingSyncRestServlet(RestServlet):
                     "filters": {
                         "is_dm": true
                     },
-                    "bump_event_types": [ "m.room.message", "m.room.encrypted" ],
                 }
             },
             // Room Subscriptions API
@@ -779,10 +780,6 @@ class SlidingSyncRestServlet(RestServlet):
                 "!sub1:bar": {
                     "required_state": [ ["*","*"] ],
                     "timeline_limit": 10,
-                    "include_old_rooms": {
-                        "timeline_limit": 1,
-                        "required_state": [ ["m.room.tombstone", ""], ["m.room.create", ""] ],
-                    }
                 }
             },
             // Extensions API
@@ -791,7 +788,7 @@ class SlidingSyncRestServlet(RestServlet):
 
     Response JSON::
         {
-            "next_pos": "s58_224_0_13_10_1_1_16_0_1",
+            "pos": "s58_224_0_13_10_1_1_16_0_1",
             "lists": {
                 "foo-list": {
                     "count": 1337,
@@ -830,7 +827,8 @@ class SlidingSyncRestServlet(RestServlet):
                     "joined_count": 41,
                     "invited_count": 1,
                     "notification_count": 1,
-                    "highlight_count": 0
+                    "highlight_count": 0,
+                    "num_live": 2"
                 },
                 // rooms from list
                 "!foo:bar": {
@@ -855,7 +853,8 @@ class SlidingSyncRestServlet(RestServlet):
                     "joined_count": 4,
                     "invited_count": 0,
                     "notification_count": 54,
-                    "highlight_count": 3
+                    "highlight_count": 3,
+                    "num_live": 1,
                 },
                  // ... 99 more items
             },
@@ -871,12 +870,16 @@ class SlidingSyncRestServlet(RestServlet):
         super().__init__()
         self.auth = hs.get_auth()
         self.store = hs.get_datastores().main
+        self.clock = hs.get_clock()
         self.filtering = hs.get_filtering()
         self.sliding_sync_handler = hs.get_sliding_sync_handler()
+        self.event_serializer = hs.get_event_client_serializer()
 
-    # TODO: Update this to `on_GET` once we figure out how we want to handle params
     async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
+        requester = await self.auth.get_user_by_req_experimental_feature(
+            request, allow_guest=True, feature=ExperimentalFeature.MSC3575
+        )
+
         user = requester.user
         device_id = requester.device_id
 
@@ -920,22 +923,25 @@ class SlidingSyncRestServlet(RestServlet):
             logger.info("Client has disconnected; not serializing response.")
             return 200, {}
 
-        response_content = await self.encode_response(sliding_sync_results)
+        response_content = await self.encode_response(requester, sliding_sync_results)
 
         return 200, response_content
 
     # TODO: Is there a better way to encode things?
     async def encode_response(
         self,
+        requester: Requester,
         sliding_sync_result: SlidingSyncResult,
     ) -> JsonDict:
         response: JsonDict = defaultdict(dict)
 
-        response["next_pos"] = await sliding_sync_result.next_pos.to_string(self.store)
+        response["pos"] = await sliding_sync_result.next_pos.to_string(self.store)
         serialized_lists = self.encode_lists(sliding_sync_result.lists)
         if serialized_lists:
             response["lists"] = serialized_lists
-        response["rooms"] = {}  # TODO: sliding_sync_result.rooms
+        response["rooms"] = await self.encode_rooms(
+            requester, sliding_sync_result.rooms
+        )
         response["extensions"] = {}  # TODO: sliding_sync_result.extensions
 
         return response
@@ -961,10 +967,95 @@ class SlidingSyncRestServlet(RestServlet):
 
         return serialized_lists
 
+    async def encode_rooms(
+        self,
+        requester: Requester,
+        rooms: Dict[str, SlidingSyncResult.RoomResult],
+    ) -> JsonDict:
+        time_now = self.clock.time_msec()
+
+        serialize_options = SerializeEventConfig(
+            event_format=format_event_for_client_v2_without_room_id,
+            requester=requester,
+        )
+
+        serialized_rooms: Dict[str, JsonDict] = {}
+        for room_id, room_result in rooms.items():
+            serialized_rooms[room_id] = {
+                "joined_count": room_result.joined_count,
+                "invited_count": room_result.invited_count,
+                "notification_count": room_result.notification_count,
+                "highlight_count": room_result.highlight_count,
+            }
+
+            if room_result.name:
+                serialized_rooms[room_id]["name"] = room_result.name
+
+            if room_result.avatar:
+                serialized_rooms[room_id]["avatar"] = room_result.avatar
+
+            if room_result.heroes:
+                serialized_rooms[room_id]["heroes"] = room_result.heroes
+
+            # We should only include the `initial` key if it's `True` to save bandwidth.
+            # The absence of this flag means `False`.
+            if room_result.initial:
+                serialized_rooms[room_id]["initial"] = room_result.initial
+
+            # This will be omitted for invite/knock rooms with `stripped_state`
+            if room_result.required_state is not None:
+                serialized_required_state = (
+                    await self.event_serializer.serialize_events(
+                        room_result.required_state,
+                        time_now,
+                        config=serialize_options,
+                    )
+                )
+                serialized_rooms[room_id]["required_state"] = serialized_required_state
+
+            # This will be omitted for invite/knock rooms with `stripped_state`
+            if room_result.timeline_events is not None:
+                serialized_timeline = await self.event_serializer.serialize_events(
+                    room_result.timeline_events,
+                    time_now,
+                    config=serialize_options,
+                    bundle_aggregations=room_result.bundled_aggregations,
+                )
+                serialized_rooms[room_id]["timeline"] = serialized_timeline
+
+            # This will be omitted for invite/knock rooms with `stripped_state`
+            if room_result.limited is not None:
+                serialized_rooms[room_id]["limited"] = room_result.limited
+
+            # This will be omitted for invite/knock rooms with `stripped_state`
+            if room_result.prev_batch is not None:
+                serialized_rooms[room_id]["prev_batch"] = (
+                    await room_result.prev_batch.to_string(self.store)
+                )
+
+            # This will be omitted for invite/knock rooms with `stripped_state`
+            if room_result.num_live is not None:
+                serialized_rooms[room_id]["num_live"] = room_result.num_live
+
+            # Field should be absent on non-DM rooms
+            if room_result.is_dm:
+                serialized_rooms[room_id]["is_dm"] = room_result.is_dm
+
+            # Stripped state only applies to invite/knock rooms
+            if room_result.stripped_state is not None:
+                # TODO: `knocked_state` but that isn't specced yet.
+                #
+                # TODO: Instead of adding `knocked_state`, it would be good to rename
+                # this to `stripped_state` so it can be shared between invite and knock
+                # rooms, see
+                # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1117629919
+                serialized_rooms[room_id]["invite_state"] = room_result.stripped_state
+
+        return serialized_rooms
+
 
 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     SyncRestServlet(hs).register(http_server)
 
-    if hs.config.experimental.msc3575_enabled:
-        SlidingSyncRestServlet(hs).register(http_server)
-        SlidingSyncE2eeRestServlet(hs).register(http_server)
+    SlidingSyncRestServlet(hs).register(http_server)
+    SlidingSyncE2eeRestServlet(hs).register(http_server)
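
# A small standalone sketch of the "omit when absent" serialization style used
# by `encode_rooms` above: optional fields are only written when they carry
# information (e.g. `initial` is present only when True), keeping responses
# compact. Field names are illustrative.
from typing import Any, Dict, Optional

def serialize_room(joined_count: int, name: Optional[str], initial: bool) -> Dict[str, Any]:
    out: Dict[str, Any] = {"joined_count": joined_count}
    if name:
        out["name"] = name
    if initial:
        # Absence of this key means False, so never emit `"initial": false`.
        out["initial"] = True
    return out

assert serialize_room(2, None, False) == {"joined_count": 2}
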
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index f428158139..e01e5f542a 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -25,11 +25,11 @@ import logging
 import re
 from typing import TYPE_CHECKING, Tuple
 
-from twisted.web.server import Request
-
 from synapse.api.constants import RoomCreationPreset
 from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet
+from synapse.http.site import SynapseRequest
+from synapse.rest.admin.experimental_features import ExperimentalFeature
 from synapse.types import JsonDict
 
 if TYPE_CHECKING:
@@ -45,6 +45,8 @@ class VersionsRestServlet(RestServlet):
     def __init__(self, hs: "HomeServer"):
         super().__init__()
         self.config = hs.config
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastores().main
 
         # Calculate these once since they shouldn't change after start-up.
         self.e2ee_forced_public = (
@@ -60,7 +62,17 @@ class VersionsRestServlet(RestServlet):
             in self.config.room.encryption_enabled_by_default_for_room_presets
         )
 
-    def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
+    async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+        msc3881_enabled = self.config.experimental.msc3881_enabled
+
+        if self.auth.has_access_token(request):
+            requester = await self.auth.get_user_by_req(request)
+            user_id = requester.user.to_string()
+
+            msc3881_enabled = await self.store.is_feature_enabled(
+                user_id, ExperimentalFeature.MSC3881
+            )
+
         return (
             200,
             {
@@ -124,7 +136,7 @@ class VersionsRestServlet(RestServlet):
                     # TODO: this is no longer needed once unstable MSC3882 does not need to be supported:
                     "org.matrix.msc3882": self.config.auth.login_via_existing_enabled,
                     # Adds support for remotely enabling/disabling pushers, as per MSC3881
-                    "org.matrix.msc3881": self.config.experimental.msc3881_enabled,
+                    "org.matrix.msc3881": msc3881_enabled,
                     # Adds support for filtering /messages by event relation.
                     "org.matrix.msc3874": self.config.experimental.msc3874_enabled,
                     # Adds support for simple HTTP rendezvous as per MSC3886
diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py
index 1628d58926..c32c626905 100644
--- a/synapse/rest/media/download_resource.py
+++ b/synapse/rest/media/download_resource.py
@@ -105,4 +105,5 @@ class DownloadResource(RestServlet):
                 file_name,
                 max_timeout_ms,
                 ip_address,
+                False,
             )
diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py
index ce511c6dce..70354aa439 100644
--- a/synapse/rest/media/thumbnail_resource.py
+++ b/synapse/rest/media/thumbnail_resource.py
@@ -88,11 +88,25 @@ class ThumbnailResource(RestServlet):
         if self._is_mine_server_name(server_name):
             if self.dynamic_thumbnails:
                 await self.thumbnail_provider.select_or_generate_local_thumbnail(
-                    request, media_id, width, height, method, m_type, max_timeout_ms
+                    request,
+                    media_id,
+                    width,
+                    height,
+                    method,
+                    m_type,
+                    max_timeout_ms,
+                    False,
                 )
             else:
                 await self.thumbnail_provider.respond_local_thumbnail(
-                    request, media_id, width, height, method, m_type, max_timeout_ms
+                    request,
+                    media_id,
+                    width,
+                    height,
+                    method,
+                    m_type,
+                    max_timeout_ms,
+                    False,
                 )
             self.media_repo.mark_recently_accessed(None, media_id)
         else:
@@ -120,5 +134,6 @@ class ThumbnailResource(RestServlet):
                 m_type,
                 max_timeout_ms,
                 ip_address,
+                False,
             )
             self.media_repo.mark_recently_accessed(server_name, media_id)
diff --git a/synapse/server.py b/synapse/server.py
index ae927c3904..4a3f9ff934 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -28,7 +28,7 @@
 import abc
 import functools
 import logging
-from typing import TYPE_CHECKING, Callable, Dict, List, Optional, TypeVar, cast
+from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Type, TypeVar, cast
 
 from typing_extensions import TypeAlias
 
@@ -161,6 +161,7 @@ if TYPE_CHECKING:
     from synapse.handlers.jwt import JwtHandler
     from synapse.handlers.oidc import OidcHandler
     from synapse.handlers.saml import SamlHandler
+    from synapse.storage._base import SQLBaseStore
 
 
 # The annotation for `cache_in_self` used to be
@@ -255,10 +256,13 @@ class HomeServer(metaclass=abc.ABCMeta):
         "stats",
     ]
 
-    # This is overridden in derived application classes
-    # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be
-    # instantiated during setup() for future return by get_datastores()
-    DATASTORE_CLASS = abc.abstractproperty()
+    @property
+    @abc.abstractmethod
+    def DATASTORE_CLASS(self) -> Type["SQLBaseStore"]:
+        # This is overridden in derived application classes
+        # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be
+        # instantiated during setup() for future return by get_datastores()
+        pass
 
     def __init__(
         self,
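
# A standalone sketch of the replacement applied above: stacking @property on
# @abc.abstractmethod is the modern spelling of the deprecated
# `abc.abstractproperty`, and gives type checkers a concrete return type.
import abc
from typing import Type

class Base(abc.ABC):
    @property
    @abc.abstractmethod
    def DATASTORE_CLASS(self) -> Type[object]:
        ...

class Impl(Base):
    @property
    def DATASTORE_CLASS(self) -> Type[object]:
        return dict  # any class; illustrative only
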
diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py
index cc9b162ae4..b50eb8868e 100644
--- a/synapse/storage/controllers/state.py
+++ b/synapse/storage/controllers/state.py
@@ -409,7 +409,7 @@ class StateStorageController:
 
         return state_ids
 
-    async def get_state_at(
+    async def get_state_ids_at(
         self,
         room_id: str,
         stream_position: StreamToken,
@@ -436,6 +436,9 @@ class StateStorageController:
             )
         )
 
+        # FIXME: This will return incorrect results when there are timeline gaps. For
+        # example, when you request state at a point in the room that we haven't
+        # backfilled yet.
+
         if last_event_id:
             state = await self.get_state_after_event(
                 last_event_id,
@@ -459,6 +462,30 @@ class StateStorageController:
 
     @trace
     @tag_args
+    async def get_state_at(
+        self,
+        room_id: str,
+        stream_position: StreamToken,
+        state_filter: Optional[StateFilter] = None,
+        await_full_state: bool = True,
+    ) -> StateMap[EventBase]:
+        """Same as `get_state_ids_at` but also fetches the events"""
+        state_map_ids = await self.get_state_ids_at(
+            room_id, stream_position, state_filter, await_full_state
+        )
+
+        event_map = await self.stores.main.get_events(list(state_map_ids.values()))
+
+        state_map = {}
+        for key, event_id in state_map_ids.items():
+            event = event_map.get(event_id)
+            if event:
+                state_map[key] = event
+
+        return state_map
+
+    @trace
+    @tag_args
     async def get_state_for_groups(
         self, groups: Iterable[int], state_filter: Optional[StateFilter] = None
     ) -> Dict[int, MutableStateMap[str]]:
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index a16e0fdc99..47ba1eeff4 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -825,14 +825,13 @@ class DeviceInboxWorkerStore(SQLBaseStore):
             # Check if we've already inserted a matching message_id for that
             # origin. This can happen if the origin doesn't receive our
             # acknowledgement from the first time we received the message.
-            already_inserted = self.db_pool.simple_select_one_txn(
+            already_inserted = self.db_pool.simple_select_list_txn(
                 txn,
                 table="device_federation_inbox",
                 keyvalues={"origin": origin, "message_id": message_id},
                 retcols=("message_id",),
-                allow_none=True,
             )
-            if already_inserted is not None:
+            if already_inserted:
                 return
 
             # Add an entry for this message_id so that we know we've processed
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 4eb4b8819a..a4fd9ebbef 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -55,7 +55,7 @@ from synapse.api.room_versions import (
 )
 from synapse.events import EventBase, make_event_from_dict
 from synapse.events.snapshot import EventContext
-from synapse.events.utils import prune_event
+from synapse.events.utils import prune_event, strip_event
 from synapse.logging.context import (
     PreserveLoggingContext,
     current_context,
@@ -1025,15 +1025,7 @@ class EventsWorkerStore(SQLBaseStore):
 
         state_to_include = await self.get_events(selected_state_ids.values())
 
-        return [
-            {
-                "type": e.type,
-                "state_key": e.state_key,
-                "content": e.content,
-                "sender": e.sender,
-            }
-            for e in state_to_include.values()
-        ]
+        return [strip_event(e) for e in state_to_include.values()]
 
     def _maybe_start_fetch_thread(self) -> None:
         """Starts an event fetch thread if we are not yet at the maximum number."""
diff --git a/synapse/storage/databases/main/experimental_features.py b/synapse/storage/databases/main/experimental_features.py
index fbb98d8f63..d980c57fa8 100644
--- a/synapse/storage/databases/main/experimental_features.py
+++ b/synapse/storage/databases/main/experimental_features.py
@@ -21,7 +21,11 @@
 
 from typing import TYPE_CHECKING, Dict, FrozenSet, List, Tuple, cast
 
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import (
+    DatabasePool,
+    LoggingDatabaseConnection,
+    LoggingTransaction,
+)
 from synapse.storage.databases.main import CacheInvalidationWorkerStore
 from synapse.util.caches.descriptors import cached
 
@@ -73,12 +77,54 @@ class ExperimentalFeaturesStore(CacheInvalidationWorkerStore):
             features:
                 pairs of features and True/False for whether the feature should be enabled
         """
-        for feature, enabled in features.items():
-            await self.db_pool.simple_upsert(
-                table="per_user_experimental_features",
-                keyvalues={"feature": feature, "user_id": user},
-                values={"enabled": enabled},
-                insertion_values={"user_id": user, "feature": feature},
-            )
 
-            await self.invalidate_cache_and_stream("list_enabled_features", (user,))
+        def set_features_for_user_txn(txn: LoggingTransaction) -> None:
+            for feature, enabled in features.items():
+                self.db_pool.simple_upsert_txn(
+                    txn,
+                    table="per_user_experimental_features",
+                    keyvalues={"feature": feature, "user_id": user},
+                    values={"enabled": enabled},
+                    insertion_values={"user_id": user, "feature": feature},
+                )
+
+                self._invalidate_cache_and_stream(
+                    txn, self.is_feature_enabled, (user, feature)
+                )
+
+            self._invalidate_cache_and_stream(txn, self.list_enabled_features, (user,))
+
+        return await self.db_pool.runInteraction(
+            "set_features_for_user", set_features_for_user_txn
+        )
+
+    @cached()
+    async def is_feature_enabled(
+        self, user_id: str, feature: "ExperimentalFeature"
+    ) -> bool:
+        """
+        Checks to see if a given feature is enabled for the user
+        Args:
+            user_id: the user to be queried on
+            feature: the feature in question
+        Returns:
+                True if the feature is enabled, False if it is not or if the feature was
+                not found.
+        """
+
+        if feature.is_globally_enabled(self.hs.config):
+            return True
+
+        # if it's not enabled globally, check if it is enabled per-user
+        res = await self.db_pool.simple_select_one_onecol(
+            table="per_user_experimental_features",
+            keyvalues={"user_id": user_id, "feature": feature},
+            retcol="enabled",
+            allow_none=True,
+            desc="get_feature_enabled",
+        )
+
+        # None and False are treated the same
+        db_enabled = bool(res)
+
+        return db_enabled
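
# A standalone model (illustrative names) of the lookup order implemented by
# `is_feature_enabled` above: a globally enabled feature short-circuits to
# True; otherwise the per-user row decides, with a missing row meaning
# disabled (None and False are treated the same).
from typing import Dict, Optional, Tuple

def feature_enabled(
    globally_enabled: bool,
    per_user_rows: Dict[Tuple[str, str], Optional[bool]],
    user_id: str,
    feature: str,
) -> bool:
    if globally_enabled:
        return True
    return bool(per_user_rows.get((user_id, feature)))

assert feature_enabled(False, {("@a:hs", "msc3881"): True}, "@a:hs", "msc3881")
assert not feature_enabled(False, {}, "@a:hs", "msc3881")
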
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index b7eb3116ae..d34376b8df 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -44,6 +44,7 @@ what sort order was used:
 import logging
 from typing import (
     TYPE_CHECKING,
+    AbstractSet,
     Any,
     Collection,
     Dict,
@@ -62,7 +63,7 @@ from typing_extensions import Literal
 
 from twisted.internet import defer
 
-from synapse.api.constants import Direction
+from synapse.api.constants import Direction, EventTypes, Membership
 from synapse.api.filtering import Filter
 from synapse.events import EventBase
 from synapse.logging.context import make_deferred_yieldable, run_in_background
@@ -111,6 +112,32 @@ class _EventsAround:
     end: RoomStreamToken
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class CurrentStateDeltaMembership:
+    """
+    Attributes:
+        event_id: The "current" membership event ID in this room.
+        event_pos: The position of the "current" membership event in the event stream.
+        prev_event_id: The previous membership event in this room that was replaced by
+            the "current" one. May be `None` if there was no previous membership event.
+        room_id: The room ID of the membership event.
+        membership: The membership state of the user in the room
+        sender: The person who sent the membership event
+    """
+
+    room_id: str
+    # Event
+    event_id: Optional[str]
+    event_pos: PersistedEventPosition
+    membership: str
+    sender: Optional[str]
+    # Prev event
+    prev_event_id: Optional[str]
+    prev_event_pos: Optional[PersistedEventPosition]
+    prev_membership: Optional[str]
+    prev_sender: Optional[str]
+
+
 def generate_pagination_where_clause(
     direction: Direction,
     column_names: Tuple[str, str],
@@ -390,6 +417,43 @@ def _filter_results(
     return True
 
 
+def _filter_results_by_stream(
+    lower_token: Optional[RoomStreamToken],
+    upper_token: Optional[RoomStreamToken],
+    instance_name: str,
+    stream_ordering: int,
+) -> bool:
+    """
+    This function only works with "live" tokens that contain a `stream_ordering`
+    position (no topological part). See `_filter_results(...)` if you want to work
+    with all tokens.
+
+    Returns True if the event persisted by the given instance at the given
+    stream_ordering falls between the two tokens (taking a None
+    token to mean unbounded).
+
+    Used to filter results from fetching events in the DB against the given
+    tokens. This is necessary to handle the case where the tokens include
+    position maps, which we handle by fetching more than necessary from the DB
+    and then filtering (rather than attempting to construct a complicated SQL
+    query).
+    """
+    if lower_token:
+        assert lower_token.topological is None
+
+        # If these are live tokens we compare the stream ordering against the
+        # writers stream position.
+        if stream_ordering <= lower_token.get_stream_pos_for_instance(instance_name):
+            return False
+
+    if upper_token:
+        assert upper_token.topological is None
+
+        if upper_token.get_stream_pos_for_instance(instance_name) < stream_ordering:
+            return False
+
+    return True
+
+
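
# A worked standalone example of the half-open range check implemented by
# `_filter_results_by_stream` above, with the tokens reduced to plain
# per-writer integer positions (lower bound exclusive, upper bound inclusive):
from typing import Optional

def in_stream_range(lower: Optional[int], upper: Optional[int], pos: int) -> bool:
    if lower is not None and pos <= lower:
        return False
    if upper is not None and upper < pos:
        return False
    return True

assert in_stream_range(10, 20, 10) is False  # equal to lower bound: excluded
assert in_stream_range(10, 20, 20) is True   # equal to upper bound: included
assert in_stream_range(None, None, 5) is True  # None means unbounded
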
 def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]:
     # NB: This may create SQL clauses that don't optimise well (and we don't
     # have indices on all possible clauses). E.g. it may create
@@ -734,6 +798,191 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         return ret, key
 
+    async def get_current_state_delta_membership_changes_for_user(
+        self,
+        user_id: str,
+        from_key: RoomStreamToken,
+        to_key: RoomStreamToken,
+        excluded_room_ids: Optional[List[str]] = None,
+    ) -> List[CurrentStateDeltaMembership]:
+        """
+        Fetch membership events (and the previous event that was replaced by that one)
+        for a given user.
+
+        Note: This function only works with "live" tokens that contain a
+        `stream_ordering` position (no topological part).
+
+        We're looking for membership changes in the token range (> `from_key` and <=
+        `to_key`).
+
+        Please be mindful to only use this with `from_key` and `to_key` tokens that are
+        recent enough to be after when the first local user joined the room. Otherwise,
+        the results may be incomplete or too greedy. For example, if you use a token
+        range before the first local user joined the room, you will see 0 events since
+        `current_state_delta_stream` tracks what the server thinks is the current state
+        of the room as time goes. It does not track how state progresses from the
+        beginning of the room. So for example, when you remotely join a room, the first
+        rows will just be the state when you joined and progress from there.
+
+        You can probably reasonably use this with `/sync` because the `to_key` passed in
+        will be the "current" now token and the range will cover when the user joined
+        the room.
+
+        Args:
+            user_id: The user ID to fetch membership events for.
+            from_key: The point in the stream to sync from (fetching events > this point).
+            to_key: The token to fetch rooms up to (fetching events <= this point).
+            excluded_room_ids: Optional list of room IDs to exclude from the results.
+
+        Returns:
+            All membership changes to the current state in the token range. Events are
+            sorted by `stream_ordering` ascending.
+        """
+        # Start by ruling out cases where a DB query is not necessary.
+        if from_key == to_key:
+            return []
+
+        if from_key:
+            has_changed = self._membership_stream_cache.has_entity_changed(
+                user_id, int(from_key.stream)
+            )
+            if not has_changed:
+                return []
+
+        def f(txn: LoggingTransaction) -> List[CurrentStateDeltaMembership]:
+            # To handle tokens with a non-empty instance_map we fetch more
+            # results than necessary and then filter down
+            min_from_id = from_key.stream
+            max_to_id = to_key.get_max_stream_pos()
+
+            args: List[Any] = [min_from_id, max_to_id, EventTypes.Member, user_id]
+
+            # TODO: It would be good to assert that the `from_token`/`to_token` is >=
+            # the first row in `current_state_delta_stream` for the rooms we're
+            # interested in. Otherwise, we will end up with empty results and not know
+            # it.
+
+            # We could `COALESCE(e.stream_ordering, s.stream_id)` to get more accurate
+            # stream positioning when available but given our usages, we can avoid the
+            # complexity. Between two (valid) stream tokens, we will still get all of
+            # the state changes. Since those events are persisted in a batch, valid
+            # tokens will either be before or after the batch of events.
+            #
+            # `stream_ordering` from the `events` table is more accurate when available
+            # since the `current_state_delta_stream` table only tracks that the current
+            # state is at this stream position (not the stream position at which the
+            # state event was added) and uses the *minimum* stream position for
+            # batches of events.
+            sql = """
+                SELECT
+                    s.room_id,
+                    e.event_id,
+                    s.instance_name,
+                    s.stream_id,
+                    m.membership,
+                    e.sender,
+                    s.prev_event_id,
+                    e_prev.instance_name AS prev_instance_name,
+                    e_prev.stream_ordering AS prev_stream_ordering,
+                    m_prev.membership AS prev_membership,
+                    e_prev.sender AS prev_sender
+                FROM current_state_delta_stream AS s
+                    LEFT JOIN events AS e ON e.event_id = s.event_id
+                    LEFT JOIN room_memberships AS m ON m.event_id = s.event_id
+                    LEFT JOIN events AS e_prev ON e_prev.event_id = s.prev_event_id
+                    LEFT JOIN room_memberships AS m_prev ON m_prev.event_id = s.prev_event_id
+                WHERE s.stream_id > ? AND s.stream_id <= ?
+                    AND s.type = ?
+                    AND s.state_key = ?
+                ORDER BY s.stream_id ASC
+            """
+
+            txn.execute(sql, args)
+
+            membership_changes: List[CurrentStateDeltaMembership] = []
+            for (
+                room_id,
+                event_id,
+                instance_name,
+                stream_ordering,
+                membership,
+                sender,
+                prev_event_id,
+                prev_instance_name,
+                prev_stream_ordering,
+                prev_membership,
+                prev_sender,
+            ) in txn:
+                assert room_id is not None
+                assert instance_name is not None
+                assert stream_ordering is not None
+
+                if _filter_results_by_stream(
+                    from_key,
+                    to_key,
+                    instance_name,
+                    stream_ordering,
+                ):
+                    # When the server leaves a room, it will insert new rows into the
+                    # `current_state_delta_stream` table with `event_id = null` for all
+                    # current state. This means we might already have a row for the
+                    # leave event and then another for the same leave where the
+                    # `event_id=null` but the `prev_event_id` is pointing back at the
+                    # earlier leave event. We don't want to report the leave again if
+                    # we already have a leave event.
+                    if event_id is None and prev_membership == Membership.LEAVE:
+                        continue
+
+                    membership_change = CurrentStateDeltaMembership(
+                        room_id=room_id,
+                        # Event
+                        event_id=event_id,
+                        event_pos=PersistedEventPosition(
+                            instance_name=instance_name,
+                            stream=stream_ordering,
+                        ),
+                        # When `s.event_id = null`, we won't be able to get the
+                        # corresponding `room_memberships` row, but can assume the
+                        # user has left the room
+                        # because this only happens when the server leaves a room
+                        # (meaning everyone locally left) or a state reset which removed
+                        # the person from the room.
+                        membership=(
+                            membership if membership is not None else Membership.LEAVE
+                        ),
+                        sender=sender,
+                        # Prev event
+                        prev_event_id=prev_event_id,
+                        prev_event_pos=(
+                            PersistedEventPosition(
+                                instance_name=prev_instance_name,
+                                stream=prev_stream_ordering,
+                            )
+                            if (
+                                prev_instance_name is not None
+                                and prev_stream_ordering is not None
+                            )
+                            else None
+                        ),
+                        prev_membership=prev_membership,
+                        prev_sender=prev_sender,
+                    )
+
+                    membership_changes.append(membership_change)
+
+            return membership_changes
+
+        membership_changes = await self.db_pool.runInteraction(
+            "get_current_state_delta_membership_changes_for_user", f
+        )
+
+        room_ids_to_exclude: AbstractSet[str] = set()
+        if excluded_room_ids is not None:
+            room_ids_to_exclude = set(excluded_room_ids)
+
+        return [
+            membership_change
+            for membership_change in membership_changes
+            if membership_change.room_id not in room_ids_to_exclude
+        ]
+
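
# A standalone model of the de-duplication rule in the loop above: rows with
# `event_id = None` whose previous membership was already "leave" are skipped,
# so a server-leave does not report the same leave twice.
from typing import Optional

def should_skip(event_id: Optional[str], prev_membership: Optional[str]) -> bool:
    return event_id is None and prev_membership == "leave"

assert should_skip(None, "leave")
assert not should_skip("$evt", "leave")
assert not should_skip(None, "join")  # server leave / state reset: reported as a leave
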
     @cancellable
     async def get_membership_changes_for_user(
         self,
@@ -769,10 +1018,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
             ignore_room_clause = ""
             if excluded_rooms is not None and len(excluded_rooms) > 0:
-                ignore_room_clause = "AND e.room_id NOT IN (%s)" % ",".join(
-                    "?" for _ in excluded_rooms
+                ignore_room_clause, ignore_room_args = make_in_list_sql_clause(
+                    txn.database_engine, "e.room_id", excluded_rooms, negative=True
                 )
-                args = args + excluded_rooms
+                ignore_room_clause = f"AND {ignore_room_clause}"
+                args += ignore_room_args
 
             sql = """
                 SELECT m.event_id, instance_name, topological_ordering, stream_ordering
@@ -1554,6 +1804,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
     ) -> Tuple[List[EventBase], RoomStreamToken]:
         """Returns list of events before or after a given token.
 
+        When Direction.FORWARDS: from_key < x <= to_key
+        When Direction.BACKWARDS: from_key >= x > to_key
+
         Args:
             room_id
             from_key: The token used to stream from
@@ -1570,6 +1823,27 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             and `to_key`).
         """
 
+        # We can bail early if we're looking forwards, and our `to_key` is already
+        # before our `from_key`.
+        if (
+            direction == Direction.FORWARDS
+            and to_key is not None
+            and to_key.is_before_or_eq(from_key)
+        ):
+            # Token selection matches what we do in `_paginate_room_events_txn` if there
+            # are no rows
+            return [], to_key if to_key else from_key
+        # Or vice-versa, if we're looking backwards and our `from_key` is already before
+        # our `to_key`.
+        elif (
+            direction == Direction.BACKWARDS
+            and to_key is not None
+            and from_key.is_before_or_eq(to_key)
+        ):
+            # Token selection matches what we do in `_paginate_room_events_txn` if there
+            # are no rows
+            return [], to_key if to_key else from_key
+
         rows, token = await self.db_pool.runInteraction(
             "paginate_room_events",
             self._paginate_room_events_txn,
diff --git a/synapse/storage/schema/main/delta/42/current_state_delta.sql b/synapse/storage/schema/main/delta/42/current_state_delta.sql
index 876b61e6a5..3d2fd69480 100644
--- a/synapse/storage/schema/main/delta/42/current_state_delta.sql
+++ b/synapse/storage/schema/main/delta/42/current_state_delta.sql
@@ -32,7 +32,10 @@
  * limitations under the License.
  */
 
-
+-- Tracks what the server thinks is the current state of the room as time goes on. It
+-- does not track how state progresses from the beginning of the room. So, for example,
+-- when you remotely join a room, the first rows will just be the state at the point you
+-- joined, progressing from there.
 CREATE TABLE current_state_delta_stream (
     stream_id BIGINT NOT NULL,
     room_id TEXT NOT NULL,
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
index 8ab9f90238..b22a13ef01 100644
--- a/synapse/types/__init__.py
+++ b/synapse/types/__init__.py
@@ -1096,6 +1096,9 @@ class PersistedPosition:
     stream: int
 
     def persisted_after(self, token: AbstractMultiWriterStreamToken) -> bool:
+        """
+        Checks whether this position happened after the token.
+        """
         return token.get_stream_pos_for_instance(self.instance_name) < self.stream
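
# A numeric sketch of `persisted_after` with the token reduced to a single
# per-writer integer position: a position is after the token iff the token's
# position for that writer is strictly smaller.
def persisted_after(token_pos_for_writer: int, stream: int) -> bool:
    return token_pos_for_writer < stream

assert persisted_after(5, 6)      # stream 6 happened after a token at 5
assert not persisted_after(6, 6)  # stream 6 is already covered by the token
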
 
 
diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py
index 1d65551d5b..3bd3268e59 100644
--- a/synapse/types/handlers/__init__.py
+++ b/synapse/types/handlers/__init__.py
@@ -31,9 +31,12 @@ else:
     from pydantic import Extra
 
 from synapse.events import EventBase
-from synapse.types import JsonMapping, StreamToken, UserID
+from synapse.types import JsonDict, JsonMapping, StreamToken, UserID
 from synapse.types.rest.client import SlidingSyncBody
 
+if TYPE_CHECKING:
+    from synapse.handlers.relations import BundledAggregations
+
 
 class ShutdownRoomParams(TypedDict):
     """
@@ -153,21 +156,33 @@ class SlidingSyncResult:
             avatar: Room avatar
             heroes: List of stripped membership events (containing `user_id` and optionally
                 `avatar_url` and `displayname`) for the users used to calculate the room name.
+            is_dm: Flag to specify whether the room is a direct-message room (most likely
+                between two people).
             initial: Flag which is set when this is the first time the server is sending this
                 data on this connection. Clients can use this flag to replace or update
                 their local state. When there is an update, servers MUST omit this flag
                 entirely and NOT send "initial":false as this is wasteful on bandwidth. The
                 absence of this flag means 'false'.
             required_state: The current state of the room
-            timeline: Latest events in the room. The last event is the most recent
-            is_dm: Flag to specify whether the room is a direct-message room (most likely
-                between two people).
-            invite_state: Stripped state events. Same as `rooms.invite.$room_id.invite_state`
-                in sync v2, absent on joined/left rooms
+            timeline: Latest events in the room. The last event is the most recent.
+            bundled_aggregations: A mapping of event ID to the bundled aggregations for
+                the timeline events above. This allows clients to show accurate reaction
+                counts (or edits, threads), even if some of the reaction events were skipped
+                over in a gappy sync.
+            stripped_state: Stripped state events (for rooms where the user is
+                invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2,
+                absent on joined/left rooms
             prev_batch: A token that can be passed as a start parameter to the
                 `/rooms/<room_id>/messages` API to retrieve earlier messages.
             limited: True if there are more events than fit between the given position and now.
                 Sync again to get more.
+            num_live: The number of timeline events which have just occurred and are not historical.
+                The last N events are 'live' and should be treated as such. This is mostly
+                useful to determine whether a given @mention event should make a noise or not.
+                Clients cannot rely solely on the absence of `initial: true` to determine live
+                events because if a room not in the sliding window bumps into the window because
+                of an @mention it will have `initial: true` yet contain a single live event
+                (with potentially other old events in the timeline).
             joined_count: The number of users with membership of join, including the client's
                 own user ID. (same as sync `v2 m.joined_member_count`)
             invited_count: The number of users with membership of invite. (same as sync v2
@@ -176,30 +191,30 @@ class SlidingSyncResult:
                 as sync v2)
             highlight_count: The number of unread notifications for this room with the highlight
                 flag set. (same as sync v2)
-            num_live: The number of timeline events which have just occurred and are not historical.
-                The last N events are 'live' and should be treated as such. This is mostly
-                useful to determine whether a given @mention event should make a noise or not.
-                Clients cannot rely solely on the absence of `initial: true` to determine live
-                events because if a room not in the sliding window bumps into the window because
-                of an @mention it will have `initial: true` yet contain a single live event
-                (with potentially other old events in the timeline).
         """
 
-        name: str
+        name: Optional[str]
         avatar: Optional[str]
         heroes: Optional[List[EventBase]]
-        initial: bool
-        required_state: List[EventBase]
-        timeline: List[EventBase]
         is_dm: bool
-        invite_state: List[EventBase]
-        prev_batch: StreamToken
-        limited: bool
+        initial: bool
+        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
+        required_state: Optional[List[EventBase]]
+        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
+        timeline_events: Optional[List[EventBase]]
+        bundled_aggregations: Optional[Dict[str, "BundledAggregations"]]
+        # Optional because it's only relevant to invite/knock rooms
+        stripped_state: Optional[List[JsonDict]]
+        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
+        prev_batch: Optional[StreamToken]
+        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
+        limited: Optional[bool]
+        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
+        num_live: Optional[int]
         joined_count: int
         invited_count: int
         notification_count: int
         highlight_count: int
-        num_live: int
 
     @attr.s(slots=True, frozen=True, auto_attribs=True)
     class SlidingWindowList:
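[Editor's aside: the `num_live` semantics documented in the docstring above lend themselves to a small client-side helper. The sketch below is illustrative only; it is not part of this diff, and `room` is a hypothetical parsed sliding sync response dict:

    # Hypothetical helper: pick out the "live" tail of a sliding sync timeline.
    # Per the docstring above, only the last `num_live` events are live and
    # eligible for notification noises; earlier events are historical backfill.
    def live_events(room: dict) -> list:
        timeline = room.get("timeline", [])
        num_live = room.get("num_live") or 0
        return timeline[-num_live:] if num_live else []
]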
diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py
index e2c79c4106..55f6b44053 100644
--- a/synapse/types/rest/client/__init__.py
+++ b/synapse/types/rest/client/__init__.py
@@ -152,22 +152,14 @@ class SlidingSyncBody(RequestBodyModel):
                 anyway.
             timeline_limit: The maximum number of timeline events to return per response.
                 (Max 1000 messages)
-            include_old_rooms: Determines if `predecessor` rooms are included in the
-                `rooms` response. The user MUST be joined to old rooms for them to show up
-                in the response.
         """
 
-        class IncludeOldRooms(RequestBodyModel):
-            timeline_limit: StrictInt
-            required_state: List[Tuple[StrictStr, StrictStr]]
-
         required_state: List[Tuple[StrictStr, StrictStr]]
         # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884
         if TYPE_CHECKING:
             timeline_limit: int
         else:
             timeline_limit: conint(le=1000, strict=True)  # type: ignore[valid-type]
-        include_old_rooms: Optional[IncludeOldRooms] = None
 
     class SlidingSyncList(CommonRoomParameters):
         """
@@ -208,9 +200,6 @@ class SlidingSyncBody(RequestBodyModel):
                     }
 
             timeline_limit: The maximum number of timeline events to return per response.
-            include_old_rooms: Determines if `predecessor` rooms are included in the
-                `rooms` response. The user MUST be joined to old rooms for them to show up
-                in the response.
             include_heroes: Return a stripped variant of membership events (containing
                 `user_id` and optionally `avatar_url` and `displayname`) for the users used
                 to calculate the room name.
@@ -270,7 +259,7 @@ class SlidingSyncBody(RequestBodyModel):
             is_encrypted: Optional[StrictBool] = None
             is_invite: Optional[StrictBool] = None
             room_types: Optional[List[Union[StrictStr, None]]] = None
-            not_room_types: Optional[List[StrictStr]] = None
+            not_room_types: Optional[List[Union[StrictStr, None]]] = None
             room_name_like: Optional[StrictStr] = None
             tags: Optional[List[StrictStr]] = None
             not_tags: Optional[List[StrictStr]] = None
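[Editor's aside: the widened `not_room_types` annotation above means `null` entries now validate, matching `room_types`, where `null` denotes rooms without any room type. A hypothetical filter fragment, not taken from this diff:

    # Hypothetical sliding sync filter: exclude spaces as well as rooms that
    # have no room type at all (the `None` entry now passes validation).
    filters = {
        "not_room_types": [None, "m.space"],
    }
]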
diff --git a/tests/federation/test_federation_media.py b/tests/federation/test_federation_media.py
index 2c396adbe3..0dcf20f5f5 100644
--- a/tests/federation/test_federation_media.py
+++ b/tests/federation/test_federation_media.py
@@ -35,11 +35,11 @@ from synapse.types import UserID
 from synapse.util import Clock
 
 from tests import unittest
+from tests.media.test_media_storage import small_png
 from tests.test_utils import SMALL_PNG
-from tests.unittest import override_config
 
 
-class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase):
+class FederationMediaDownloadsTest(unittest.FederatingHomeserverTestCase):
 
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         super().prepare(reactor, clock, hs)
@@ -65,9 +65,6 @@ class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase
         )
         self.media_repo = hs.get_media_repository()
 
-    @override_config(
-        {"experimental_features": {"msc3916_authenticated_media_enabled": True}}
-    )
     def test_file_download(self) -> None:
         content = io.BytesIO(b"file_to_stream")
         content_uri = self.get_success(
@@ -82,7 +79,7 @@ class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase
         # test with a text file
         channel = self.make_signed_federation_request(
             "GET",
-            f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
+            f"/_matrix/federation/v1/media/download/{content_uri.media_id}",
         )
         self.pump()
         self.assertEqual(200, channel.code)
@@ -106,7 +103,8 @@ class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase
 
         # check that the text file and expected value exist
         found_file = any(
-            "\r\nContent-Type: text/plain\r\n\r\nfile_to_stream" in field
+            "\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_to_stream"
+            in field
             for field in stripped
         )
         self.assertTrue(found_file)
@@ -124,7 +122,7 @@ class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase
         # test with an image file
         channel = self.make_signed_federation_request(
             "GET",
-            f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
+            f"/_matrix/federation/v1/media/download/{content_uri.media_id}",
         )
         self.pump()
         self.assertEqual(200, channel.code)
@@ -150,24 +148,111 @@ class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase
         found_file = any(SMALL_PNG in field for field in stripped_bytes)
         self.assertTrue(found_file)
 
-    @override_config(
-        {"experimental_features": {"msc3916_authenticated_media_enabled": False}}
-    )
-    def test_disable_config(self) -> None:
-        content = io.BytesIO(b"file_to_stream")
+
+class FederationThumbnailTest(unittest.FederatingHomeserverTestCase):
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        super().prepare(reactor, clock, hs)
+        self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-")
+        self.addCleanup(shutil.rmtree, self.test_dir)
+        self.primary_base_path = os.path.join(self.test_dir, "primary")
+        self.secondary_base_path = os.path.join(self.test_dir, "secondary")
+
+        hs.config.media.media_store_path = self.primary_base_path
+
+        storage_providers = [
+            StorageProviderWrapper(
+                FileStorageProviderBackend(hs, self.secondary_base_path),
+                store_local=True,
+                store_remote=False,
+                store_synchronous=True,
+            )
+        ]
+
+        self.filepaths = MediaFilePaths(self.primary_base_path)
+        self.media_storage = MediaStorage(
+            hs, self.primary_base_path, self.filepaths, storage_providers
+        )
+        self.media_repo = hs.get_media_repository()
+
+    def test_thumbnail_download_scaled(self) -> None:
+        content = io.BytesIO(small_png.data)
         content_uri = self.get_success(
             self.media_repo.create_content(
-                "text/plain",
-                "test_upload",
+                "image/png",
+                "test_png_thumbnail",
                 content,
-                46,
+                67,
                 UserID.from_string("@user_id:whatever.org"),
             )
         )
+        # test with an image file
         channel = self.make_signed_federation_request(
             "GET",
-            f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
+            f"/_matrix/federation/v1/media/thumbnail/{content_uri.media_id}?width=32&height=32&method=scale",
         )
         self.pump()
-        self.assertEqual(404, channel.code)
-        self.assertEqual(channel.json_body.get("errcode"), "M_UNRECOGNIZED")
+        self.assertEqual(200, channel.code)
+
+        content_type = channel.headers.getRawHeaders("content-type")
+        assert content_type is not None
+        assert "multipart/mixed" in content_type[0]
+        assert "boundary" in content_type[0]
+
+        # extract boundary
+        boundary = content_type[0].split("boundary=")[1]
+        # split on boundary and check that json field and expected value exist
+        body = channel.result.get("body")
+        assert body is not None
+        stripped_bytes = body.split(b"\r\n" + b"--" + boundary.encode("utf-8"))
+        found_json = any(
+            b"\r\nContent-Type: application/json\r\n\r\n{}" in field
+            for field in stripped_bytes
+        )
+        self.assertTrue(found_json)
+
+        # check that the png file exists and matches the expected scaled bytes
+        found_file = any(small_png.expected_scaled in field for field in stripped_bytes)
+        self.assertTrue(found_file)
+
+    def test_thumbnail_download_cropped(self) -> None:
+        content = io.BytesIO(small_png.data)
+        content_uri = self.get_success(
+            self.media_repo.create_content(
+                "image/png",
+                "test_png_thumbnail",
+                content,
+                67,
+                UserID.from_string("@user_id:whatever.org"),
+            )
+        )
+        # test with an image file
+        channel = self.make_signed_federation_request(
+            "GET",
+            f"/_matrix/federation/v1/media/thumbnail/{content_uri.media_id}?width=32&height=32&method=crop",
+        )
+        self.pump()
+        self.assertEqual(200, channel.code)
+
+        content_type = channel.headers.getRawHeaders("content-type")
+        assert content_type is not None
+        assert "multipart/mixed" in content_type[0]
+        assert "boundary" in content_type[0]
+
+        # extract boundary
+        boundary = content_type[0].split("boundary=")[1]
+        # split on boundary and check that json field and expected value exist
+        body = channel.result.get("body")
+        assert body is not None
+        stripped_bytes = body.split(b"\r\n" + b"--" + boundary.encode("utf-8"))
+        found_json = any(
+            b"\r\nContent-Type: application/json\r\n\r\n{}" in field
+            for field in stripped_bytes
+        )
+        self.assertTrue(found_json)
+
+        # check that the png file exists and matches the expected cropped bytes
+        found_file = any(
+            small_png.expected_cropped in field for field in stripped_bytes
+        )
+        self.assertTrue(found_file)
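[Editor's aside: the multipart assertions in these tests share a pattern; below is a minimal sketch of the same boundary-splitting logic factored out. The helper name is hypothetical and not from this diff:

    # Hypothetical helper mirroring the tests above: split a multipart/mixed
    # federation media response body into its raw parts.
    def split_multipart(content_type: str, body: bytes) -> list:
        assert "multipart/mixed" in content_type and "boundary=" in content_type
        boundary = content_type.split("boundary=")[1]
        # Parts are delimited by CRLF followed by "--" plus the boundary.
        return body.split(b"\r\n--" + boundary.encode("utf-8"))
]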
diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py
index c698771a06..d7b54383db 100644
--- a/tests/handlers/test_deactivate_account.py
+++ b/tests/handlers/test_deactivate_account.py
@@ -461,3 +461,25 @@ class DeactivateAccountTestCase(HomeserverTestCase):
         # Validate that there is no displayname in any of the events
         for event in events:
             self.assertTrue("displayname" not in event.content)
+
+    def test_rooms_forgotten_upon_deactivation(self) -> None:
+        """
+        Tests that the user 'forgets' the rooms they left upon deactivation.
+        """
+        # Create a room
+        room_id = self.helper.create_room_as(
+            self.user,
+            is_public=True,
+            tok=self.token,
+        )
+
+        # Deactivate the account
+        self._deactivate_my_account()
+
+        # Get all of the user's forgotten rooms
+        forgotten_rooms = self.get_success(
+            self._store.get_forgotten_rooms_for_user(self.user)
+        )
+
+        # Validate that the created room is forgotten
+        self.assertTrue(room_id in forgotten_rooms)
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
index 8dd4521b18..5f83b637c5 100644
--- a/tests/handlers/test_sliding_sync.py
+++ b/tests/handlers/test_sliding_sync.py
@@ -18,28 +18,567 @@
 #
 #
 import logging
+from copy import deepcopy
+from typing import Optional
 from unittest.mock import patch
 
 from parameterized import parameterized
 
 from twisted.test.proto_helpers import MemoryReactor
 
-from synapse.api.constants import AccountDataTypes, EventTypes, JoinRules, Membership
+from synapse.api.constants import (
+    AccountDataTypes,
+    EventContentFields,
+    EventTypes,
+    JoinRules,
+    Membership,
+    RoomTypes,
+)
 from synapse.api.room_versions import RoomVersions
-from synapse.handlers.sliding_sync import SlidingSyncConfig
+from synapse.handlers.sliding_sync import RoomSyncConfig, StateValues
 from synapse.rest import admin
 from synapse.rest.client import knock, login, room
 from synapse.server import HomeServer
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
 from synapse.types import JsonDict, UserID
+from synapse.types.handlers import SlidingSyncConfig
 from synapse.util import Clock
 
 from tests.replication._base import BaseMultiWorkerStreamTestCase
-from tests.unittest import HomeserverTestCase
+from tests.unittest import HomeserverTestCase, TestCase
 
 logger = logging.getLogger(__name__)
 
 
+class RoomSyncConfigTestCase(TestCase):
+    def _assert_room_config_equal(
+        self,
+        actual: RoomSyncConfig,
+        expected: RoomSyncConfig,
+        message_prefix: Optional[str] = None,
+    ) -> None:
+        self.assertEqual(actual.timeline_limit, expected.timeline_limit, message_prefix)
+
+        # `self.assertEqual(...)` would also catch differences, but its truncated
+        # output is almost impossible to read; ordering doesn't matter here anyway,
+        # so `assertCountEqual(...)` is clearer.
+        self.assertCountEqual(
+            actual.required_state_map, expected.required_state_map, message_prefix
+        )
+        for event_type, expected_state_keys in expected.required_state_map.items():
+            self.assertCountEqual(
+                actual.required_state_map[event_type],
+                expected_state_keys,
+                f"{message_prefix}: Mismatch for {event_type}",
+            )
+
+    @parameterized.expand(
+        [
+            (
+                "from_list_config",
+                """
+                Test that we can convert a `SlidingSyncConfig.SlidingSyncList` to a
+                `RoomSyncConfig`.
+                """,
+                # Input
+                SlidingSyncConfig.SlidingSyncList(
+                    timeline_limit=10,
+                    required_state=[
+                        (EventTypes.Name, ""),
+                        (EventTypes.Member, "@foo"),
+                        (EventTypes.Member, "@bar"),
+                        (EventTypes.Member, "@baz"),
+                        (EventTypes.CanonicalAlias, ""),
+                    ],
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Name: {""},
+                        EventTypes.Member: {
+                            "@foo",
+                            "@bar",
+                            "@baz",
+                        },
+                        EventTypes.CanonicalAlias: {""},
+                    },
+                ),
+            ),
+            (
+                "from_room_subscription",
+                """
+                Test that we can convert a `SlidingSyncConfig.RoomSubscription` to a
+                `RoomSyncConfig`.
+                """,
+                # Input
+                SlidingSyncConfig.RoomSubscription(
+                    timeline_limit=10,
+                    required_state=[
+                        (EventTypes.Name, ""),
+                        (EventTypes.Member, "@foo"),
+                        (EventTypes.Member, "@bar"),
+                        (EventTypes.Member, "@baz"),
+                        (EventTypes.CanonicalAlias, ""),
+                    ],
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Name: {""},
+                        EventTypes.Member: {
+                            "@foo",
+                            "@bar",
+                            "@baz",
+                        },
+                        EventTypes.CanonicalAlias: {""},
+                    },
+                ),
+            ),
+            (
+                "wildcard",
+                """
+                Test that a wildcard (*) for both the `event_type` and `state_key` will override
+                all other values.
+
+                Note: MSC3575 describes different behavior to how we're handling things here but
+                since it's not wrong to return more state than requested (`required_state` is
+                just the minimum requested), it doesn't matter if we include things that the
+                client wanted excluded. This complexity is also under scrutiny, see
+                https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1185109050
+
+                > One unique exception is when you request all state events via ["*", "*"]. When used,
+                > all state events are returned by default, and additional entries FILTER OUT the returned set
+                > of state events. These additional entries cannot use '*' themselves.
+                > For example, ["*", "*"], ["m.room.member", "@alice:example.com"] will _exclude_ every m.room.member
+                > event _except_ for @alice:example.com, and include every other state event.
+                > In addition, ["*", "*"], ["m.space.child", "*"] is an error, the m.space.child filter is not
+                > required as it would have been returned anyway.
+                >
+                > -- MSC3575 (https://github.com/matrix-org/matrix-spec-proposals/pull/3575)
+                """,
+                # Input
+                SlidingSyncConfig.SlidingSyncList(
+                    timeline_limit=10,
+                    required_state=[
+                        (EventTypes.Name, ""),
+                        (StateValues.WILDCARD, StateValues.WILDCARD),
+                        (EventTypes.Member, "@foo"),
+                        (EventTypes.CanonicalAlias, ""),
+                    ],
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        StateValues.WILDCARD: {StateValues.WILDCARD},
+                    },
+                ),
+            ),
+            (
+                "wildcard_type",
+                """
+                Test that a wildcard (*) as an `event_type` will override all other values for the
+                same `state_key`.
+                """,
+                # Input
+                SlidingSyncConfig.SlidingSyncList(
+                    timeline_limit=10,
+                    required_state=[
+                        (EventTypes.Name, ""),
+                        (StateValues.WILDCARD, ""),
+                        (EventTypes.Member, "@foo"),
+                        (EventTypes.CanonicalAlias, ""),
+                    ],
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        StateValues.WILDCARD: {""},
+                        EventTypes.Member: {"@foo"},
+                    },
+                ),
+            ),
+            (
+                "multiple_wildcard_type",
+                """
+                Test that multiple wildcard (*) entries as an `event_type` will override all
+                other values for the same `state_key`.
+                """,
+                # Input
+                SlidingSyncConfig.SlidingSyncList(
+                    timeline_limit=10,
+                    required_state=[
+                        (EventTypes.Name, ""),
+                        (StateValues.WILDCARD, ""),
+                        (EventTypes.Member, "@foo"),
+                        (StateValues.WILDCARD, "@foo"),
+                        ("org.matrix.personal_count", "@foo"),
+                        (EventTypes.Member, "@bar"),
+                        (EventTypes.CanonicalAlias, ""),
+                    ],
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        StateValues.WILDCARD: {
+                            "",
+                            "@foo",
+                        },
+                        EventTypes.Member: {"@bar"},
+                    },
+                ),
+            ),
+            (
+                "wildcard_state_key",
+                """
+                Test that a wildcard (*) as a `state_key` will override all other values for the
+                same `event_type`.
+                """,
+                # Input
+                SlidingSyncConfig.SlidingSyncList(
+                    timeline_limit=10,
+                    required_state=[
+                        (EventTypes.Name, ""),
+                        (EventTypes.Member, "@foo"),
+                        (EventTypes.Member, StateValues.WILDCARD),
+                        (EventTypes.Member, "@bar"),
+                        (EventTypes.Member, StateValues.LAZY),
+                        (EventTypes.Member, "@baz"),
+                        (EventTypes.CanonicalAlias, ""),
+                    ],
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Name: {""},
+                        EventTypes.Member: {
+                            StateValues.WILDCARD,
+                        },
+                        EventTypes.CanonicalAlias: {""},
+                    },
+                ),
+            ),
+            (
+                "wildcard_merge",
+                """
+                Test that a wildcard (*) entry for the `event_type` and another one for the
+                `state_key` will play together.
+                """,
+                # Input
+                SlidingSyncConfig.SlidingSyncList(
+                    timeline_limit=10,
+                    required_state=[
+                        (EventTypes.Name, ""),
+                        (StateValues.WILDCARD, ""),
+                        (EventTypes.Member, "@foo"),
+                        (EventTypes.Member, StateValues.WILDCARD),
+                        (EventTypes.Member, "@bar"),
+                        (EventTypes.CanonicalAlias, ""),
+                    ],
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        StateValues.WILDCARD: {""},
+                        EventTypes.Member: {StateValues.WILDCARD},
+                    },
+                ),
+            ),
+            (
+                "wildcard_merge2",
+                """
+                Test that an all-wildcard ("*", "*") entry will override any other
+                values (including other wildcards).
+                """,
+                # Input
+                SlidingSyncConfig.SlidingSyncList(
+                    timeline_limit=10,
+                    required_state=[
+                        (EventTypes.Name, ""),
+                        (StateValues.WILDCARD, ""),
+                        (EventTypes.Member, StateValues.WILDCARD),
+                        (EventTypes.Member, "@foo"),
+                        # One of these should take precedence over everything else
+                        (StateValues.WILDCARD, StateValues.WILDCARD),
+                        (StateValues.WILDCARD, StateValues.WILDCARD),
+                        (EventTypes.CanonicalAlias, ""),
+                    ],
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        StateValues.WILDCARD: {StateValues.WILDCARD},
+                    },
+                ),
+            ),
+            (
+                "lazy_members",
+                """
+                `$LAZY` room members should just be another additional key next to other
+                explicit keys. We will unroll the special `$LAZY` meaning later.
+                """,
+                # Input
+                SlidingSyncConfig.SlidingSyncList(
+                    timeline_limit=10,
+                    required_state=[
+                        (EventTypes.Name, ""),
+                        (EventTypes.Member, "@foo"),
+                        (EventTypes.Member, "@bar"),
+                        (EventTypes.Member, StateValues.LAZY),
+                        (EventTypes.Member, "@baz"),
+                        (EventTypes.CanonicalAlias, ""),
+                    ],
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Name: {""},
+                        EventTypes.Member: {
+                            "@foo",
+                            "@bar",
+                            StateValues.LAZY,
+                            "@baz",
+                        },
+                        EventTypes.CanonicalAlias: {""},
+                    },
+                ),
+            ),
+        ]
+    )
+    def test_from_room_config(
+        self,
+        _test_label: str,
+        _test_description: str,
+        room_params: SlidingSyncConfig.CommonRoomParameters,
+        expected_room_sync_config: RoomSyncConfig,
+    ) -> None:
+        """
+        Test that `RoomSyncConfig.from_room_config(room_params)` results in the `expected_room_sync_config`.
+        """
+        room_sync_config = RoomSyncConfig.from_room_config(room_params)
+
+        self._assert_room_config_equal(
+            room_sync_config,
+            expected_room_sync_config,
+        )
+
+    @parameterized.expand(
+        [
+            (
+                "no_direct_overlap",
+                # A
+                RoomSyncConfig(
+                    timeline_limit=9,
+                    required_state_map={
+                        EventTypes.Name: {""},
+                        EventTypes.Member: {
+                            "@foo",
+                            "@bar",
+                        },
+                    },
+                ),
+                # B
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Member: {
+                            StateValues.LAZY,
+                            "@baz",
+                        },
+                        EventTypes.CanonicalAlias: {""},
+                    },
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Name: {""},
+                        EventTypes.Member: {
+                            "@foo",
+                            "@bar",
+                            StateValues.LAZY,
+                            "@baz",
+                        },
+                        EventTypes.CanonicalAlias: {""},
+                    },
+                ),
+            ),
+            (
+                "wildcard_overlap",
+                # A
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        StateValues.WILDCARD: {StateValues.WILDCARD},
+                    },
+                ),
+                # B
+                RoomSyncConfig(
+                    timeline_limit=9,
+                    required_state_map={
+                        EventTypes.Dummy: {StateValues.WILDCARD},
+                        StateValues.WILDCARD: {"@bar"},
+                        EventTypes.Member: {"@foo"},
+                    },
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        StateValues.WILDCARD: {StateValues.WILDCARD},
+                    },
+                ),
+            ),
+            (
+                "state_type_wildcard_overlap",
+                # A
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Dummy: {"dummy"},
+                        StateValues.WILDCARD: {
+                            "",
+                            "@foo",
+                        },
+                        EventTypes.Member: {"@bar"},
+                    },
+                ),
+                # B
+                RoomSyncConfig(
+                    timeline_limit=9,
+                    required_state_map={
+                        EventTypes.Dummy: {"dummy2"},
+                        StateValues.WILDCARD: {
+                            "",
+                            "@bar",
+                        },
+                        EventTypes.Member: {"@foo"},
+                    },
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Dummy: {
+                            "dummy",
+                            "dummy2",
+                        },
+                        StateValues.WILDCARD: {
+                            "",
+                            "@foo",
+                            "@bar",
+                        },
+                    },
+                ),
+            ),
+            (
+                "state_key_wildcard_overlap",
+                # A
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Dummy: {"dummy"},
+                        EventTypes.Member: {StateValues.WILDCARD},
+                        "org.matrix.flowers": {StateValues.WILDCARD},
+                    },
+                ),
+                # B
+                RoomSyncConfig(
+                    timeline_limit=9,
+                    required_state_map={
+                        EventTypes.Dummy: {StateValues.WILDCARD},
+                        EventTypes.Member: {StateValues.WILDCARD},
+                        "org.matrix.flowers": {"tulips"},
+                    },
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Dummy: {StateValues.WILDCARD},
+                        EventTypes.Member: {StateValues.WILDCARD},
+                        "org.matrix.flowers": {StateValues.WILDCARD},
+                    },
+                ),
+            ),
+            (
+                "state_type_and_state_key_wildcard_merge",
+                # A
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Dummy: {"dummy"},
+                        StateValues.WILDCARD: {
+                            "",
+                            "@foo",
+                        },
+                        EventTypes.Member: {"@bar"},
+                    },
+                ),
+                # B
+                RoomSyncConfig(
+                    timeline_limit=9,
+                    required_state_map={
+                        EventTypes.Dummy: {"dummy2"},
+                        StateValues.WILDCARD: {""},
+                        EventTypes.Member: {StateValues.WILDCARD},
+                    },
+                ),
+                # Expected
+                RoomSyncConfig(
+                    timeline_limit=10,
+                    required_state_map={
+                        EventTypes.Dummy: {
+                            "dummy",
+                            "dummy2",
+                        },
+                        StateValues.WILDCARD: {
+                            "",
+                            "@foo",
+                        },
+                        EventTypes.Member: {StateValues.WILDCARD},
+                    },
+                ),
+            ),
+        ]
+    )
+    def test_combine_room_sync_config(
+        self,
+        _test_label: str,
+        a: RoomSyncConfig,
+        b: RoomSyncConfig,
+        expected: RoomSyncConfig,
+    ) -> None:
+        """
+        Combine A into B and B into A to make sure we get the same result.
+        """
+        # Since we're mutating these in place, make a copy for each of our trials
+        room_sync_config_a = deepcopy(a)
+        room_sync_config_b = deepcopy(b)
+
+        # Combine B into A
+        room_sync_config_a.combine_room_sync_config(room_sync_config_b)
+
+        self._assert_room_config_equal(room_sync_config_a, expected, "B into A")
+
+        # Since we're mutating these in place, make a copy for each of our trials
+        room_sync_config_a = deepcopy(a)
+        room_sync_config_b = deepcopy(b)
+
+        # Combine A into B
+        room_sync_config_b.combine_room_sync_config(room_sync_config_a)
+
+        self._assert_room_config_equal(room_sync_config_b, expected, "A into B")
+
+
 class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
     """
     Tests Sliding Sync handler `get_sync_room_ids_for_user()` to make sure it returns
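[Editor's aside: the expected outputs in `test_combine_room_sync_config` above pin down the merge semantics well enough to sketch them. The following is a reconstruction inferred from those test cases, not the actual `combine_room_sync_config` implementation:

    from typing import Dict, Set

    WILDCARD = "*"

    # Reconstructed merge behaviour: union the maps, let ("*", "*") win
    # outright, let a per-type "*" absorb that type's keys, and let
    # ("*", key) absorb the same key under any concrete type.
    def combine(a: Dict[str, Set[str]], b: Dict[str, Set[str]]) -> Dict[str, Set[str]]:
        merged: Dict[str, Set[str]] = {}
        for source in (a, b):
            for ev_type, keys in source.items():
                merged.setdefault(ev_type, set()).update(keys)
        if WILDCARD in merged.get(WILDCARD, set()):
            return {WILDCARD: {WILDCARD}}
        covered = merged.get(WILDCARD, set())
        result: Dict[str, Set[str]] = {}
        for ev_type, keys in merged.items():
            if WILDCARD in keys:
                result[ev_type] = {WILDCARD}
            elif ev_type == WILDCARD:
                result[ev_type] = keys
            else:
                remaining = keys - covered
                if remaining:
                    result[ev_type] = remaining
        return result

(The combined `timeline_limit` is simply the max of the two, per the expected values above.)]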
@@ -63,6 +602,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
         self.store = self.hs.get_datastores().main
         self.event_sources = hs.get_event_sources()
+        self.storage_controllers = hs.get_storage_controllers()
 
     def test_no_rooms(self) -> None:
         """
@@ -90,10 +630,13 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
 
         before_room_token = self.event_sources.get_current_token()
 
-        room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response = self.helper.join(room_id, user1_id, tok=user1_tok)
 
         after_room_token = self.event_sources.get_current_token()
 
@@ -106,6 +649,15 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         )
 
         self.assertEqual(room_id_results.keys(), {room_id})
+        # It should be pointing to the join event (latest membership event in the
+        # from/to range)
+        self.assertEqual(
+            room_id_results[room_id].event_id,
+            join_response["event_id"],
+        )
+        # We should be considered `newly_joined` because we joined during the token
+        # range
+        self.assertEqual(room_id_results[room_id].newly_joined, True)
 
     def test_get_already_joined_room(self) -> None:
         """
@@ -113,8 +665,11 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
 
-        room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response = self.helper.join(room_id, user1_id, tok=user1_tok)
 
         after_room_token = self.event_sources.get_current_token()
 
@@ -127,6 +682,14 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         )
 
         self.assertEqual(room_id_results.keys(), {room_id})
+        # It should be pointing to the join event (latest membership event in the
+        # from/to range)
+        self.assertEqual(
+            room_id_results[room_id].event_id,
+            join_response["event_id"],
+        )
+        # We should *NOT* be `newly_joined` because we joined before the token range
+        self.assertEqual(room_id_results[room_id].newly_joined, False)
 
     def test_get_invited_banned_knocked_room(self) -> None:
         """
@@ -142,14 +705,18 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # Setup the invited room (user2 invites user1 to the room)
         invited_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.invite(invited_room_id, targ=user1_id, tok=user2_tok)
+        invite_response = self.helper.invite(
+            invited_room_id, targ=user1_id, tok=user2_tok
+        )
 
         # Setup the ban room (user2 bans user1 from the room)
         ban_room_id = self.helper.create_room_as(
             user2_id, tok=user2_tok, is_public=True
         )
         self.helper.join(ban_room_id, user1_id, tok=user1_tok)
-        self.helper.ban(ban_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
+        ban_response = self.helper.ban(
+            ban_room_id, src=user2_id, targ=user1_id, tok=user2_tok
+        )
 
         # Setup the knock room (user1 knocks on the room)
         knock_room_id = self.helper.create_room_as(
@@ -162,13 +729,19 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             tok=user2_tok,
         )
         # User1 knocks on the room
-        channel = self.make_request(
+        knock_channel = self.make_request(
             "POST",
             "/_matrix/client/r0/knock/%s" % (knock_room_id,),
             b"{}",
             user1_tok,
         )
-        self.assertEqual(channel.code, 200, channel.result)
+        self.assertEqual(knock_channel.code, 200, knock_channel.result)
+        knock_room_membership_state_event = self.get_success(
+            self.storage_controllers.state.get_current_state_event(
+                knock_room_id, EventTypes.Member, user1_id
+            )
+        )
+        assert knock_room_membership_state_event is not None
 
         after_room_token = self.event_sources.get_current_token()
 
@@ -189,6 +762,25 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 knock_room_id,
             },
         )
+        # It should be pointing to the respective membership event (latest
+        # membership event in the from/to range)
+        self.assertEqual(
+            room_id_results[invited_room_id].event_id,
+            invite_response["event_id"],
+        )
+        self.assertEqual(
+            room_id_results[ban_room_id].event_id,
+            ban_response["event_id"],
+        )
+        self.assertEqual(
+            room_id_results[knock_room_id].event_id,
+            knock_room_membership_state_event.event_id,
+        )
+        # We should *NOT* be `newly_joined` because we were not joined at the time
+        # of the `to_token`.
+        self.assertEqual(room_id_results[invited_room_id].newly_joined, False)
+        self.assertEqual(room_id_results[ban_room_id].newly_joined, False)
+        self.assertEqual(room_id_results[knock_room_id].newly_joined, False)
 
     def test_get_kicked_room(self) -> None:
         """
@@ -206,7 +798,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         )
         self.helper.join(kick_room_id, user1_id, tok=user1_tok)
         # Kick user1 from the room
-        self.helper.change_membership(
+        kick_response = self.helper.change_membership(
             room=kick_room_id,
             src=user2_id,
             targ=user1_id,
@@ -229,6 +821,14 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # The kicked room should show up
         self.assertEqual(room_id_results.keys(), {kick_room_id})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[kick_room_id].event_id,
+            kick_response["event_id"],
+        )
+        # We should *NOT* be `newly_joined` because we were not joined at the time
+        # of the `to_token`.
+        self.assertEqual(room_id_results[kick_room_id].newly_joined, False)
 
     def test_forgotten_rooms(self) -> None:
         """
@@ -329,7 +929,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # Leave during the from_token/to_token range (newly_left)
         room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
-        self.helper.leave(room_id2, user1_id, tok=user1_tok)
+        _leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
 
         after_room2_token = self.event_sources.get_current_token()
 
@@ -343,6 +943,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # Only the newly_left room should show up
         self.assertEqual(room_id_results.keys(), {room_id2})
+        # It should be pointing to the latest membership event in the from/to range,
+        # but the `event_id` is `None` because our leave caused the server itself to
+        # leave the room since no other local users were in it (a quirk of the
+        # `current_state_delta_stream` table that we source things from).
+        self.assertEqual(
+            room_id_results[room_id2].event_id,
+            None,  # _leave_response2["event_id"],
+        )
+        # We should *NOT* be `newly_joined` because we are instead `newly_left`
+        self.assertEqual(room_id_results[room_id2].newly_joined, False)
 
     def test_no_joins_after_to_token(self) -> None:
         """
@@ -351,16 +961,19 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
 
         before_room1_token = self.event_sources.get_current_token()
 
-        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
 
         after_room1_token = self.event_sources.get_current_token()
 
-        # Room join after after our `to_token` shouldn't show up
-        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
-        _ = room_id2
+        # Room join after our `to_token` shouldn't show up
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
 
         room_id_results = self.get_success(
             self.sliding_sync_handler.get_sync_room_ids_for_user(
@@ -371,6 +984,13 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         )
 
         self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            join_response1["event_id"],
+        )
+        # We should be `newly_joined` because we joined during the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, True)
 
     def test_join_during_range_and_left_room_after_to_token(self) -> None:
         """
@@ -380,15 +1000,18 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
 
         before_room1_token = self.event_sources.get_current_token()
 
-        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
 
         after_room1_token = self.event_sources.get_current_token()
 
         # Leave the room after we already have our tokens
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
         room_id_results = self.get_success(
             self.sliding_sync_handler.get_sync_room_ids_for_user(
@@ -401,6 +1024,20 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # We should still see the room because we were joined during the
         # from_token/to_token time period.
         self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            join_response["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response": join_response["event_id"],
+                    "leave_response": leave_response["event_id"],
+                }
+            ),
+        )
+        # We should be `newly_joined` because we joined during the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, True)
 
     def test_join_before_range_and_left_room_after_to_token(self) -> None:
         """
@@ -410,13 +1047,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
 
-        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
 
         after_room1_token = self.event_sources.get_current_token()
 
         # Leave the room after we already have our tokens
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
         room_id_results = self.get_success(
             self.sliding_sync_handler.get_sync_room_ids_for_user(
@@ -428,6 +1068,20 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # We should still see the room because we were joined before the `from_token`
         self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            join_response["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response": join_response["event_id"],
+                    "leave_response": leave_response["event_id"],
+                }
+            ),
+        )
+        # We should *NOT* be `newly_joined` because we joined before the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, False)
 
     def test_kicked_before_range_and_left_after_to_token(self) -> None:
         """
@@ -444,9 +1098,9 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         kick_room_id = self.helper.create_room_as(
             user2_id, tok=user2_tok, is_public=True
         )
-        self.helper.join(kick_room_id, user1_id, tok=user1_tok)
+        join_response1 = self.helper.join(kick_room_id, user1_id, tok=user1_tok)
         # Kick user1 from the room
-        self.helper.change_membership(
+        kick_response = self.helper.change_membership(
             room=kick_room_id,
             src=user2_id,
             targ=user1_id,
@@ -463,8 +1117,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         #
         # We have to join before we can leave (leave -> leave isn't a valid transition
         # or at least it doesn't work in Synapse, 403 forbidden)
-        self.helper.join(kick_room_id, user1_id, tok=user1_tok)
-        self.helper.leave(kick_room_id, user1_id, tok=user1_tok)
+        join_response2 = self.helper.join(kick_room_id, user1_id, tok=user1_tok)
+        leave_response = self.helper.leave(kick_room_id, user1_id, tok=user1_tok)
 
         room_id_results = self.get_success(
             self.sliding_sync_handler.get_sync_room_ids_for_user(
@@ -476,6 +1130,22 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # The kicked room should still show up
         self.assertEqual(room_id_results.keys(), {kick_room_id})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[kick_room_id].event_id,
+            kick_response["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response1": join_response1["event_id"],
+                    "kick_response": kick_response["event_id"],
+                    "join_response2": join_response2["event_id"],
+                    "leave_response": leave_response["event_id"],
+                }
+            ),
+        )
+        # We should *NOT* be `newly_joined` because we were kicked
+        self.assertEqual(room_id_results[kick_room_id].newly_joined, False)
 
     def test_newly_left_during_range_and_join_leave_after_to_token(self) -> None:
         """
@@ -494,14 +1164,14 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # leave and can still re-join.
         room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
         # Join and leave the room during the from/to range
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
         after_room1_token = self.event_sources.get_current_token()
 
         # Join and leave the room after we already have our tokens
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
         room_id_results = self.get_success(
             self.sliding_sync_handler.get_sync_room_ids_for_user(
@@ -513,6 +1183,22 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # Room should still show up because it's newly_left during the from/to range
         self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            leave_response1["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response1": join_response1["event_id"],
+                    "leave_response1": leave_response1["event_id"],
+                    "join_response2": join_response2["event_id"],
+                    "leave_response2": leave_response2["event_id"],
+                }
+            ),
+        )
+        # We should *NOT* be `newly_joined` because we left during the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, False)
 
     def test_newly_left_during_range_and_join_after_to_token(self) -> None:
         """
@@ -531,13 +1217,13 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # leave and can still re-join.
         room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
         # Join and leave the room during the from/to range
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
         after_room1_token = self.event_sources.get_current_token()
 
         # Join the room after we already have our tokens
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
 
         room_id_results = self.get_success(
             self.sliding_sync_handler.get_sync_room_ids_for_user(
@@ -549,11 +1235,26 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # Room should still show up because it's newly_left during the from/to range
         self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            leave_response1["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response1": join_response1["event_id"],
+                    "leave_response1": leave_response1["event_id"],
+                    "join_response2": join_response2["event_id"],
+                }
+            ),
+        )
+        # We should *NOT* be `newly_joined` because we left during the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, False)
 
     def test_no_from_token(self) -> None:
         """
         Test that if we don't provide a `from_token`, we get all the rooms that we were
-        joined to up to the `to_token`.
+        joined up to the `to_token`.
 
         Providing `from_token` only really has the effect that it adds `newly_left`
         rooms to the response.
@@ -569,7 +1270,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
 
         # Join room1
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
 
         # Join and leave the room2 before the `to_token`
         self.helper.join(room_id2, user1_id, tok=user1_tok)
@@ -590,6 +1291,14 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # Only rooms we were joined to before the `to_token` should show up
         self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            join_response1["event_id"],
+        )
+        # We should *NOT* be `newly_joined` because there is no `from_token` to
+        # define a "live" range to compare against
+        self.assertEqual(room_id_results[room_id1].newly_joined, False)
 
     def test_from_token_ahead_of_to_token(self) -> None:
         """
@@ -609,7 +1318,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
 
         # Join room1 before `before_room_token`
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
 
         # Join and leave the room2 before `before_room_token`
         self.helper.join(room_id2, user1_id, tok=user1_tok)
@@ -651,6 +1360,13 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # There won't be any newly_left rooms because the `from_token` is ahead of the
         # `to_token` and that range will give no membership changes to check.
         self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            join_response1["event_id"],
+        )
+        # We should *NOT* be `newly_joined` because we joined `room1` before either of the tokens
+        self.assertEqual(room_id_results[room_id1].newly_joined, False)
 
     def test_leave_before_range_and_join_leave_after_to_token(self) -> None:
         """
@@ -741,16 +1457,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # leave and can still re-join.
         room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
         # Join, leave, join back to the room before the from/to range
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
 
         after_room1_token = self.event_sources.get_current_token()
 
         # Leave and Join the room multiple times after we already have our tokens
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
         room_id_results = self.get_success(
             self.sliding_sync_handler.get_sync_room_ids_for_user(
@@ -762,6 +1478,24 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # Room should show up because it was newly_left and joined during the from/to range
         self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            join_response2["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response1": join_response1["event_id"],
+                    "leave_response1": leave_response1["event_id"],
+                    "join_response2": join_response2["event_id"],
+                    "leave_response2": leave_response2["event_id"],
+                    "join_response3": join_response3["event_id"],
+                    "leave_response3": leave_response3["event_id"],
+                }
+            ),
+        )
+        # We should be `newly_joined` because we joined during the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, True)
 
     def test_join_leave_multiple_times_before_range_and_after_to_token(
         self,
@@ -781,16 +1515,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # leave and can still re-join.
         room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
         # Join, leave, join back to the room before the from/to range
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
 
         after_room1_token = self.event_sources.get_current_token()
 
         # Leave and Join the room multiple times after we already have our tokens
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
         room_id_results = self.get_success(
             self.sliding_sync_handler.get_sync_room_ids_for_user(
@@ -802,6 +1536,24 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # Room should show up because we were joined before the from/to range
         self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            join_response2["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response1": join_response1["event_id"],
+                    "leave_response1": leave_response1["event_id"],
+                    "join_response2": join_response2["event_id"],
+                    "leave_response2": leave_response2["event_id"],
+                    "join_response3": join_response3["event_id"],
+                    "leave_response3": leave_response3["event_id"],
+                }
+            ),
+        )
+        # We should *NOT* be `newly_joined` because we joined before the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, False)
 
     def test_invite_before_range_and_join_leave_after_to_token(
         self,
@@ -821,24 +1573,495 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
 
         # Invited to the room before the token
-        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+        invite_response = self.helper.invite(
+            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+        )
 
         after_room1_token = self.event_sources.get_current_token()
 
         # Join and leave the room after we already have our tokens
+        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+        room_id_results = self.get_success(
+            self.sliding_sync_handler.get_sync_room_ids_for_user(
+                UserID.from_string(user1_id),
+                from_token=after_room1_token,
+                to_token=after_room1_token,
+            )
+        )
+
+        # Room should show up because we were invited before the from/to range
+        self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            invite_response["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "invite_response": invite_response["event_id"],
+                    "join_respsonse": join_respsonse["event_id"],
+                    "leave_response": leave_response["event_id"],
+                }
+            ),
+        )
+        # We should *NOT* be `newly_joined` because we were only invited before the
+        # token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+
+    def test_join_and_display_name_changes_in_token_range(
+        self,
+    ) -> None:
+        """
+        Test that we point to the correct membership event within the from/to range even
+        if there are multiple `join` membership events in a row indicating
+        `displayname`/`avatar_url` updates.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        # We create the room with user2 so the room isn't left with no members when we
+        # leave and can still re-join.
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        # Update the displayname during the token range
+        displayname_change_during_token_range_response = self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.Member,
+            state_key=user1_id,
+            body={
+                "membership": Membership.JOIN,
+                "displayname": "displayname during token range",
+            },
+            tok=user1_tok,
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Update the displayname after the token range
+        displayname_change_after_token_range_response = self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.Member,
+            state_key=user1_id,
+            body={
+                "membership": Membership.JOIN,
+                "displayname": "displayname after token range",
+            },
+            tok=user1_tok,
+        )
+
+        room_id_results = self.get_success(
+            self.sliding_sync_handler.get_sync_room_ids_for_user(
+                UserID.from_string(user1_id),
+                from_token=before_room1_token,
+                to_token=after_room1_token,
+            )
+        )
+
+        # Room should show up because we were joined during the from/to range
+        self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            displayname_change_during_token_range_response["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response": join_response["event_id"],
+                    "displayname_change_during_token_range_response": displayname_change_during_token_range_response[
+                        "event_id"
+                    ],
+                    "displayname_change_after_token_range_response": displayname_change_after_token_range_response[
+                        "event_id"
+                    ],
+                }
+            ),
+        )
+        # We should be `newly_joined` because we joined during the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, True)
+
+    def test_display_name_changes_in_token_range(
+        self,
+    ) -> None:
+        """
+        Test that we point to the correct membership event within the from/to range even
+        if there are `displayname`/`avatar_url` updates.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # We create the room with user2 so the room isn't left with no members when we
+        # leave and can still re-join.
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Update the displayname during the token range
+        displayname_change_during_token_range_response = self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.Member,
+            state_key=user1_id,
+            body={
+                "membership": Membership.JOIN,
+                "displayname": "displayname during token range",
+            },
+            tok=user1_tok,
+        )
+
+        after_change1_token = self.event_sources.get_current_token()
+
+        room_id_results = self.get_success(
+            self.sliding_sync_handler.get_sync_room_ids_for_user(
+                UserID.from_string(user1_id),
+                from_token=after_room1_token,
+                to_token=after_change1_token,
+            )
+        )
+
+        # Room should show up because we were joined during the from/to range
+        self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            displayname_change_during_token_range_response["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response": join_response["event_id"],
+                    "displayname_change_during_token_range_response": displayname_change_during_token_range_response[
+                        "event_id"
+                    ],
+                }
+            ),
+        )
+        # We should *NOT* be `newly_joined` because we joined before the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+
+    def test_display_name_changes_before_and_after_token_range(
+        self,
+    ) -> None:
+        """
+        Test that we point to the correct membership event even though there are no
+        membership events in the from/to range but there are `displayname`/`avatar_url`
+        changes before/after the token range.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # We create the room with user2 so the room isn't left with no members when we
+        # leave and can still re-join.
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        # Update the displayname before the token range
+        displayname_change_before_token_range_response = self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.Member,
+            state_key=user1_id,
+            body={
+                "membership": Membership.JOIN,
+                "displayname": "displayname during token range",
+            },
+            tok=user1_tok,
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Update the displayname after the token range
+        displayname_change_after_token_range_response = self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.Member,
+            state_key=user1_id,
+            body={
+                "membership": Membership.JOIN,
+                "displayname": "displayname after token range",
+            },
+            tok=user1_tok,
+        )
+
+        room_id_results = self.get_success(
+            self.sliding_sync_handler.get_sync_room_ids_for_user(
+                UserID.from_string(user1_id),
+                from_token=after_room1_token,
+                to_token=after_room1_token,
+            )
+        )
+
+        # Room should show up because we were joined before the from/to range
+        self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            displayname_change_before_token_range_response["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response": join_response["event_id"],
+                    "displayname_change_before_token_range_response": displayname_change_before_token_range_response[
+                        "event_id"
+                    ],
+                    "displayname_change_after_token_range_response": displayname_change_after_token_range_response[
+                        "event_id"
+                    ],
+                }
+            ),
+        )
+        # We should *NOT* be `newly_joined` because we joined before the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+
+    def test_display_name_changes_leave_after_token_range(
+        self,
+    ) -> None:
+        """
+        Test that we point to the correct membership event within the from/to range even
+        if there are multiple `join` membership events in a row indicating
+        `displayname`/`avatar_url` updates and we leave after the `to_token`.
+
+        See condition "1a)" comments in the `get_sync_room_ids_for_user()` method.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        # We create the room with user2 so the room isn't left with no members when we
+        # leave and can still re-join.
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        # Update the displayname during the token range
+        displayname_change_during_token_range_response = self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.Member,
+            state_key=user1_id,
+            body={
+                "membership": Membership.JOIN,
+                "displayname": "displayname during token range",
+            },
+            tok=user1_tok,
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Update the displayname after the token range
+        displayname_change_after_token_range_response = self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.Member,
+            state_key=user1_id,
+            body={
+                "membership": Membership.JOIN,
+                "displayname": "displayname after token range",
+            },
+            tok=user1_tok,
+        )
+
+        # Leave after the token
+        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+        room_id_results = self.get_success(
+            self.sliding_sync_handler.get_sync_room_ids_for_user(
+                UserID.from_string(user1_id),
+                from_token=before_room1_token,
+                to_token=after_room1_token,
+            )
+        )
+
+        # Room should show up because we were joined during the from/to range
+        self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            displayname_change_during_token_range_response["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response": join_response["event_id"],
+                    "displayname_change_during_token_range_response": displayname_change_during_token_range_response[
+                        "event_id"
+                    ],
+                    "displayname_change_after_token_range_response": displayname_change_after_token_range_response[
+                        "event_id"
+                    ],
+                }
+            ),
+        )
+        # We should be `newly_joined` because we joined during the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, True)
+
+    def test_display_name_changes_join_after_token_range(
+        self,
+    ) -> None:
+        """
+        Test that multiple `join` membership events (after the `to_token`) in a row
+        indicating `displayname`/`avatar_url` updates don't affect the results (we
+        joined after the token range so the room shouldn't show up).
+
+        See condition "1b)" comments in the `get_sync_room_ids_for_user()` method.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        # We create the room with user2 so the room isn't left with no members when we
+        # leave and can still re-join.
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        # Update the displayname after the token range
+        self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.Member,
+            state_key=user1_id,
+            body={
+                "membership": Membership.JOIN,
+                "displayname": "displayname after token range",
+            },
+            tok=user1_tok,
+        )
+
+        room_id_results = self.get_success(
+            self.sliding_sync_handler.get_sync_room_ids_for_user(
+                UserID.from_string(user1_id),
+                from_token=before_room1_token,
+                to_token=after_room1_token,
+            )
+        )
+
+        # Room shouldn't show up because we joined after the from/to range
+        self.assertEqual(room_id_results.keys(), set())
+
+    def test_newly_joined_with_leave_join_in_token_range(
+        self,
+    ) -> None:
+        """
+        Test that even though we're joined before the token range, if we leave and join
+        within the token range, it's still counted as `newly_joined`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # We create the room with user2 so the room isn't left with no members when we
+        # leave and can still re-join.
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
         self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Leave and join back during the token range
         self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        after_more_changes_token = self.event_sources.get_current_token()
 
         room_id_results = self.get_success(
             self.sliding_sync_handler.get_sync_room_ids_for_user(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
+                to_token=after_more_changes_token,
+            )
+        )
+
+        # Room should show up because we were joined during the from/to range
+        self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            join_response2["event_id"],
+        )
+        # We should be considered `newly_joined` because there is a non-join membership
+        # event between the start of the token range and our latest join.
+        self.assertEqual(room_id_results[room_id1].newly_joined, True)
+
+    def test_newly_joined_only_joins_during_token_range(
+        self,
+    ) -> None:
+        """
+        Test that a join and more joins caused by display name changes, all during the
+        token range, still count as `newly_joined`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        # We create the room with user2 so the room isn't left with no members when we
+        # leave and can still re-join.
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+        # Join the room during the token range
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        # Update the displayname during the token range (looks like another join)
+        displayname_change_during_token_range_response1 = self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.Member,
+            state_key=user1_id,
+            body={
+                "membership": Membership.JOIN,
+                "displayname": "displayname during token range",
+            },
+            tok=user1_tok,
+        )
+        # Update the displayname during the token range (looks like another join)
+        displayname_change_during_token_range_response2 = self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.Member,
+            state_key=user1_id,
+            body={
+                "membership": Membership.JOIN,
+                "displayname": "displayname during token range",
+            },
+            tok=user1_tok,
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        room_id_results = self.get_success(
+            self.sliding_sync_handler.get_sync_room_ids_for_user(
+                UserID.from_string(user1_id),
+                from_token=before_room1_token,
                 to_token=after_room1_token,
             )
         )
 
-        # Room should show up because we were invited before the from/to range
+        # Room should show up because we joined during the from/to range
         self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            displayname_change_during_token_range_response2["event_id"],
+            "Corresponding map to disambiguate the opaque event IDs: "
+            + str(
+                {
+                    "join_response1": join_response1["event_id"],
+                    "displayname_change_during_token_range_response1": displayname_change_during_token_range_response1[
+                        "event_id"
+                    ],
+                    "displayname_change_during_token_range_response2": displayname_change_during_token_range_response2[
+                        "event_id"
+                    ],
+                }
+            ),
+        )
+        # We should be `newly_joined` because we first joined during the token range
+        self.assertEqual(room_id_results[room_id1].newly_joined, True)
 
     def test_multiple_rooms_are_not_confused(
         self,
@@ -1363,6 +2586,211 @@ class FilterRoomsTestCase(HomeserverTestCase):
 
         self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
 
+    def test_filter_room_types(self) -> None:
+        """
+        Test `filter.room_types` for different room types
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a normal room (no room type)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create a space room
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+
+        # Create an arbitrarily typed room
+        foo_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {
+                    EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz"
+                }
+            },
+        )
+
+        after_rooms_token = self.event_sources.get_current_token()
+
+        # Get the rooms the user should be syncing with
+        sync_room_map = self.get_success(
+            self.sliding_sync_handler.get_sync_room_ids_for_user(
+                UserID.from_string(user1_id),
+                from_token=None,
+                to_token=after_rooms_token,
+            )
+        )
+
+        # Try finding only normal rooms
+        filtered_room_map = self.get_success(
+            self.sliding_sync_handler.filter_rooms(
+                UserID.from_string(user1_id),
+                sync_room_map,
+                SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
+                after_rooms_token,
+            )
+        )
+
+        self.assertEqual(filtered_room_map.keys(), {room_id})
+
+        # Try finding only spaces
+        filtered_room_map = self.get_success(
+            self.sliding_sync_handler.filter_rooms(
+                UserID.from_string(user1_id),
+                sync_room_map,
+                SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
+                after_rooms_token,
+            )
+        )
+
+        self.assertEqual(filtered_room_map.keys(), {space_room_id})
+
+        # Try finding normal rooms and spaces
+        filtered_room_map = self.get_success(
+            self.sliding_sync_handler.filter_rooms(
+                UserID.from_string(user1_id),
+                sync_room_map,
+                SlidingSyncConfig.SlidingSyncList.Filters(
+                    room_types=[None, RoomTypes.SPACE]
+                ),
+                after_rooms_token,
+            )
+        )
+
+        self.assertEqual(filtered_room_map.keys(), {room_id, space_room_id})
+
+        # Try finding an arbitrary room type
+        filtered_room_map = self.get_success(
+            self.sliding_sync_handler.filter_rooms(
+                UserID.from_string(user1_id),
+                sync_room_map,
+                SlidingSyncConfig.SlidingSyncList.Filters(
+                    room_types=["org.matrix.foobarbaz"]
+                ),
+                after_rooms_token,
+            )
+        )
+
+        self.assertEqual(filtered_room_map.keys(), {foo_room_id})
+
+    def test_filter_not_room_types(self) -> None:
+        """
+        Test `filter.not_room_types` for different room types
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a normal room (no room type)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create a space room
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+
+        # Create an arbitrarily typed room
+        foo_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {
+                    EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz"
+                }
+            },
+        )
+
+        after_rooms_token = self.event_sources.get_current_token()
+
+        # Get the rooms the user should be syncing with
+        sync_room_map = self.get_success(
+            self.sliding_sync_handler.get_sync_room_ids_for_user(
+                UserID.from_string(user1_id),
+                from_token=None,
+                to_token=after_rooms_token,
+            )
+        )
+
+        # Try finding *NOT* normal rooms
+        filtered_room_map = self.get_success(
+            self.sliding_sync_handler.filter_rooms(
+                UserID.from_string(user1_id),
+                sync_room_map,
+                SlidingSyncConfig.SlidingSyncList.Filters(not_room_types=[None]),
+                after_rooms_token,
+            )
+        )
+
+        self.assertEqual(filtered_room_map.keys(), {space_room_id, foo_room_id})
+
+        # Try finding *NOT* spaces
+        filtered_room_map = self.get_success(
+            self.sliding_sync_handler.filter_rooms(
+                UserID.from_string(user1_id),
+                sync_room_map,
+                SlidingSyncConfig.SlidingSyncList.Filters(
+                    not_room_types=[RoomTypes.SPACE]
+                ),
+                after_rooms_token,
+            )
+        )
+
+        self.assertEqual(filtered_room_map.keys(), {room_id, foo_room_id})
+
+        # Try finding *NOT* normal rooms or spaces
+        filtered_room_map = self.get_success(
+            self.sliding_sync_handler.filter_rooms(
+                UserID.from_string(user1_id),
+                sync_room_map,
+                SlidingSyncConfig.SlidingSyncList.Filters(
+                    not_room_types=[None, RoomTypes.SPACE]
+                ),
+                after_rooms_token,
+            )
+        )
+
+        self.assertEqual(filtered_room_map.keys(), {foo_room_id})
+
+        # Test how it behaves when we have both `room_types` and `not_room_types`.
+        # `not_room_types` should win.
+        filtered_room_map = self.get_success(
+            self.sliding_sync_handler.filter_rooms(
+                UserID.from_string(user1_id),
+                sync_room_map,
+                SlidingSyncConfig.SlidingSyncList.Filters(
+                    room_types=[None], not_room_types=[None]
+                ),
+                after_rooms_token,
+            )
+        )
+
+        # Nothing matches because nothing is both a normal room and not a normal room
+        self.assertEqual(filtered_room_map.keys(), set())
+
+        # Test how it behaves when we have both `room_types` and `not_room_types`.
+        # `not_room_types` should win.
+        filtered_room_map = self.get_success(
+            self.sliding_sync_handler.filter_rooms(
+                UserID.from_string(user1_id),
+                sync_room_map,
+                SlidingSyncConfig.SlidingSyncList.Filters(
+                    room_types=[None, RoomTypes.SPACE], not_room_types=[None]
+                ),
+                after_rooms_token,
+            )
+        )
+
+        self.assertEqual(filtered_room_map.keys(), {space_room_id})
+
 
 class SortRoomsTestCase(HomeserverTestCase):
     """
diff --git a/tests/http/test_client.py b/tests/http/test_client.py
index a98091d711..721917f957 100644
--- a/tests/http/test_client.py
+++ b/tests/http/test_client.py
@@ -37,18 +37,155 @@ from synapse.http.client import (
     BlocklistingAgentWrapper,
     BlocklistingReactorWrapper,
     BodyExceededMaxSize,
+    MultipartResponse,
     _DiscardBodyWithMaxSizeProtocol,
+    _MultipartParserProtocol,
     read_body_with_max_size,
+    read_multipart_response,
 )
 
 from tests.server import FakeTransport, get_clock
 from tests.unittest import TestCase
 
 
+class ReadMultipartResponseTests(TestCase):
+    data1 = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_"
+    data2 = b"to_stream\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n"
+
+    redirect_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nLocation: https://cdn.example.org/ab/c1/2345.txt\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n"
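+    # Layout of the multipart/mixed bodies above: each part is delimited by the
+    # boundary line, the first part carries the JSON metadata, and the second
+    # carries either the file bytes or a `Location` redirect header.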
+
+    def _build_multipart_response(
+        self, response_length: Union[int, str], max_length: int
+    ) -> Tuple[
+        BytesIO,
+        "Deferred[MultipartResponse]",
+        _MultipartParserProtocol,
+    ]:
+        """Start reading the body, returns the response, result and proto"""
+        response = Mock(length=response_length)
+        result = BytesIO()
+        boundary = "6067d4698f8d40a0a794ea7d7379d53a"
+        deferred = read_multipart_response(response, result, boundary, max_length)
+
+        # Fish the protocol out of the response.
+        protocol = response.deliverBody.call_args[0][0]
+        protocol.transport = Mock()
+
+        return result, deferred, protocol
+
+    def _assert_error(
+        self,
+        deferred: "Deferred[MultipartResponse]",
+        protocol: _MultipartParserProtocol,
+    ) -> None:
+        """Ensure that the expected error is received."""
+        assert isinstance(deferred.result, Failure)
+        self.assertIsInstance(deferred.result.value, BodyExceededMaxSize)
+        assert protocol.transport is not None
+        # type-ignore: presumably abortConnection has been replaced with a Mock.
+        protocol.transport.abortConnection.assert_called_once()  # type: ignore[attr-defined]
+
+    def _cleanup_error(self, deferred: "Deferred[MultipartResponse]") -> None:
+        """Ensure that the error in the Deferred is handled gracefully."""
+        called = [False]
+
+        def errback(f: Failure) -> None:
+            called[0] = True
+
+        deferred.addErrback(errback)
+        self.assertTrue(called[0])
+
+    def test_parse_file(self) -> None:
+        """
+        Check that a multipart response containing a file is properly parsed
+        into its JSON/file parts, and that the JSON and file are properly captured.
+        """
+        result, deferred, protocol = self._build_multipart_response(249, 250)
+
+        # Start sending data.
+        protocol.dataReceived(self.data1)
+        protocol.dataReceived(self.data2)
+        # Close the connection.
+        protocol.connectionLost(Failure(ResponseDone()))
+
+        multipart_response: MultipartResponse = deferred.result  # type: ignore[assignment]
+
+        self.assertEqual(multipart_response.json, b"{}")
+        self.assertEqual(result.getvalue(), b"file_to_stream")
+        self.assertEqual(multipart_response.length, len(b"file_to_stream"))
+        self.assertEqual(multipart_response.content_type, b"text/plain")
+        self.assertEqual(
+            multipart_response.disposition, b"inline; filename=test_upload"
+        )
+
+    def test_parse_redirect(self) -> None:
+        """
+        Check that a multipart response containing a redirect is properly parsed
+        and the redirect URL is returned
+        """
+        result, deferred, protocol = self._build_multipart_response(249, 250)
+
+        # Start sending data.
+        protocol.dataReceived(self.redirect_data)
+        # Close the connection.
+        protocol.connectionLost(Failure(ResponseDone()))
+
+        multipart_response: MultipartResponse = deferred.result  # type: ignore[assignment]
+
+        self.assertEqual(multipart_response.json, b"{}")
+        self.assertEqual(result.getvalue(), b"")
+        self.assertEqual(
+            multipart_response.url, b"https://cdn.example.org/ab/c1/2345.txt"
+        )
+
+    def test_too_large(self) -> None:
+        """A response which is too large raises an exception."""
+        result, deferred, protocol = self._build_multipart_response(UNKNOWN_LENGTH, 180)
+
+        # Start sending data.
+        protocol.dataReceived(self.data1)
+
+        self.assertEqual(result.getvalue(), b"file_")
+        self._assert_error(deferred, protocol)
+        self._cleanup_error(deferred)
+
+    def test_additional_data(self) -> None:
+        """A connection can receive data after being closed."""
+        result, deferred, protocol = self._build_multipart_response(UNKNOWN_LENGTH, 180)
+
+        # Start sending data.
+        protocol.dataReceived(self.data1)
+        self._assert_error(deferred, protocol)
+
+        # More data might have come in.
+        protocol.dataReceived(self.data2)
+
+        self.assertEqual(result.getvalue(), b"file_")
+        self._assert_error(deferred, protocol)
+        self._cleanup_error(deferred)
+
+    def test_content_length(self) -> None:
+        """The body shouldn't be read (at all) if the Content-Length header is too large."""
+        result, deferred, protocol = self._build_multipart_response(250, 1)
+
+        # Deferred shouldn't be called yet.
+        self.assertFalse(deferred.called)
+
+        # Start sending data.
+        protocol.dataReceived(self.data1)
+        self._assert_error(deferred, protocol)
+        self._cleanup_error(deferred)
+
+        # The data is never consumed.
+        self.assertEqual(result.getvalue(), b"")
+
+
 class ReadBodyWithMaxSizeTests(TestCase):
-    def _build_response(
-        self, length: Union[int, str] = UNKNOWN_LENGTH
-    ) -> Tuple[BytesIO, "Deferred[int]", _DiscardBodyWithMaxSizeProtocol]:
+    def _build_response(self, length: Union[int, str] = UNKNOWN_LENGTH) -> Tuple[
+        BytesIO,
+        "Deferred[int]",
+        _DiscardBodyWithMaxSizeProtocol,
+    ]:
         """Start reading the body, returns the response, result and proto"""
         response = Mock(length=length)
         result = BytesIO()
diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py
index 46d20ce775..70912e22f8 100644
--- a/tests/media/test_media_storage.py
+++ b/tests/media/test_media_storage.py
@@ -18,7 +18,6 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-import itertools
 import os
 import shutil
 import tempfile
@@ -129,7 +128,7 @@ class MediaStorageTests(unittest.HomeserverTestCase):
 
 
 @attr.s(auto_attribs=True, slots=True, frozen=True)
-class _TestImage:
+class TestImage:
     """An image for testing thumbnailing with the expected results
 
     Attributes:
@@ -158,7 +157,7 @@ class _TestImage:
     is_inline: bool = True
 
 
-small_png = _TestImage(
+small_png = TestImage(
     SMALL_PNG,
     b"image/png",
     b".png",
@@ -175,7 +174,7 @@ small_png = _TestImage(
     ),
 )
 
-small_png_with_transparency = _TestImage(
+small_png_with_transparency = TestImage(
     unhexlify(
         b"89504e470d0a1a0a0000000d49484452000000010000000101000"
         b"00000376ef9240000000274524e5300010194fdae0000000a4944"
@@ -188,7 +187,7 @@ small_png_with_transparency = _TestImage(
     # different versions of Pillow.
 )
 
-small_lossless_webp = _TestImage(
+small_lossless_webp = TestImage(
     unhexlify(
         b"524946461a000000574542505650384c0d0000002f0000001007" b"1011118888fe0700"
     ),
@@ -196,7 +195,7 @@ small_lossless_webp = _TestImage(
     b".webp",
 )
 
-empty_file = _TestImage(
+empty_file = TestImage(
     b"",
     b"image/gif",
     b".gif",
@@ -204,7 +203,7 @@ empty_file = _TestImage(
     unable_to_thumbnail=True,
 )
 
-SVG = _TestImage(
+SVG = TestImage(
     b"""<?xml version="1.0"?>
 <!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
   "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
@@ -227,19 +226,15 @@ test_images = [
     empty_file,
     SVG,
 ]
-urls = [
-    "_matrix/media/r0/thumbnail",
-    "_matrix/client/unstable/org.matrix.msc3916/media/thumbnail",
-]
+input_values = [(x,) for x in test_images]
 
 
-@parameterized_class(("test_image", "url"), itertools.product(test_images, urls))
+@parameterized_class(("test_image",), input_values)
 class MediaRepoTests(unittest.HomeserverTestCase):
     servlets = [media.register_servlets]
-    test_image: ClassVar[_TestImage]
+    test_image: ClassVar[TestImage]
     hijack_auth = True
     user_id = "@test:user"
-    url: ClassVar[str]
 
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         self.fetches: List[
@@ -304,7 +299,6 @@ class MediaRepoTests(unittest.HomeserverTestCase):
             "config": {"directory": self.storage_path},
         }
         config["media_storage_providers"] = [provider_config]
-        config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}
 
         hs = self.setup_test_homeserver(config=config, federation_http_client=client)
 
@@ -509,7 +503,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         params = "?width=32&height=32&method=scale"
         channel = self.make_request(
             "GET",
-            f"/{self.url}/{self.media_id}{params}",
+            f"/_matrix/media/r0/thumbnail/{self.media_id}{params}",
             shorthand=False,
             await_result=False,
         )
@@ -537,7 +531,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            f"/{self.url}/{self.media_id}{params}",
+            f"/_matrix/media/r0/thumbnail/{self.media_id}{params}",
             shorthand=False,
             await_result=False,
         )
@@ -573,7 +567,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         params = "?width=32&height=32&method=" + method
         channel = self.make_request(
             "GET",
-            f"/{self.url}/{self.media_id}{params}",
+            f"/_matrix/media/r0/thumbnail/{self.media_id}{params}",
             shorthand=False,
             await_result=False,
         )
@@ -608,7 +602,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
                 channel.json_body,
                 {
                     "errcode": "M_UNKNOWN",
-                    "error": f"Cannot find any thumbnails for the requested media ('/{self.url}/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
+                    "error": "Cannot find any thumbnails for the requested media ('/_matrix/media/r0/thumbnail/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
                 },
             )
         else:
@@ -618,7 +612,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
                 channel.json_body,
                 {
                     "errcode": "M_NOT_FOUND",
-                    "error": f"Not found '/{self.url}/example.com/12345'",
+                    "error": "Not found '/_matrix/media/r0/thumbnail/example.com/12345'",
                 },
             )
 
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index dce00d8b7f..bcca472617 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -26,7 +26,8 @@ from twisted.test.proto_helpers import MemoryReactor
 import synapse.rest.admin
 from synapse.logging.context import make_deferred_yieldable
 from synapse.push import PusherConfig, PusherConfigException
-from synapse.rest.client import login, push_rule, pusher, receipts, room
+from synapse.rest.admin.experimental_features import ExperimentalFeature
+from synapse.rest.client import login, push_rule, pusher, receipts, room, versions
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util import Clock
@@ -42,6 +43,7 @@ class HTTPPusherTests(HomeserverTestCase):
         receipts.register_servlets,
         push_rule.register_servlets,
         pusher.register_servlets,
+        versions.register_servlets,
     ]
     user_id = True
     hijack_auth = False
@@ -969,6 +971,84 @@ class HTTPPusherTests(HomeserverTestCase):
             lookup_result.device_id,
         )
 
+    def test_device_id_feature_flag(self) -> None:
+        """Tests that a pusher created with a given device ID shows that device ID in
+        GET /pushers requests when the feature is enabled for the user
+        """
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # We create the pusher with an HTTP request rather than with
+        # _make_user_with_pusher so that we can test the device ID is correctly set when
+        # creating a pusher via an API call.
+        self.make_request(
+            method="POST",
+            path="/pushers/set",
+            content={
+                "kind": "http",
+                "app_id": "m.http",
+                "app_display_name": "HTTP Push Notifications",
+                "device_display_name": "pushy push",
+                "pushkey": "a@example.com",
+                "lang": "en",
+                "data": {"url": "http://example.com/_matrix/push/v1/notify"},
+            },
+            access_token=access_token,
+        )
+
+        # Look up the user info for the access token so we can compare the device ID.
+        store = self.hs.get_datastores().main
+        lookup_result = self.get_success(store.get_user_by_access_token(access_token))
+        assert lookup_result is not None
+
+        # Check field is not there before we enable the feature flag
+        channel = self.make_request("GET", "/pushers", access_token=access_token)
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(len(channel.json_body["pushers"]), 1)
+        self.assertNotIn(
+            "org.matrix.msc3881.device_id", channel.json_body["pushers"][0]
+        )
+
+        self.get_success(
+            store.set_features_for_user(user_id, {ExperimentalFeature.MSC3881: True})
+        )
+
+        # Get the user's pushers and check the correct device ID is now present.
+        channel = self.make_request("GET", "/pushers", access_token=access_token)
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(len(channel.json_body["pushers"]), 1)
+        self.assertEqual(
+            channel.json_body["pushers"][0]["org.matrix.msc3881.device_id"],
+            lookup_result.device_id,
+        )
+
+    def test_msc3881_client_versions_flag(self) -> None:
+        """Tests that MSC3881 only appears in /versions if user has it enabled."""
+
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # Check feature is disabled in /versions
+        channel = self.make_request(
+            "GET", "/_matrix/client/versions", access_token=access_token
+        )
+        self.assertEqual(channel.code, 200)
+        self.assertFalse(channel.json_body["unstable_features"]["org.matrix.msc3881"])
+
+        # Enable feature for user
+        self.get_success(
+            self.hs.get_datastores().main.set_features_for_user(
+                user_id, {ExperimentalFeature.MSC3881: True}
+            )
+        )
+
+        # Check feature is now enabled in /versions for user
+        channel = self.make_request(
+            "GET", "/_matrix/client/versions", access_token=access_token
+        )
+        self.assertEqual(channel.code, 200)
+        self.assertTrue(channel.json_body["unstable_features"]["org.matrix.msc3881"])
+
     @override_config({"push": {"jitter_delay": "10s"}})
     def test_jitter(self) -> None:
         """Tests that enabling jitter actually delays sending push."""
diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py
index 4927e45446..6fc4600c41 100644
--- a/tests/replication/test_multi_media_repo.py
+++ b/tests/replication/test_multi_media_repo.py
@@ -28,7 +28,7 @@ from twisted.web.http import HTTPChannel
 from twisted.web.server import Request
 
 from synapse.rest import admin
-from synapse.rest.client import login
+from synapse.rest.client import login, media
 from synapse.server import HomeServer
 from synapse.util import Clock
 
@@ -255,6 +255,238 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase):
         return sum(len(files) for _, _, files in os.walk(path))
 
 
+class AuthenticatedMediaRepoShardTestCase(BaseMultiWorkerStreamTestCase):
+    """Checks running multiple media repos work correctly using autheticated media paths"""
+
+    servlets = [
+        admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+        media.register_servlets,
+    ]
+
+    file_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_to_stream\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n"
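+    # A complete multipart/mixed response body: a JSON metadata part followed by
+    # a text/plain file part, delimited by the same boundary that the tests below
+    # set in their Content-Type headers.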
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.user_id = self.register_user("user", "pass")
+        self.access_token = self.login("user", "pass")
+
+        self.reactor.lookups["example.com"] = "1.2.3.4"
+
+    def default_config(self) -> dict:
+        conf = super().default_config()
+        conf["federation_custom_ca_list"] = [get_test_ca_cert_file()]
+        return conf
+
+    def make_worker_hs(
+        self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any
+    ) -> HomeServer:
+        worker_hs = super().make_worker_hs(worker_app, extra_config, **kwargs)
+        # Force the media paths onto the replication resource.
+        worker_hs.get_media_repository_resource().register_servlets(
+            self._hs_to_site[worker_hs].resource, worker_hs
+        )
+        return worker_hs
+
+    def _get_media_req(
+        self, hs: HomeServer, target: str, media_id: str
+    ) -> Tuple[FakeChannel, Request]:
+        """Request some remote media from the given HS by calling the download
+        API.
+
+        This then triggers an outbound request from the HS to the target.
+
+        Returns:
+            The channel for the *client* request and the *outbound* request for
+            the media which the caller should respond to.
+        """
+        channel = make_request(
+            self.reactor,
+            self._hs_to_site[hs],
+            "GET",
+            f"/_matrix/client/v1/media/download/{target}/{media_id}",
+            shorthand=False,
+            access_token=self.access_token,
+            await_result=False,
+        )
+        self.pump()
+
+        clients = self.reactor.tcpClients
+        self.assertGreaterEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients.pop()
+
+        # build the test server
+        server_factory = Factory.forProtocol(HTTPChannel)
+        # Request.finish expects the factory to have a 'log' method.
+        server_factory.log = _log_request
+
+        server_tls_protocol = wrap_server_factory_for_tls(
+            server_factory, self.reactor, sanlist=[b"DNS:example.com"]
+        ).buildProtocol(None)
+
+        # now, tell the client protocol factory to build the client protocol (it will be a
+        # _WrappingProtocol, around a TLSMemoryBIOProtocol, around an
+        # HTTP11ClientProtocol) and wire the output of said protocol up to the server via
+        # a FakeTransport.
+        #
+        # Normally this would be done by the TCP socket code in Twisted, but we are
+        # stubbing that out here.
+        client_protocol = client_factory.buildProtocol(None)
+        client_protocol.makeConnection(
+            FakeTransport(server_tls_protocol, self.reactor, client_protocol)
+        )
+
+        # tell the server tls protocol to send its stuff back to the client, too
+        server_tls_protocol.makeConnection(
+            FakeTransport(client_protocol, self.reactor, server_tls_protocol)
+        )
+
+        # fish the test server back out of the server-side TLS protocol.
+        http_server: HTTPChannel = server_tls_protocol.wrappedProtocol
+
+        # give the reactor a pump to get the TLS juices flowing.
+        self.reactor.pump((0.1,))
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+
+        self.assertEqual(request.method, b"GET")
+        self.assertEqual(
+            request.path,
+            f"/_matrix/federation/v1/media/download/{media_id}".encode(),
+        )
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b"host"), [target.encode("utf-8")]
+        )
+
+        return channel, request
+
+    def test_basic(self) -> None:
+        """Test basic fetching of remote media from a single worker."""
+        hs1 = self.make_worker_hs("synapse.app.generic_worker")
+
+        channel, request = self._get_media_req(hs1, "example.com:443", "ABC123")
+
+        request.setResponseCode(200)
+        request.responseHeaders.setRawHeaders(
+            b"Content-Type",
+            ["multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a"],
+        )
+        request.write(self.file_data)
+        request.finish()
+
+        self.pump(0.1)
+
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.result["body"], b"file_to_stream")
+
+    def test_download_simple_file_race(self) -> None:
+        """Test that fetching remote media from two different processes at the
+        same time works.
+        """
+        hs1 = self.make_worker_hs("synapse.app.generic_worker")
+        hs2 = self.make_worker_hs("synapse.app.generic_worker")
+
+        start_count = self._count_remote_media()
+
+        # Make two requests without responding to the outbound media requests.
+        channel1, request1 = self._get_media_req(hs1, "example.com:443", "ABC123")
+        channel2, request2 = self._get_media_req(hs2, "example.com:443", "ABC123")
+
+        # Respond to the first outbound media request and check that the client
+        # request is successful
+        request1.setResponseCode(200)
+        request1.responseHeaders.setRawHeaders(
+            b"Content-Type",
+            ["multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a"],
+        )
+        request1.write(self.file_data)
+        request1.finish()
+
+        self.pump(0.1)
+
+        self.assertEqual(channel1.code, 200, channel1.result["body"])
+        self.assertEqual(channel1.result["body"], b"file_to_stream")
+
+        # Now respond to the second with the same content.
+        request2.setResponseCode(200)
+        request2.responseHeaders.setRawHeaders(
+            b"Content-Type",
+            ["multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a"],
+        )
+        request2.write(self.file_data)
+        request2.finish()
+
+        self.pump(0.1)
+
+        self.assertEqual(channel2.code, 200, channel2.result["body"])
+        self.assertEqual(channel2.result["body"], b"file_to_stream")
+
+        # We expect only one new file to have been persisted.
+        self.assertEqual(start_count + 1, self._count_remote_media())
+
+    def test_download_image_race(self) -> None:
+        """Test that fetching remote *images* from two different processes at
+        the same time works.
+
+        This checks that races generating thumbnails are handled correctly.
+        """
+        hs1 = self.make_worker_hs("synapse.app.generic_worker")
+        hs2 = self.make_worker_hs("synapse.app.generic_worker")
+
+        start_count = self._count_remote_thumbnails()
+
+        channel1, request1 = self._get_media_req(hs1, "example.com:443", "PIC1")
+        channel2, request2 = self._get_media_req(hs2, "example.com:443", "PIC1")
+
+        request1.setResponseCode(200)
+        request1.responseHeaders.setRawHeaders(
+            b"Content-Type",
+            ["multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a"],
+        )
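+        # Multipart body: a JSON metadata part, then an image/png part whose
+        # payload (SMALL_PNG) and closing boundary are written separately below.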
+        img_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: image/png\r\nContent-Disposition: inline; filename=test_img\r\n\r\n"
+        request1.write(img_data)
+        request1.write(SMALL_PNG)
+        request1.write(b"\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n")
+        request1.finish()
+
+        self.pump(0.1)
+
+        self.assertEqual(channel1.code, 200, channel1.result["body"])
+        self.assertEqual(channel1.result["body"], SMALL_PNG)
+
+        request2.setResponseCode(200)
+        request2.responseHeaders.setRawHeaders(
+            b"Content-Type",
+            ["multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a"],
+        )
+        request2.write(img_data)
+        request2.write(SMALL_PNG)
+        request2.write(b"\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n")
+        request2.finish()
+
+        self.pump(0.1)
+
+        self.assertEqual(channel2.code, 200, channel2.result["body"])
+        self.assertEqual(channel2.result["body"], SMALL_PNG)
+
+        # We expect only three new thumbnails to have been persisted.
+        self.assertEqual(start_count + 3, self._count_remote_thumbnails())
+
+    def _count_remote_media(self) -> int:
+        """Count the number of files in our remote media directory."""
+        path = os.path.join(
+            self.hs.get_media_repository().primary_base_path, "remote_content"
+        )
+        return sum(len(files) for _, _, files in os.walk(path))
+
+    def _count_remote_thumbnails(self) -> int:
+        """Count the number of files in our remote thumbnails directory."""
+        path = os.path.join(
+            self.hs.get_media_repository().primary_base_path, "remote_thumbnail"
+        )
+        return sum(len(files) for _, _, files in os.walk(path))
+
+
 def _log_request(request: Request) -> None:
     """Implements Factory.log, which is expected by Request.finish"""
     logger.info("Completed request %s", request)
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 5f6f7213b3..6351326fff 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -384,7 +384,7 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
             "PUT",
             url,
             content={
-                "features": {"msc3026": True, "msc3881": True},
+                "features": {"msc3881": True},
             },
             access_token=self.admin_user_tok,
         )
@@ -401,10 +401,6 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 200)
         self.assertEqual(
             True,
-            channel.json_body["features"]["msc3026"],
-        )
-        self.assertEqual(
-            True,
             channel.json_body["features"]["msc3881"],
         )
 
@@ -413,7 +409,7 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
         channel = self.make_request(
             "PUT",
             url,
-            content={"features": {"msc3026": False}},
+            content={"features": {"msc3881": False}},
             access_token=self.admin_user_tok,
         )
         self.assertEqual(channel.code, 200)
@@ -429,10 +425,6 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 200)
         self.assertEqual(
             False,
-            channel.json_body["features"]["msc3026"],
-        )
-        self.assertEqual(
-            True,
             channel.json_body["features"]["msc3881"],
         )
 
@@ -441,7 +433,7 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
         channel = self.make_request(
             "PUT",
             url,
-            content={"features": {"msc3026": False}},
+            content={"features": {"msc3881": False}},
             access_token=self.admin_user_tok,
         )
         self.assertEqual(channel.code, 200)
diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py
index be4a289ec1..7f2caed7d5 100644
--- a/tests/rest/client/test_media.py
+++ b/tests/rest/client/test_media.py
@@ -19,31 +19,57 @@
 #
 #
 import base64
+import io
 import json
 import os
 import re
-from typing import Any, Dict, Optional, Sequence, Tuple, Type
+import shutil
+from typing import Any, BinaryIO, Dict, List, Optional, Sequence, Tuple, Type
+from unittest.mock import MagicMock, Mock, patch
+from urllib import parse
 from urllib.parse import quote, urlencode
 
+from parameterized import parameterized, parameterized_class
+from PIL import Image as Image
+from typing_extensions import ClassVar
+
+from twisted.internet import defer
 from twisted.internet._resolver import HostResolution
 from twisted.internet.address import IPv4Address, IPv6Address
+from twisted.internet.defer import Deferred
 from twisted.internet.error import DNSLookupError
 from twisted.internet.interfaces import IAddress, IResolutionReceiver
+from twisted.python.failure import Failure
 from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor
-from twisted.web.resource import Resource
+from twisted.web.http_headers import Headers
+from twisted.web.iweb import UNKNOWN_LENGTH, IResponse
 
+from synapse.api.errors import HttpResponseException
+from synapse.api.ratelimiting import Ratelimiter
 from synapse.config.oembed import OEmbedEndpointConfig
-from synapse.media._base import FileInfo
+from synapse.http.client import MultipartResponse
+from synapse.http.types import QueryParams
+from synapse.logging.context import make_deferred_yieldable
+from synapse.media._base import FileInfo, ThumbnailInfo
+from synapse.media.thumbnailer import ThumbnailProvider
 from synapse.media.url_previewer import IMAGE_CACHE_EXPIRY_MS
 from synapse.rest import admin
 from synapse.rest.client import login, media
 from synapse.server import HomeServer
-from synapse.types import JsonDict
+from synapse.types import JsonDict, UserID
 from synapse.util import Clock
 from synapse.util.stringutils import parse_and_validate_mxc_uri
 
 from tests import unittest
-from tests.server import FakeTransport, ThreadedMemoryReactorClock
+from tests.media.test_media_storage import (
+    SVG,
+    TestImage,
+    empty_file,
+    small_lossless_webp,
+    small_png,
+    small_png_with_transparency,
+)
+from tests.server import FakeChannel, FakeTransport, ThreadedMemoryReactorClock
 from tests.test_utils import SMALL_PNG
 from tests.unittest import override_config
 
@@ -53,7 +79,7 @@ except ImportError:
     lxml = None  # type: ignore[assignment]
 
 
-class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
+class MediaDomainBlockingTests(unittest.HomeserverTestCase):
     remote_media_id = "doesnotmatter"
     remote_server_name = "evil.com"
     servlets = [
@@ -121,7 +147,6 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
             # Should result in a 404.
             "prevent_media_downloads_from": ["evil.com"],
             "dynamic_thumbnails": True,
-            "experimental_features": {"msc3916_authenticated_media_enabled": True},
         }
     )
     def test_cannot_download_blocked_media_thumbnail(self) -> None:
@@ -130,7 +155,7 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
         """
         response = self.make_request(
             "GET",
-            f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
+            f"/_matrix/client/v1/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
             shorthand=False,
             content={"width": 100, "height": 100},
             access_token=self.tok,
@@ -143,7 +168,6 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
             # This proves we haven't broken anything.
             "prevent_media_downloads_from": ["not-listed.com"],
             "dynamic_thumbnails": True,
-            "experimental_features": {"msc3916_authenticated_media_enabled": True},
         }
     )
     def test_remote_media_thumbnail_normally_unblocked(self) -> None:
@@ -152,14 +176,14 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
         """
         response = self.make_request(
             "GET",
-            f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
+            f"/_matrix/client/v1/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
             shorthand=False,
             access_token=self.tok,
         )
         self.assertEqual(response.code, 200)
 
 
-class UnstableURLPreviewTests(unittest.HomeserverTestCase):
+class URLPreviewTests(unittest.HomeserverTestCase):
     if not lxml:
         skip = "url preview feature requires lxml"
 
@@ -175,7 +199,6 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         config = self.default_config()
-        config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}
         config["url_preview_enabled"] = True
         config["max_spider_size"] = 9999999
         config["url_preview_ip_range_blacklist"] = (
@@ -261,18 +284,6 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         self.reactor.nameResolver = Resolver()  # type: ignore[assignment]
 
-    def create_resource_dict(self) -> Dict[str, Resource]:
-        """Create a resource tree for the test server
-
-        A resource tree is a mapping from path to twisted.web.resource.
-
-        The default implementation creates a JsonResource and calls each function in
-        `servlets` to register servlets against it.
-        """
-        resources = super().create_resource_dict()
-        resources["/_matrix/media"] = self.hs.get_media_repository_resource()
-        return resources
-
     def _assert_small_png(self, json_body: JsonDict) -> None:
         """Assert properties from the SMALL_PNG test image."""
         self.assertTrue(json_body["og:image"].startswith("mxc://"))
@@ -286,7 +297,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -311,7 +322,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         # Check the cache returns the correct response
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
         )
 
@@ -329,7 +340,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         # Check the database cache returns the correct response
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
         )
 
@@ -352,7 +363,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -382,7 +393,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -418,7 +429,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -459,7 +470,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -494,7 +505,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -527,7 +538,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
             await_result=False,
         )
@@ -557,7 +568,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
 
@@ -580,7 +591,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
 
@@ -599,7 +610,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         """
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://192.168.1.1",
+            "/_matrix/client/v1/media/preview_url?url=http://192.168.1.1",
             shorthand=False,
         )
 
@@ -617,7 +628,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         """
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://1.1.1.2",
+            "/_matrix/client/v1/media/preview_url?url=http://1.1.1.2",
             shorthand=False,
         )
 
@@ -636,7 +647,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
             await_result=False,
         )
@@ -673,7 +684,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
         self.assertEqual(channel.code, 502)
@@ -695,7 +706,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
 
@@ -718,7 +729,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
 
@@ -737,7 +748,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         """
         channel = self.make_request(
             "OPTIONS",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
         self.assertEqual(channel.code, 204)
@@ -751,7 +762,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         # Build and make a request to the server
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
             await_result=False,
         )
@@ -804,7 +815,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -854,7 +865,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -896,7 +907,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -936,7 +947,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -977,7 +988,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            f"/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?{query_params}",
+            f"/_matrix/client/v1/media/preview_url?{query_params}",
             shorthand=False,
         )
         self.pump()
@@ -998,7 +1009,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -1035,7 +1046,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1095,7 +1106,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1144,7 +1155,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.hulu.com/watch/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://www.hulu.com/watch/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1189,7 +1200,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1218,7 +1229,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1310,7 +1321,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1351,7 +1362,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://cdn.twitter.com/matrixdotorg",
+            "/_matrix/client/v1/media/preview_url?url=http://cdn.twitter.com/matrixdotorg",
             shorthand=False,
             await_result=False,
         )
@@ -1393,7 +1404,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         # Check fetching
         channel = self.make_request(
             "GET",
-            f"/_matrix/media/v3/download/{host}/{media_id}",
+            f"/_matrix/client/v1/media/download/{host}/{media_id}",
             shorthand=False,
             await_result=False,
         )
@@ -1406,7 +1417,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            f"/_matrix/media/v3/download/{host}/{media_id}",
+            f"/_matrix/client/v1/download/{host}/{media_id}",
             shorthand=False,
             await_result=False,
         )
@@ -1441,7 +1452,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         # Check fetching
         channel = self.make_request(
             "GET",
-            f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
+            f"/_matrix/client/v1/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
             shorthand=False,
             await_result=False,
         )
@@ -1459,7 +1470,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
+            f"/_matrix/client/v1/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
             shorthand=False,
             await_result=False,
         )
@@ -1509,8 +1520,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url="
-            + bad_url,
+            "/_matrix/client/v1/media/preview_url?url=" + bad_url,
             shorthand=False,
             await_result=False,
         )
@@ -1519,8 +1529,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url="
-            + good_url,
+            "/_matrix/client/v1/media/preview_url?url=" + good_url,
             shorthand=False,
             await_result=False,
         )
@@ -1552,8 +1561,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url="
-            + bad_url,
+            "/_matrix/client/v1/media/preview_url?url=" + bad_url,
             shorthand=False,
             await_result=False,
         )
@@ -1561,7 +1569,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 403, channel.result)
 
 
-class UnstableMediaConfigTest(unittest.HomeserverTestCase):
+class MediaConfigTest(unittest.HomeserverTestCase):
     servlets = [
         media.register_servlets,
         admin.register_servlets,
@@ -1572,7 +1580,6 @@ class UnstableMediaConfigTest(unittest.HomeserverTestCase):
         self, reactor: ThreadedMemoryReactorClock, clock: Clock
     ) -> HomeServer:
         config = self.default_config()
-        config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}
 
         self.storage_path = self.mktemp()
         self.media_store_path = self.mktemp()
@@ -1599,7 +1606,7 @@ class UnstableMediaConfigTest(unittest.HomeserverTestCase):
     def test_media_config(self) -> None:
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/config",
+            "/_matrix/client/v1/media/config",
             shorthand=False,
             access_token=self.tok,
         )
@@ -1607,3 +1614,815 @@ class UnstableMediaConfigTest(unittest.HomeserverTestCase):
         self.assertEqual(
             channel.json_body["m.upload.size"], self.hs.config.media.max_upload_size
         )
+
+
+class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        media.register_servlets,
+        login.register_servlets,
+        admin.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+        config = self.default_config()
+
+        self.storage_path = self.mktemp()
+        self.media_store_path = self.mktemp()
+        os.mkdir(self.storage_path)
+        os.mkdir(self.media_store_path)
+        config["media_store_path"] = self.media_store_path
+
+        provider_config = {
+            "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+            "store_local": True,
+            "store_synchronous": False,
+            "store_remote": True,
+            "config": {"directory": self.storage_path},
+        }
+
+        config["media_storage_providers"] = [provider_config]
+
+        return self.setup_test_homeserver(config=config)
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.repo = hs.get_media_repository()
+        self.client = hs.get_federation_http_client()
+        self.store = hs.get_datastores().main
+        self.user = self.register_user("user", "pass")
+        self.tok = self.login("user", "pass")
+
+    # Mocks for read_multipart_response that skip actually reading the file
+    # body and just report a fixed parsed length for the download.
+    def read_multipart_response_30MiB(*args: Any, **kwargs: Any) -> Deferred:
+        d: Deferred = defer.Deferred()
+        d.callback(MultipartResponse(b"{}", 31457280, b"img/png", None))
+        return d
+
+    def read_multipart_response_50MiB(*args: Any, **kwargs: Any) -> Deferred:
+        d: Deferred = defer.Deferred()
+        d.callback(MultipartResponse(b"{}", 52428800, b"img/png", None))
+        return d
+
+    @patch(
+        "synapse.http.matrixfederationclient.read_multipart_response",
+        read_multipart_response_30MiB,
+    )
+    def test_download_ratelimit_default(self) -> None:
+        """
+        Test remote media download ratelimiting against the default configuration -
+        a 500MiB burst bucket and an 87KiB/second drain rate
+        """
+
+        # mock out actually sending the request, returns a 30MiB response
+        async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+            resp = MagicMock(spec=IResponse)
+            resp.code = 200
+            resp.length = 31457280
+            resp.headers = Headers(
+                {"Content-Type": ["multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p"]}
+            )
+            resp.phrase = b"OK"
+            return resp
+
+        self.client._send_request = _send_request  # type: ignore
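+        # Only the request/response plumbing is stubbed out here; the download
+        # ratelimiter still records the (mocked) response lengths.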
+
+        # first request should go through
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/v1/media/download/remote.org/abc",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        assert channel.code == 200
+
+        # the next 15 should also go through (16 x 30MiB stays under the 500MiB burst)
+        for i in range(15):
+            channel2 = self.make_request(
+                "GET",
+                f"/_matrix/client/v1/media/download/remote.org/abc{i}",
+                shorthand=False,
+                access_token=self.tok,
+            )
+            assert channel2.code == 200
+
+        # the 17th will hit the ratelimit (17 x 30MiB exceeds the 500MiB burst)
+        channel3 = self.make_request(
+            "GET",
+            "/_matrix/client/v1/media/download/remote.org/abcd",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        assert channel3.code == 429
+
+        # however, a request from a different IP will go through
+        channel4 = self.make_request(
+            "GET",
+            "/_matrix/client/v1/media/download/remote.org/abcde",
+            shorthand=False,
+            client_ip="187.233.230.159",
+            access_token=self.tok,
+        )
+        assert channel4.code == 200
+
+        # at 87KiB/s it should take about 2 minutes for enough to drain from the
+        # bucket that another 30MiB download is authorized - the last download was
+        # blocked with the bucket at 503,316,480 bytes. The next download will be
+        # authorized once the bucket drains to 492,830,720 (524,288,000 total
+        # capacity - 31,457,280 download size), so 503,316,480 - 492,830,720 ~=
+        # 10,485,760 bytes need to drain first, which takes roughly 2 minutes
+        # (10,485,760 / 89,088 / 60)
+        self.reactor.pump([2.0 * 60.0])
+
+        # enough has drained and next request goes through
+        channel5 = self.make_request(
+            "GET",
+            "/_matrix/client/v1/media/download/remote.org/abcdef",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        assert channel5.code == 200
+
+    @override_config(
+        {
+            "remote_media_download_per_second": "50M",
+            "remote_media_download_burst_count": "50M",
+        }
+    )
+    @patch(
+        "synapse.http.matrixfederationclient.read_multipart_response",
+        read_multipart_response_50MiB,
+    )
+    def test_download_rate_limit_config(self) -> None:
+        """
+        Test that download rate limit config options are correctly picked up and applied
+        """
+
+        async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+            resp = MagicMock(spec=IResponse)
+            resp.code = 200
+            resp.length = 52428800
+            resp.headers = Headers(
+                {"Content-Type": ["multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p"]}
+            )
+            resp.phrase = b"OK"
+            return resp
+
+        self.client._send_request = _send_request  # type: ignore
+
+        # first request should go through
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/v1/media/download/remote.org/abc",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        assert channel.code == 200
+
+        # immediate second request should fail
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/v1/media/download/remote.org/abcd",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        assert channel.code == 429
+
+        # advance half a second
+        self.reactor.pump([0.5])
+
+        # request still fails
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/v1/media/download/remote.org/abcde",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        assert channel.code == 429
+
+        # advance another half second
+        self.reactor.pump([0.5])
+
+        # enough has drained from bucket and request is successful
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/v1/media/download/remote.org/abcdef",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        assert channel.code == 200
+
+    @patch(
+        "synapse.http.matrixfederationclient.read_multipart_response",
+        read_multipart_response_30MiB,
+    )
+    def test_download_ratelimit_max_size_sub(self) -> None:
+        """
+        Test that if no content-length is provided, the default max size is applied instead
+        """
+
+        # mock out actually sending the request
+        async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+            resp = MagicMock(spec=IResponse)
+            resp.code = 200
+            resp.length = UNKNOWN_LENGTH
+            resp.headers = Headers(
+                {"Content-Type": ["multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p"]}
+            )
+            resp.phrase = b"OK"
+            return resp
+
+        self.client._send_request = _send_request  # type: ignore
+
+        # ten requests should go through, each counted at the default max size
+        # (500MiB burst / 50MiB max size = 10 downloads)
+        for i in range(10):
+            channel2 = self.make_request(
+                "GET",
+                f"/_matrix/client/v1/media/download/remote.org/abc{i}",
+                shorthand=False,
+                access_token=self.tok,
+            )
+            assert channel2.code == 200
+
+        # the eleventh will hit the ratelimit
+        channel3 = self.make_request(
+            "GET",
+            "/_matrix/client/v1/media/download/remote.org/abcd",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        assert channel3.code == 429
+
+    def test_file_download(self) -> None:
+        content = io.BytesIO(b"file_to_stream")
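+        # Store a file directly via the media repository; the arguments are
+        # (media_type, upload_name, content stream, content_length, uploading user).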
+        content_uri = self.get_success(
+            self.repo.create_content(
+                "text/plain",
+                "test_upload",
+                content,
+                46,
+                UserID.from_string("@user_id:whatever.org"),
+            )
+        )
+        # test with a text file
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/media/download/test/{content_uri.media_id}",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        self.pump()
+        self.assertEqual(200, channel.code)
+
+
+test_images = [
+    small_png,
+    small_png_with_transparency,
+    small_lossless_webp,
+    empty_file,
+    SVG,
+]
+input_values = [(x,) for x in test_images]
+
+
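+# Instantiate DownloadAndThumbnailTestCase once per image above;
+# parameterized_class sets the `test_image` class attribute for each run.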
+@parameterized_class(("test_image",), input_values)
+class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase):
+    test_image: ClassVar[TestImage]
+    servlets = [
+        media.register_servlets,
+        login.register_servlets,
+        admin.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+        self.fetches: List[
+            Tuple[
+                "Deferred[Any]",
+                str,
+                str,
+                Optional[QueryParams],
+            ]
+        ] = []
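+        # Each entry is (deferred to resolve with the response, destination,
+        # path, query args) - appended by the mocked fetch functions below.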
+
+        def federation_get_file(
+            destination: str,
+            path: str,
+            output_stream: BinaryIO,
+            download_ratelimiter: Ratelimiter,
+            ip_address: Any,
+            max_size: int,
+            args: Optional[QueryParams] = None,
+            retry_on_dns_fail: bool = True,
+            ignore_backoff: bool = False,
+            follow_redirects: bool = False,
+        ) -> "Deferred[Tuple[int, Dict[bytes, List[bytes]], bytes]]":
+            """A mock for MatrixFederationHttpClient.federation_get_file."""
+
+            def write_to(
+                r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]]
+            ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
+                data, response = r
+                output_stream.write(data)
+                return response
+
+            def write_err(f: Failure) -> Failure:
+                f.trap(HttpResponseException)
+                output_stream.write(f.value.response)
+                return f
+
+            d: Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]]] = (
+                Deferred()
+            )
+            self.fetches.append((d, destination, path, args))
+            # Note that this callback changes the value held by d.
+            d_after_callback = d.addCallbacks(write_to, write_err)
+            return make_deferred_yieldable(d_after_callback)
+
+        def get_file(
+            destination: str,
+            path: str,
+            output_stream: BinaryIO,
+            download_ratelimiter: Ratelimiter,
+            ip_address: Any,
+            max_size: int,
+            args: Optional[QueryParams] = None,
+            retry_on_dns_fail: bool = True,
+            ignore_backoff: bool = False,
+            follow_redirects: bool = False,
+        ) -> "Deferred[Tuple[int, Dict[bytes, List[bytes]]]]":
+            """A mock for MatrixFederationHttpClient.get_file."""
+
+            def write_to(
+                r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]
+            ) -> Tuple[int, Dict[bytes, List[bytes]]]:
+                data, response = r
+                output_stream.write(data)
+                return response
+
+            def write_err(f: Failure) -> Failure:
+                f.trap(HttpResponseException)
+                output_stream.write(f.value.response)
+                return f
+
+            d: Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]] = Deferred()
+            self.fetches.append((d, destination, path, args))
+            # Note that this callback changes the value held by d.
+            d_after_callback = d.addCallbacks(write_to, write_err)
+            return make_deferred_yieldable(d_after_callback)
+
+        # Mock out the homeserver's MatrixFederationHttpClient
+        client = Mock()
+        client.federation_get_file = federation_get_file
+        client.get_file = get_file
+
+        self.storage_path = self.mktemp()
+        self.media_store_path = self.mktemp()
+        os.mkdir(self.storage_path)
+        os.mkdir(self.media_store_path)
+
+        config = self.default_config()
+        config["media_store_path"] = self.media_store_path
+        config["max_image_pixels"] = 2000000
+
+        provider_config = {
+            "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+            "store_local": True,
+            "store_synchronous": False,
+            "store_remote": True,
+            "config": {"directory": self.storage_path},
+        }
+        config["media_storage_providers"] = [provider_config]
+
+        hs = self.setup_test_homeserver(config=config, federation_http_client=client)
+
+        return hs
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.media_repo = hs.get_media_repository()
+
+        self.remote = "example.com"
+        self.media_id = "12345"
+
+        self.user = self.register_user("user", "pass")
+        self.tok = self.login("user", "pass")
+
+    def _req(
+        self, content_disposition: Optional[bytes], include_content_type: bool = True
+    ) -> FakeChannel:
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/media/download/{self.remote}/{self.media_id}",
+            shorthand=False,
+            await_result=False,
+            access_token=self.tok,
+        )
+        self.pump()
+
+        # We've made one fetch, to example.com, using the federation media URL
+        self.assertEqual(len(self.fetches), 1)
+        self.assertEqual(self.fetches[0][1], "example.com")
+        self.assertEqual(
+            self.fetches[0][2], "/_matrix/federation/v1/media/download/" + self.media_id
+        )
+        self.assertEqual(
+            self.fetches[0][3],
+            {"timeout_ms": "20000"},
+        )
+
+        headers = {
+            b"Content-Length": [b"%d" % (len(self.test_image.data))],
+        }
+
+        if include_content_type:
+            headers[b"Content-Type"] = [self.test_image.content_type]
+
+        if content_disposition:
+            headers[b"Content-Disposition"] = [content_disposition]
+
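+        # Resolve the pending federation fetch; the tuple mirrors what
+        # federation_get_file's write_to callback expects:
+        # (file bytes, (length, headers, json bytes)).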
+        self.fetches[0][0].callback(
+            (self.test_image.data, (len(self.test_image.data), headers, b"{}"))
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
+
+        return channel
+
+    def test_handle_missing_content_type(self) -> None:
+        channel = self._req(
+            b"attachment; filename=out" + self.test_image.extension,
+            include_content_type=False,
+        )
+        headers = channel.headers
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(
+            headers.getRawHeaders(b"Content-Type"), [b"application/octet-stream"]
+        )
+
+    def test_disposition_filename_ascii(self) -> None:
+        """
+        If the filename is filename=<ascii> then Synapse will decode it as an
+        ASCII string, and use filename= in the response.
+        """
+        channel = self._req(b"attachment; filename=out" + self.test_image.extension)
+
+        headers = channel.headers
+        self.assertEqual(
+            headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
+        )
+        self.assertEqual(
+            headers.getRawHeaders(b"Content-Disposition"),
+            [
+                (b"inline" if self.test_image.is_inline else b"attachment")
+                + b"; filename=out"
+                + self.test_image.extension
+            ],
+        )
+
+    def test_disposition_filenamestar_utf8escaped(self) -> None:
+        """
+        If the filename is filename=*utf8''<utf8 escaped> then Synapse will
+        correctly decode it as the UTF-8 string, and use filename* in the
+        response.
+        """
+        filename = parse.quote("\u2603".encode()).encode("ascii")
+        channel = self._req(
+            b"attachment; filename*=utf-8''" + filename + self.test_image.extension
+        )
+
+        headers = channel.headers
+        self.assertEqual(
+            headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
+        )
+        self.assertEqual(
+            headers.getRawHeaders(b"Content-Disposition"),
+            [
+                (b"inline" if self.test_image.is_inline else b"attachment")
+                + b"; filename*=utf-8''"
+                + filename
+                + self.test_image.extension
+            ],
+        )
+
+    def test_disposition_none(self) -> None:
+        """
+        If there is no filename, Content-Disposition should only
+        be a disposition type.
+        """
+        channel = self._req(None)
+
+        headers = channel.headers
+        self.assertEqual(
+            headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
+        )
+        self.assertEqual(
+            headers.getRawHeaders(b"Content-Disposition"),
+            [b"inline" if self.test_image.is_inline else b"attachment"],
+        )
+
+    def test_x_robots_tag_header(self) -> None:
+        """
+        Tests that the `X-Robots-Tag` header is present, which informs web crawlers
+        to not index, archive, or follow links in media.
+        """
+        channel = self._req(b"attachment; filename=out" + self.test_image.extension)
+
+        headers = channel.headers
+        self.assertEqual(
+            headers.getRawHeaders(b"X-Robots-Tag"),
+            [b"noindex, nofollow, noarchive, noimageindex"],
+        )
+
+    def test_cross_origin_resource_policy_header(self) -> None:
+        """
+        Test that the Cross-Origin-Resource-Policy header is set to "cross-origin"
+        allowing web clients to embed media from the downloads API.
+        """
+        channel = self._req(b"attachment; filename=out" + self.test_image.extension)
+
+        headers = channel.headers
+
+        self.assertEqual(
+            headers.getRawHeaders(b"Cross-Origin-Resource-Policy"),
+            [b"cross-origin"],
+        )
+
+    def test_unknown_federation_endpoint(self) -> None:
+        """
+        Test that if the download request to remote federation endpoint returns a 404
+        we fall back to the _matrix/media endpoint
+        """
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/media/download/{self.remote}/{self.media_id}",
+            shorthand=False,
+            await_result=False,
+            access_token=self.tok,
+        )
+        self.pump()
+
+        # We've made one fetch, to example.com, using the federation media URL
+        self.assertEqual(len(self.fetches), 1)
+        self.assertEqual(self.fetches[0][1], "example.com")
+        self.assertEqual(
+            self.fetches[0][2], f"/_matrix/federation/v1/media/download/{self.media_id}"
+        )
+
+        # The result which says the endpoint is unknown.
+        unknown_endpoint = b'{"errcode":"M_UNRECOGNIZED","error":"Unknown request"}'
+        self.fetches[0][0].errback(
+            HttpResponseException(404, "NOT FOUND", unknown_endpoint)
+        )
+
+        self.pump()
+
+        # There should now be another request to the _matrix/media/v3/download URL.
+        self.assertEqual(len(self.fetches), 2)
+        self.assertEqual(self.fetches[1][1], "example.com")
+        self.assertEqual(
+            self.fetches[1][2],
+            f"/_matrix/media/v3/download/example.com/{self.media_id}",
+        )
+
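+        # The v3 fallback goes through the plain get_file mock, whose payload
+        # has no leading JSON metadata part: (file bytes, (length, headers)).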
+        headers = {
+            b"Content-Length": [b"%d" % (len(self.test_image.data))],
+        }
+
+        self.fetches[1][0].callback(
+            (self.test_image.data, (len(self.test_image.data), headers))
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
+
+    def test_thumbnail_crop(self) -> None:
+        """Test that a cropped remote thumbnail is available."""
+        self._test_thumbnail(
+            "crop",
+            self.test_image.expected_cropped,
+            expected_found=self.test_image.expected_found,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+    def test_thumbnail_scale(self) -> None:
+        """Test that a scaled remote thumbnail is available."""
+        self._test_thumbnail(
+            "scale",
+            self.test_image.expected_scaled,
+            expected_found=self.test_image.expected_found,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+    def test_invalid_type(self) -> None:
+        """An invalid thumbnail type is never available."""
+        self._test_thumbnail(
+            "invalid",
+            None,
+            expected_found=False,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+    @unittest.override_config(
+        {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]}
+    )
+    def test_no_thumbnail_crop(self) -> None:
+        """
+        Override the config to generate only scaled thumbnails, but request a cropped one.
+        """
+        self._test_thumbnail(
+            "crop",
+            None,
+            expected_found=False,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+    @unittest.override_config(
+        {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]}
+    )
+    def test_no_thumbnail_scale(self) -> None:
+        """
+        Override the config to generate only cropped thumbnails, but request a scaled one.
+        """
+        self._test_thumbnail(
+            "scale",
+            None,
+            expected_found=False,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+    def test_thumbnail_repeated_thumbnail(self) -> None:
+        """Test that fetching the same thumbnail works, and deleting the on disk
+        thumbnail regenerates it.
+        """
+        self._test_thumbnail(
+            "scale",
+            self.test_image.expected_scaled,
+            expected_found=self.test_image.expected_found,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+        if not self.test_image.expected_found:
+            return
+
+        # Fetching again should work, without re-requesting the image from the
+        # remote.
+        params = "?width=32&height=32&method=scale"
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/media/thumbnail/{self.remote}/{self.media_id}{params}",
+            shorthand=False,
+            await_result=False,
+            access_token=self.tok,
+        )
+        self.pump()
+
+        self.assertEqual(channel.code, 200)
+        if self.test_image.expected_scaled:
+            self.assertEqual(
+                channel.result["body"],
+                self.test_image.expected_scaled,
+                channel.result["body"],
+            )
+
+        # Deleting the thumbnail on disk then re-requesting it should work as
+        # Synapse should regenerate missing thumbnails.
+        info = self.get_success(
+            self.store.get_cached_remote_media(self.remote, self.media_id)
+        )
+        assert info is not None
+        file_id = info.filesystem_id
+
+        thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir(
+            self.remote, file_id
+        )
+        shutil.rmtree(thumbnail_dir, ignore_errors=True)
+
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/media/thumbnail/{self.remote}/{self.media_id}{params}",
+            shorthand=False,
+            await_result=False,
+            access_token=self.tok,
+        )
+        self.pump()
+
+        self.assertEqual(channel.code, 200)
+        if self.test_image.expected_scaled:
+            self.assertEqual(
+                channel.result["body"],
+                self.test_image.expected_scaled,
+                channel.result["body"],
+            )
+
+    def _test_thumbnail(
+        self,
+        method: str,
+        expected_body: Optional[bytes],
+        expected_found: bool,
+        unable_to_thumbnail: bool = False,
+    ) -> None:
+        """Test the given thumbnailing method works as expected.
+
+        Args:
+            method: The thumbnailing method to use (crop, scale).
+            expected_body: The expected bytes from thumbnailing, or None if
+                test should just check for a valid image.
+            expected_found: True if the file should exist on the server, or False if
+                a 404/400 is expected.
+            unable_to_thumbnail: True if we expect the thumbnailing to fail (400), or
+                False if the thumbnailing should succeed or a normal 404 is expected.
+        """
+
+        params = "?width=32&height=32&method=" + method
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/media/thumbnail/{self.remote}/{self.media_id}{params}",
+            shorthand=False,
+            await_result=False,
+            access_token=self.tok,
+        )
+        self.pump()
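+        # Complete the stubbed remote download: `self.fetches` collects the
+        # pending fetch deferreds, and firing the first one hands back the test
+        # image bytes as if the remote server had responded.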
+        headers = {
+            b"Content-Length": [b"%d" % (len(self.test_image.data))],
+            b"Content-Type": [self.test_image.content_type],
+        }
+        self.fetches[0][0].callback(
+            (self.test_image.data, (len(self.test_image.data), headers))
+        )
+        self.pump()
+        if expected_found:
+            self.assertEqual(channel.code, 200)
+
+            self.assertEqual(
+                channel.headers.getRawHeaders(b"Cross-Origin-Resource-Policy"),
+                [b"cross-origin"],
+            )
+
+            if expected_body is not None:
+                self.assertEqual(
+                    channel.result["body"], expected_body, channel.result["body"]
+                )
+            else:
+                # ensure that the result is at least some valid image
+                Image.open(io.BytesIO(channel.result["body"]))
+        elif unable_to_thumbnail:
+            # A 400 with a JSON body.
+            self.assertEqual(channel.code, 400)
+            self.assertEqual(
+                channel.json_body,
+                {
+                    "errcode": "M_UNKNOWN",
+                    "error": "Cannot find any thumbnails for the requested media ('/_matrix/client/v1/media/thumbnail/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
+                },
+            )
+        else:
+            # A 404 with a JSON body.
+            self.assertEqual(channel.code, 404)
+            self.assertEqual(
+                channel.json_body,
+                {
+                    "errcode": "M_NOT_FOUND",
+                    "error": "Not found '/_matrix/client/v1/media/thumbnail/example.com/12345'",
+                },
+            )
+
+    @parameterized.expand([("crop", 16), ("crop", 64), ("scale", 16), ("scale", 64)])
+    def test_same_quality(self, method: str, desired_size: int) -> None:
+        """Test that choosing between thumbnails with the same quality rating succeeds.
+
+        We are not particular about which thumbnail is chosen."""
+
+        content_type = self.test_image.content_type.decode()
+        media_repo = self.hs.get_media_repository()
+        thumbnail_provider = ThumbnailProvider(
+            self.hs, media_repo, media_repo.media_storage
+        )
+
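+        # `_select_thumbnail` returns None if no stored thumbnail can satisfy
+        # the request; with two equally-rated candidates it must still pick one.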
+        self.assertIsNotNone(
+            thumbnail_provider._select_thumbnail(
+                desired_width=desired_size,
+                desired_height=desired_size,
+                desired_method=method,
+                desired_type=content_type,
+                # Provide two identical thumbnails which are guaranteed to have the same
+                # quality rating.
+                thumbnail_infos=[
+                    ThumbnailInfo(
+                        width=32,
+                        height=32,
+                        method=method,
+                        type=content_type,
+                        length=256,
+                    ),
+                    ThumbnailInfo(
+                        width=32,
+                        height=32,
+                        method=method,
+                        type=content_type,
+                        length=256,
+                    ),
+                ],
+                file_id=f"image{self.test_image.extension.decode()}",
+                url_cache=False,
+                server_name=None,
+            )
+        )
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index 12c11f342c..cb2888409e 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -20,7 +20,7 @@
 #
 import json
 import logging
-from typing import Dict, List
+from typing import AbstractSet, Any, Dict, Iterable, List, Optional
 
 from parameterized import parameterized, parameterized_class
 
@@ -31,12 +31,16 @@ from synapse.api.constants import (
     AccountDataTypes,
     EventContentFields,
     EventTypes,
+    HistoryVisibility,
+    Membership,
     ReceiptTypes,
     RelationTypes,
 )
+from synapse.events import EventBase
+from synapse.handlers.sliding_sync import StateValues
 from synapse.rest.client import devices, knock, login, read_marker, receipts, room, sync
 from synapse.server import HomeServer
-from synapse.types import JsonDict, RoomStreamToken, StreamKeyType
+from synapse.types import JsonDict, RoomStreamToken, StreamKeyType, StreamToken, UserID
 from synapse.util import Clock
 
 from tests import unittest
@@ -44,6 +48,7 @@ from tests.federation.transport.test_knocking import (
     KnockingStrippedStateEventHelperMixin,
 )
 from tests.server import TimedOutException
+from tests.test_utils.event_injection import mark_event_as_partial_state
 
 logger = logging.getLogger(__name__)
 
@@ -1236,6 +1241,94 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
         )
         self.store = hs.get_datastores().main
         self.event_sources = hs.get_event_sources()
+        self.storage_controllers = hs.get_storage_controllers()
+
+    def _assertRequiredStateIncludes(
+        self,
+        actual_required_state: Any,
+        expected_state_events: Iterable[EventBase],
+        exact: bool = False,
+    ) -> None:
+        """
+        Wrapper around `_assertIncludes` to give slightly better-looking diff error
+        messages that include some context "$event_id (type, state_key)".
+
+        Args:
+            actual_required_state: The "required_state" of a room from a Sliding Sync
+                request response.
+            expected_state_events: The expected state events to be included in the
+                `actual_required_state`.
+            exact: Whether the actual state should be exactly equal to the expected
+                state (no extras).
+        """
+
+        assert isinstance(actual_required_state, list)
+        for event in actual_required_state:
+            assert isinstance(event, dict)
+
+        self._assertIncludes(
+            {
+                f'{event["event_id"]} ("{event["type"]}", "{event["state_key"]}")'
+                for event in actual_required_state
+            },
+            {
+                f'{event.event_id} ("{event.type}", "{event.state_key}")'
+                for event in expected_state_events
+            },
+            exact=exact,
+            # Message to help understand the diff in context
+            message=str(actual_required_state),
+        )
+
+    def _assertIncludes(
+        self,
+        actual_items: AbstractSet[str],
+        expected_items: AbstractSet[str],
+        exact: bool = False,
+        message: Optional[str] = None,
+    ) -> None:
+        """
+        Assert that all of the `expected_items` are included in the `actual_items`.
+
+        This assert could also be called `assertContains` or `assertItemsInSet`.
+
+        Args:
+            actual_items: The container
+            expected_items: The items to check for in the container
+            exact: Whether the actual items should be exactly equal to the
+                expected items (no extras).
+            message: Optional message to include in the failure message.
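+
+        Example (illustrative only):
+            _assertIncludes({"a", "b", "c"}, {"a", "b"})              # passes (superset)
+            _assertIncludes({"a", "b"}, {"a", "b"}, exact=True)       # passes
+            _assertIncludes({"a", "b", "c"}, {"a", "b"}, exact=True)  # fails (extra "c")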
+        """
+        # Check that each set has the same items
+        if exact and actual_items == expected_items:
+            return
+        # Check for a superset
+        elif not exact and actual_items >= expected_items:
+            return
+
+        expected_lines: List[str] = []
+        for expected_item in expected_items:
+            is_expected_in_actual = expected_item in actual_items
+            expected_lines.append(
+                "{}  {}".format(" " if is_expected_in_actual else "?", expected_item)
+            )
+
+        actual_lines: List[str] = []
+        for actual_item in actual_items:
+            is_actual_in_expected = actual_item in expected_items
+            actual_lines.append(
+                "{}  {}".format("+" if is_actual_in_expected else " ", actual_item)
+            )
+
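+        # f-strings can't contain backslashes (prior to Python 3.12), hence
+        # binding the newline to a name for use in the f-strings below.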
+        newline = "\n"
+        expected_string = f"Expected items to be in actual ('?' = missing expected items):\n {{\n{newline.join(expected_lines)}\n }}"
+        actual_string = f"Actual ('+' = found expected items):\n {{\n{newline.join(actual_lines)}\n }}"
+        first_message = (
+            "Items must match exactly." if exact else "Some expected items are missing."
+        )
+        diff_message = f"{first_message}\n{expected_string}\n{actual_string}"
+
+        self.fail(f"{diff_message}\n{message}" if message is not None else diff_message)
 
     def _add_new_dm_to_global_account_data(
         self, source_user_id: str, target_user_id: str, target_room_id: str
@@ -1326,7 +1419,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
 
     def test_sync_list(self) -> None:
         """
-        Test that room IDs show up in the Sliding Sync lists
+        Test that room IDs show up in the Sliding Sync `lists`
         """
         alice_user_id = self.register_user("alice", "correcthorse")
         alice_access_token = self.login(alice_user_id, "correcthorse")
@@ -1425,15 +1518,13 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
         channel.await_result(timeout_ms=200)
         self.assertEqual(channel.code, 200, channel.json_body)
 
-        # We expect the `next_pos` in the result to be the same as what we requested
+        # We expect the next `pos` in the result to be the same as what we requested
         # with because we weren't able to find anything new yet.
-        self.assertEqual(
-            channel.json_body["next_pos"], future_position_token_serialized
-        )
+        self.assertEqual(channel.json_body["pos"], future_position_token_serialized)
 
     def test_filter_list(self) -> None:
         """
-        Test that filters apply to lists
+        Test that filters apply to `lists`
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1564,7 +1655,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
 
     def test_sort_list(self) -> None:
         """
-        Test that the lists are sorted by `stream_ordering`
+        Test that the `lists` are sorted by `stream_ordering`
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1618,3 +1709,1686 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
             ],
             channel.json_body["lists"]["foo-list"],
         )
+
+    def test_sliced_windows(self) -> None:
+        """
+        Test that the `lists` `ranges` are sliced correctly. Both sides of each range
+        are inclusive.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        _room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
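+        # Rooms are sorted by most recent activity, so the most recently
+        # created room (room_id3) should come first in the list.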
+
+        # Make the Sliding Sync request for a single room
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 0]],
+                        "required_state": [
+                            ["m.room.join_rules", ""],
+                            ["m.room.history_visibility", ""],
+                            ["m.space.child", "*"],
+                        ],
+                        "timeline_limit": 1,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Make sure it has the foo-list we requested
+        self.assertListEqual(
+            list(channel.json_body["lists"].keys()),
+            ["foo-list"],
+            channel.json_body["lists"].keys(),
+        )
+        # Make sure the list is sorted in the way we expect
+        self.assertListEqual(
+            list(channel.json_body["lists"]["foo-list"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 0],
+                    "room_ids": [room_id3],
+                }
+            ],
+            channel.json_body["lists"]["foo-list"],
+        )
+
+        # Make the Sliding Sync request for the first two rooms
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            ["m.room.join_rules", ""],
+                            ["m.room.history_visibility", ""],
+                            ["m.space.child", "*"],
+                        ],
+                        "timeline_limit": 1,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Make sure it has the foo-list we requested
+        self.assertListEqual(
+            list(channel.json_body["lists"].keys()),
+            ["foo-list"],
+            channel.json_body["lists"].keys(),
+        )
+        # Make sure the list is sorted in the way we expect
+        self.assertListEqual(
+            list(channel.json_body["lists"]["foo-list"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 1],
+                    "room_ids": [room_id3, room_id2],
+                }
+            ],
+            channel.json_body["lists"]["foo-list"],
+        )
+
+    def test_rooms_limited_initial_sync(self) -> None:
+        """
+        Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit`
+        on initial sync.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity1", tok=user2_tok)
+        self.helper.send(room_id1, "activity2", tok=user2_tok)
+        event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok)
+        event_pos3 = self.get_success(
+            self.store.get_position_for_event(event_response3["event_id"])
+        )
+        event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok)
+        event_pos4 = self.get_success(
+            self.store.get_position_for_event(event_response4["event_id"])
+        )
+        event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok)
+        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": 3,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We expect to saturate the `timeline_limit` (there are more than 3 messages in the room)
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["limited"],
+            True,
+            channel.json_body["rooms"][room_id1],
+        )
+        # Check to make sure the latest events are returned
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in channel.json_body["rooms"][room_id1]["timeline"]
+            ],
+            [
+                event_response4["event_id"],
+                event_response5["event_id"],
+                user1_join_response["event_id"],
+            ],
+            channel.json_body["rooms"][room_id1]["timeline"],
+        )
+
+        # Check to make sure the `prev_batch` points at the right place
+        prev_batch_token = self.get_success(
+            StreamToken.from_string(
+                self.store, channel.json_body["rooms"][room_id1]["prev_batch"]
+            )
+        )
+        prev_batch_room_stream_token_serialized = self.get_success(
+            prev_batch_token.room_key.to_string(self.store)
+        )
+        # If we use the `prev_batch` token to look backwards, we should see `event3`
+        # next so make sure the token encompasses it
+        self.assertEqual(
+            event_pos3.persisted_after(prev_batch_token.room_key),
+            False,
+            f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be >= event_pos3={self.get_success(event_pos3.to_room_stream_token().to_string(self.store))}",
+        )
+        # If we use the `prev_batch` token to look backwards, we shouldn't see `event4`
+        # anymore since it was just returned in this response.
+        self.assertEqual(
+            event_pos4.persisted_after(prev_batch_token.room_key),
+            True,
+            f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be < event_pos4={self.get_success(event_pos4.to_room_stream_token().to_string(self.store))}",
+        )
+
+        # With no `from_token` (initial sync), it's all historical since there is no
+        # "live" range
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["num_live"],
+            0,
+            channel.json_body["rooms"][room_id1],
+        )
+
+    def test_rooms_not_limited_initial_sync(self) -> None:
+        """
+        Test that we mark `rooms` as `limited=False` when there are no more events to
+        paginate to.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity1", tok=user2_tok)
+        self.helper.send(room_id1, "activity2", tok=user2_tok)
+        self.helper.send(room_id1, "activity3", tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        timeline_limit = 100
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": timeline_limit,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # The timeline should be `limited=False` because we have all of the events (no
+        # more to paginate to)
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["limited"],
+            False,
+            channel.json_body["rooms"][room_id1],
+        )
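+        # Expected breakdown (assuming the default set of room-creation events):
+        # 5 state events from creating the room (create, creator's membership,
+        # power levels, join rules, history visibility), the 3 messages, and
+        # user1's join.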
+        expected_number_of_events = 9
+        # We're just looking to make sure we got all of the events before hitting the `timeline_limit`
+        self.assertEqual(
+            len(channel.json_body["rooms"][room_id1]["timeline"]),
+            expected_number_of_events,
+            channel.json_body["rooms"][room_id1]["timeline"],
+        )
+        self.assertLessEqual(expected_number_of_events, timeline_limit)
+
+        # With no `from_token` (initial sync), it's all historical since there is no
+        # "live" token range.
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["num_live"],
+            0,
+            channel.json_body["rooms"][room_id1],
+        )
+
+    def test_rooms_incremental_sync(self) -> None:
+        """
+        Test `rooms` data during an incremental sync after an initial sync.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.send(room_id1, "activity before initial sync1", tok=user2_tok)
+
+        # Make an initial Sliding Sync request to grab a token. This is also a sanity
+        # check that we can go from initial to incremental sync.
+        sync_params = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 3,
+                }
+            }
+        }
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            sync_params,
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        next_pos = channel.json_body["pos"]
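+        # `pos` is the position token to feed back via the `?pos=` query
+        # parameter to make the next request an incremental sync.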
+
+        # Send some events but don't send enough to saturate the `timeline_limit`.
+        # We later test that we only get the new events since the `next_pos`.
+        event_response2 = self.helper.send(room_id1, "activity after2", tok=user2_tok)
+        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
+
+        # Make an incremental Sliding Sync request (what we're trying to test)
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?pos={next_pos}",
+            sync_params,
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We only expect to see the new events since the last sync which isn't enough to
+        # fill up the `timeline_limit`.
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["limited"],
+            False,
+            f'Our `timeline_limit` was {sync_params["lists"]["foo-list"]["timeline_limit"]} '
+            + f'and {len(channel.json_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
+            + str(channel.json_body["rooms"][room_id1]),
+        )
+        # Check to make sure the latest events are returned
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in channel.json_body["rooms"][room_id1]["timeline"]
+            ],
+            [
+                event_response2["event_id"],
+                event_response3["event_id"],
+            ],
+            channel.json_body["rooms"][room_id1]["timeline"],
+        )
+
+        # All events are "live"
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["num_live"],
+            2,
+            channel.json_body["rooms"][room_id1],
+        )
+
+    def test_rooms_newly_joined_incremental_sync(self) -> None:
+        """
+        Test that when we make an incremental sync with `newly_joined` `rooms`, we are
+        able to see some historical events before the `from_token`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before token1", tok=user2_tok)
+        event_response2 = self.helper.send(
+            room_id1, "activity before token2", tok=user2_tok
+        )
+
+        from_token = self.event_sources.get_current_token()
+
+        # Join the room after the `from_token` which will make us consider this room as
+        # `newly_joined`.
+        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Send some events but don't send enough to saturate the `timeline_limit`.
+        # We later check that the timeline mixes these new events with some
+        # historical ones from before the `from_token`.
+        event_response3 = self.helper.send(
+            room_id1, "activity after token3", tok=user2_tok
+        )
+        event_response4 = self.helper.send(
+            room_id1, "activity after token4", tok=user2_tok
+        )
+
+        # The `timeline_limit` is set to 4 so we can at least see one historical event
+        # before the `from_token`. We should see historical events because this is a
+        # `newly_joined` room.
+        timeline_limit = 4
+        # Make an incremental Sliding Sync request (what we're trying to test)
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint
+            + f"?pos={self.get_success(from_token.to_string(self.store))}",
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": timeline_limit,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We should see the new events and the rest should be filled with historical
+        # events which will make us `limited=True` since there are more to paginate to.
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["limited"],
+            True,
+            f"Our `timeline_limit` was {timeline_limit} "
+            + f'and {len(channel.json_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
+            + str(channel.json_body["rooms"][room_id1]),
+        )
+        # Check to make sure that the "live" and historical events are returned
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in channel.json_body["rooms"][room_id1]["timeline"]
+            ],
+            [
+                event_response2["event_id"],
+                user1_join_response["event_id"],
+                event_response3["event_id"],
+                event_response4["event_id"],
+            ],
+            channel.json_body["rooms"][room_id1]["timeline"],
+        )
+
+        # Only events after the `from_token` are "live" (join, event3, event4)
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["num_live"],
+            3,
+            channel.json_body["rooms"][room_id1],
+        )
+
+    def test_rooms_invite_shared_history_initial_sync(self) -> None:
+        """
+        Test that `rooms` we are invited to have some stripped `invite_state` during an
+        initial sync.
+
+        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+        but we also shouldn't see any timeline events because the history visibility is
+        `shared` and we haven't joined the room yet.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user1 = UserID.from_string(user1_id)
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user2 = UserID.from_string(user2_id)
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Ensure we're testing with a room with `shared` history visibility which means
+        # the history isn't visible until you actually join the room.
+        history_visibility_response = self.helper.get_state(
+            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+        )
+        self.assertEqual(
+            history_visibility_response.get("history_visibility"),
+            HistoryVisibility.SHARED,
+        )
+
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before2", tok=user2_tok)
+        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity after3", tok=user2_tok)
+        self.helper.send(room_id1, "activity after4", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": 3,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # `timeline` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("timeline"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("num_live"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("limited"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("prev_batch"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `required_state` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("required_state"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # We should have some `stripped_state` so the potential joiner can identify the
+        # room (we don't care about the order).
+        self.assertCountEqual(
+            channel.json_body["rooms"][room_id1]["invite_state"],
+            [
+                {
+                    "content": {"creator": user2_id, "room_version": "10"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.create",
+                },
+                {
+                    "content": {"join_rule": "public"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.join_rules",
+                },
+                {
+                    "content": {"displayname": user2.localpart, "membership": "join"},
+                    "sender": user2_id,
+                    "state_key": user2_id,
+                    "type": "m.room.member",
+                },
+                {
+                    "content": {"displayname": user1.localpart, "membership": "invite"},
+                    "sender": user2_id,
+                    "state_key": user1_id,
+                    "type": "m.room.member",
+                },
+            ],
+            channel.json_body["rooms"][room_id1]["invite_state"],
+        )
+
+    def test_rooms_invite_shared_history_incremental_sync(self) -> None:
+        """
+        Test that `rooms` we are invited to have some stripped `invite_state` during an
+        incremental sync.
+
+        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+        but we also shouldn't see any timeline events because the history visibility is
+        `shared` and we haven't joined the room yet.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user1 = UserID.from_string(user1_id)
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user2 = UserID.from_string(user2_id)
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Ensure we're testing with a room with `shared` history visibility which means
+        # the history isn't visible until you actually join the room.
+        history_visibility_response = self.helper.get_state(
+            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+        )
+        self.assertEqual(
+            history_visibility_response.get("history_visibility"),
+            HistoryVisibility.SHARED,
+        )
+
+        self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
+        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
+        self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
+
+        from_token = self.event_sources.get_current_token()
+
+        self.helper.send(room_id1, "activity after token5", tok=user2_tok)
+        self.helper.send(room_id1, "activity after toekn6", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint
+            + f"?pos={self.get_success(from_token.to_string(self.store))}",
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": 3,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # `timeline` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("timeline"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("num_live"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("limited"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("prev_batch"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `required_state` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("required_state"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # We should have some `stripped_state` so the potential joiner can identify the
+        # room (we don't care about the order).
+        self.assertCountEqual(
+            channel.json_body["rooms"][room_id1]["invite_state"],
+            [
+                {
+                    "content": {"creator": user2_id, "room_version": "10"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.create",
+                },
+                {
+                    "content": {"join_rule": "public"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.join_rules",
+                },
+                {
+                    "content": {"displayname": user2.localpart, "membership": "join"},
+                    "sender": user2_id,
+                    "state_key": user2_id,
+                    "type": "m.room.member",
+                },
+                {
+                    "content": {"displayname": user1.localpart, "membership": "invite"},
+                    "sender": user2_id,
+                    "state_key": user1_id,
+                    "type": "m.room.member",
+                },
+            ],
+            channel.json_body["rooms"][room_id1]["invite_state"],
+        )
+
+    def test_rooms_invite_world_readable_history_initial_sync(self) -> None:
+        """
+        Test that `rooms` we are invited to have some stripped `invite_state` during an
+        initial sync.
+
+        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+        but depending on the semantics we decide, we could potentially see some
+        historical events because the history is `world_readable`. The same would
+        apply if the history visibility were set to `invited`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user1 = UserID.from_string(user1_id)
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user2 = UserID.from_string(user2_id)
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "preset": "public_chat",
+                "initial_state": [
+                    {
+                        "content": {
+                            "history_visibility": HistoryVisibility.WORLD_READABLE
+                        },
+                        "state_key": "",
+                        "type": EventTypes.RoomHistoryVisibility,
+                    }
+                ],
+            },
+        )
+        # Ensure we're testing with a room with `world_readable` history visibility
+        # which means events are visible to anyone even without membership.
+        history_visibility_response = self.helper.get_state(
+            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+        )
+        self.assertEqual(
+            history_visibility_response.get("history_visibility"),
+            HistoryVisibility.WORLD_READABLE,
+        )
+
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before2", tok=user2_tok)
+        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity after3", tok=user2_tok)
+        self.helper.send(room_id1, "activity after4", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        # Large enough to see the latest events plus some from before the invite
+                        "timeline_limit": 4,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # `timeline` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("timeline"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("num_live"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("limited"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("prev_batch"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `required_state` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("required_state"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # We should have some `stripped_state` so the potential joiner can identify the
+        # room (we don't care about the order).
+        self.assertCountEqual(
+            channel.json_body["rooms"][room_id1]["invite_state"],
+            [
+                {
+                    "content": {"creator": user2_id, "room_version": "10"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.create",
+                },
+                {
+                    "content": {"join_rule": "public"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.join_rules",
+                },
+                {
+                    "content": {"displayname": user2.localpart, "membership": "join"},
+                    "sender": user2_id,
+                    "state_key": user2_id,
+                    "type": "m.room.member",
+                },
+                {
+                    "content": {"displayname": user1.localpart, "membership": "invite"},
+                    "sender": user2_id,
+                    "state_key": user1_id,
+                    "type": "m.room.member",
+                },
+            ],
+            channel.json_body["rooms"][room_id1]["invite_state"],
+        )
+
+    def test_rooms_invite_world_readable_history_incremental_sync(self) -> None:
+        """
+        Test that `rooms` we are invited to have some stripped `invite_state` during an
+        incremental sync.
+
+        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+        but depending on the semantics we decide, we could potentially see some
+        historical events before/after the `from_token` because the history is
+        `world_readable`. The same applies to events after the `from_token` if the
+        history visibility is set to `invited`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user1 = UserID.from_string(user1_id)
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user2 = UserID.from_string(user2_id)
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "preset": "public_chat",
+                "initial_state": [
+                    {
+                        "content": {
+                            "history_visibility": HistoryVisibility.WORLD_READABLE
+                        },
+                        "state_key": "",
+                        "type": EventTypes.RoomHistoryVisibility,
+                    }
+                ],
+            },
+        )
+        # Ensure we're testing with a room with `world_readable` history visibility
+        # which means events are visible to anyone even without membership.
+        history_visibility_response = self.helper.get_state(
+            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+        )
+        self.assertEqual(
+            history_visibility_response.get("history_visibility"),
+            HistoryVisibility.WORLD_READABLE,
+        )
+
+        self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
+        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
+        self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
+
+        from_token = self.event_sources.get_current_token()
+
+        self.helper.send(room_id1, "activity after token5", tok=user2_tok)
+        self.helper.send(room_id1, "activity after toekn6", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint
+            + f"?pos={self.get_success(from_token.to_string(self.store))}",
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        # Large enough to see the latest events plus some from before the invite
+                        "timeline_limit": 4,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # `timeline` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("timeline"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("num_live"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("limited"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("prev_batch"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # `required_state` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("required_state"),
+            channel.json_body["rooms"][room_id1],
+        )
+        # We should have some `stripped_state` so the potential joiner can identify the
+        # room (we don't care about the order).
+        self.assertCountEqual(
+            channel.json_body["rooms"][room_id1]["invite_state"],
+            [
+                {
+                    "content": {"creator": user2_id, "room_version": "10"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.create",
+                },
+                {
+                    "content": {"join_rule": "public"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.join_rules",
+                },
+                {
+                    "content": {"displayname": user2.localpart, "membership": "join"},
+                    "sender": user2_id,
+                    "state_key": user2_id,
+                    "type": "m.room.member",
+                },
+                {
+                    "content": {"displayname": user1.localpart, "membership": "invite"},
+                    "sender": user2_id,
+                    "state_key": user1_id,
+                    "type": "m.room.member",
+                },
+            ],
+            channel.json_body["rooms"][room_id1]["invite_state"],
+        )
+
+    def test_rooms_ban_initial_sync(self) -> None:
+        """
+        Test that `rooms` we are banned from in an initial sync only allow us to see
+        timeline events up to the ban event.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before2", tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
+        event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
+        user1_ban_response = self.helper.ban(
+            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+
+        self.helper.send(room_id1, "activity after5", tok=user2_tok)
+        self.helper.send(room_id1, "activity after6", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": 3,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We should see events before the ban but not after
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in channel.json_body["rooms"][room_id1]["timeline"]
+            ],
+            [
+                event_response3["event_id"],
+                event_response4["event_id"],
+                user1_ban_response["event_id"],
+            ],
+            channel.json_body["rooms"][room_id1]["timeline"],
+        )
+        # No "live" events in an initial sync (no `from_token` to define the "live"
+        # range)
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["num_live"],
+            0,
+            channel.json_body["rooms"][room_id1],
+        )
+        # There are more events to paginate to
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["limited"],
+            True,
+            channel.json_body["rooms"][room_id1],
+        )
+
+    def test_rooms_ban_incremental_sync1(self) -> None:
+        """
+        Test that `rooms` we are banned from during the next incremental sync only
+        allow us to see timeline events up to the ban event.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before2", tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        from_token = self.event_sources.get_current_token()
+
+        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
+        event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
+        # The ban is within the token range (between the `from_token` and the sliding
+        # sync request)
+        user1_ban_response = self.helper.ban(
+            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+
+        self.helper.send(room_id1, "activity after5", tok=user2_tok)
+        self.helper.send(room_id1, "activity after6", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint
+            + f"?pos={self.get_success(from_token.to_string(self.store))}",
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": 4,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We should see events before the ban but not after
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in channel.json_body["rooms"][room_id1]["timeline"]
+            ],
+            [
+                event_response3["event_id"],
+                event_response4["event_id"],
+                user1_ban_response["event_id"],
+            ],
+            channel.json_body["rooms"][room_id1]["timeline"],
+        )
+        # All live events in the incremental sync
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["num_live"],
+            3,
+            channel.json_body["rooms"][room_id1],
+        )
+        # There aren't any more events to paginate to in this range
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["limited"],
+            False,
+            channel.json_body["rooms"][room_id1],
+        )
+
+    def test_rooms_ban_incremental_sync2(self) -> None:
+        """
+        Test that `rooms` we are banned from before the incremental sync don't return
+        any events in the timeline.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send(room_id1, "activity after2", tok=user2_tok)
+        # The ban is before we get our `from_token`
+        self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        self.helper.send(room_id1, "activity after3", tok=user2_tok)
+
+        from_token = self.event_sources.get_current_token()
+
+        self.helper.send(room_id1, "activity after4", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint
+            + f"?pos={self.get_success(from_token.to_string(self.store))}",
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": 4,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Nothing to see for this banned user in the room in the token range
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["timeline"],
+            [],
+            channel.json_body["rooms"][room_id1]["timeline"],
+        )
+        # No events returned in the timeline so nothing is "live"
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["num_live"],
+            0,
+            channel.json_body["rooms"][room_id1],
+        )
+        # There aren't any more events to paginate to in this range
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["limited"],
+            False,
+            channel.json_body["rooms"][room_id1],
+        )
+
+    def test_rooms_no_required_state(self) -> None:
+        """
+        Empty `rooms.required_state` should not return any state events in the room
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        # Empty `required_state`
+                        "required_state": [],
+                        "timeline_limit": 0,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # No `required_state` in response
+        self.assertIsNone(
+            channel.json_body["rooms"][room_id1].get("required_state"),
+            channel.json_body["rooms"][room_id1],
+        )
+
+    def test_rooms_required_state_initial_sync(self) -> None:
+        """
+        Test `rooms.required_state` returns requested state events in the room during an
+        initial sync.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [EventTypes.Create, ""],
+                            [EventTypes.RoomHistoryVisibility, ""],
+                            # This one doesn't exist in the room
+                            [EventTypes.Tombstone, ""],
+                        ],
+                        "timeline_limit": 0,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
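+        # `_assertRequiredStateIncludes` checks that the serialized `required_state`
+        # response contains the given state events; with `exact=True`, no other
+        # events are allowed.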
+        self._assertRequiredStateIncludes(
+            channel.json_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.RoomHistoryVisibility, "")],
+            },
+            exact=True,
+        )
+
+    def test_rooms_required_state_incremental_sync(self) -> None:
+        """
+        Test `rooms.required_state` returns requested state events in the room during an
+        incremental sync.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        after_room_token = self.event_sources.get_current_token()
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint
+            + f"?pos={self.get_success(after_room_token.to_string(self.store))}",
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [EventTypes.Create, ""],
+                            [EventTypes.RoomHistoryVisibility, ""],
+                            # This one doesn't exist in the room
+                            [EventTypes.Tombstone, ""],
+                        ],
+                        "timeline_limit": 0,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # The returned state doesn't change from initial to incremental sync. In the
+        # future, we will only return updates, and only if we've sent the room down
+        # the connection before.
+        self._assertRequiredStateIncludes(
+            channel.json_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.RoomHistoryVisibility, "")],
+            },
+            exact=True,
+        )
+
+    def test_rooms_required_state_wildcard(self) -> None:
+        """
+        Test `rooms.required_state` returns all state events when using the wildcard `["*", "*"]`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="namespaced",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request with wildcards for the `event_type` and `state_key`
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [StateValues.WILDCARD, StateValues.WILDCARD],
+                        ],
+                        "timeline_limit": 0,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            channel.json_body["rooms"][room_id1]["required_state"],
+            # We should see all the state events in the room
+            state_map.values(),
+            exact=True,
+        )
+
+    def test_rooms_required_state_wildcard_event_type(self) -> None:
+        """
+        Test `rooms.required_state` returns relevant state events when using a
+        wildcard in the event_type, e.g. `["*", "foobarbaz"]`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key=user2_id,
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request with a wildcard for the `event_type`
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [StateValues.WILDCARD, user2_id],
+                        ],
+                        "timeline_limit": 0,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # We expect at least the state events with `user2_id` as the `state_key`
+        self._assertRequiredStateIncludes(
+            channel.json_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Member, user2_id)],
+                state_map[("org.matrix.foo_state", user2_id)],
+            },
+            # Ideally, this would be exact but we're currently returning all state
+            # events when the `event_type` is a wildcard.
+            exact=False,
+        )
+
+    def test_rooms_required_state_wildcard_state_key(self) -> None:
+        """
+        Test `rooms.required_state` returns relevant state events when using a
+        wildcard in the state_key, e.g. `["foobarbaz", "*"]`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request with wildcards for the `state_key`
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [EventTypes.Member, StateValues.WILDCARD],
+                        ],
+                        "timeline_limit": 0,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            channel.json_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Member, user1_id)],
+                state_map[(EventTypes.Member, user2_id)],
+            },
+            exact=True,
+        )
+
+    def test_rooms_required_state_lazy_loading_room_members(self) -> None:
+        """
+        Test `rooms.required_state` returns people relevant to the timeline when
+        lazy-loading room members, `["m.room.member","$LAZY"]`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.join(room_id1, user3_id, tok=user3_tok)
+
+        self.helper.send(room_id1, "1", tok=user2_tok)
+        self.helper.send(room_id1, "2", tok=user3_tok)
+        self.helper.send(room_id1, "3", tok=user2_tok)
+
+        # Make the Sliding Sync request with lazy loading for the room members
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [EventTypes.Create, ""],
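+                            # `$LAZY` lazy-loads members: only the senders of
+                            # the events returned in the timeline are included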
+                            [EventTypes.Member, StateValues.LAZY],
+                        ],
+                        "timeline_limit": 3,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # Only user2 and user3 sent the 3 events we see in the `timeline`
+        self._assertRequiredStateIncludes(
+            channel.json_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.Member, user2_id)],
+                state_map[(EventTypes.Member, user3_id)],
+            },
+            exact=True,
+        )
+
+    @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)])
+    def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None:
+        """
+        Test `rooms.required_state` should not return state past a leave/ban event.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        from_token = self.event_sources.get_current_token()
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.join(room_id1, user3_id, tok=user3_tok)
+
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+
+        if stop_membership == Membership.LEAVE:
+            # User 1 leaves
+            self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        elif stop_membership == Membership.BAN:
+            # User 1 is banned
+            self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # Change the state after user 1 leaves
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "qux"},
+            tok=user2_tok,
+        )
+        self.helper.leave(room_id1, user3_id, tok=user3_tok)
+
+        # Make the Sliding Sync request with a wildcard `state_key` for the room members
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint
+            + f"?pos={self.get_success(from_token.to_string(self.store))}",
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [EventTypes.Create, ""],
+                            [EventTypes.Member, "*"],
+                            ["org.matrix.foo_state", ""],
+                        ],
+                        "timeline_limit": 3,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We should only see the state from the point user1 left/was banned; the
+        # `foo_state` update and user3's leave that happened afterwards are excluded
+        self._assertRequiredStateIncludes(
+            channel.json_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.Member, user1_id)],
+                state_map[(EventTypes.Member, user2_id)],
+                state_map[(EventTypes.Member, user3_id)],
+                state_map[("org.matrix.foo_state", "")],
+            },
+            exact=True,
+        )
+
+    def test_rooms_required_state_combine_superset(self) -> None:
+        """
+        Test `rooms.required_state` is combined across lists and room subscriptions.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request with two lists whose `required_state`
+        # should be combined
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [EventTypes.Create, ""],
+                            [EventTypes.Member, user1_id],
+                        ],
+                        "timeline_limit": 0,
+                    },
+                    "bar-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [EventTypes.Member, StateValues.WILDCARD],
+                            ["org.matrix.foo_state", ""],
+                        ],
+                        "timeline_limit": 0,
+                    },
+                }
+                # TODO: Room subscription should also combine with the `required_state`
+                # "room_subscriptions": {
+                #     room_id1: {
+                #         "required_state": [
+                #             ["org.matrix.bar_state", ""]
+                #         ],
+                #         "timeline_limit": 0,
+                #     }
+                # }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            channel.json_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.Member, user1_id)],
+                state_map[(EventTypes.Member, user2_id)],
+                state_map[("org.matrix.foo_state", "")],
+            },
+            exact=True,
+        )
+
+    def test_rooms_required_state_partial_state(self) -> None:
+        """
+        Test that partially-stated rooms are excluded unless `rooms.required_state` is
+        lazy-loading room members.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        _join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+        # Mark room2 as partial state
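+        # (a partially-stated room is one joined over federation whose full state
+        # hasn't finished syncing yet)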
+        self.get_success(
+            mark_event_as_partial_state(self.hs, join_response2["event_id"], room_id2)
+        )
+
+        # Make the Sliding Sync request (NOT lazy-loading room members)
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [EventTypes.Create, ""],
+                        ],
+                        "timeline_limit": 0,
+                    },
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Make sure the list includes room1 but room2 is excluded because it's still
+        # partially-stated
+        self.assertListEqual(
+            list(channel.json_body["lists"]["foo-list"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 1],
+                    "room_ids": [room_id1],
+                }
+            ],
+            channel.json_body["lists"]["foo-list"],
+        )
+
+        # Make the Sliding Sync request (with lazy-loading room members)
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [EventTypes.Create, ""],
+                            # Lazy-load room members
+                            [EventTypes.Member, StateValues.LAZY],
+                        ],
+                        "timeline_limit": 0,
+                    },
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # The list should include both rooms now because we're lazy-loading room members
+        self.assertListEqual(
+            list(channel.json_body["lists"]["foo-list"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 1],
+                    "room_ids": [room_id2, room_id1],
+                }
+            ],
+            channel.json_body["lists"]["foo-list"],
+        )
diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index f0ba40a1f1..e43140720d 100644
--- a/tests/rest/client/utils.py
+++ b/tests/rest/client/utils.py
@@ -261,9 +261,9 @@ class RestHelper:
         targ: str,
         expect_code: int = HTTPStatus.OK,
         tok: Optional[str] = None,
-    ) -> None:
+    ) -> JsonDict:
         """A convenience helper: `change_membership` with `membership` preset to "ban"."""
-        self.change_membership(
+        return self.change_membership(
             room=room,
             src=src,
             targ=targ,
diff --git a/tests/server.py b/tests/server.py
index f3a917f835..f1cd0f76be 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -946,7 +946,7 @@ def connect_client(
 
 
 class TestHomeServer(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore[assignment]
+    DATASTORE_CLASS = DataStore
 
 
 def setup_test_homeserver(
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index fe1e873e15..aad46b1b44 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -21,20 +21,32 @@
 
 import logging
 from typing import List, Tuple
+from unittest.mock import AsyncMock, patch
 
 from immutabledict import immutabledict
 
 from twisted.test.proto_helpers import MemoryReactor
 
-from synapse.api.constants import Direction, EventTypes, RelationTypes
+from synapse.api.constants import Direction, EventTypes, Membership, RelationTypes
 from synapse.api.filtering import Filter
+from synapse.crypto.event_signing import add_hashes_and_signatures
+from synapse.events import FrozenEventV3
+from synapse.federation.federation_client import SendJoinResult
 from synapse.rest import admin
 from synapse.rest.client import login, room
 from synapse.server import HomeServer
-from synapse.types import JsonDict, PersistedEventPosition, RoomStreamToken
+from synapse.storage.databases.main.stream import CurrentStateDeltaMembership
+from synapse.types import (
+    JsonDict,
+    PersistedEventPosition,
+    RoomStreamToken,
+    UserID,
+    create_requester,
+)
 from synapse.util import Clock
 
-from tests.unittest import HomeserverTestCase
+from tests.test_utils.event_injection import create_event
+from tests.unittest import FederatingHomeserverTestCase, HomeserverTestCase
 
 logger = logging.getLogger(__name__)
 
@@ -543,3 +555,859 @@ class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
                 }
             ),
         )
+
+
+class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase):
+    """
+    Test `get_current_state_delta_membership_changes_for_user(...)`
+    """
+
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.event_sources = hs.get_event_sources()
+        self.state_handler = self.hs.get_state_handler()
+        persistence = hs.get_storage_controllers().persistence
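+        # `persistence` is `Optional` (only set on workers that persist events);
+        # assert so the type checker narrows it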
+        assert persistence is not None
+        self.persistence = persistence
+
+    def test_returns_membership_events(self) -> None:
+        """
+        A basic test that a membership event in the token range is returned for the user.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_pos = self.get_success(
+            self.store.get_position_for_event(join_response["event_id"])
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_response["event_id"],
+                    event_pos=join_pos,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                )
+            ],
+        )
+
+    def test_server_left_room_after_us(self) -> None:
+        """
+        Test that when probing over part of the DAG where the server left the room *after
+        us*, we still see the join and leave changes.
+
+        This is to make sure we play nicely with this behavior: When the server leaves a
+        room, it will insert new rows with `event_id = null` into the
+        `current_state_delta_stream` table for all current state.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "power_level_content_override": {
+                    "users": {
+                        user2_id: 100,
+                        # Allow user1 to send state in the room
+                        user1_id: 100,
+                    }
+                }
+            },
+        )
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_pos1 = self.get_success(
+            self.store.get_position_for_event(join_response1["event_id"])
+        )
+        # Make sure that random other non-member state that happens to have a `state_key`
+        # matching the user ID doesn't mess with things.
+        self.helper.send_state(
+            room_id1,
+            event_type="foobarbazdummy",
+            state_key=user1_id,
+            body={"foo": "bar"},
+            tok=user1_tok,
+        )
+        # User1 should leave the room first
+        leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_pos1 = self.get_success(
+            self.store.get_position_for_event(leave_response1["event_id"])
+        )
+
+        # User2 should also leave the room (everyone has left the room which means the
+        # server is no longer in the room).
+        self.helper.leave(room_id1, user2_id, tok=user2_tok)
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Get the membership changes for the user.
+        #
+        # At this point, the `current_state_delta_stream` table should look like the
+        # following. When the server leaves a room, it will insert new rows with
+        # `event_id = null` for all current state.
+        #
+        # | stream_id | room_id  | type                        | state_key      | event_id | prev_event_id |
+        # |-----------|----------|-----------------------------|----------------|----------|---------------|
+        # | 2         | !x:test  | 'm.room.create'             | ''             | $xxx     | None          |
+        # | 3         | !x:test  | 'm.room.member'             | '@user2:test'  | $aaa     | None          |
+        # | 4         | !x:test  | 'm.room.history_visibility' | ''             | $xxx     | None          |
+        # | 4         | !x:test  | 'm.room.join_rules'         | ''             | $xxx     | None          |
+        # | 4         | !x:test  | 'm.room.power_levels'       | ''             | $xxx     | None          |
+        # | 7         | !x:test  | 'm.room.member'             | '@user1:test'  | $ooo     | None          |
+        # | 8         | !x:test  | 'foobarbazdummy'            | '@user1:test'  | $xxx     | None          |
+        # | 9         | !x:test  | 'm.room.member'             | '@user1:test'  | $ppp     | $ooo          |
+        # | 10        | !x:test  | 'foobarbazdummy'            | '@user1:test'  | None     | $xxx          |
+        # | 10        | !x:test  | 'm.room.create'             | ''             | None     | $xxx          |
+        # | 10        | !x:test  | 'm.room.history_visibility' | ''             | None     | $xxx          |
+        # | 10        | !x:test  | 'm.room.join_rules'         | ''             | None     | $xxx          |
+        # | 10        | !x:test  | 'm.room.member'             | '@user1:test'  | None     | $ppp          |
+        # | 10        | !x:test  | 'm.room.member'             | '@user2:test'  | None     | $aaa          |
+        # | 10        | !x:test  | 'm.room.power_levels'       | ''             | None     | $xxx          |
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_response1["event_id"],
+                    event_pos=join_pos1,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=leave_response1["event_id"],
+                    event_pos=leave_pos1,
+                    membership="leave",
+                    sender=user1_id,
+                    prev_event_id=join_response1["event_id"],
+                    prev_event_pos=join_pos1,
+                    prev_membership="join",
+                    prev_sender=user1_id,
+                ),
+            ],
+        )
+
+    def test_server_left_room_after_us_later(self) -> None:
+        """
+        Test when the user leaves the room, then sometime later, everyone else leaves
+        the room, causing the server to leave the room, we shouldn't see any membership
+        changes.
+
+        This is to make sure we play nicely with this behavior: When the server leaves a
+        room, it will insert new rows with `event_id = null` into the
+        `current_state_delta_stream` table for all current state.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        # User1 should leave the room first
+        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+        after_user1_leave_token = self.event_sources.get_current_token()
+
+        # User2 should also leave the room (everyone has left the room which means the
+        # server is no longer in the room).
+        self.helper.leave(room_id1, user2_id, tok=user2_tok)
+
+        after_server_leave_token = self.event_sources.get_current_token()
+
+        # Join another room as user1 just to advance the stream_ordering and bust
+        # `_membership_stream_cache`
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+        # Get the membership changes for the user.
+        #
+        # At this point, the `current_state_delta_stream` table should look like the
+        # following. When the server leaves a room, it will insert new rows with
+        # `event_id = null` for all current state.
+        #
+        # TODO: Add DB rows to better see what's going on.
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=after_user1_leave_token.room_key,
+                to_key=after_server_leave_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [],
+        )
+
+    def test_we_cause_server_left_room(self) -> None:
+        """
+        Test that when probing over part of the DAG where the user leaves the room
+        causing the server to leave the room (because we were the last local user in the
+        room), we still see the join and leave changes.
+
+        This is to make sure we play nicely with this behavior: When the server leaves a
+        room, it will insert new rows with `event_id = null` into the
+        `current_state_delta_stream` table for all current state.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "power_level_content_override": {
+                    "users": {
+                        user2_id: 100,
+                        # Allow user1 to send state in the room
+                        user1_id: 100,
+                    }
+                }
+            },
+        )
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_pos1 = self.get_success(
+            self.store.get_position_for_event(join_response1["event_id"])
+        )
+        # Make sure that random other non-member state that happens to have a `state_key`
+        # matching the user ID doesn't mess with things.
+        self.helper.send_state(
+            room_id1,
+            event_type="foobarbazdummy",
+            state_key=user1_id,
+            body={"foo": "bar"},
+            tok=user1_tok,
+        )
+
+        # User2 should leave the room first.
+        self.helper.leave(room_id1, user2_id, tok=user2_tok)
+
+        # User1 (the person we're testing with) should also leave the room (everyone has
+        # left the room which means the server is no longer in the room).
+        leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_pos1 = self.get_success(
+            self.store.get_position_for_event(leave_response1["event_id"])
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Get the membership changes for the user.
+        #
+        # At this point, the `current_state_delta_stream` table should look like the
+        # following. When the server leaves a room, it will insert new rows with
+        # `event_id = null` for all current state.
+        #
+        # | stream_id | room_id   | type                        | state_key     | event_id | prev_event_id |
+        # |-----------|-----------|-----------------------------|---------------|----------|---------------|
+        # | 2         | '!x:test' | 'm.room.create'             | ''            | '$xxx'   | None          |
+        # | 3         | '!x:test' | 'm.room.member'             | '@user2:test' | '$aaa'   | None          |
+        # | 4         | '!x:test' | 'm.room.history_visibility' | ''            | '$xxx'   | None          |
+        # | 4         | '!x:test' | 'm.room.join_rules'         | ''            | '$xxx'   | None          |
+        # | 4         | '!x:test' | 'm.room.power_levels'       | ''            | '$xxx'   | None          |
+        # | 7         | '!x:test' | 'm.room.member'             | '@user1:test' | '$ooo'   | None          |
+        # | 8         | '!x:test' | 'foobarbazdummy'            | '@user1:test' | '$xxx'   | None          |
+        # | 9         | '!x:test' | 'm.room.member'             | '@user2:test' | '$bbb'   | '$aaa'        |
+        # | 10        | '!x:test' | 'foobarbazdummy'            | '@user1:test' | None     | '$xxx'        |
+        # | 10        | '!x:test' | 'm.room.create'             | ''            | None     | '$xxx'        |
+        # | 10        | '!x:test' | 'm.room.history_visibility' | ''            | None     | '$xxx'        |
+        # | 10        | '!x:test' | 'm.room.join_rules'         | ''            | None     | '$xxx'        |
+        # | 10        | '!x:test' | 'm.room.member'             | '@user1:test' | None     | '$ooo'        |
+        # | 10        | '!x:test' | 'm.room.member'             | '@user2:test' | None     | '$bbb'        |
+        # | 10        | '!x:test' | 'm.room.power_levels'       | ''            | None     | '$xxx'        |
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_response1["event_id"],
+                    event_pos=join_pos1,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=None,  # leave_response1["event_id"],
+                    event_pos=leave_pos1,
+                    membership="leave",
+                    sender=None,  # user1_id,
+                    prev_event_id=join_response1["event_id"],
+                    prev_event_pos=join_pos1,
+                    prev_membership="join",
+                    prev_sender=user1_id,
+                ),
+            ],
+        )
+
+    def test_different_user_membership_persisted_in_same_batch(self) -> None:
+        """
+        Test a batch of membership events from different users being processed at
+        once. This results in all of the memberships being stored in the
+        `current_state_delta_stream` table with the same `stream_ordering`, even
+        though the individual events have different `stream_ordering`s.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        _user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        _user4_tok = self.login(user4_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        # User2 is just the designated person to create the room (we do this across the
+        # tests to be consistent)
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # Persist the user1, user3, and user4 join events in the same batch so they all
+        # end up in the `current_state_delta_stream` table with the same
+        # stream_ordering.
+        join_event3, join_event_context3 = self.get_success(
+            create_event(
+                self.hs,
+                sender=user3_id,
+                type=EventTypes.Member,
+                state_key=user3_id,
+                content={"membership": "join"},
+                room_id=room_id1,
+            )
+        )
+        # We want to put user1 in the middle of the batch. This way, regardless of how
+        # the implementation inserts rows into `current_state_delta_stream` (whether it
+        # uses the minimum or maximum stream position of the batch), we will still
+        # catch bugs.
+        join_event1, join_event_context1 = self.get_success(
+            create_event(
+                self.hs,
+                sender=user1_id,
+                type=EventTypes.Member,
+                state_key=user1_id,
+                content={"membership": "join"},
+                room_id=room_id1,
+            )
+        )
+        join_event4, join_event_context4 = self.get_success(
+            create_event(
+                self.hs,
+                sender=user4_id,
+                type=EventTypes.Member,
+                state_key=user4_id,
+                content={"membership": "join"},
+                room_id=room_id1,
+            )
+        )
+        self.get_success(
+            self.persistence.persist_events(
+                [
+                    (join_event3, join_event_context3),
+                    (join_event1, join_event_context1),
+                    (join_event4, join_event_context4),
+                ]
+            )
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Get the membership changes for the user.
+        #
+        # At this point, the `current_state_delta_stream` table should look like (notice
+        # those three memberships at the end with `stream_id=7` because we persisted
+        # them in the same batch):
+        #
+        # | stream_id | room_id   | type                       | state_key        | event_id | prev_event_id |
+        # |-----------|-----------|----------------------------|------------------|----------|---------------|
+        # | 2         | '!x:test' | 'm.room.create'            | ''               | '$xxx'   | None          |
+        # | 3         | '!x:test' | 'm.room.member'            | '@user2:test'    | '$xxx'   | None          |
+        # | 4         | '!x:test' | 'm.room.history_visibility'| ''               | '$xxx'   | None          |
+        # | 4         | '!x:test' | 'm.room.join_rules'        | ''               | '$xxx'   | None          |
+        # | 4         | '!x:test' | 'm.room.power_levels'      | ''               | '$xxx'   | None          |
+        # | 7         | '!x:test' | 'm.room.member'            | '@user3:test'    | '$xxx'   | None          |
+        # | 7         | '!x:test' | 'm.room.member'            | '@user1:test'    | '$xxx'   | None          |
+        # | 7         | '!x:test' | 'm.room.member'            | '@user4:test'    | '$xxx'   | None          |
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+            )
+        )
+
+        join_pos3 = self.get_success(
+            self.store.get_position_for_event(join_event3.event_id)
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_event1.event_id,
+                    # Ideally, this would be `join_pos1` (to match the `event_id`) but
+                    # when events are persisted in a batch, they are all stored in the
+                    # `current_state_delta_stream` table with the minimum
+                    # `stream_ordering` from the batch.
+                    event_pos=join_pos3,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+            ],
+        )
+
+    def test_state_reset(self) -> None:
+        """
+        Test a state reset scenario where the user gets removed from the room (when
+        there is no corresponding leave event).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_pos1 = self.get_success(
+            self.store.get_position_for_event(join_response1["event_id"])
+        )
+
+        before_reset_token = self.event_sources.get_current_token()
+
+        # Send another state event to make a position for the state reset to happen at
+        dummy_state_response = self.helper.send_state(
+            room_id1,
+            event_type="foobarbaz",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+        dummy_state_pos = self.get_success(
+            self.store.get_position_for_event(dummy_state_response["event_id"])
+        )
+
+        # Mock a state reset removing the membership for user1 in the current state
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="current_state_events",
+                keyvalues={
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                },
+                desc="state reset user in current_state_delta_stream",
+            )
+        )
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                table="current_state_delta_stream",
+                values={
+                    "stream_id": dummy_state_pos.stream,
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                    "event_id": None,
+                    "prev_event_id": join_response1["event_id"],
+                    "instance_name": dummy_state_pos.instance_name,
+                },
+                desc="state reset user in current_state_delta_stream",
+            )
+        )
+
+        # Manually bust the cache since we're just messing with the database directly
+        # and not causing an actual state reset.
+        self.store._membership_stream_cache.entity_has_changed(
+            user1_id, dummy_state_pos.stream
+        )
+
+        after_reset_token = self.event_sources.get_current_token()
+
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_reset_token.room_key,
+                to_key=after_reset_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=None,
+                    event_pos=dummy_state_pos,
+                    membership="leave",
+                    sender=None,  # user1_id,
+                    prev_event_id=join_response1["event_id"],
+                    prev_event_pos=join_pos1,
+                    prev_membership="join",
+                    prev_sender=user1_id,
+                ),
+            ],
+        )
+
+    def test_excluded_room_ids(self) -> None:
+        """
+        Test that the `excluded_room_ids` option excludes changes from the specified rooms.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_pos1 = self.get_success(
+            self.store.get_position_for_event(join_response1["event_id"])
+        )
+
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok)
+        join_pos2 = self.get_success(
+            self.store.get_position_for_event(join_response2["event_id"])
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # First, test that the rooms are returned without the `excluded_room_ids` option
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_response1["event_id"],
+                    event_pos=join_pos1,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+                CurrentStateDeltaMembership(
+                    room_id=room_id2,
+                    event_id=join_response2["event_id"],
+                    event_pos=join_pos2,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+            ],
+        )
+
+        # Then test that `excluded_room_ids` excludes room2 as expected
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+                excluded_room_ids=[room_id2],
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_response1["event_id"],
+                    event_pos=join_pos1,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                )
+            ],
+        )
+
+
+class GetCurrentStateDeltaMembershipChangesForUserFederationTestCase(
+    FederatingHomeserverTestCase
+):
+    """
+    Test `get_current_state_delta_membership_changes_for_user(...)` when joining remote federated rooms.
+    """
+
+    servlets = [
+        admin.register_servlets_for_client_rest_resource,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
+        self.store = self.hs.get_datastores().main
+        self.event_sources = hs.get_event_sources()
+        self.room_member_handler = hs.get_room_member_handler()
+
+    def test_remote_join(self) -> None:
+        """
+        Test a remote join where the first rows in `current_state_delta_stream` will
+        just be the room state at the point we joined the remote room.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+
+        before_join_token = self.event_sources.get_current_token()
+
+        initially_unjoined_room_id = f"!example:{self.OTHER_SERVER_NAME}"
+
+        # Remotely join a room on another homeserver.
+        #
+        # To do this we have to mock the responses from the remote homeserver. We also
+        # patch out a bunch of event checks on our end.
+        create_event_source = {
+            "auth_events": [],
+            "content": {
+                "creator": f"@creator:{self.OTHER_SERVER_NAME}",
+                "room_version": self.hs.config.server.default_room_version.identifier,
+            },
+            "depth": 0,
+            "origin_server_ts": 0,
+            "prev_events": [],
+            "room_id": intially_unjoined_room_id,
+            "sender": f"@creator:{self.OTHER_SERVER_NAME}",
+            "state_key": "",
+            "type": EventTypes.Create,
+        }
+        self.add_hashes_and_signatures_from_other_server(
+            create_event_source,
+            self.hs.config.server.default_room_version,
+        )
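+        # Construct the event object directly from the signed dict; it will reach
+        # our server via the mocked federation responses below.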
+        create_event = FrozenEventV3(
+            create_event_source,
+            self.hs.config.server.default_room_version,
+            {},
+            None,
+        )
+        creator_join_event_source = {
+            "auth_events": [create_event.event_id],
+            "content": {
+                "membership": "join",
+            },
+            "depth": 1,
+            "origin_server_ts": 1,
+            "prev_events": [],
+            "room_id": intially_unjoined_room_id,
+            "sender": f"@creator:{self.OTHER_SERVER_NAME}",
+            "state_key": f"@creator:{self.OTHER_SERVER_NAME}",
+            "type": EventTypes.Member,
+        }
+        self.add_hashes_and_signatures_from_other_server(
+            creator_join_event_source,
+            self.hs.config.server.default_room_version,
+        )
+        creator_join_event = FrozenEventV3(
+            creator_join_event_source,
+            self.hs.config.server.default_room_version,
+            {},
+            None,
+        )
+
+        # Our local user is going to remote join the room
+        join_event_source = {
+            "auth_events": [create_event.event_id],
+            "content": {"membership": "join"},
+            "depth": 1,
+            "origin_server_ts": 100,
+            "prev_events": [creator_join_event.event_id],
+            "sender": user1_id,
+            "state_key": user1_id,
+            "room_id": intially_unjoined_room_id,
+            "type": EventTypes.Member,
+        }
+        add_hashes_and_signatures(
+            self.hs.config.server.default_room_version,
+            join_event_source,
+            self.hs.hostname,
+            self.hs.signing_key,
+        )
+        join_event = FrozenEventV3(
+            join_event_source,
+            self.hs.config.server.default_room_version,
+            {},
+            None,
+        )
+
+        mock_make_membership_event = AsyncMock(
+            return_value=(
+                self.OTHER_SERVER_NAME,
+                join_event,
+                self.hs.config.server.default_room_version,
+            )
+        )
+        mock_send_join = AsyncMock(
+            return_value=SendJoinResult(
+                join_event,
+                self.OTHER_SERVER_NAME,
+                state=[create_event, creator_join_event],
+                auth_chain=[create_event, creator_join_event],
+                partial_state=False,
+                servers_in_room=frozenset(),
+            )
+        )
+
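+        # Stub out the federation handshake: `make_membership_event` returns
+        # our pre-built join event and `send_join` returns the room state and
+        # auth chain built above. Event auth is patched out since this
+        # stripped-down event graph (no join rules, power levels, etc.) would
+        # not pass the real checks.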
+        with patch.object(
+            self.room_member_handler.federation_handler.federation_client,
+            "make_membership_event",
+            mock_make_membership_event,
+        ), patch.object(
+            self.room_member_handler.federation_handler.federation_client,
+            "send_join",
+            mock_send_join,
+        ), patch(
+            "synapse.event_auth._is_membership_change_allowed",
+            return_value=None,
+        ), patch(
+            "synapse.handlers.federation_event.check_state_dependent_auth_rules",
+            return_value=None,
+        ):
+            self.get_success(
+                self.room_member_handler.update_membership(
+                    requester=create_requester(user1_id),
+                    target=UserID.from_string(user1_id),
+                    room_id=initially_unjoined_room_id,
+                    action=Membership.JOIN,
+                    remote_room_hosts=[self.OTHER_SERVER_NAME],
+                )
+            )
+
+        after_join_token = self.event_sources.get_current_token()
+
+        # Get the membership changes for the user.
+        #
+        # At this point, the `current_state_delta_stream` table should look like the
+        # following. Notice that all of the events are at the same `stream_id` because
+        # the entire current state is established at once when we remotely join:
+        #
+        # | stream_id | room_id                      | type            | state_key                    | event_id | prev_event_id |
+        # |-----------|------------------------------|-----------------|------------------------------|----------|---------------|
+        # | 2         | '!example:other.example.com' | 'm.room.member' | '@user1:test'                | '$xxx'   | None          |
+        # | 2         | '!example:other.example.com' | 'm.room.create' | ''                           | '$xxx'   | None          |
+        # | 2         | '!example:other.example.com' | 'm.room.member' | '@creator:other.example.com' | '$xxx'   | None          |
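+        #
+        # We expect only our own membership change back, not the creator's
+        # membership or the create event.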
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_join_token.room_key,
+                to_key=after_join_token.room_key,
+            )
+        )
+
+        join_pos = self.get_success(
+            self.store.get_position_for_event(join_event.event_id)
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=initially_unjoined_room_id,
+                    event_id=join_event.event_id,
+                    event_pos=join_pos,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+            ],
+        )
diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py
index fd03c23b89..35b3245708 100644
--- a/tests/test_utils/event_injection.py
+++ b/tests/test_utils/event_injection.py
@@ -125,13 +125,15 @@ async def mark_event_as_partial_state(
     in this table).
     """
     store = hs.get_datastores().main
-    await store.db_pool.simple_upsert(
-        table="partial_state_rooms",
-        keyvalues={"room_id": room_id},
-        values={},
-        insertion_values={"room_id": room_id},
+    # Use the store helper to insert into the database so the caches are busted
+    await store.store_partial_state_room(
+        room_id=room_id,
+        servers={hs.hostname},
+        device_lists_stream_id=0,
+        joined_via=hs.hostname,
     )
 
+    # FIXME: Bust the cache
     await store.db_pool.simple_insert(
         table="partial_state_events",
         values={