author     Sean Quah <seanq@matrix.org>  2022-10-04 11:26:47 +0100
committer  Sean Quah <seanq@matrix.org>  2022-10-04 11:26:47 +0100
commit     dedd4cd061d5db79ba0de25b1cfe519712c8095f (patch)
tree       23d7d63ca6e24ebd4b89099fd7b4c322688864f2
parent     Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes (diff)
parent     1.69.0rc1 (diff)
download   synapse-dedd4cd061d5db79ba0de25b1cfe519712c8095f.tar.xz
Merge remote-tracking branch 'origin/release-v1.69' into matrix-org-hotfixes
-rw-r--r--  .github/dependabot.yml  17
-rw-r--r--  .github/workflows/dependabot_changelog.yml  46
-rw-r--r--  .github/workflows/docker.yml  8
-rw-r--r--  .github/workflows/docs.yaml  2
-rw-r--r--  .github/workflows/latest_deps.yml  14
-rw-r--r--  .github/workflows/release-artifacts.yml  15
-rw-r--r--  .github/workflows/tests.yml  39
-rw-r--r--  .github/workflows/twisted_trunk.yml  14
-rw-r--r--  CHANGES.md  107
-rw-r--r--  build_rust.py  3
-rw-r--r--  changelog.d/13487.misc  1
-rw-r--r--  changelog.d/13556.feature  1
-rw-r--r--  changelog.d/13635.feature  1
-rw-r--r--  changelog.d/13667.feature  1
-rw-r--r--  changelog.d/13719.bugfix  1
-rw-r--r--  changelog.d/13722.feature  1
-rw-r--r--  changelog.d/13768.misc  1
-rw-r--r--  changelog.d/13772.doc  1
-rw-r--r--  changelog.d/13782.feature  1
-rw-r--r--  changelog.d/13787.misc  1
-rw-r--r--  changelog.d/13792.misc  1
-rw-r--r--  changelog.d/13796.misc  1
-rw-r--r--  changelog.d/13799.feature  1
-rw-r--r--  changelog.d/13800.misc  1
-rw-r--r--  changelog.d/13809.misc  1
-rw-r--r--  changelog.d/13818.doc  1
-rw-r--r--  changelog.d/13823.misc  1
-rw-r--r--  changelog.d/13830.bugfix  1
-rw-r--r--  changelog.d/13831.feature  1
-rw-r--r--  changelog.d/13832.feature  1
-rw-r--r--  changelog.d/13836.doc  1
-rw-r--r--  changelog.d/13838.misc  1
-rw-r--r--  changelog.d/13839.misc  1
-rw-r--r--  changelog.d/13840.bugfix  1
-rw-r--r--  changelog.d/13843.removal  1
-rw-r--r--  changelog.d/13850.misc  1
-rw-r--r--  changelog.d/13855.bugfix  1
-rw-r--r--  changelog.d/13859.misc  1
-rw-r--r--  changelog.d/13860.feature  1
-rw-r--r--  changelog.d/13863.bugfix  1
-rw-r--r--  changelog.d/13867.misc  1
-rw-r--r--  changelog.d/13868.misc  1
-rw-r--r--  changelog.d/13870.doc  1
-rw-r--r--  changelog.d/13872.bugfix  1
-rw-r--r--  changelog.d/13873.misc  1
-rw-r--r--  changelog.d/13874.misc  1
-rw-r--r--  changelog.d/13876.misc  1
-rw-r--r--  changelog.d/13879.misc  1
-rw-r--r--  changelog.d/13885.misc  1
-rw-r--r--  changelog.d/13888.misc  1
-rw-r--r--  changelog.d/13889.misc  1
-rw-r--r--  changelog.d/13890.misc  1
-rw-r--r--  changelog.d/13892.feature  1
-rw-r--r--  changelog.d/13893.feature  1
-rw-r--r--  changelog.d/13904.bugfix  1
-rw-r--r--  changelog.d/13905.misc  1
-rw-r--r--  changelog.d/13909.bugfix  1
-rw-r--r--  changelog.d/13911.doc  1
-rw-r--r--  changelog.d/13913.misc  1
-rw-r--r--  changelog.d/13914.misc  1
-rw-r--r--  changelog.d/13915.doc  1
-rw-r--r--  changelog.d/13920.feature  1
-rw-r--r--  changelog.d/13922.bugfix  1
-rw-r--r--  changelog.d/13924.misc  1
-rw-r--r--  changelog.d/13928.doc  1
-rw-r--r--  changelog.d/13930.doc  1
-rw-r--r--  changelog.d/13931.doc  1
-rw-r--r--  changelog.d/13932.feature  1
-rw-r--r--  changelog.d/13934.misc  1
-rw-r--r--  changelog.d/13936.feature  1
-rw-r--r--  changelog.d/13937.feature  1
-rw-r--r--  changelog.d/13939.feature  1
-rw-r--r--  changelog.d/13947.feature  1
-rw-r--r--  changelog.d/13952.bugfix  1
-rw-r--r--  changelog.d/13957.feature  1
-rw-r--r--  changelog.d/13972.bugfix  1
-rw-r--r--  changelog.d/13973.misc  1
-rw-r--r--  changelog.d/13974.doc  1
-rw-r--r--  debian/changelog  5
-rw-r--r--  docs/metrics-howto.md  11
-rw-r--r--  docs/upgrade.md  28
-rw-r--r--  docs/usage/configuration/config_documentation.md  37
-rw-r--r--  poetry.lock  59
-rw-r--r--  pyproject.toml  2
-rwxr-xr-x  scripts-dev/check_pydantic_models.py  5
-rw-r--r--  synapse/app/_base.py  4
-rw-r--r--  synapse/config/metrics.py  26
-rw-r--r--  synapse/federation/federation_base.py  25
-rw-r--r--  synapse/federation/federation_client.py  50
-rw-r--r--  synapse/handlers/cas.py  3
-rw-r--r--  synapse/handlers/federation_event.py  10
-rw-r--r--  synapse/handlers/relations.py  25
-rw-r--r--  synapse/handlers/room_member.py  17
-rw-r--r--  synapse/handlers/ui_auth/checkers.py  3
-rw-r--r--  synapse/handlers/user_directory.py  36
-rw-r--r--  synapse/logging/context.py  20
-rw-r--r--  synapse/logging/opentracing.py  4
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py  5
-rw-r--r--  synapse/rest/client/relations.py  6
-rw-r--r--  synapse/rest/client/room.py  34
-rw-r--r--  synapse/storage/database.py  82
-rw-r--r--  synapse/storage/databases/main/end_to_end_keys.py  83
-rw-r--r--  synapse/storage/databases/main/event_push_actions.py  58
-rw-r--r--  synapse/storage/databases/main/events.py  2
-rw-r--r--  synapse/storage/databases/main/room.py  7
-rw-r--r--  synapse/storage/databases/main/search.py  2
-rw-r--r--  synapse/storage/schema/__init__.py  1
-rw-r--r--  synapse/storage/schema/main/delta/73/05old_push_actions.sql.postgres  22
-rw-r--r--  synapse/storage/schema/main/delta/73/05old_push_actions.sql.sqlite  24
-rw-r--r--  synapse/visibility.py  4
-rw-r--r--  tests/federation/test_federation_client.py  75
-rw-r--r--  tests/rest/client/test_relations.py  13
-rw-r--r--  tests/rest/client/test_rooms.py  123
-rw-r--r--  tests/storage/test_monthly_active_users.py  7
-rw-r--r--  tests/test_federation.py  4
-rw-r--r--  tests/utils.py  4
116 files changed, 939 insertions, 320 deletions
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..9c7db1fc86
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,17 @@
+version: 2
+updates:
+  - # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
+    package-ecosystem: "pip"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+
+  - package-ecosystem: "docker"
+    directory: "/docker"
+    schedule:
+      interval: "weekly"
+
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
diff --git a/.github/workflows/dependabot_changelog.yml b/.github/workflows/dependabot_changelog.yml
new file mode 100644
index 0000000000..b6a29a5722
--- /dev/null
+++ b/.github/workflows/dependabot_changelog.yml
@@ -0,0 +1,46 @@
+name: Write changelog for dependabot PR
+on:
+  pull_request:
+    types:
+      - opened
+      - reopened  # For debugging!
+
+permissions:
+  # Needed to be able to push the commit. See 
+  #     https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
+  # for a similar example
+  contents: write
+
+jobs:
+  add-changelog:
+    runs-on: 'ubuntu-latest'
+    if: ${{ github.actor == 'dependabot[bot]' }}
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.ref }}
+      - name: Write, commit and push changelog
+        run: |
+          echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
+          git add changelog.d
+          git config user.email "github-actions[bot]@users.noreply.github.com"
+          git config user.name "GitHub Actions"
+          git commit -m "Changelog"
+          git push
+        shell: bash
+      # The `git push` above does not trigger CI on the dependabot PR.
+      #
+      # By default, workflows can't trigger other workflows when they're just using the
+      # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
+      # recursive workflow loops by accident, because that'll get very expensive very
+      # quickly.) Instead, you have to manually call out to another workflow, or else
+      # make your changes (i.e. the `git push` above) using a personal access token.
+      # See
+      # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
+      #
+      # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
+      # See git commit history for previous attempts. If anyone desperately wants to try
+      # again in the future, make a matrix-bot account and use its access token to git push.
+
+  # THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
+  # are sufficiently locked down to dependabot only as above.
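
The comments above note that a push made with the default `GITHUB_TOKEN` does not retrigger CI, and suggest using a dedicated bot account's personal access token instead. A minimal sketch of that variant, assuming a hypothetical `MATRIX_BOT_TOKEN` repository secret (not defined anywhere in this diff):

```yaml
# Sketch only: check out with a bot account's PAT instead of the default
# GITHUB_TOKEN, so that the later `git push` triggers CI on the PR.
# MATRIX_BOT_TOKEN is an assumed secret name, not part of this change.
- uses: actions/checkout@v3
  with:
    ref: ${{ github.event.pull_request.head.ref }}
    # actions/checkout persists this credential for subsequent git commands.
    token: ${{ secrets.MATRIX_BOT_TOKEN }}
```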
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index d20d30c035..b3793e5c1f 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -17,19 +17,19 @@ jobs:
     steps:
       - name: Set up QEMU
         id: qemu
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
         with:
           platforms: arm64
 
       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
 
       - name: Inspect builder
         run: docker buildx inspect
           
       - name: Log in to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -48,7 +48,7 @@ jobs:
             type=pep440,pattern={{raw}}
 
       - name: Build and push all platforms
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           push: true
           labels: "gitsha1=${{ github.sha }}"
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index b366eb8667..17b9df601c 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -17,7 +17,7 @@ jobs:
     name: GitHub Pages
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Setup mdbook
         uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index 9a708286a4..e240bf4e4f 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -25,7 +25,7 @@ jobs:
   mypy:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Install Rust
         uses: actions-rs/toolchain@v1
         with:
@@ -59,7 +59,7 @@ jobs:
             postgres-version: "14"
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -133,7 +133,7 @@ jobs:
       BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -155,7 +155,7 @@ jobs:
         if: ${{ always() }}
         run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -182,8 +182,8 @@ jobs:
             database: Postgres
 
     steps:
-      - name: Run actions/checkout@v2 for synapse
-        uses: actions/checkout@v2
+      - name: Run actions/checkout@v3 for synapse
+        uses: actions/checkout@v3
         with:
           path: synapse
 
@@ -210,7 +210,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 0708d631cd..eb12d88fbc 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -11,6 +11,7 @@ on:
 
     # we do the full build on tags.
     tags: ["v*"]
+  workflow_dispatch:
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -24,7 +25,7 @@ jobs:
     name: "Calculate list of debian distros"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: actions/setup-python@v2
       - id: set-distros
         run: |
@@ -49,18 +50,18 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           path: src
 
       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
         with:
           install: true
 
       - name: Set up docker layer caching
-        uses: actions/cache@v2
+        uses: actions/cache@v3
         with:
           path: /tmp/.buildx-cache
           key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -84,7 +85,7 @@ jobs:
           mv /tmp/.buildx-cache-new /tmp/.buildx-cache
 
       - name: Upload debs as artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: debs
           path: debs/*
@@ -145,7 +146,7 @@ jobs:
       - name: Build sdist
         run: python -m build --sdist
 
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v3
         with:
           name: Sdist
           path: dist/*.tar.gz
@@ -162,7 +163,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Download all workflow run artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
       - name: Build a tarball for the debs
         run: tar -cvJf debs.tar.xz debs
       - name: Attach to release
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 9fe61930a5..94eb58b59d 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -4,6 +4,7 @@ on:
   push:
     branches: ["develop", "release-*"]
   pull_request:
+  workflow_dispatch:
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -30,7 +31,7 @@ jobs:
   check-sampleconfig:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: actions/setup-python@v2
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -41,7 +42,7 @@ jobs:
   check-schema-delta:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: actions/setup-python@v2
       - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
       - run: scripts-dev/check_schema_delta.py --force-colors
@@ -54,15 +55,15 @@ jobs:
   lint-crlf:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Check line endings
         run: scripts-dev/check_line_terminators.sh
 
   lint-newsfile:
-    if: ${{ github.base_ref == 'develop'  || contains(github.base_ref, 'release-') }}
+    if: ${{ (github.base_ref == 'develop'  || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
@@ -75,7 +76,7 @@ jobs:
   lint-pydantic:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - uses: matrix-org/setup-python-poetry@v1
@@ -89,7 +90,7 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -107,7 +108,7 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -140,7 +141,7 @@ jobs:
     needs: linting-done
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: actions/setup-python@v2
       - id: get-matrix
         run: .ci/scripts/calculate_jobs.py
@@ -157,7 +158,7 @@ jobs:
         job:  ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1
       - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
         if: ${{ matrix.job.postgres-version }}
@@ -199,7 +200,7 @@ jobs:
     needs: linting-done
     runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -270,7 +271,7 @@ jobs:
         extras: ["all"]
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       # Install libs necessary for PyPy to build binary wheels for dependencies
       - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
       - uses: matrix-org/setup-python-poetry@v1
@@ -313,7 +314,7 @@ jobs:
         job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Prepare test blacklist
         run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
 
@@ -331,7 +332,7 @@ jobs:
         if: ${{ always() }}
         run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
@@ -361,7 +362,7 @@ jobs:
           --health-retries 5
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -402,7 +403,7 @@ jobs:
           --health-retries 5
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -444,8 +445,8 @@ jobs:
             database: Postgres
 
     steps:
-      - name: Run actions/checkout@v2 for synapse
-        uses: actions/checkout@v2
+      - name: Run actions/checkout@v3 for synapse
+        uses: actions/checkout@v3
         with:
           path: synapse
 
@@ -473,7 +474,7 @@ jobs:
       - changes
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index 8fa2fbdea0..b4e26118c0 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -40,7 +40,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1
 
       - name: Install Rust
@@ -81,7 +81,7 @@ jobs:
         - ${{ github.workspace }}:/src
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -112,7 +112,7 @@ jobs:
         if: ${{ always() }}
         run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -138,8 +138,8 @@ jobs:
             database: Postgres
 
     steps:
-      - name: Run actions/checkout@v2 for synapse
-        uses: actions/checkout@v2
+      - name: Run actions/checkout@v3 for synapse
+        uses: actions/checkout@v3
         with:
           path: synapse
 
@@ -177,7 +177,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/CHANGES.md b/CHANGES.md
index 5d90ad9513..f2a23c4d69 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,110 @@
+Synapse 1.69.0rc1 (2022-10-04)
+==============================
+
+Features
+--------
+
+- Allow application services to set the `origin_server_ts` of a state event by providing the query parameter `ts` in `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`, per [MSC3316](https://github.com/matrix-org/matrix-doc/pull/3316). Contributed by @lukasdenk. ([\#11866](https://github.com/matrix-org/synapse/issues/11866))
+- Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)). ([\#13556](https://github.com/matrix-org/synapse/issues/13556))
+- Exponentially back off from backfilling the same event over and over. ([\#13635](https://github.com/matrix-org/synapse/issues/13635), [\#13936](https://github.com/matrix-org/synapse/issues/13936))
+- Add cache invalidation across workers to module API. ([\#13667](https://github.com/matrix-org/synapse/issues/13667), [\#13947](https://github.com/matrix-org/synapse/issues/13947))
+- Experimental implementation of MSC3882 to allow an existing device/session to generate a login token for use on a new device/session. ([\#13722](https://github.com/matrix-org/synapse/issues/13722))
+- Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)). ([\#13782](https://github.com/matrix-org/synapse/issues/13782), [\#13893](https://github.com/matrix-org/synapse/issues/13893), [\#13932](https://github.com/matrix-org/synapse/issues/13932), [\#13937](https://github.com/matrix-org/synapse/issues/13937), [\#13939](https://github.com/matrix-org/synapse/issues/13939))
+- Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). ([\#13799](https://github.com/matrix-org/synapse/issues/13799), [\#13831](https://github.com/matrix-org/synapse/issues/13831), [\#13860](https://github.com/matrix-org/synapse/issues/13860))
+- Keep track of when an event pulled over federation fails its signature check so we can intelligently back off in the future. ([\#13815](https://github.com/matrix-org/synapse/issues/13815))
+- Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint. ([\#13832](https://github.com/matrix-org/synapse/issues/13832))
+- Faster remote room joins: record _when_ we first partial-join to a room. ([\#13892](https://github.com/matrix-org/synapse/issues/13892))
+- Support a `dir` parameter on the `/relations` endpoint per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#13920](https://github.com/matrix-org/synapse/issues/13920))
+- Ask mail servers receiving emails from Synapse not to send automatic replies (e.g. out-of-office responses). ([\#13957](https://github.com/matrix-org/synapse/issues/13957))
+
+
+Bugfixes
+--------
+
+- Send invite push notifications for invites over federation. ([\#13719](https://github.com/matrix-org/synapse/issues/13719), [\#14014](https://github.com/matrix-org/synapse/issues/14014))
+- Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join. ([\#13830](https://github.com/matrix-org/synapse/issues/13830))
+- Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward. ([\#13840](https://github.com/matrix-org/synapse/issues/13840))
+- Fix access token leak to logs from proxy agent. ([\#13855](https://github.com/matrix-org/synapse/issues/13855))
+- Fix the `have_seen_event` cache not being invalidated after we persist an event, which caused inefficiencies such as extra `/state` federation calls. ([\#13863](https://github.com/matrix-org/synapse/issues/13863))
+- Faster room joins: Fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room. ([\#13872](https://github.com/matrix-org/synapse/issues/13872))
+- Fix a bug introduced in 1.66 where some required fields in the pushrules sent to clients were not present anymore. Contributed by Nico. ([\#13904](https://github.com/matrix-org/synapse/issues/13904))
+- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
+- Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation. ([\#13922](https://github.com/matrix-org/synapse/issues/13922))
+- Fix a bug introduced in v1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time. ([\#13952](https://github.com/matrix-org/synapse/issues/13952))
+- Fix a long-standing bug where `POST /_matrix/client/v3/keys/query` requests could result in excessively large SQL queries. ([\#13956](https://github.com/matrix-org/synapse/issues/13956))
+- Fix a performance regression in the `get_users_in_room` database query. Introduced in v1.67.0. ([\#13972](https://github.com/matrix-org/synapse/issues/13972))
+- Fix a bug where the Rust extension wasn't built in `release` mode when using `poetry install`. ([\#14009](https://github.com/matrix-org/synapse/issues/14009))
+- Do not return an unspecified `original_event` field when using the stable `/relations` endpoint. Introduced in Synapse v1.57.0. ([\#14025](https://github.com/matrix-org/synapse/issues/14025))
+
+
+Improved Documentation
+----------------------
+
+- Add `worker_main_http_uri` for the worker generator bash script. ([\#13772](https://github.com/matrix-org/synapse/issues/13772))
+- Update URL for the NixOS module for Synapse. ([\#13818](https://github.com/matrix-org/synapse/issues/13818))
+- Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name` not `displayname`. ([\#13836](https://github.com/matrix-org/synapse/issues/13836))
+- Fix a cross-link from the register admin API to the `registration_shared_secret` configuration documentation. ([\#13870](https://github.com/matrix-org/synapse/issues/13870))
+- Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed. ([\#13911](https://github.com/matrix-org/synapse/issues/13911), [\#13930](https://github.com/matrix-org/synapse/issues/13930))
+- Emphasize the right reasons when to use `(room_id, event_id)` in a database schema. ([\#13915](https://github.com/matrix-org/synapse/issues/13915))
+- Add instruction to contributing guide for running unit tests in parallel. Contributed by @ashfame. ([\#13928](https://github.com/matrix-org/synapse/issues/13928))
+- Clarify that the `auto_join_rooms` config option can also be used with Space aliases. ([\#13931](https://github.com/matrix-org/synapse/issues/13931))
+- Add some cross references to worker documentation. ([\#13974](https://github.com/matrix-org/synapse/issues/13974))
+- Linkify URLs in config documentation. ([\#14003](https://github.com/matrix-org/synapse/issues/14003))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0. ([\#13843](https://github.com/matrix-org/synapse/issues/13843))
+- Announce that legacy metric names are deprecated and will be turned off by default in Synapse v1.71.0 and removed altogether in Synapse v1.73.0. See the upgrade notes for more information. ([\#14024](https://github.com/matrix-org/synapse/issues/14024))
+
+
+Internal Changes
+----------------
+
+- Speed up creation of DM rooms. ([\#13487](https://github.com/matrix-org/synapse/issues/13487), [\#13800](https://github.com/matrix-org/synapse/issues/13800))
+- Port push rules to using Rust. ([\#13768](https://github.com/matrix-org/synapse/issues/13768), [\#13838](https://github.com/matrix-org/synapse/issues/13838), [\#13889](https://github.com/matrix-org/synapse/issues/13889))
+- Optimise get rooms for user calls. Contributed by Nick @ Beeper (@fizzadar). ([\#13787](https://github.com/matrix-org/synapse/issues/13787))
+- Update the script which makes full schema dumps. ([\#13792](https://github.com/matrix-org/synapse/issues/13792))
+- Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar). ([\#13796](https://github.com/matrix-org/synapse/issues/13796))
+- Improve the `synapse.api.auth.Auth` mock used in unit tests. ([\#13809](https://github.com/matrix-org/synapse/issues/13809))
+- Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server. ([\#13823](https://github.com/matrix-org/synapse/issues/13823))
+- Carry IdP Session IDs through user-mapping sessions. ([\#13839](https://github.com/matrix-org/synapse/issues/13839))
+- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
+- Raise issue if complement fails with latest deps. ([\#13859](https://github.com/matrix-org/synapse/issues/13859))
+- Correct the comments in the complement dockerfile. ([\#13867](https://github.com/matrix-org/synapse/issues/13867))
+- Fix unstable MSC3882 endpoint being incorrectly available on stable API versions. ([\#13868](https://github.com/matrix-org/synapse/issues/13868))
+- Create a new snapshot of the database schema. ([\#13873](https://github.com/matrix-org/synapse/issues/13873))
+- Faster room joins: Send device list updates to most servers in rooms with partial state. ([\#13874](https://github.com/matrix-org/synapse/issues/13874), [\#14013](https://github.com/matrix-org/synapse/issues/14013))
+- Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console. ([\#13876](https://github.com/matrix-org/synapse/issues/13876))
+- Only pull relevant backfill points from the database based on the current depth and limit (instead of all) every time we want to `/backfill`. ([\#13879](https://github.com/matrix-org/synapse/issues/13879))
+- Correctly handle a race with device lists when a remote user leaves during a partial join. ([\#13885](https://github.com/matrix-org/synapse/issues/13885))
+- Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests. ([\#13888](https://github.com/matrix-org/synapse/issues/13888))
+- Improve backfill robustness by trying more servers when we get a `4xx` error back. ([\#13890](https://github.com/matrix-org/synapse/issues/13890))
+- Fix mypy errors with canonicaljson 1.6.3. ([\#13905](https://github.com/matrix-org/synapse/issues/13905))
+- Faster remote room joins: correctly handle remote device list updates during a partial join. ([\#13913](https://github.com/matrix-org/synapse/issues/13913))
+- Complement image: propagate SIGTERM to all workers. ([\#13914](https://github.com/matrix-org/synapse/issues/13914))
+- Update an inaccurate comment in Synapse's upsert database helper. ([\#13924](https://github.com/matrix-org/synapse/issues/13924))
+- Update mypy (0.950 -> 0.981) and mypy-zope (0.3.7 -> 0.3.11). ([\#13925](https://github.com/matrix-org/synapse/issues/13925), [\#13993](https://github.com/matrix-org/synapse/issues/13993))
+- Correctly handle sending local device list updates to remote servers during a partial join. ([\#13934](https://github.com/matrix-org/synapse/issues/13934))
+- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating users to copy over during a room upgrade. ([\#13960](https://github.com/matrix-org/synapse/issues/13960))
+- Refactor language in user directory `_track_user_joined_room` code to make it more clear that we use both local and remote users. ([\#13966](https://github.com/matrix-org/synapse/issues/13966))
+- Revert catch-all exceptions being recorded as event pull attempt failures (only handle what we know about). ([\#13969](https://github.com/matrix-org/synapse/issues/13969))
+- Speed up calculating push actions in large rooms. ([\#13973](https://github.com/matrix-org/synapse/issues/13973), [\#13992](https://github.com/matrix-org/synapse/issues/13992))
+- Enable update notifications from Github's dependabot. ([\#13976](https://github.com/matrix-org/synapse/issues/13976))
+- Bump docker/login-action from 1 to 2. ([\#13978](https://github.com/matrix-org/synapse/issues/13978))
+- Bump actions/download-artifact from 2 to 3. ([\#13979](https://github.com/matrix-org/synapse/issues/13979))
+- Bump actions/cache from 2 to 3. ([\#13980](https://github.com/matrix-org/synapse/issues/13980))
+- Bump actions/checkout from 2 to 3. ([\#13982](https://github.com/matrix-org/synapse/issues/13982))
+- Prototype a workflow to automatically add changelogs to dependabot PRs. ([\#13998](https://github.com/matrix-org/synapse/issues/13998), [\#14011](https://github.com/matrix-org/synapse/issues/14011), [\#14017](https://github.com/matrix-org/synapse/issues/14017), [\#14021](https://github.com/matrix-org/synapse/issues/14021), [\#14027](https://github.com/matrix-org/synapse/issues/14027))
+- Fix type annotations to be compatible with new annotations in development versions of twisted. ([\#14012](https://github.com/matrix-org/synapse/issues/14012))
+- Bump docker/setup-buildx-action from 1 to 2. ([\#14015](https://github.com/matrix-org/synapse/issues/14015))
+- Bump docker/setup-qemu-action from 1 to 2. ([\#14019](https://github.com/matrix-org/synapse/issues/14019))
+- Clear out stale entries in `event_push_actions_staging` table. ([\#14020](https://github.com/matrix-org/synapse/issues/14020))
+- Bump docker/build-push-action from 2 to 3. ([\#14022](https://github.com/matrix-org/synapse/issues/14022))
+- Bump actions/upload-artifact from 2 to 3. ([\#14023](https://github.com/matrix-org/synapse/issues/14023))
+
+
 Synapse 1.68.0 (2022-09-27)
 ===========================
 
diff --git a/build_rust.py b/build_rust.py
index 5c5e557ee8..662474dcb4 100644
--- a/build_rust.py
+++ b/build_rust.py
@@ -15,6 +15,9 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
         path=cargo_toml_path,
         binding=Binding.PyO3,
         py_limited_api=True,
+        # We force always building in release mode, as we can't tell the
+        # difference between using `poetry` in development vs production.
+        debug=False,
     )
     setup_kwargs.setdefault("rust_extensions", []).append(extension)
     setup_kwargs["zip_safe"] = False
diff --git a/changelog.d/13487.misc b/changelog.d/13487.misc
deleted file mode 100644
index 761adc8b05..0000000000
--- a/changelog.d/13487.misc
+++ /dev/null
@@ -1 +0,0 @@
-Speed up creation of DM rooms.
diff --git a/changelog.d/13556.feature b/changelog.d/13556.feature
deleted file mode 100644
index f9d63db6c0..0000000000
--- a/changelog.d/13556.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)).
diff --git a/changelog.d/13635.feature b/changelog.d/13635.feature
deleted file mode 100644
index d86bf7ed80..0000000000
--- a/changelog.d/13635.feature
+++ /dev/null
@@ -1 +0,0 @@
-Exponentially back off from backfilling the same event over and over.
diff --git a/changelog.d/13667.feature b/changelog.d/13667.feature
deleted file mode 100644
index a0b3cfe18c..0000000000
--- a/changelog.d/13667.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add cache invalidation across workers to module API.
diff --git a/changelog.d/13719.bugfix b/changelog.d/13719.bugfix
deleted file mode 100644
index 4318f4daff..0000000000
--- a/changelog.d/13719.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Send invite push notifications for invites over federation.
diff --git a/changelog.d/13722.feature b/changelog.d/13722.feature
deleted file mode 100644
index 588d143c0f..0000000000
--- a/changelog.d/13722.feature
+++ /dev/null
@@ -1 +0,0 @@
-Experimental implementation of MSC3882 to allow an existing device/session to generate a login token for use on a new device/session.
diff --git a/changelog.d/13768.misc b/changelog.d/13768.misc
deleted file mode 100644
index 28bddb7059..0000000000
--- a/changelog.d/13768.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port push rules to using Rust.
diff --git a/changelog.d/13772.doc b/changelog.d/13772.doc
deleted file mode 100644
index 3398ff3765..0000000000
--- a/changelog.d/13772.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add `worker_main_http_uri` for the worker generator bash script.
diff --git a/changelog.d/13782.feature b/changelog.d/13782.feature
deleted file mode 100644
index d0cb902dff..0000000000
--- a/changelog.d/13782.feature
+++ /dev/null
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
diff --git a/changelog.d/13787.misc b/changelog.d/13787.misc
deleted file mode 100644
index a9b93717f0..0000000000
--- a/changelog.d/13787.misc
+++ /dev/null
@@ -1 +0,0 @@
-Optimise get rooms for user calls. Contributed by Nick @ Beeper (@fizzadar).
diff --git a/changelog.d/13792.misc b/changelog.d/13792.misc
deleted file mode 100644
index 36ac91400a..0000000000
--- a/changelog.d/13792.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update the script which makes full schema dumps.
diff --git a/changelog.d/13796.misc b/changelog.d/13796.misc
deleted file mode 100644
index 9ed1662394..0000000000
--- a/changelog.d/13796.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar).
diff --git a/changelog.d/13799.feature b/changelog.d/13799.feature
deleted file mode 100644
index 6c8e5cffe2..0000000000
--- a/changelog.d/13799.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
diff --git a/changelog.d/13800.misc b/changelog.d/13800.misc
deleted file mode 100644
index 761adc8b05..0000000000
--- a/changelog.d/13800.misc
+++ /dev/null
@@ -1 +0,0 @@
-Speed up creation of DM rooms.
diff --git a/changelog.d/13809.misc b/changelog.d/13809.misc
deleted file mode 100644
index c2dacca2f2..0000000000
--- a/changelog.d/13809.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve the `synapse.api.auth.Auth` mock used in unit tests.
diff --git a/changelog.d/13818.doc b/changelog.d/13818.doc
deleted file mode 100644
index 16b31f5071..0000000000
--- a/changelog.d/13818.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update URL for the NixOS module for Synapse.
diff --git a/changelog.d/13823.misc b/changelog.d/13823.misc
deleted file mode 100644
index 527d79f4b2..0000000000
--- a/changelog.d/13823.misc
+++ /dev/null
@@ -1 +0,0 @@
-Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server.
\ No newline at end of file
diff --git a/changelog.d/13830.bugfix b/changelog.d/13830.bugfix
deleted file mode 100644
index e6215806cd..0000000000
--- a/changelog.d/13830.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join.
diff --git a/changelog.d/13831.feature b/changelog.d/13831.feature
deleted file mode 100644
index 6c8e5cffe2..0000000000
--- a/changelog.d/13831.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
diff --git a/changelog.d/13832.feature b/changelog.d/13832.feature
deleted file mode 100644
index 1dc1d66efe..0000000000
--- a/changelog.d/13832.feature
+++ /dev/null
@@ -1 +0,0 @@
-Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint.
diff --git a/changelog.d/13836.doc b/changelog.d/13836.doc
deleted file mode 100644
index f2edab00f4..0000000000
--- a/changelog.d/13836.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name` not `displayname`.
diff --git a/changelog.d/13838.misc b/changelog.d/13838.misc
deleted file mode 100644
index 28bddb7059..0000000000
--- a/changelog.d/13838.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port push rules to using Rust.
diff --git a/changelog.d/13839.misc b/changelog.d/13839.misc
deleted file mode 100644
index 549872c90f..0000000000
--- a/changelog.d/13839.misc
+++ /dev/null
@@ -1 +0,0 @@
-Carry IdP Session IDs through user-mapping sessions.
diff --git a/changelog.d/13840.bugfix b/changelog.d/13840.bugfix
deleted file mode 100644
index 0f014439a8..0000000000
--- a/changelog.d/13840.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward.
diff --git a/changelog.d/13843.removal b/changelog.d/13843.removal
deleted file mode 100644
index f6caaa8895..0000000000
--- a/changelog.d/13843.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0.
diff --git a/changelog.d/13850.misc b/changelog.d/13850.misc
deleted file mode 100644
index a973118aaf..0000000000
--- a/changelog.d/13850.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix the release script not publishing binary wheels.
\ No newline at end of file
diff --git a/changelog.d/13855.bugfix b/changelog.d/13855.bugfix
deleted file mode 100644
index 5ea8539bd8..0000000000
--- a/changelog.d/13855.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix access token leak to logs from proxy agent.
diff --git a/changelog.d/13859.misc b/changelog.d/13859.misc
deleted file mode 100644
index 2780a4af3c..0000000000
--- a/changelog.d/13859.misc
+++ /dev/null
@@ -1 +0,0 @@
-Raise issue if complement fails with latest deps.
diff --git a/changelog.d/13860.feature b/changelog.d/13860.feature
deleted file mode 100644
index 6c8e5cffe2..0000000000
--- a/changelog.d/13860.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
diff --git a/changelog.d/13863.bugfix b/changelog.d/13863.bugfix
deleted file mode 100644
index 74264a4fab..0000000000
--- a/changelog.d/13863.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix the `have_seen_event` cache not being invalidated after we persist an event, which caused inefficiencies such as extra `/state` federation calls.
diff --git a/changelog.d/13867.misc b/changelog.d/13867.misc
deleted file mode 100644
index 1205214598..0000000000
--- a/changelog.d/13867.misc
+++ /dev/null
@@ -1 +0,0 @@
-Correct the comments in the complement dockerfile.
diff --git a/changelog.d/13868.misc b/changelog.d/13868.misc
deleted file mode 100644
index d7a99c042a..0000000000
--- a/changelog.d/13868.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix unstable MSC3882 endpoint being incorrectly available on stable API versions.
\ No newline at end of file
diff --git a/changelog.d/13870.doc b/changelog.d/13870.doc
deleted file mode 100644
index 2598bc270c..0000000000
--- a/changelog.d/13870.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix a cross-link from the register admin API to the `registration_shared_secret` configuration documentation.
diff --git a/changelog.d/13872.bugfix b/changelog.d/13872.bugfix
deleted file mode 100644
index 67d3d9e643..0000000000
--- a/changelog.d/13872.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Faster room joins: Fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room.
diff --git a/changelog.d/13873.misc b/changelog.d/13873.misc
deleted file mode 100644
index f4342482f0..0000000000
--- a/changelog.d/13873.misc
+++ /dev/null
@@ -1 +0,0 @@
-Create a new snapshot of the database schema.
diff --git a/changelog.d/13874.misc b/changelog.d/13874.misc
deleted file mode 100644
index 499e488c35..0000000000
--- a/changelog.d/13874.misc
+++ /dev/null
@@ -1 +0,0 @@
-Faster room joins: Send device list updates to most servers in rooms with partial state.
diff --git a/changelog.d/13876.misc b/changelog.d/13876.misc
deleted file mode 100644
index ef37100115..0000000000
--- a/changelog.d/13876.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console.
\ No newline at end of file
diff --git a/changelog.d/13879.misc b/changelog.d/13879.misc
deleted file mode 100644
index 3cc2a2420f..0000000000
--- a/changelog.d/13879.misc
+++ /dev/null
@@ -1 +0,0 @@
-Only pull relevant backfill points from the database based on the current depth and limit (instead of all) every time we want to `/backfill`.
diff --git a/changelog.d/13885.misc b/changelog.d/13885.misc
deleted file mode 100644
index bc76b862df..0000000000
--- a/changelog.d/13885.misc
+++ /dev/null
@@ -1 +0,0 @@
-Correctly handle a race with device lists when a remote user leaves during a partial join.
diff --git a/changelog.d/13888.misc b/changelog.d/13888.misc
deleted file mode 100644
index 4ffd9bcede..0000000000
--- a/changelog.d/13888.misc
+++ /dev/null
@@ -1 +0,0 @@
-Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests.
diff --git a/changelog.d/13889.misc b/changelog.d/13889.misc
deleted file mode 100644
index 28bddb7059..0000000000
--- a/changelog.d/13889.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port push rules to using Rust.
diff --git a/changelog.d/13890.misc b/changelog.d/13890.misc
deleted file mode 100644
index bf76cf7be7..0000000000
--- a/changelog.d/13890.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve backfill robustness by trying more servers when we get a `4xx` error back.
\ No newline at end of file
diff --git a/changelog.d/13892.feature b/changelog.d/13892.feature
deleted file mode 100644
index df3f576536..0000000000
--- a/changelog.d/13892.feature
+++ /dev/null
@@ -1 +0,0 @@
-Faster remote room joins: record _when_ we first partial-join to a room.
diff --git a/changelog.d/13893.feature b/changelog.d/13893.feature
deleted file mode 100644
index d0cb902dff..0000000000
--- a/changelog.d/13893.feature
+++ /dev/null
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
diff --git a/changelog.d/13904.bugfix b/changelog.d/13904.bugfix
deleted file mode 100644
index 397a3108ac..0000000000
--- a/changelog.d/13904.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in 1.66 where some required fields in the pushrules sent to clients were not present anymore. Contributed by Nico.
diff --git a/changelog.d/13905.misc b/changelog.d/13905.misc
deleted file mode 100644
index efe3bed5f1..0000000000
--- a/changelog.d/13905.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix mypy errors with canonicaljson 1.6.3.
diff --git a/changelog.d/13909.bugfix b/changelog.d/13909.bugfix
deleted file mode 100644
index 883dd72919..0000000000
--- a/changelog.d/13909.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix packaging to include `Cargo.lock` in `sdist`.
diff --git a/changelog.d/13911.doc b/changelog.d/13911.doc
deleted file mode 100644
index 7cc3206501..0000000000
--- a/changelog.d/13911.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed.
\ No newline at end of file
diff --git a/changelog.d/13913.misc b/changelog.d/13913.misc
deleted file mode 100644
index 30b4401049..0000000000
--- a/changelog.d/13913.misc
+++ /dev/null
@@ -1 +0,0 @@
-Faster remote room joins: correctly handle remote device list updates during a partial join.
diff --git a/changelog.d/13914.misc b/changelog.d/13914.misc
deleted file mode 100644
index c29bc25d38..0000000000
--- a/changelog.d/13914.misc
+++ /dev/null
@@ -1 +0,0 @@
-Complement image: propagate SIGTERM to all workers.
diff --git a/changelog.d/13915.doc b/changelog.d/13915.doc
deleted file mode 100644
index 828cc30536..0000000000
--- a/changelog.d/13915.doc
+++ /dev/null
@@ -1 +0,0 @@
-Emphasize the right reasons when to use `(room_id, event_id)` in a database schema.
diff --git a/changelog.d/13920.feature b/changelog.d/13920.feature
deleted file mode 100644
index aee702bcd2..0000000000
--- a/changelog.d/13920.feature
+++ /dev/null
@@ -1 +0,0 @@
-Support a `dir` parameter on the `/relations` endpoint per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715).
diff --git a/changelog.d/13922.bugfix b/changelog.d/13922.bugfix
deleted file mode 100644
index 7269d28dee..0000000000
--- a/changelog.d/13922.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation.
diff --git a/changelog.d/13924.misc b/changelog.d/13924.misc
deleted file mode 100644
index 7770b6f03f..0000000000
--- a/changelog.d/13924.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update an inaccurate comment in Synapse's upsert database helper.
diff --git a/changelog.d/13928.doc b/changelog.d/13928.doc
deleted file mode 100644
index 04cd06f19d..0000000000
--- a/changelog.d/13928.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add instruction to contributing guide for running unit tests in parallel. Contributed by @ashfame.
diff --git a/changelog.d/13930.doc b/changelog.d/13930.doc
deleted file mode 100644
index 7cc3206501..0000000000
--- a/changelog.d/13930.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed.
\ No newline at end of file
diff --git a/changelog.d/13931.doc b/changelog.d/13931.doc
deleted file mode 100644
index 85e74fbb3b..0000000000
--- a/changelog.d/13931.doc
+++ /dev/null
@@ -1 +0,0 @@
-Clarify that the `auto_join_rooms` config option can also be used with Space aliases.
\ No newline at end of file
diff --git a/changelog.d/13932.feature b/changelog.d/13932.feature
deleted file mode 100644
index d0cb902dff..0000000000
--- a/changelog.d/13932.feature
+++ /dev/null
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
diff --git a/changelog.d/13934.misc b/changelog.d/13934.misc
deleted file mode 100644
index 6610a9f567..0000000000
--- a/changelog.d/13934.misc
+++ /dev/null
@@ -1 +0,0 @@
-Correctly handle sending local device list updates to remote servers during a partial join.
diff --git a/changelog.d/13936.feature b/changelog.d/13936.feature
deleted file mode 100644
index d86bf7ed80..0000000000
--- a/changelog.d/13936.feature
+++ /dev/null
@@ -1 +0,0 @@
-Exponentially back off from backfilling the same event over and over.
diff --git a/changelog.d/13937.feature b/changelog.d/13937.feature
deleted file mode 100644
index d0cb902dff..0000000000
--- a/changelog.d/13937.feature
+++ /dev/null
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
diff --git a/changelog.d/13939.feature b/changelog.d/13939.feature
deleted file mode 100644
index d0cb902dff..0000000000
--- a/changelog.d/13939.feature
+++ /dev/null
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
diff --git a/changelog.d/13947.feature b/changelog.d/13947.feature
deleted file mode 100644
index a0b3cfe18c..0000000000
--- a/changelog.d/13947.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add cache invalidation across workers to module API.
diff --git a/changelog.d/13952.bugfix b/changelog.d/13952.bugfix
deleted file mode 100644
index a6af20f051..0000000000
--- a/changelog.d/13952.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in v1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time.
diff --git a/changelog.d/13957.feature b/changelog.d/13957.feature
deleted file mode 100644
index 4080147357..0000000000
--- a/changelog.d/13957.feature
+++ /dev/null
@@ -1 +0,0 @@
-Ask mail servers receiving emails from Synapse not to send automatic replies (e.g. out-of-office responses).
diff --git a/changelog.d/13972.bugfix b/changelog.d/13972.bugfix
deleted file mode 100644
index 4c1e19ef8c..0000000000
--- a/changelog.d/13972.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a performance regression in the `get_users_in_room` database query. Introduced in v1.67.0.
diff --git a/changelog.d/13973.misc b/changelog.d/13973.misc
deleted file mode 100644
index 58150a2b35..0000000000
--- a/changelog.d/13973.misc
+++ /dev/null
@@ -1 +0,0 @@
-Speed up calculating push actions in large rooms.
diff --git a/changelog.d/13974.doc b/changelog.d/13974.doc
deleted file mode 100644
index c4ab17db53..0000000000
--- a/changelog.d/13974.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add some cross references to worker documentation.
diff --git a/debian/changelog b/debian/changelog
index 01fa49aa05..0f4dd28081 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,9 +1,10 @@
-matrix-synapse-py3 (1.69.0~rc1+nmu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.69.0~rc1) stable; urgency=medium
 
   * The man page for the hash_password script has been updated to reflect
     the correct default value of 'bcrypt_rounds'.
+  * New Synapse release 1.69.0rc1.
 
- -- Synapse Packaging team <packages@matrix.org>  Mon, 26 Sep 2022 18:05:09 +0100
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Oct 2022 11:17:16 +0100
 
 matrix-synapse-py3 (1.68.0) stable; urgency=medium
 
diff --git a/docs/metrics-howto.md b/docs/metrics-howto.md
index 279303a798..d8416b5a5f 100644
--- a/docs/metrics-howto.md
+++ b/docs/metrics-howto.md
@@ -135,6 +135,8 @@ Synapse 1.2 updates the Prometheus metrics to match the naming
 convention of the upstream `prometheus_client`. The old names are
 considered deprecated and will be removed in a future version of
 Synapse.
+**The old names will be disabled by default in Synapse v1.71.0 and removed
+altogether in Synapse v1.73.0.**
 
 | New Name                                                                     | Old Name                                                               |
 | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------- |
@@ -146,6 +148,13 @@ Synapse.
 | synapse_federation_client_events_processed_total                             | synapse_federation_client_events_processed                             |
 | synapse_event_processing_loop_count_total                                    | synapse_event_processing_loop_count                                    |
 | synapse_event_processing_loop_room_count_total                               | synapse_event_processing_loop_room_count                               |
+| synapse_util_caches_cache_hits                                               | synapse_util_caches_cache:hits                                         |
+| synapse_util_caches_cache_size                                               | synapse_util_caches_cache:size                                         |
+| synapse_util_caches_cache_evicted_size                                       | synapse_util_caches_cache:evicted_size                                 |
+| synapse_util_caches_cache                                                    | synapse_util_caches_cache:total                                        |
+| synapse_util_caches_response_cache_size                                      | synapse_util_caches_response_cache:size                                |
+| synapse_util_caches_response_cache_hits                                      | synapse_util_caches_response_cache:hits                                |
+| synapse_util_caches_response_cache_evicted_size                              | synapse_util_caches_response_cache:evicted_size                        |
 | synapse_util_metrics_block_count_total                                       | synapse_util_metrics_block_count                                       |
 | synapse_util_metrics_block_time_seconds_total                                | synapse_util_metrics_block_time_seconds                                |
 | synapse_util_metrics_block_ru_utime_seconds_total                            | synapse_util_metrics_block_ru_utime_seconds                            |
@@ -261,7 +270,7 @@ Standard Metric Names
 
 As of synapse version 0.18.2, the format of the process-wide metrics has
 been changed to fit prometheus standard naming conventions. Additionally
-the units have been changed to seconds, from miliseconds.
+the units have been changed to seconds, from milliseconds.
 
 | New name                                 | Old name                          |
 | ---------------------------------------- | --------------------------------- |
diff --git a/docs/upgrade.md b/docs/upgrade.md
index c4db19e23d..002ef70059 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -100,6 +100,34 @@ vice versa.
 Once all workers are upgraded to v1.69 (or downgraded to v1.68), receipts
 replication will resume as normal.
 
+
+## Deprecation of legacy Prometheus metric names
+
+In current versions of Synapse, some Prometheus metrics are emitted under two
+different names: an older name that does not comply with OpenMetrics and
+Prometheus conventions, and a newer, compliant name.
+
+Synapse v1.71.0 will turn the old metric names off *by default*.
+For administrators who still rely on them and have not had a chance to update
+their uses of the metrics, it's possible to specify `enable_legacy_metrics: true` in
+the configuration to re-enable them temporarily.
+
+Synapse v1.73.0 will **remove legacy metric names altogether** and it will no longer
+be possible to re-enable them.
+
+The Grafana dashboard, Prometheus recording rules and Prometheus Consoles included
+in the `contrib` directory in the Synapse repository have been updated to no longer
+rely on the legacy names. These can be used with current versions of Synapse,
+which emit both the old and the new names.
+
+You may need to update your alerting rules or any other rules that depend on
+the names of Prometheus metrics.
+If you want to test your changes before legacy names are disabled by default,
+you may specify `enable_legacy_metrics: false` in your homeserver configuration.
+
+A list of affected metrics is available on the [Metrics How-to page](https://matrix-org.github.io/synapse/v1.69/metrics-howto.html?highlight=metrics%20deprecated#renaming-of-metrics--deprecation-of-old-names-in-12).
+
+
 # Upgrading to v1.68.0
 
 Two changes announced in the upgrade notes for v1.67.0 have now landed in v1.68.0.
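
To check a running homeserver before the default flips, one option is to list
the legacy-style series it currently exposes. A rough sketch, assuming the
metrics listener is reachable at `http://localhost:9000/_synapse/metrics`
(adjust the URL for your deployment); it flags the colon-style names described
in the config documentation:

```python
# Sketch: list legacy-style metric names exposed by a homeserver. The URL is
# an assumption; point it at your own metrics listener.
from urllib.request import urlopen

METRICS_URL = "http://localhost:9000/_synapse/metrics"

legacy_names = set()
with urlopen(METRICS_URL) as resp:
    for line in resp.read().decode("utf-8").splitlines():
        if not line or line.startswith("#"):
            continue
        # Strip labels and the sample value to recover the bare metric name.
        name = line.split("{", 1)[0].split(" ", 1)[0]
        # Colons are the clearest legacy marker: they are reserved for
        # user-defined recording rules.
        if ":" in name:
            legacy_names.add(name)

for name in sorted(legacy_names):
    print(name)
```
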
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index f46b4932fd..8a71a934ea 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -179,7 +179,7 @@ This will tell other servers to send traffic to port 443 instead.
 
 This option currently defaults to false.
 
-See https://matrix-org.github.io/synapse/latest/delegate.html for more
+See [Delegation of incoming federation traffic](../../delegate.md) for more
 information.
 
 Example configuration:
@@ -2436,6 +2436,31 @@ Example configuration:
 enable_metrics: true
 ```
 ---
+### `enable_legacy_metrics`
+
+Set to `true` to publish both legacy and non-legacy Prometheus metric names,
+or to `false` to only publish non-legacy Prometheus metric names.
+Defaults to `true`. Has no effect if `enable_metrics` is `false`.
+**In Synapse v1.71.0, this will default to `false` before being removed in Synapse v1.73.0.**
+
+Legacy metric names include:
+- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
+- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
+
+These legacy metric names are unconventional and not compliant with OpenMetrics standards.
+They are included for backwards compatibility.
+
+Example configuration:
+```yaml
+enable_legacy_metrics: false
+```
+
+See https://github.com/matrix-org/synapse/issues/11106 for context.
+
+*Since v1.67.0.*
+
+**Will be removed in v1.73.0.**
+---
 ### `sentry`
 
 Use this option to enable sentry integration. Provide the DSN assigned to you by sentry
@@ -2952,7 +2977,7 @@ Options for each entry include:
 
      * `module`: The class name of a custom mapping module. Default is
        `synapse.handlers.oidc.JinjaOidcMappingProvider`.
-        See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
+        See [OpenID Mapping Providers](../../sso_mapping_providers.md#openid-mapping-providers)
         for information on implementing a custom mapping provider.
 
      * `config`: Configuration for the mapping provider module. This section will
@@ -3393,13 +3418,15 @@ This option has the following sub-options:
    the user directory. If false, search results will only contain users
     visible in public rooms and users sharing a room with the requester.
     Defaults to false.
+
     NB. If you set this to true, and the last time the user_directory search
     indexes were (re)built was before Synapse 1.44, you'll have to
     rebuild the indexes in order to search through all known users.
+    
     These indexes are built the first time Synapse starts; admins can
-    manually trigger a rebuild via API following the instructions at
-         https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run
-    Set to true to return search results containing all known users, even if that
+    manually trigger a rebuild via the API following the instructions
+    [for running background updates](../administration/admin_api/background_updates.md#run).
+    Set to true to return search results containing all known users, even if that
     user does not share a room with the requester.
 * `prefer_local_users`: Defines whether to prefer local users in search query results.
    If set to true, local users are more likely to appear above remote users when searching the
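
For reference, the manual rebuild mentioned above amounts to a single
authenticated POST to the background updates admin API. A sketch, assuming a
homeserver at `http://localhost:8008`, a server admin's access token in
`ADMIN_TOKEN`, and the `regenerate_directory` job name documented on the
linked page:

```python
# Sketch: trigger a user directory rebuild via the background updates admin
# API. The URL and ADMIN_TOKEN are placeholders for your deployment.
import json
from urllib.request import Request, urlopen

ADMIN_TOKEN = "syt_..."  # access token belonging to a server admin
URL = "http://localhost:8008/_synapse/admin/v1/background_updates/start_job"

req = Request(
    URL,
    data=json.dumps({"job_name": "regenerate_directory"}).encode("utf-8"),
    headers={
        "Authorization": f"Bearer {ADMIN_TOKEN}",
        "Content-Type": "application/json",
    },
    method="POST",
)
with urlopen(req) as resp:
    print(resp.status, resp.read().decode("utf-8"))
```
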
diff --git a/poetry.lock b/poetry.lock
index 0f6d1cfa69..63ef8573a0 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -573,11 +573,11 @@ python-versions = "*"
 
 [[package]]
 name = "mypy"
-version = "0.950"
+version = "0.981"
 description = "Optional static typing for Python"
 category = "dev"
 optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
 
 [package.dependencies]
 mypy-extensions = ">=0.4.3"
@@ -600,14 +600,14 @@ python-versions = "*"
 
 [[package]]
 name = "mypy-zope"
-version = "0.3.7"
+version = "0.3.11"
 description = "Plugin for mypy to support zope interfaces"
 category = "dev"
 optional = false
 python-versions = "*"
 
 [package.dependencies]
-mypy = "0.950"
+mypy = "0.981"
 "zope.interface" = "*"
 "zope.schema" = "*"
 
@@ -2162,37 +2162,38 @@ msgpack = [
     {file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"},
 ]
 mypy = [
-    {file = "mypy-0.950-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cf9c261958a769a3bd38c3e133801ebcd284ffb734ea12d01457cb09eacf7d7b"},
-    {file = "mypy-0.950-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5b5bd0ffb11b4aba2bb6d31b8643902c48f990cc92fda4e21afac658044f0c0"},
-    {file = "mypy-0.950-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e7647df0f8fc947388e6251d728189cfadb3b1e558407f93254e35abc026e22"},
-    {file = "mypy-0.950-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eaff8156016487c1af5ffa5304c3e3fd183edcb412f3e9c72db349faf3f6e0eb"},
-    {file = "mypy-0.950-cp310-cp310-win_amd64.whl", hash = "sha256:563514c7dc504698fb66bb1cf897657a173a496406f1866afae73ab5b3cdb334"},
-    {file = "mypy-0.950-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd4d670eee9610bf61c25c940e9ade2d0ed05eb44227275cce88701fee014b1f"},
-    {file = "mypy-0.950-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca75ecf2783395ca3016a5e455cb322ba26b6d33b4b413fcdedfc632e67941dc"},
-    {file = "mypy-0.950-cp36-cp36m-win_amd64.whl", hash = "sha256:6003de687c13196e8a1243a5e4bcce617d79b88f83ee6625437e335d89dfebe2"},
-    {file = "mypy-0.950-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4c653e4846f287051599ed8f4b3c044b80e540e88feec76b11044ddc5612ffed"},
-    {file = "mypy-0.950-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e19736af56947addedce4674c0971e5dceef1b5ec7d667fe86bcd2b07f8f9075"},
-    {file = "mypy-0.950-cp37-cp37m-win_amd64.whl", hash = "sha256:ef7beb2a3582eb7a9f37beaf38a28acfd801988cde688760aea9e6cc4832b10b"},
-    {file = "mypy-0.950-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0112752a6ff07230f9ec2f71b0d3d4e088a910fdce454fdb6553e83ed0eced7d"},
-    {file = "mypy-0.950-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee0a36edd332ed2c5208565ae6e3a7afc0eabb53f5327e281f2ef03a6bc7687a"},
-    {file = "mypy-0.950-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77423570c04aca807508a492037abbd72b12a1fb25a385847d191cd50b2c9605"},
-    {file = "mypy-0.950-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ce6a09042b6da16d773d2110e44f169683d8cc8687e79ec6d1181a72cb028d2"},
-    {file = "mypy-0.950-cp38-cp38-win_amd64.whl", hash = "sha256:5b231afd6a6e951381b9ef09a1223b1feabe13625388db48a8690f8daa9b71ff"},
-    {file = "mypy-0.950-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0384d9f3af49837baa92f559d3fa673e6d2652a16550a9ee07fc08c736f5e6f8"},
-    {file = "mypy-0.950-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1fdeb0a0f64f2a874a4c1f5271f06e40e1e9779bf55f9567f149466fc7a55038"},
-    {file = "mypy-0.950-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61504b9a5ae166ba5ecfed9e93357fd51aa693d3d434b582a925338a2ff57fd2"},
-    {file = "mypy-0.950-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a952b8bc0ae278fc6316e6384f67bb9a396eb30aced6ad034d3a76120ebcc519"},
-    {file = "mypy-0.950-cp39-cp39-win_amd64.whl", hash = "sha256:eaea21d150fb26d7b4856766e7addcf929119dd19fc832b22e71d942835201ef"},
-    {file = "mypy-0.950-py3-none-any.whl", hash = "sha256:a4d9898f46446bfb6405383b57b96737dcfd0a7f25b748e78ef3e8c576bba3cb"},
-    {file = "mypy-0.950.tar.gz", hash = "sha256:1b333cfbca1762ff15808a0ef4f71b5d3eed8528b23ea1c3fb50543c867d68de"},
+    {file = "mypy-0.981-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4bc460e43b7785f78862dab78674e62ec3cd523485baecfdf81a555ed29ecfa0"},
+    {file = "mypy-0.981-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:756fad8b263b3ba39e4e204ee53042671b660c36c9017412b43af210ddee7b08"},
+    {file = "mypy-0.981-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16a0145d6d7d00fbede2da3a3096dcc9ecea091adfa8da48fa6a7b75d35562d"},
+    {file = "mypy-0.981-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce65f70b14a21fdac84c294cde75e6dbdabbcff22975335e20827b3b94bdbf49"},
+    {file = "mypy-0.981-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e35d764784b42c3e256848fb8ed1d4292c9fc0098413adb28d84974c095b279"},
+    {file = "mypy-0.981-cp310-cp310-win_amd64.whl", hash = "sha256:e53773073c864d5f5cec7f3fc72fbbcef65410cde8cc18d4f7242dea60dac52e"},
+    {file = "mypy-0.981-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ee196b1d10b8b215e835f438e06965d7a480f6fe016eddbc285f13955cca659"},
+    {file = "mypy-0.981-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad21d4c9d3673726cf986ea1d0c9fb66905258709550ddf7944c8f885f208be"},
+    {file = "mypy-0.981-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1debb09043e1f5ee845fa1e96d180e89115b30e47c5d3ce53bc967bab53f62d"},
+    {file = "mypy-0.981-cp37-cp37m-win_amd64.whl", hash = "sha256:9f362470a3480165c4c6151786b5379351b790d56952005be18bdbdd4c7ce0ae"},
+    {file = "mypy-0.981-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c9e0efb95ed6ca1654951bd5ec2f3fa91b295d78bf6527e026529d4aaa1e0c30"},
+    {file = "mypy-0.981-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e178eaffc3c5cd211a87965c8c0df6da91ed7d258b5fc72b8e047c3771317ddb"},
+    {file = "mypy-0.981-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:06e1eac8d99bd404ed8dd34ca29673c4346e76dd8e612ea507763dccd7e13c7a"},
+    {file = "mypy-0.981-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa38f82f53e1e7beb45557ff167c177802ba7b387ad017eab1663d567017c8ee"},
+    {file = "mypy-0.981-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:64e1f6af81c003f85f0dfed52db632817dabb51b65c0318ffbf5ff51995bbb08"},
+    {file = "mypy-0.981-cp38-cp38-win_amd64.whl", hash = "sha256:e1acf62a8c4f7c092462c738aa2c2489e275ed386320c10b2e9bff31f6f7e8d6"},
+    {file = "mypy-0.981-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b6ede64e52257931315826fdbfc6ea878d89a965580d1a65638ef77cb551f56d"},
+    {file = "mypy-0.981-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb3978b191b9fa0488524bb4ffedf2c573340e8c2b4206fc191d44c7093abfb7"},
+    {file = "mypy-0.981-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f8fcf7b4b3cc0c74fb33ae54a4cd00bb854d65645c48beccf65fa10b17882c"},
+    {file = "mypy-0.981-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64d2ce043a209a297df322eb4054dfbaa9de9e8738291706eaafda81ab2b362"},
+    {file = "mypy-0.981-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ee3dbc53d4df7e6e3b1c68ac6a971d3a4fb2852bf10a05fda228721dd44fae1"},
+    {file = "mypy-0.981-cp39-cp39-win_amd64.whl", hash = "sha256:8e8e49aa9cc23aa4c926dc200ce32959d3501c4905147a66ce032f05cb5ecb92"},
+    {file = "mypy-0.981-py3-none-any.whl", hash = "sha256:794f385653e2b749387a42afb1e14c2135e18daeb027e0d97162e4b7031210f8"},
+    {file = "mypy-0.981.tar.gz", hash = "sha256:ad77c13037d3402fbeffda07d51e3f228ba078d1c7096a73759c9419ea031bf4"},
 ]
 mypy-extensions = [
     {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
     {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
 ]
 mypy-zope = [
-    {file = "mypy-zope-0.3.7.tar.gz", hash = "sha256:9da171e78e8ef7ac8922c86af1a62f1b7f3244f121020bd94a2246bc3f33c605"},
-    {file = "mypy_zope-0.3.7-py3-none-any.whl", hash = "sha256:9c7637d066e4d1bafa0651abc091c752009769098043b236446e6725be2bc9c2"},
+    {file = "mypy-zope-0.3.11.tar.gz", hash = "sha256:d4255f9f04d48c79083bbd4e2fea06513a6ac7b8de06f8c4ce563fd85142ca05"},
+    {file = "mypy_zope-0.3.11-py3-none-any.whl", hash = "sha256:ec080a6508d1f7805c8d2054f9fdd13c849742ce96803519e1fdfa3d3cab7140"},
 ]
 netaddr = [
     {file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"},
diff --git a/pyproject.toml b/pyproject.toml
index b3e12962a9..c302cff54e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.68.0"
+version = "1.69.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"
diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py
index d0fb811bdb..9f2b7ded5b 100755
--- a/scripts-dev/check_pydantic_models.py
+++ b/scripts-dev/check_pydantic_models.py
@@ -88,10 +88,9 @@ def make_wrapper(factory: Callable[P, R]) -> Callable[P, R]:
 
     @functools.wraps(factory)
     def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
-        # type-ignore: should be redundant once we can use https://github.com/python/mypy/pull/12668
-        if "strict" not in kwargs:  # type: ignore[attr-defined]
+        if "strict" not in kwargs:
             raise MissingStrictInConstrainedTypeException(factory.__name__)
-        if not kwargs["strict"]:  # type: ignore[index]
+        if not kwargs["strict"]:
             raise MissingStrictInConstrainedTypeException(factory.__name__)
         return factory(*args, **kwargs)
 
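
For context, the wrapper patched above exists so the lint can reject
constrained-type factories called without `strict=True`. A dependency-free
sketch of the behaviour it enforces (`fake_constr` stands in for pydantic's
`constr`, so the snippet runs on its own):

```python
# Sketch: the check make_wrapper enforces. fake_constr stands in for
# pydantic's constr so the snippet has no dependencies.
import functools
from typing import Any, Callable

class MissingStrictInConstrainedTypeException(Exception):
    pass

def fake_constr(**kwargs: Any) -> dict:
    return kwargs

def make_wrapper(factory: Callable[..., Any]) -> Callable[..., Any]:
    @functools.wraps(factory)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        # Reject both a missing strict kwarg and strict=False.
        if "strict" not in kwargs or not kwargs["strict"]:
            raise MissingStrictInConstrainedTypeException(factory.__name__)
        return factory(*args, **kwargs)
    return wrapper

constr = make_wrapper(fake_constr)
print(constr(strict=True, max_length=5))  # accepted
try:
    constr(max_length=5)  # missing strict: rejected
except MissingStrictInConstrainedTypeException as e:
    print("rejected:", e)
```
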
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 9a24bed0a0..000912e86e 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -98,9 +98,7 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
         func: Function to be called when sent a SIGHUP signal.
         *args, **kwargs: args and kwargs to be passed to the target function.
     """
-    # This type-ignore should be redundant once we use a mypy release with
-    # https://github.com/python/mypy/pull/12668.
-    _sighup_callbacks.append((func, args, kwargs))  # type: ignore[arg-type]
+    _sighup_callbacks.append((func, args, kwargs))
 
 
 def start_worker_reactor(
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index f3134834e5..bb065f9f2f 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -43,32 +43,6 @@ class MetricsConfig(Config):
     def read_config(self, config: JsonDict, **kwargs: Any) -> None:
         self.enable_metrics = config.get("enable_metrics", False)
 
-        """
-        ### `enable_legacy_metrics` (experimental)
-
-        **Experimental: this option may be removed or have its behaviour
-        changed at any time, with no notice.**
-
-        Set to `true` to publish both legacy and non-legacy Prometheus metric names,
-        or to `false` to only publish non-legacy Prometheus metric names.
-        Defaults to `true`. Has no effect if `enable_metrics` is `false`.
-
-        Legacy metric names include:
-        - metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
-        - counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
-
-        These legacy metric names are unconventional and not compliant with OpenMetrics standards.
-        They are included for backwards compatibility.
-
-        Example configuration:
-        ```yaml
-        enable_legacy_metrics: false
-        ```
-
-        See https://github.com/matrix-org/synapse/issues/11106 for context.
-
-        *Since v1.67.0.*
-        """
         self.enable_legacy_metrics = config.get("enable_legacy_metrics", True)
 
         self.report_stats = config.get("report_stats", None)
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index abe2c1971a..6bd4742140 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Awaitable, Callable, Optional
 
 from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
 from synapse.api.errors import Codes, SynapseError
@@ -58,7 +58,12 @@ class FederationBase:
 
     @trace
     async def _check_sigs_and_hash(
-        self, room_version: RoomVersion, pdu: EventBase
+        self,
+        room_version: RoomVersion,
+        pdu: EventBase,
+        record_failure_callback: Optional[
+            Callable[[EventBase, str], Awaitable[None]]
+        ] = None,
     ) -> EventBase:
         """Checks that event is correctly signed by the sending server.
 
@@ -70,6 +75,11 @@ class FederationBase:
         Args:
             room_version: The room version of the PDU
             pdu: the event to be checked
+            record_failure_callback: A callback to run whenever the given event
+                fails signature or hash checks. This includes exceptions
+                that would normally be thrown/raised, but also things like
+                checking for event tampering where we just return the redacted
+                event.
 
         Returns:
               * the original event if the checks pass
@@ -80,7 +90,12 @@ class FederationBase:
           InvalidEventSignatureError if the signature check failed. Nothing
              will be logged in this case.
         """
-        await _check_sigs_on_pdu(self.keyring, room_version, pdu)
+        try:
+            await _check_sigs_on_pdu(self.keyring, room_version, pdu)
+        except InvalidEventSignatureError as exc:
+            if record_failure_callback:
+                await record_failure_callback(pdu, str(exc))
+            raise exc
 
         if not check_event_content_hash(pdu):
             # let's try to distinguish between failures because the event was
@@ -116,6 +131,10 @@ class FederationBase:
                         "event_id": pdu.event_id,
                     }
                 )
+                if record_failure_callback:
+                    await record_failure_callback(
+                        pdu, "Event content has been tampered with"
+                    )
             return redacted_event
 
         spam_check = await self.spam_checker.check_event_for_spam(pdu)
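
The new callback contract is deliberately small: any async callable taking the
event and a human-readable cause. A self-contained sketch of the shape
(`FakeEvent` is a stand-in for `EventBase`; the real wiring, which records a
failed pull attempt in the store, appears in federation_client.py below):

```python
# Sketch: the shape of a record_failure_callback,
# Callable[[EventBase, str], Awaitable[None]]. FakeEvent stands in for
# EventBase; the real callback writes to the store instead of a list.
import asyncio
from typing import Awaitable, Callable, List, Tuple

class FakeEvent:
    def __init__(self, room_id: str, event_id: str) -> None:
        self.room_id = room_id
        self.event_id = event_id

failures: List[Tuple[str, str, str]] = []

async def record_failure(event: FakeEvent, cause: str) -> None:
    failures.append((event.room_id, event.event_id, cause))

async def main() -> None:
    cb: Callable[[FakeEvent, str], Awaitable[None]] = record_failure
    await cb(FakeEvent("!room:example.org", "$bad_event"), "Invalid signature")
    print(failures)

asyncio.run(main())
```
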
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 464672a3da..4dca711cd2 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -278,7 +278,7 @@ class FederationClient(FederationBase):
         pdus = [event_from_pdu_json(p, room_version) for p in transaction_data_pdus]
 
         # Check signatures and hash of pdus, removing any from the list that fail checks
-        pdus[:] = await self._check_sigs_and_hash_and_fetch(
+        pdus[:] = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
             dest, pdus, room_version=room_version
         )
 
@@ -328,7 +328,17 @@ class FederationClient(FederationBase):
 
             # Check signatures are correct.
             try:
-                signed_pdu = await self._check_sigs_and_hash(room_version, pdu)
+
+                async def _record_failure_callback(
+                    event: EventBase, cause: str
+                ) -> None:
+                    await self.store.record_event_failed_pull_attempt(
+                        event.room_id, event.event_id, cause
+                    )
+
+                signed_pdu = await self._check_sigs_and_hash(
+                    room_version, pdu, _record_failure_callback
+                )
             except InvalidEventSignatureError as e:
                 errmsg = f"event id {pdu.event_id}: {e}"
                 logger.warning("%s", errmsg)
@@ -547,24 +557,28 @@ class FederationClient(FederationBase):
             len(auth_event_map),
         )
 
-        valid_auth_events = await self._check_sigs_and_hash_and_fetch(
+        valid_auth_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
             destination, auth_event_map.values(), room_version
         )
 
-        valid_state_events = await self._check_sigs_and_hash_and_fetch(
-            destination, state_event_map.values(), room_version
+        valid_state_events = (
+            await self._check_sigs_and_hash_for_pulled_events_and_fetch(
+                destination, state_event_map.values(), room_version
+            )
         )
 
         return valid_state_events, valid_auth_events
 
     @trace
-    async def _check_sigs_and_hash_and_fetch(
+    async def _check_sigs_and_hash_for_pulled_events_and_fetch(
         self,
         origin: str,
         pdus: Collection[EventBase],
         room_version: RoomVersion,
     ) -> List[EventBase]:
-        """Checks the signatures and hashes of a list of events.
+        """
+        Checks the signatures and hashes of a list of pulled events we got from
+        federation and records any signature failures as failed pull attempts.
 
         If a PDU fails its signature check then we check if we have it in
         the database, and if not then request it from the sender's server (if that
@@ -597,11 +611,17 @@ class FederationClient(FederationBase):
 
         valid_pdus: List[EventBase] = []
 
+        async def _record_failure_callback(event: EventBase, cause: str) -> None:
+            await self.store.record_event_failed_pull_attempt(
+                event.room_id, event.event_id, cause
+            )
+
         async def _execute(pdu: EventBase) -> None:
             valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
                 pdu=pdu,
                 origin=origin,
                 room_version=room_version,
+                record_failure_callback=_record_failure_callback,
             )
 
             if valid_pdu:
@@ -618,6 +638,9 @@ class FederationClient(FederationBase):
         pdu: EventBase,
         origin: str,
         room_version: RoomVersion,
+        record_failure_callback: Optional[
+            Callable[[EventBase, str], Awaitable[None]]
+        ] = None,
     ) -> Optional[EventBase]:
         """Takes a PDU and checks its signatures and hashes.
 
@@ -634,6 +657,11 @@ class FederationClient(FederationBase):
             origin
             pdu
             room_version
+            record_failure_callback: A callback to run whenever the given event
+                fails signature or hash checks. This includes exceptions
+                that would normally be thrown/raised, but also things like
+                checking for event tampering where we just return the redacted
+                event.
 
         Returns:
             The PDU (possibly redacted) if it has valid signatures and hashes.
@@ -641,7 +669,9 @@ class FederationClient(FederationBase):
         """
 
         try:
-            return await self._check_sigs_and_hash(room_version, pdu)
+            return await self._check_sigs_and_hash(
+                room_version, pdu, record_failure_callback
+            )
         except InvalidEventSignatureError as e:
             logger.warning(
                 "Signature on retrieved event %s was invalid (%s). "
@@ -694,7 +724,7 @@ class FederationClient(FederationBase):
 
         auth_chain = [event_from_pdu_json(p, room_version) for p in res["auth_chain"]]
 
-        signed_auth = await self._check_sigs_and_hash_and_fetch(
+        signed_auth = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
             destination, auth_chain, room_version=room_version
         )
 
@@ -1401,7 +1431,7 @@ class FederationClient(FederationBase):
                 event_from_pdu_json(e, room_version) for e in content.get("events", [])
             ]
 
-            signed_events = await self._check_sigs_and_hash_and_fetch(
+            signed_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
                 destination, events, room_version=room_version
             )
         except HttpResponseException as e:
diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py
index 7163af8004..fc467bc7c1 100644
--- a/synapse/handlers/cas.py
+++ b/synapse/handlers/cas.py
@@ -130,6 +130,9 @@ class CasHandler:
         except PartialDownloadError as pde:
             # Twisted raises this error if the connection is closed,
             # even if that's being used old-http style to signal end-of-data
+            # Assertion is for mypy's benefit. Error.response is Optional[bytes],
+            # but a PartialDownloadError should always have a non-None response.
+            assert pde.response is not None
             body = pde.response
         except HttpResponseException as e:
             description = (
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 3fac256881..778d8869b3 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -866,11 +866,6 @@ class FederationEventHandler:
                 event.room_id, event_id, str(err)
             )
             return
-        except Exception as exc:
-            await self._store.record_event_failed_pull_attempt(
-                event.room_id, event_id, str(exc)
-            )
-            raise exc
 
         try:
             try:
@@ -913,11 +908,6 @@ class FederationEventHandler:
                 logger.warning("Pulled event %s failed history check.", event_id)
             else:
                 raise
-        except Exception as exc:
-            await self._store.record_event_failed_pull_attempt(
-                event.room_id, event_id, str(exc)
-            )
-            raise exc
 
     @trace
     async def _compute_event_context_with_maybe_missing_prevs(
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index 28d7093f08..63bc6a7aa5 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -78,6 +78,7 @@ class RelationsHandler:
         direction: str = "b",
         from_token: Optional[StreamToken] = None,
         to_token: Optional[StreamToken] = None,
+        include_original_event: bool = False,
     ) -> JsonDict:
         """Get related events of a event, ordered by topological ordering.
 
@@ -94,6 +95,7 @@ class RelationsHandler:
                 oldest first (`"f"`).
             from_token: Fetch rows from the given token, or from the start if None.
             to_token: Fetch rows up to the given token, or up to the end if None.
+            include_original_event: Whether to include the original (parent) event.
 
         Returns:
             The pagination chunk.
@@ -138,25 +140,24 @@ class RelationsHandler:
             is_peeking=(member_event_id is None),
         )
 
-        now = self._clock.time_msec()
-        # Do not bundle aggregations when retrieving the original event because
-        # we want the content before relations are applied to it.
-        original_event = self._event_serializer.serialize_event(
-            event, now, bundle_aggregations=None
-        )
         # The relations returned for the requested event do include their
         # bundled aggregations.
         aggregations = await self.get_bundled_aggregations(
             events, requester.user.to_string()
         )
-        serialized_events = self._event_serializer.serialize_events(
-            events, now, bundle_aggregations=aggregations
-        )
 
-        return_value = {
-            "chunk": serialized_events,
-            "original_event": original_event,
+        now = self._clock.time_msec()
+        return_value: JsonDict = {
+            "chunk": self._event_serializer.serialize_events(
+                events, now, bundle_aggregations=aggregations
+            ),
         }
+        if include_original_event:
+            # Do not bundle aggregations when retrieving the original event because
+            # we want the content before relations are applied to it.
+            return_value["original_event"] = self._event_serializer.serialize_event(
+                event, now, bundle_aggregations=None
+            )
 
         if next_token:
             return_value["next_batch"] = await next_token.to_string(self._main_store)
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 3e88d87909..3fa78276d8 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -322,6 +322,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         require_consent: bool = True,
         outlier: bool = False,
         historical: bool = False,
+        origin_server_ts: Optional[int] = None,
     ) -> Tuple[str, int]:
         """
         Internal membership update function to get an existing event or create
@@ -361,6 +362,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             historical: Indicates whether the message is being inserted
                 back in time around some existing events. This is used to skip
                 a few checks and mark the event as backfilled.
+            origin_server_ts: The origin_server_ts to use if a new event is created. Uses
+                the current timestamp if set to None.
 
         Returns:
             Tuple of event ID and stream ordering position
@@ -399,6 +402,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                 "state_key": user_id,
                 # For backwards compatibility:
                 "membership": membership,
+                "origin_server_ts": origin_server_ts,
             },
             txn_id=txn_id,
             allow_no_prev_events=allow_no_prev_events,
@@ -504,6 +508,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         prev_event_ids: Optional[List[str]] = None,
         state_event_ids: Optional[List[str]] = None,
         depth: Optional[int] = None,
+        origin_server_ts: Optional[int] = None,
     ) -> Tuple[str, int]:
         """Update a user's membership in a room.
 
@@ -542,6 +547,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             depth: Override the depth used to order the event in the DAG.
                 Should normally be set to None, which will cause the depth to be calculated
                 based on the prev_events.
+            origin_server_ts: The origin_server_ts to use if a new event is created. Uses
+                the current timestamp if set to None.
 
         Returns:
             A tuple of the new event ID and stream ID.
@@ -597,6 +604,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                         prev_event_ids=prev_event_ids,
                         state_event_ids=state_event_ids,
                         depth=depth,
+                        origin_server_ts=origin_server_ts,
                     )
 
         return result
@@ -620,6 +628,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         prev_event_ids: Optional[List[str]] = None,
         state_event_ids: Optional[List[str]] = None,
         depth: Optional[int] = None,
+        origin_server_ts: Optional[int] = None,
     ) -> Tuple[str, int]:
         """Helper for update_membership.
 
@@ -660,6 +669,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             depth: Override the depth used to order the event in the DAG.
                 Should normally be set to None, which will cause the depth to be calculated
                 based on the prev_events.
+            origin_server_ts: The origin_server_ts to use if a new event is created. Uses
+                the current timestamp if set to None.
 
         Returns:
             A tuple of the new event ID and stream ID.
@@ -799,6 +810,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                 require_consent=require_consent,
                 outlier=outlier,
                 historical=historical,
+                origin_server_ts=origin_server_ts,
             )
 
         latest_event_ids = await self.store.get_prev_events_for_room(room_id)
@@ -1044,6 +1056,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             content=content,
             require_consent=require_consent,
             outlier=outlier,
+            origin_server_ts=origin_server_ts,
         )
 
     async def _should_perform_remote_join(
@@ -1164,8 +1177,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         logger.info("Transferring room state from %s to %s", old_room_id, room_id)
 
         # Find all local users that were in the old room and copy over each user's state
-        users = await self.store.get_users_in_room(old_room_id)
-        await self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
+        local_users = await self.store.get_local_users_in_room(old_room_id)
+        await self.copy_user_state_on_room_upgrade(old_room_id, room_id, local_users)
 
         # Add new room to the room directory if the old room was there
         # Remove old room from the room directory
diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py
index a744d68c64..332edcca24 100644
--- a/synapse/handlers/ui_auth/checkers.py
+++ b/synapse/handlers/ui_auth/checkers.py
@@ -119,6 +119,9 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
         except PartialDownloadError as pde:
             # Twisted is silly
             data = pde.response
+            # For mypy's benefit. A general Error.response is Optional[bytes], but
+            # a PartialDownloadError.response should be bytes AFAICS.
+            assert data is not None
             resp_body = json_decoder.decode(data.decode("utf-8"))
 
         if "success" in resp_body:
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 8c3c52e1ca..3610b6bf78 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import logging
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
 
 import synapse.metrics
 from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
@@ -379,7 +379,7 @@ class UserDirectoryHandler(StateDeltasHandler):
             user_id, event.content.get("displayname"), event.content.get("avatar_url")
         )
 
-    async def _track_user_joined_room(self, room_id: str, user_id: str) -> None:
+    async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None:
         """Someone's just joined a room. Update `users_in_public_rooms` or
         `users_who_share_private_rooms` as appropriate.
 
@@ -390,32 +390,44 @@ class UserDirectoryHandler(StateDeltasHandler):
             room_id
         )
         if is_public:
-            await self.store.add_users_in_public_rooms(room_id, (user_id,))
+            await self.store.add_users_in_public_rooms(room_id, (joining_user_id,))
         else:
             users_in_room = await self.store.get_users_in_room(room_id)
             other_users_in_room = [
                 other
                 for other in users_in_room
-                if other != user_id
+                if other != joining_user_id
                 and (
+                    # We can't apply any special rules to remote users, so
+                    # they're always included.
                     not self.is_mine_id(other)
+                    # Check the special rules for whether the local user should
+                    # be included in the user directory.
                     or await self.store.should_include_local_user_in_dir(other)
                 )
             ]
-            to_insert = set()
+            updates_to_users_who_share_rooms: Set[Tuple[str, str]] = set()
 
-            # First, if they're our user then we need to update for every user
-            if self.is_mine_id(user_id):
+            # First, if the joining user is our local user then we need an
+            # update for every other user in the room.
+            if self.is_mine_id(joining_user_id):
                 for other_user_id in other_users_in_room:
-                    to_insert.add((user_id, other_user_id))
+                    updates_to_users_who_share_rooms.add(
+                        (joining_user_id, other_user_id)
+                    )
 
-            # Next we need to update for every local user in the room
+            # Next, we need an update for every other local user in the room
+            # that they now share a room with the joining user.
             for other_user_id in other_users_in_room:
                 if self.is_mine_id(other_user_id):
-                    to_insert.add((other_user_id, user_id))
+                    updates_to_users_who_share_rooms.add(
+                        (other_user_id, joining_user_id)
+                    )
 
-            if to_insert:
-                await self.store.add_users_who_share_private_room(room_id, to_insert)
+            if updates_to_users_who_share_rooms:
+                await self.store.add_users_who_share_private_room(
+                    room_id, updates_to_users_who_share_rooms
+                )
 
     async def _handle_remove_user(self, room_id: str, user_id: str) -> None:
         """Called when when someone leaves a room. The user may be local or remote.
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index fd9cb97920..6a08ffed64 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -586,7 +586,7 @@ class LoggingContextFilter(logging.Filter):
             True to include the record in the log output.
         """
         context = current_context()
-        record.request = self._default_request  # type: ignore
+        record.request = self._default_request
 
         # context should never be None, but if it somehow ends up being, then
         # we end up in a death spiral of infinite loops, so let's check, for
@@ -594,21 +594,21 @@ class LoggingContextFilter(logging.Filter):
         if context is not None:
             # Logging is interested in the request ID. Note that for backwards
             # compatibility this is stored as the "request" on the record.
-            record.request = str(context)  # type: ignore
+            record.request = str(context)
 
             # Add some data from the HTTP request.
             request = context.request
             if request is None:
                 return True
 
-            record.ip_address = request.ip_address  # type: ignore
-            record.site_tag = request.site_tag  # type: ignore
-            record.requester = request.requester  # type: ignore
-            record.authenticated_entity = request.authenticated_entity  # type: ignore
-            record.method = request.method  # type: ignore
-            record.url = request.url  # type: ignore
-            record.protocol = request.protocol  # type: ignore
-            record.user_agent = request.user_agent  # type: ignore
+            record.ip_address = request.ip_address
+            record.site_tag = request.site_tag
+            record.requester = request.requester
+            record.authenticated_entity = request.authenticated_entity
+            record.method = request.method
+            record.url = request.url
+            record.protocol = request.protocol
+            record.user_agent = request.user_agent
 
         return True
 
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index ca2735dd6d..8ce5a2a338 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -992,9 +992,9 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]:
         # FIXME: We could update this to handle any type of function by ignoring the
         #   first argument only if it's named `self` or `cls`. This isn't fool-proof
         #   but handles the idiomatic cases.
-        for i, arg in enumerate(args[1:], start=1):  # type: ignore[index]
+        for i, arg in enumerate(args[1:], start=1):
             set_tag(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg))
-        set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :]))  # type: ignore[index]
+        set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :]))
         set_tag(SynapseTags.FUNC_KWARGS, str(kwargs))
         yield
 
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 7bfe380543..4270438918 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -332,6 +332,11 @@ class BulkPushRuleEvaluator:
                 # Push rules say we should notify the user of this event
                 actions_by_user[uid] = actions
 
+        # If there aren't any actions then we can skip the rest of the
+        # processing.
+        if not actions_by_user:
+            return
+
         # This is a check for the case where user joins a room without being
         # allowed to see history, and then the server receives a delayed event
         # from before the user joined, which they should not be pushed for
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py
index 205c556f64..7a25de5c85 100644
--- a/synapse/rest/client/relations.py
+++ b/synapse/rest/client/relations.py
@@ -82,6 +82,11 @@ class RelationPaginationServlet(RestServlet):
         if to_token_str:
             to_token = await StreamToken.from_string(self.store, to_token_str)
 
+        # The unstable version of this API returns an extra field for client
+        # compatibility, see https://github.com/matrix-org/synapse/issues/12930.
+        assert request.path is not None
+        include_original_event = request.path.startswith(b"/_matrix/client/unstable/")
+
         result = await self._relations_handler.get_relations(
             requester=requester,
             event_id=parent_id,
@@ -92,6 +97,7 @@ class RelationPaginationServlet(RestServlet):
             direction=direction,
             from_token=from_token,
             to_token=to_token,
+            include_original_event=include_original_event,
         )
 
         return 200, result
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 0bca012535..b6dedbed04 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -268,15 +268,9 @@ class RoomStateEventRestServlet(TransactionRestServlet):
 
         content = parse_json_object_from_request(request)
 
-        event_dict = {
-            "type": event_type,
-            "content": content,
-            "room_id": room_id,
-            "sender": requester.user.to_string(),
-        }
-
-        if state_key is not None:
-            event_dict["state_key"] = state_key
+        origin_server_ts = None
+        if requester.app_service:
+            origin_server_ts = parse_integer(request, "ts")
 
         try:
             if event_type == EventTypes.Member:
@@ -287,8 +281,22 @@ class RoomStateEventRestServlet(TransactionRestServlet):
                     room_id=room_id,
                     action=membership,
                     content=content,
+                    origin_server_ts=origin_server_ts,
                 )
             else:
+                event_dict: JsonDict = {
+                    "type": event_type,
+                    "content": content,
+                    "room_id": room_id,
+                    "sender": requester.user.to_string(),
+                }
+
+                if state_key is not None:
+                    event_dict["state_key"] = state_key
+
+                if origin_server_ts is not None:
+                    event_dict["origin_server_ts"] = origin_server_ts
+
                 (
                     event,
                     _,
@@ -333,10 +341,10 @@ class RoomSendEventRestServlet(TransactionRestServlet):
             "sender": requester.user.to_string(),
         }
 
-        # Twisted will have processed the args by now.
-        assert request.args is not None
-        if b"ts" in request.args and requester.app_service:
-            event_dict["origin_server_ts"] = parse_integer(request, "ts", 0)
+        if requester.app_service:
+            origin_server_ts = parse_integer(request, "ts")
+            if origin_server_ts is not None:
+                event_dict["origin_server_ts"] = origin_server_ts
 
         try:
             (
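
With this change, application services can apply timestamp massaging to state
events via the same `ts` query parameter already accepted for timeline events.
A hedged sketch using the Client-Server API (the homeserver URL, appservice
token and room/topic values are placeholders):

```python
# Sketch: an application service backfilling a state event with a massaged
# origin_server_ts via the ts query parameter. All values are placeholders.
import json
from urllib.parse import quote
from urllib.request import Request, urlopen

HS = "http://localhost:8008"
AS_TOKEN = "as_token_..."  # the application service's as_token
room_id = "!room:example.org"
ts_ms = 1640995200000  # 2022-01-01T00:00:00Z

url = f"{HS}/_matrix/client/v3/rooms/{quote(room_id)}/state/m.room.topic?ts={ts_ms}"
req = Request(
    url,
    data=json.dumps({"topic": "Imported from the old system"}).encode("utf-8"),
    headers={
        "Authorization": f"Bearer {AS_TOKEN}",
        "Content-Type": "application/json",
    },
    method="PUT",
)
with urlopen(req) as resp:
    print(resp.read().decode("utf-8"))
```
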
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index bb28ded1b5..b4469eb964 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -290,8 +290,7 @@ class LoggingTransaction:
         # LoggingTransaction isn't expecting there to be any callbacks; assert that
         # is not the case.
         assert self.after_callbacks is not None
-        # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668
-        self.after_callbacks.append((callback, args, kwargs))  # type: ignore[arg-type]
+        self.after_callbacks.append((callback, args, kwargs))
 
     def async_call_after(
         self, callback: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs
@@ -312,8 +311,7 @@ class LoggingTransaction:
         # LoggingTransaction isn't expecting there to be any callbacks; assert that
         # is not the case.
         assert self.async_after_callbacks is not None
-        # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668
-        self.async_after_callbacks.append((callback, args, kwargs))  # type: ignore[arg-type]
+        self.async_after_callbacks.append((callback, args, kwargs))
 
     def call_on_exception(
         self, callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs
@@ -331,8 +329,7 @@ class LoggingTransaction:
         # LoggingTransaction isn't expecting there to be any callbacks; assert that
         # is not the case.
         assert self.exception_callbacks is not None
-        # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668
-        self.exception_callbacks.append((callback, args, kwargs))  # type: ignore[arg-type]
+        self.exception_callbacks.append((callback, args, kwargs))
 
     def fetchone(self) -> Optional[Tuple]:
         return self.txn.fetchone()
@@ -421,10 +418,7 @@ class LoggingTransaction:
         sql = self.database_engine.convert_param_style(sql)
         if args:
             try:
-                # The type-ignore should be redundant once mypy releases a version with
-                # https://github.com/python/mypy/pull/12668. (`args` might be empty,
-                # (but we'll catch the index error if so.)
-                sql_logger.debug("[SQL values] {%s} %r", self.name, args[0])  # type: ignore[index]
+                sql_logger.debug("[SQL values] {%s} %r", self.name, args[0])
             except Exception:
                 # Don't let logging failures stop SQL from working
                 pass
@@ -655,9 +649,7 @@ class DatabasePool:
         # For now, we just log an error, and hope that it works on the first attempt.
         # TODO: raise an exception.
 
-        # Type-ignore Mypy doesn't yet consider ParamSpec.args to be iterable; see
-        # https://github.com/python/mypy/pull/12668
-        for i, arg in enumerate(args):  # type: ignore[arg-type, var-annotated]
+        for i, arg in enumerate(args):
             if inspect.isgenerator(arg):
                 logger.error(
                     "Programming error: generator passed to new_transaction as "
@@ -665,9 +657,7 @@ class DatabasePool:
                     i,
                     func,
                 )
-        # Type-ignore Mypy doesn't yet consider ParamSpec.args to be a mapping; see
-        # https://github.com/python/mypy/pull/12668
-        for name, val in kwargs.items():  # type: ignore[attr-defined]
+        for name, val in kwargs.items():
             if inspect.isgenerator(val):
                 logger.error(
                     "Programming error: generator passed to new_transaction as "
@@ -2471,6 +2461,66 @@ def make_in_list_sql_clause(
         return "%s IN (%s)" % (column, ",".join("?" for _ in iterable)), list(iterable)
 
 
+# These overloads ensure that `columns` and `iterable` values have the same length.
+# Suppress "Single overload definition, multiple required" complaint.
+@overload  # type: ignore[misc]
+def make_tuple_in_list_sql_clause(
+    database_engine: BaseDatabaseEngine,
+    columns: Tuple[str, str],
+    iterable: Collection[Tuple[Any, Any]],
+) -> Tuple[str, list]:
+    ...
+
+
+def make_tuple_in_list_sql_clause(
+    database_engine: BaseDatabaseEngine,
+    columns: Tuple[str, ...],
+    iterable: Collection[Tuple[Any, ...]],
+) -> Tuple[str, list]:
+    """Returns an SQL clause that checks the given tuple of columns is in the iterable.
+
+    Args:
+        database_engine
+        columns: Names of the columns in the tuple.
+        iterable: The tuples to check the columns against.
+
+    Returns:
+        A tuple of SQL query and the args
+    """
+    if len(columns) == 0:
+        # Should be unreachable due to mypy, as long as the overloads are set up right.
+        if () in iterable:
+            return "TRUE", []
+        else:
+            return "FALSE", []
+
+    if len(columns) == 1:
+        # Use `= ANY(?)` on postgres.
+        return make_in_list_sql_clause(
+            database_engine, next(iter(columns)), [values[0] for values in iterable]
+        )
+
+    # There are multiple columns. Avoid using an `= ANY(?)` clause on postgres, as
+    # indices are not used when there are multiple columns. Instead, use an `IN`
+    # expression.
+    #
+    # `IN ((?, ...), ...)` with tuples is supported by postgres only, whereas
+    # `IN (VALUES (?, ...), ...)` is supported by both sqlite and postgres.
+    # Thus, the latter is chosen.
+
+    if len(iterable) == 0:
+        # A 0-length `VALUES` list is not allowed in sqlite or postgres.
+        # Also note that a 0-length `IN (...)` clause (not using `VALUES`) is not
+        # allowed in postgres.
+        return "FALSE", []
+
+    tuple_sql = "(%s)" % (",".join("?" for _ in columns),)
+    return "(%s) IN (VALUES %s)" % (
+        ",".join(column for column in columns),
+        ",".join(tuple_sql for _ in iterable),
+    ), [value for values in iterable for value in values]
+
+
 KV = TypeVar("KV")
 
 
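
For a concrete sense of the output, calling the new helper with two columns
and two tuples yields a `VALUES` expression and a flattened argument list. A
sketch of the expected shape, with the multi-column branch reimplemented
standalone (column and value names are illustrative):

```python
# Sketch: the shape produced by make_tuple_in_list_sql_clause for the
# multi-column case, reimplemented standalone so the snippet runs alone.
from typing import Any, Collection, Tuple

def tuple_in_list_sql(
    columns: Tuple[str, ...], iterable: Collection[Tuple[Any, ...]]
) -> Tuple[str, list]:
    if not iterable:
        # A 0-length VALUES list is not allowed in sqlite or postgres.
        return "FALSE", []
    tuple_sql = "(%s)" % (",".join("?" for _ in columns),)
    return "(%s) IN (VALUES %s)" % (
        ",".join(columns),
        ",".join(tuple_sql for _ in iterable),
    ), [value for values in iterable for value in values]

sql, args = tuple_in_list_sql(
    ("user_id", "device_id"),
    [("@alice:example.org", "DEV1"), ("@bob:example.org", "DEV2")],
)
print(sql)   # (user_id,device_id) IN (VALUES (?,?),(?,?))
print(args)  # ['@alice:example.org', 'DEV1', '@bob:example.org', 'DEV2']
```
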
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 8e9e1b0b4b..8a10ae800c 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -43,6 +43,7 @@ from synapse.storage.database import (
     LoggingDatabaseConnection,
     LoggingTransaction,
     make_in_list_sql_clause,
+    make_tuple_in_list_sql_clause,
 )
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.engines import PostgresEngine
@@ -278,7 +279,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
     def _get_e2e_device_keys_txn(
         self,
         txn: LoggingTransaction,
-        query_list: Collection[Tuple[str, str]],
+        query_list: Collection[Tuple[str, Optional[str]]],
         include_all_devices: bool = False,
         include_deleted_devices: bool = False,
     ) -> Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]:
@@ -288,8 +289,8 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
         cross-signing signatures which have been added subsequently (for which, see
         get_e2e_device_keys_and_signatures)
         """
-        query_clauses = []
-        query_params = []
+        query_clauses: List[str] = []
+        query_params_list: List[List[object]] = []
 
         if include_all_devices is False:
             include_deleted_devices = False
@@ -297,40 +298,64 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
         if include_deleted_devices:
             deleted_devices = set(query_list)
 
+        # Split the query list into queries for users and queries for particular
+        # devices.
+        user_list = []
+        user_device_list = []
         for (user_id, device_id) in query_list:
-            query_clause = "user_id = ?"
-            query_params.append(user_id)
-
-            if device_id is not None:
-                query_clause += " AND device_id = ?"
-                query_params.append(device_id)
-
-            query_clauses.append(query_clause)
-
-        sql = (
-            "SELECT user_id, device_id, "
-            "    d.display_name, "
-            "    k.key_json"
-            " FROM devices d"
-            "    %s JOIN e2e_device_keys_json k USING (user_id, device_id)"
-            " WHERE %s AND NOT d.hidden"
-        ) % (
-            "LEFT" if include_all_devices else "INNER",
-            " OR ".join("(" + q + ")" for q in query_clauses),
-        )
+            if device_id is None:
+                user_list.append(user_id)
+            else:
+                user_device_list.append((user_id, device_id))
 
-        txn.execute(sql, query_params)
+        if user_list:
+            user_id_in_list_clause, user_args = make_in_list_sql_clause(
+                txn.database_engine, "user_id", user_list
+            )
+            query_clauses.append(user_id_in_list_clause)
+            query_params_list.append(user_args)
+
+        if user_device_list:
+            # Divide the device queries into batches, to avoid excessively large
+            # queries.
+            for user_device_batch in batch_iter(user_device_list, 1024):
+                (
+                    user_device_id_in_list_clause,
+                    user_device_args,
+                ) = make_tuple_in_list_sql_clause(
+                    txn.database_engine, ("user_id", "device_id"), user_device_batch
+                )
+                query_clauses.append(user_device_id_in_list_clause)
+                query_params_list.append(user_device_args)
 
         result: Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]] = {}
-        for (user_id, device_id, display_name, key_json) in txn:
-            if include_deleted_devices:
-                deleted_devices.remove((user_id, device_id))
-            result.setdefault(user_id, {})[device_id] = DeviceKeyLookupResult(
-                display_name, db_to_json(key_json) if key_json else None
+        for query_clause, query_params in zip(query_clauses, query_params_list):
+            sql = (
+                "SELECT user_id, device_id, "
+                "    d.display_name, "
+                "    k.key_json"
+                " FROM devices d"
+                "    %s JOIN e2e_device_keys_json k USING (user_id, device_id)"
+                " WHERE %s AND NOT d.hidden"
+            ) % (
+                "LEFT" if include_all_devices else "INNER",
+                query_clause,
             )
 
+            txn.execute(sql, query_params)
+
+            for (user_id, device_id, display_name, key_json) in txn:
+                assert device_id is not None
+                if include_deleted_devices:
+                    deleted_devices.remove((user_id, device_id))
+                result.setdefault(user_id, {})[device_id] = DeviceKeyLookupResult(
+                    display_name, db_to_json(key_json) if key_json else None
+                )
+
         if include_deleted_devices:
             for user_id, device_id in deleted_devices:
+                if device_id is None:
+                    continue
                 result.setdefault(user_id, {})[device_id] = None
 
         return result
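To make the split above concrete, a small illustrative example of the two kinds of entry `query_list` can now contain (the user and device IDs are made up):

```python
# A None device_id asks for all of a user's devices and becomes part of a
# `user_id IN (...)` clause; a concrete device_id asks for one device and
# is batched into a tuple-IN clause, as in the transaction above.
query_list = [
    ("@alice:test", None),      # all of alice's devices
    ("@bob:test", "BOBPHONE"),  # just this one device of bob's
]
```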
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 3fdf128d9e..cdc9ee5a37 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -205,6 +205,9 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
     ):
         super().__init__(database, db_conn, hs)
 
+        # Track when the process started.
+        self._started_ts = self._clock.time_msec()
+
         # These get correctly set by _find_stream_orderings_for_times_txn
         self.stream_ordering_month_ago: Optional[int] = None
         self.stream_ordering_day_ago: Optional[int] = None
@@ -224,6 +227,10 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
                 self._rotate_notifs, 30 * 1000
             )
 
+            self._clear_old_staging_loop = self._clock.looping_call(
+                self._clear_old_push_actions_staging, 30 * 60 * 1000
+            )
+
         self.db_pool.updates.register_background_index_update(
             "event_push_summary_unique_index",
             index_name="event_push_summary_unique_index",
@@ -791,7 +798,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
         # can be used to insert into the `event_push_actions_staging` table.
         def _gen_entry(
             user_id: str, actions: Collection[Union[Mapping, str]]
-        ) -> Tuple[str, str, str, int, int, int, str]:
+        ) -> Tuple[str, str, str, int, int, int, str, int]:
             is_highlight = 1 if _action_has_highlight(actions) else 0
             notif = 1 if "notify" in actions else 0
             return (
@@ -802,6 +809,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
                 is_highlight,  # highlight column
                 int(count_as_unread),  # unread column
                 thread_id,  # thread_id column
+                self._clock.time_msec(),  # inserted_ts column
             )
 
         await self.db_pool.simple_insert_many(
@@ -814,6 +822,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
                 "highlight",
                 "unread",
                 "thread_id",
+                "inserted_ts",
             ),
             values=[
                 _gen_entry(user_id, actions)
@@ -1340,6 +1349,53 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             if done:
                 break
 
+    @wrap_as_background_process("_clear_old_push_actions_staging")
+    async def _clear_old_push_actions_staging(self) -> None:
+        """Clear out any old event push actions from the staging table for
+        events that we failed to persist.
+        """
+
+        # We delete anything more than an hour old, on the assumption that we'll
+        # never take more than an hour to persist an event.
+        delete_before_ts = self._clock.time_msec() - 60 * 60 * 1000
+
+        if self._started_ts > delete_before_ts:
+            # We need to wait at least an hour after startup before deleting
+            # anything, so that we know it's safe to delete rows with a NULL
+            # `inserted_ts`.
+            return
+
+        # We don't have an index on `inserted_ts`; instead we assume that the
+        # number of "live" rows in `event_push_actions_staging` is small enough
+        # that an infrequent periodic scan won't cause a problem.
+        #
+        # Note: we also delete any rows with a NULL `inserted_ts`; this is safe
+        # as we added a default value for new rows, so any NULL row must be at
+        # least an hour old.
+        limit = 1000
+        sql = """
+            DELETE FROM event_push_actions_staging WHERE event_id IN (
+                SELECT event_id FROM event_push_actions_staging WHERE
+                inserted_ts < ? OR inserted_ts IS NULL
+                LIMIT ?
+            )
+        """
+
+        def _clear_old_push_actions_staging_txn(txn: LoggingTransaction) -> bool:
+            txn.execute(sql, (delete_before_ts, limit))
+            return txn.rowcount >= limit
+
+        while True:
+            # Returns true if there may be more rows to delete from the table.
+            deleted = await self.db_pool.runInteraction(
+                "_clear_old_push_actions_staging", _clear_old_push_actions_staging_txn
+            )
+
+            if not deleted:
+                return
+
+            # We sleep to ensure that we don't overwhelm the DB.
+            await self._clock.sleep(1.0)
+
 
 class EventPushActionsStore(EventPushActionsWorkerStore):
     EPA_HIGHLIGHT_INDEX = "epa_highlight_index"
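The cleanup above follows a batched-delete idiom that is worth calling out on its own; roughly, as a self-contained sketch (the function name and parameters are hypothetical stand-ins for the attributes used in the method):

```python
async def delete_in_batches(
    db_pool, clock, delete_before_ts: int, limit: int = 1000
) -> None:
    # Delete at most `limit` rows per transaction, sleeping between rounds
    # so a large backlog doesn't monopolise the database.
    sql = """
        DELETE FROM event_push_actions_staging WHERE event_id IN (
            SELECT event_id FROM event_push_actions_staging
            WHERE inserted_ts < ? OR inserted_ts IS NULL
            LIMIT ?
        )
    """

    def _txn(txn) -> bool:
        txn.execute(sql, (delete_before_ts, limit))
        # A full batch means there may be more rows left to delete.
        return txn.rowcount >= limit

    while await db_pool.runInteraction("delete_in_batches", _txn):
        await clock.sleep(1.0)
```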
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index bb489b8189..3e15827986 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -2174,7 +2174,7 @@ class PersistEventsStore:
             (
                 (event.event_id,)
                 for event, _ in all_events_and_contexts
-                if not event.internal_metadata.is_outlier()
+                if event.internal_metadata.is_notifiable()
             ),
         )
 
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 059eef5c22..7412bce255 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -1134,6 +1134,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
             get_rooms_for_retention_period_in_range_txn,
         )
 
+    @cached(iterable=True)
     async def get_partial_state_servers_at_join(self, room_id: str) -> Sequence[str]:
         """Gets the list of servers in a partial state room at the time we joined it.
 
@@ -1216,6 +1217,9 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
             keyvalues={"room_id": room_id},
         )
         self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
+        self._invalidate_cache_and_stream(
+            txn, self.get_partial_state_servers_at_join, (room_id,)
+        )
 
         # We now delete anything from `device_lists_remote_pending` with a
         # stream ID less than the minimum
@@ -1862,6 +1866,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
             values=((room_id, s) for s in servers),
         )
         self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
+        self._invalidate_cache_and_stream(
+            txn, self.get_partial_state_servers_at_join, (room_id,)
+        )
 
     async def write_partial_state_rooms_join_event_id(
         self,
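As context for the `@cached` annotation added above, a hedged sketch of the contract it imposes: every transaction that changes the backing table must invalidate the cache on this worker and stream the invalidation to the others (the `_update_servers_txn` writer is hypothetical):

```python
@cached(iterable=True)
async def get_partial_state_servers_at_join(self, room_id: str) -> Sequence[str]:
    ...

def _update_servers_txn(self, txn: LoggingTransaction, room_id: str) -> None:
    # ... write to the backing table here, then invalidate locally and
    # replicate the invalidation to other workers.
    self._invalidate_cache_and_stream(
        txn, self.get_partial_state_servers_at_join, (room_id,)
    )
```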
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index eda64dd5e7..ddcda6f1a7 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -641,7 +641,7 @@ class SearchStore(SearchBackgroundUpdateStore):
             raise Exception("Unrecognized database engine")
 
         # mypy expects to append only a `str`, not an `int`
-        args.append(limit)  # type: ignore[arg-type]
+        args.append(limit)
 
         results = await self.db_pool.execute(
             "search_rooms", self.db_pool.cursor_to_dict, sql, *args
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index f29424d17a..4a5c947699 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -85,6 +85,7 @@ Changes in SCHEMA_VERSION = 73;
       events over federation.
     - Add indexes to various tables (`event_failed_pull_attempts`, `insertion_events`,
       `batch_events`) to make it easy to delete all associated rows when purging a room.
+    - `inserted_ts` column is added to `event_push_actions_staging` table.
 """
 
 
diff --git a/synapse/storage/schema/main/delta/73/05old_push_actions.sql.postgres b/synapse/storage/schema/main/delta/73/05old_push_actions.sql.postgres
new file mode 100644
index 0000000000..4af1a8470b
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/05old_push_actions.sql.postgres
@@ -0,0 +1,22 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a column so that we know when a push action was inserted, to make it
+-- easier to clear out old ones.
+ALTER TABLE event_push_actions_staging ADD COLUMN inserted_ts BIGINT;
+
+-- We now add a default for *new* rows. We don't do this above as we don't want
+-- to have to update every existing row with the new default.
+ALTER TABLE event_push_actions_staging ALTER COLUMN inserted_ts SET DEFAULT extract(epoch from now()) * 1000;
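For reference, a minimal sketch (assuming a psycopg2 connection `conn`) of why the default is applied in a second statement: a bare `ADD COLUMN` is a catalog-only change that leaves existing rows NULL, and `SET DEFAULT` only affects rows inserted afterwards:

```python
with conn.cursor() as cur:
    # Existing rows get NULL; no table rewrite happens.
    cur.execute(
        "ALTER TABLE event_push_actions_staging ADD COLUMN inserted_ts BIGINT"
    )
    # Only rows inserted from now on pick up the default, so a NULL
    # inserted_ts reliably marks a pre-migration row.
    cur.execute(
        "ALTER TABLE event_push_actions_staging ALTER COLUMN inserted_ts"
        " SET DEFAULT extract(epoch from now()) * 1000"
    )
```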
diff --git a/synapse/storage/schema/main/delta/73/05old_push_actions.sql.sqlite b/synapse/storage/schema/main/delta/73/05old_push_actions.sql.sqlite
new file mode 100644
index 0000000000..7482dabba2
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/05old_push_actions.sql.sqlite
@@ -0,0 +1,24 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- On SQLite we must be running in monolith mode, with Synapse itself applying
+-- the database update, so it's safe to assume that `event_push_actions_staging`
+-- is empty (across a restart an event must either have been fully persisted,
+-- or we will recalculate its push actions).
+DELETE FROM event_push_actions_staging;
+
+-- Add a column so that we know when a push action was inserted, to make it
+-- easier to clear out old ones.
+ALTER TABLE event_push_actions_staging ADD COLUMN inserted_ts BIGINT;
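A short sketch of why the SQLite delta carries no default: SQLite's `ALTER TABLE ... ADD COLUMN` rejects non-constant default expressions such as the current time, so `inserted_ts` is instead populated from Python on insert (the in-memory database below is purely illustrative):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE event_push_actions_staging (event_id TEXT)")
# Fine: a plain nullable column, as in the delta above.
conn.execute("ALTER TABLE event_push_actions_staging ADD COLUMN inserted_ts BIGINT")
# Would raise sqlite3.OperationalError: ADD COLUMN may not use a
# non-constant default expression.
# conn.execute(
#     "ALTER TABLE event_push_actions_staging"
#     " ADD COLUMN other_ts BIGINT DEFAULT (strftime('%s','now') * 1000)"
# )
```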
diff --git a/synapse/visibility.py b/synapse/visibility.py
index c810a05907..c4048d2477 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -162,6 +162,10 @@ async def filter_event_for_clients_with_state(
     if event.internal_metadata.is_soft_failed():
         return []
 
+    # Fast path if we don't have any user IDs to check.
+    if not user_ids:
+        return ()
+
     # Make a set for all user IDs that haven't been filtered out by a check.
     allowed_user_ids = set(user_ids)
 
diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py
index 50e376f695..a538215931 100644
--- a/tests/federation/test_federation_client.py
+++ b/tests/federation/test_federation_client.py
@@ -23,14 +23,23 @@ from twisted.test.proto_helpers import MemoryReactor
 
 from synapse.api.room_versions import RoomVersions
 from synapse.events import EventBase
+from synapse.rest import admin
+from synapse.rest.client import login, room
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util import Clock
 
+from tests.test_utils import event_injection
 from tests.unittest import FederatingHomeserverTestCase
 
 
 class FederationClientTest(FederatingHomeserverTestCase):
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
     def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer):
         super().prepare(reactor, clock, homeserver)
 
@@ -231,6 +240,72 @@ class FederationClientTest(FederatingHomeserverTestCase):
 
         return remote_pdu
 
+    def test_backfill_invalid_signature_records_failed_pull_attempts(
+        self,
+    ) -> None:
+        """
+        Test to make sure that events from /backfill with invalid signatures get
+        recorded as failed pull attempts.
+        """
+        OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
+        main_store = self.hs.get_datastores().main
+
+        # Create the room
+        user_id = self.register_user("kermit", "test")
+        tok = self.login("kermit", "test")
+        room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+
+        # We purposely don't run `add_hashes_and_signatures_from_other_server`
+        # over this because we want the signature check to fail.
+        pulled_event, _ = self.get_success(
+            event_injection.create_event(
+                self.hs,
+                room_id=room_id,
+                sender=OTHER_USER,
+                type="test_event_type",
+                content={"body": "garply"},
+            )
+        )
+
+        # We expect an outbound request to /backfill, so stub that out
+        self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed(
+            _mock_response(
+                {
+                    "origin": "yet.another.server",
+                    "origin_server_ts": 900,
+                    # Mimic the other server returning our new `pulled_event`
+                    "pdus": [pulled_event.get_pdu_json()],
+                }
+            )
+        )
+
+        self.get_success(
+            self.hs.get_federation_client().backfill(
+                # We use "yet.another.server" instead of
+                # `self.OTHER_SERVER_NAME` because we want to see the behavior
+                # from `_check_sigs_and_hash_and_fetch_one` where it tries to
+                # fetch the PDU again from the origin server if the signature
+                # fails. We just want to make sure that the failure is counted
+                # from both code paths.
+                dest="yet.another.server",
+                room_id=room_id,
+                limit=1,
+                extremities=[pulled_event.event_id],
+            ),
+        )
+
+        # Make sure our failed pull attempt was recorded
+        backfill_num_attempts = self.get_success(
+            main_store.db_pool.simple_select_one_onecol(
+                table="event_failed_pull_attempts",
+                keyvalues={"event_id": pulled_event.event_id},
+                retcol="num_attempts",
+            )
+        )
+        # This is 2 because it failed once when pulling from
+        # `self.OTHER_SERVER_NAME` and once from "yet.another.server"
+        self.assertEqual(backfill_num_attempts, 2)
+
 
 def _mock_response(resp: JsonDict):
     body = json.dumps(resp).encode("utf-8")
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index fef3b72d76..988cdb746d 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -654,6 +654,14 @@ class RelationsTestCase(BaseRelationsTestCase):
         )
 
         # We also expect to get the original event (the id of which is self.parent_id)
+        # when requesting the unstable endpoint; it is not returned by the
+        # stable endpoint exercised above.
+        self.assertNotIn("original_event", channel.json_body)
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
         self.assertEqual(
             channel.json_body["original_event"]["event_id"], self.parent_id
         )
@@ -755,11 +763,6 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
             channel.json_body["chunk"][0],
         )
 
-        # We also expect to get the original event (the id of which is self.parent_id)
-        self.assertEqual(
-            channel.json_body["original_event"]["event_id"], self.parent_id
-        )
-
         # Make sure next_batch has something in it that looks like it could be a
         # valid token.
         self.assertIsInstance(
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index e281aef779..5e66b5b26c 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -20,7 +20,7 @@
 import json
 from http import HTTPStatus
 from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
-from unittest.mock import Mock, call
+from unittest.mock import Mock, call, patch
 from urllib import parse as urlparse
 
 from parameterized import param, parameterized
@@ -39,9 +39,10 @@ from synapse.api.constants import (
     RoomTypes,
 )
 from synapse.api.errors import Codes, HttpResponseException
+from synapse.appservice import ApplicationService
 from synapse.handlers.pagination import PurgeStatus
 from synapse.rest import admin
-from synapse.rest.client import account, directory, login, profile, room, sync
+from synapse.rest.client import account, directory, login, profile, register, room, sync
 from synapse.server import HomeServer
 from synapse.types import JsonDict, RoomAlias, UserID, create_requester
 from synapse.util import Clock
@@ -710,7 +711,7 @@ class RoomsCreateTestCase(RoomBase):
         self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
         self.assertTrue("room_id" in channel.json_body)
         assert channel.resource_usage is not None
-        self.assertEqual(35, channel.resource_usage.db_txn_count)
+        self.assertEqual(34, channel.resource_usage.db_txn_count)
 
     def test_post_room_initial_state(self) -> None:
         # POST with initial_state config key, expect new room id
@@ -723,7 +724,7 @@ class RoomsCreateTestCase(RoomBase):
         self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
         self.assertTrue("room_id" in channel.json_body)
         assert channel.resource_usage is not None
-        self.assertEqual(38, channel.resource_usage.db_txn_count)
+        self.assertEqual(37, channel.resource_usage.db_txn_count)
 
     def test_post_room_visibility_key(self) -> None:
         # POST with visibility config key, expect new room id
@@ -1252,6 +1253,120 @@ class RoomJoinTestCase(RoomBase):
         )
 
 
+class RoomAppserviceTsParamTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        room.register_servlets,
+        synapse.rest.admin.register_servlets,
+        register.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.appservice_user, _ = self.register_appservice_user(
+            "as_user_potato", self.appservice.token
+        )
+
+        # Create a room as the appservice user.
+        args = {
+            "access_token": self.appservice.token,
+            "user_id": self.appservice_user,
+        }
+        channel = self.make_request(
+            "POST",
+            f"/_matrix/client/r0/createRoom?{urlparse.urlencode(args)}",
+            content={"visibility": "public"},
+        )
+
+        assert channel.code == 200
+        self.room = channel.json_body["room_id"]
+
+        self.main_store = self.hs.get_datastores().main
+
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+        config = self.default_config()
+
+        self.appservice = ApplicationService(
+            token="i_am_an_app_service",
+            id="1234",
+            namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]},
+            # Note: this user does not have to match the regex above
+            sender="@as_main:test",
+        )
+
+        mock_load_appservices = Mock(return_value=[self.appservice])
+        with patch(
+            "synapse.storage.databases.main.appservice.load_appservices",
+            mock_load_appservices,
+        ):
+            hs = self.setup_test_homeserver(config=config)
+        return hs
+
+    def test_send_event_ts(self) -> None:
+        """Test sending a non-state event with a custom timestamp."""
+        ts = 1
+
+        url_params = {
+            "user_id": self.appservice_user,
+            "ts": ts,
+        }
+        channel = self.make_request(
+            "PUT",
+            path=f"/_matrix/client/r0/rooms/{self.room}/send/m.room.message/1234?"
+            + urlparse.urlencode(url_params),
+            content={"body": "test", "msgtype": "m.text"},
+            access_token=self.appservice.token,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        event_id = channel.json_body["event_id"]
+
+        # Ensure the event was persisted with the correct timestamp.
+        res = self.get_success(self.main_store.get_event(event_id))
+        self.assertEqual(ts, res.origin_server_ts)
+
+    def test_send_state_event_ts(self) -> None:
+        """Test sending a state event with a custom timestamp."""
+        ts = 1
+
+        url_params = {
+            "user_id": self.appservice_user,
+            "ts": ts,
+        }
+        channel = self.make_request(
+            "PUT",
+            path=f"/_matrix/client/r0/rooms/{self.room}/state/m.room.name?"
+            + urlparse.urlencode(url_params),
+            content={"name": "test"},
+            access_token=self.appservice.token,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        event_id = channel.json_body["event_id"]
+
+        # Ensure the event was persisted with the correct timestamp.
+        res = self.get_success(self.main_store.get_event(event_id))
+        self.assertEqual(ts, res.origin_server_ts)
+
+    def test_send_membership_event_ts(self) -> None:
+        """Test sending a membership event with a custom timestamp."""
+        ts = 1
+
+        url_params = {
+            "user_id": self.appservice_user,
+            "ts": ts,
+        }
+        channel = self.make_request(
+            "PUT",
+            path=f"/_matrix/client/r0/rooms/{self.room}/state/m.room.member/{self.appservice_user}?"
+            + urlparse.urlencode(url_params),
+            content={"membership": "join", "display_name": "test"},
+            access_token=self.appservice.token,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        event_id = channel.json_body["event_id"]
+
+        # Ensure the event was persisted with the correct timestamp.
+        res = self.get_success(self.main_store.get_event(event_id))
+        self.assertEqual(ts, res.origin_server_ts)
+
+
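Outside the test harness, the flow these tests exercise looks roughly like this (a sketch: the homeserver URL, room ID, and token are placeholders, and the `ts` parameter is only honoured for application-service senders):

```python
import requests

resp = requests.put(
    "https://synapse.example.com/_matrix/client/r0/rooms/"
    "!room:example.com/send/m.room.message/txn1",
    params={"user_id": "@as_user_potato:example.com", "ts": 1},
    headers={"Authorization": "Bearer i_am_an_app_service"},
    json={"msgtype": "m.text", "body": "test"},
)
resp.raise_for_status()
event_id = resp.json()["event_id"]  # persisted with origin_server_ts == 1
```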
 class RoomJoinRatelimitTestCase(RoomBase):
     user_id = "@sid1:red"
 
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index e8b4a5644b..c55c4db970 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -96,8 +96,12 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
 
         # Test each of the registered users is marked as active
         timestamp = self.get_success(self.store.user_last_seen_monthly_active(user1))
+        # Mypy notes that one shouldn't compare Optional[int] to 0 with assertGreater.
+        # Check that timestamp really is an int.
+        assert timestamp is not None
         self.assertGreater(timestamp, 0)
         timestamp = self.get_success(self.store.user_last_seen_monthly_active(user2))
+        assert timestamp is not None
         self.assertGreater(timestamp, 0)
 
         # Test that users with reserved 3pids are not removed from the MAU table
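The `assert ... is not None` additions above use a standard mypy narrowing idiom; as a self-contained illustration (the function is hypothetical):

```python
from typing import Optional

def require_timestamp(timestamp: Optional[int]) -> int:
    # mypy narrows Optional[int] to int after the assert, which is what
    # lets assertGreater(timestamp, 0) type-check in the tests above.
    assert timestamp is not None
    return timestamp
```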
@@ -166,10 +170,11 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
         self.get_success(self.store.upsert_monthly_active_user(user_id2))
 
         result = self.get_success(self.store.user_last_seen_monthly_active(user_id1))
+        assert result is not None
         self.assertGreater(result, 0)
 
         result = self.get_success(self.store.user_last_seen_monthly_active(user_id3))
-        self.assertNotEqual(result, 0)
+        self.assertIsNone(result)
 
     @override_config({"max_mau_value": 5})
     def test_reap_monthly_active_users(self):
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 779fad1f63..80e5c590d8 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -86,8 +86,8 @@ class MessageAcceptTests(unittest.HomeserverTestCase):
 
         federation_event_handler._check_event_auth = _check_event_auth
         self.client = self.homeserver.get_federation_client()
-        self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed(
-            pdus
+        self.client._check_sigs_and_hash_for_pulled_events_and_fetch = (
+            lambda dest, pdus, **k: succeed(pdus)
         )
 
         # Send the join, it should return None (which is not an error)
diff --git a/tests/utils.py b/tests/utils.py
index 65db437697..045a8b5fa7 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -270,9 +270,7 @@ class MockClock:
         *args: P.args,
         **kwargs: P.kwargs,
     ) -> None:
-        # This type-ignore should be redundant once we use a mypy release with
-        # https://github.com/python/mypy/pull/12668.
-        self.loopers.append(Looper(function, interval / 1000.0, self.now, args, kwargs))  # type: ignore[arg-type]
+        self.loopers.append(Looper(function, interval / 1000.0, self.now, args, kwargs))
 
     def cancel_call_later(self, timer: Timer, ignore_errs: bool = False) -> None:
         if timer.expired: