author    Will Hunt <willh@matrix.org>  2021-08-01 12:38:40 +0100
committer Will Hunt <willh@matrix.org>  2021-08-01 12:38:40 +0100
commit    fad91897ec79d9ecfad78f9c1bbf5c6df1d8423a
tree      245cc7876ac2bc9bf6aa27b3b035782ee1a10644
parent    Merge remote-tracking branch 'origin/develop' into hs/hacked-together-event-c...
parent    Fix deb build script to set prerelease flag correctly (#10500)
Merge remote-tracking branch 'origin/develop' into hs/hacked-together-event-cache
81 files changed, 1819 insertions, 257 deletions
diff --git a/CHANGES.md b/CHANGES.md index 13d3654095..6533249281 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,31 @@ -Synapse 1.39.0rc2 (2021-07-22) +Synapse 1.39.0 (2021-07-29) +=========================== + +No significant changes. + + +Synapse 1.39.0rc3 (2021-07-28) ============================== Bugfixes -------- -- Always include `device_one_time_keys_count` key in `/sync` response to work around a bug in Element Android that broke encryption for new devices. ([\#10457](https://github.com/matrix-org/synapse/issues/10457)) +- Fix a bug introduced in Synapse 1.38 which caused an exception at startup when SAML authentication was enabled. ([\#10477](https://github.com/matrix-org/synapse/issues/10477)) +- Fix a long-standing bug where Synapse would not inform clients that a device had exhausted its one-time-key pool, potentially causing problems decrypting events. ([\#10485](https://github.com/matrix-org/synapse/issues/10485)) +- Fix reporting old R30 stats as R30v2 stats. Introduced in v1.39.0rc1. ([\#10486](https://github.com/matrix-org/synapse/issues/10486)) + + +Internal Changes ---------------- + +- Fix an error which prevented the GitHub Actions workflow which builds the Docker images from running. ([\#10461](https://github.com/matrix-org/synapse/issues/10461)) +- Fix the release script to correctly version the Debian changelog when doing RCs. ([\#10465](https://github.com/matrix-org/synapse/issues/10465)) + + +Synapse 1.39.0rc2 (2021-07-22) +============================== + +This release also includes the changes in v1.38.1. Internal Changes diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 80ef6aa235..e7eef23419 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -155,7 +155,7 @@ source ./env/bin/activate ./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder ``` -## Run the unit tests. +## Run the unit tests (Twisted trial). The unit tests run parts of Synapse, including your changes, to see if anything was broken. They are slower than the linters but will typically catch more errors. @@ -186,7 +186,7 @@ SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests ``` -## Run the integration tests. +## Run the integration tests ([SyTest](https://github.com/matrix-org/sytest)). The integration tests are a more comprehensive suite of tests. They run a full version of Synapse, including your changes, to check if @@ -203,6 +203,43 @@ $ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md). +## Run the integration tests ([Complement](https://github.com/matrix-org/complement)). + +[Complement](https://github.com/matrix-org/complement) is a suite of black-box tests that can be run on any homeserver implementation. It can also be thought of as an end-to-end (e2e) test suite. + +It's often nice to develop on Synapse and write Complement tests at the same time. +Here is how to run your local Synapse checkout against your local Complement checkout. + +(check out [`complement`](https://github.com/matrix-org/complement) alongside your `synapse` checkout) +```sh +COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh +``` + +To run a specific test file, you can pass the test name at the end of the command. The name passed comes from the naming structure in your Complement tests.
If you're unsure of the name, you can do a full run and copy it from the test output: + +```sh +COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory +``` + +To run a specific test, you can specify the whole name structure: + +```sh +COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory/parallel/Backfilled_historical_events_resolve_with_proper_state_in_correct_order +``` + + +### Access the homeserver database after Complement test runs. + +If you're curious what the database looks like after you run some tests, here are some steps to get you going in Synapse: + + 1. In your Complement test, comment out `defer deployment.Destroy(t)` and replace it with `defer time.Sleep(2 * time.Hour)` to keep the homeserver running after the tests complete + 1. Start the Complement tests + 1. Find the name of the container, `docker ps -f name=complement_` (this will filter for just the Complement-related Docker containers) + 1. Access the container, replacing the name with what you found in the previous step: `docker exec -it complement_1_hs_with_application_service.hs1_2 /bin/bash` + 1. Install the sqlite3 command-line client, `apt-get update && apt-get install -y sqlite3` + 1. Then run `sqlite3` and open the database with `.open /conf/homeserver.db` (this database path comes from the Synapse homeserver.yaml) + + # 9. Submit your patch. Once you're happy with your patch, it's time to prepare a Pull Request. diff --git a/changelog.d/10245.feature b/changelog.d/10245.feature new file mode 100644 index 0000000000..b3c48cc2cc --- /dev/null +++ b/changelog.d/10245.feature @@ -0,0 +1 @@ +Make historical events discoverable from backfill for servers without any scrollback history (part of MSC2716). diff --git a/changelog.d/10407.feature b/changelog.d/10407.feature new file mode 100644 index 0000000000..db277d9ecd --- /dev/null +++ b/changelog.d/10407.feature @@ -0,0 +1 @@ +Add a buffered logging handler which periodically flushes itself. diff --git a/changelog.d/10410.bugfix b/changelog.d/10410.bugfix new file mode 100644 index 0000000000..65b418fd35 --- /dev/null +++ b/changelog.d/10410.bugfix @@ -0,0 +1 @@ +Improve character set detection in URL previews by supporting underscores (in addition to hyphens). Contributed by @srividyut. diff --git a/changelog.d/10411.feature b/changelog.d/10411.feature new file mode 100644 index 0000000000..ef0ab84b17 --- /dev/null +++ b/changelog.d/10411.feature @@ -0,0 +1 @@ +Add support for https connections to a proxy server. Contributed by @Bubu and @dklimpel. \ No newline at end of file diff --git a/changelog.d/10413.feature b/changelog.d/10413.feature new file mode 100644 index 0000000000..3964db7e0e --- /dev/null +++ b/changelog.d/10413.feature @@ -0,0 +1 @@ +Support for [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-doc/pull/2285). Contributed by @SimonBrandner. diff --git a/changelog.d/10415.misc b/changelog.d/10415.misc new file mode 100644 index 0000000000..3b9501acbb --- /dev/null +++ b/changelog.d/10415.misc @@ -0,0 +1 @@ +Remove shebang line from module files. diff --git a/changelog.d/10439.bugfix b/changelog.d/10439.bugfix new file mode 100644 index 0000000000..74e5a25126 --- /dev/null +++ b/changelog.d/10439.bugfix @@ -0,0 +1 @@ +Fix events with floating outlier state being rejected over federation.
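As an aside to the database-inspection steps above: once you have a shell inside the Complement container, the same check can be scripted rather than done interactively. A minimal sketch using Python's built-in `sqlite3` module (assuming the `/conf/homeserver.db` path from those steps):

```python
import sqlite3

# Open the homeserver database inside the Complement container; the path
# comes from the container's homeserver.yaml, as noted in the steps above.
conn = sqlite3.connect("/conf/homeserver.db")

# List every table Synapse created during the test run.
for (name,) in conn.execute(
    "SELECT name FROM sqlite_master WHERE type = 'table' ORDER BY name"
):
    print(name)

conn.close()
```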
diff --git a/changelog.d/10447.feature b/changelog.d/10447.feature new file mode 100644 index 0000000000..df8bb51167 --- /dev/null +++ b/changelog.d/10447.feature @@ -0,0 +1 @@ +Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events. diff --git a/changelog.d/10450.misc b/changelog.d/10450.misc new file mode 100644 index 0000000000..aa646f0841 --- /dev/null +++ b/changelog.d/10450.misc @@ -0,0 +1 @@ + Update type annotations to work with forthcoming Twisted 21.7.0 release. diff --git a/changelog.d/10455.bugfix b/changelog.d/10455.bugfix new file mode 100644 index 0000000000..23c74a3c89 --- /dev/null +++ b/changelog.d/10455.bugfix @@ -0,0 +1 @@ +Fix `synapse_federation_server_oldest_inbound_pdu_in_staging` Prometheus metric to not report a max age of 51 years when the queue is empty. diff --git a/changelog.d/10461.misc b/changelog.d/10461.misc deleted file mode 100644 index 5035e26825..0000000000 --- a/changelog.d/10461.misc +++ /dev/null @@ -1 +0,0 @@ -Fix an error which prevented the Github Actions workflow to build the docker images from running. diff --git a/changelog.d/10483.doc b/changelog.d/10483.doc new file mode 100644 index 0000000000..0f699fafdd --- /dev/null +++ b/changelog.d/10483.doc @@ -0,0 +1 @@ +Document how to use Complement while developing a new Synapse feature. diff --git a/changelog.d/10489.feature b/changelog.d/10489.feature new file mode 100644 index 0000000000..df8bb51167 --- /dev/null +++ b/changelog.d/10489.feature @@ -0,0 +1 @@ +Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events. diff --git a/changelog.d/10490.misc b/changelog.d/10490.misc new file mode 100644 index 0000000000..630c31adae --- /dev/null +++ b/changelog.d/10490.misc @@ -0,0 +1 @@ +Fix up type annotations to work with Twisted 21.7. diff --git a/changelog.d/10491.misc b/changelog.d/10491.misc new file mode 100644 index 0000000000..3867cf2682 --- /dev/null +++ b/changelog.d/10491.misc @@ -0,0 +1 @@ +Improve type annotations for `ObservableDeferred`. diff --git a/changelog.d/10499.bugfix b/changelog.d/10499.bugfix new file mode 100644 index 0000000000..6487af6c96 --- /dev/null +++ b/changelog.d/10499.bugfix @@ -0,0 +1 @@ +Fix a bug which caused an explicit assignment of power-level 0 to a user to be misinterpreted in rare circumstances. diff --git a/changelog.d/10500.misc b/changelog.d/10500.misc new file mode 100644 index 0000000000..dbaff57364 --- /dev/null +++ b/changelog.d/10500.misc @@ -0,0 +1 @@ +Fix a bug which caused production debian packages to be incorrectly marked as 'prerelease'. diff --git a/debian/changelog b/debian/changelog index ce8e2105e7..341c1ac992 100644 --- a/debian/changelog +++ b/debian/changelog @@ -4,6 +4,18 @@ matrix-synapse-py3 (1.39.0ubuntu1) UNRELEASED; urgency=medium -- Richard van der Hoff <richard@matrix.org> Tue, 20 Jul 2021 00:10:03 +0100 +matrix-synapse-py3 (1.39.0) stable; urgency=medium + + * New synapse release 1.39.0. + + -- Synapse Packaging team <packages@matrix.org> Thu, 29 Jul 2021 09:59:00 +0100 + +matrix-synapse-py3 (1.39.0~rc3) stable; urgency=medium + + * New synapse release 1.39.0~rc3. + + -- Synapse Packaging team <packages@matrix.org> Wed, 28 Jul 2021 13:30:58 +0100 + matrix-synapse-py3 (1.38.1) stable; urgency=medium * New synapse release 1.38.1. 
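The `~rc` form in the debian/changelog entries above is deliberate: in Debian version ordering a tilde sorts before everything, including the empty string, so `1.39.0~rc3` correctly precedes `1.39.0`, whereas `1.39.0rc3` would sort after it and RC installs would never upgrade to the final release. A sketch verifying this with `dpkg` (assumes a system with dpkg available):

```python
import subprocess

def deb_version_lt(a: str, b: str) -> bool:
    """True if Debian version `a` sorts strictly before `b`."""
    # dpkg --compare-versions exits 0 when the stated relation holds.
    return (
        subprocess.run(["dpkg", "--compare-versions", a, "lt", b]).returncode == 0
    )

print(deb_version_lt("1.39.0~rc3", "1.39.0"))  # True: the RC precedes the release
print(deb_version_lt("1.39.0rc3", "1.39.0"))   # False: without the tilde it sorts after
```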
diff --git a/docker/build_debian.sh b/docker/build_debian.sh index f572ed9aa0..801ff45471 100644 --- a/docker/build_debian.sh +++ b/docker/build_debian.sh @@ -11,10 +11,6 @@ DIST=`cut -d ':' -f2 <<< $distro` cp -aT /synapse/source /synapse/build cd /synapse/build -# add an entry to the changelog for this distribution -dch -M -l "+$DIST" "build for $DIST" -dch -M -r "" --force-distribution --distribution "$DIST" - # if this is a prerelease, set the Section accordingly. # # When the package is later added to the package repo, reprepro will use the @@ -23,11 +19,14 @@ dch -M -r "" --force-distribution --distribution "$DIST" DEB_VERSION=`dpkg-parsechangelog -SVersion` case $DEB_VERSION in - *rc*|*a*|*b*|*c*) + *~rc*|*~a*|*~b*|*~c*) sed -ie '/^Section:/c\Section: prerelease' debian/control ;; esac +# add an entry to the changelog for this distribution +dch -M -l "+$DIST" "build for $DIST" +dch -M -r "" --force-distribution --distribution "$DIST" dpkg-buildpackage -us -uc diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml index 669e600081..b088c83405 100644 --- a/docs/sample_log_config.yaml +++ b/docs/sample_log_config.yaml @@ -28,7 +28,7 @@ handlers: # will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR # logs will still be flushed immediately. buffer: - class: logging.handlers.MemoryHandler + class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler target: file # The capacity is the number of log lines that are buffered before # being written to disk. Increasing this will lead to better @@ -36,6 +36,9 @@ handlers: # be written to disk. capacity: 10 flushLevel: 30 # Flush for WARNING logs as well + # The period of time, in seconds, between forced flushes. + # Messages will not be delayed for longer than this time. + period: 5 # A handler that writes logs to stderr. Unused by default, but can be used # instead of "buffer" and "file" in the logger handlers. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 5bfaa4ad2f..cff433af2a 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -139,6 +139,11 @@ def run(): # Switch to the release branch. parsed_new_version = version.parse(new_version) + + # We assume for debian changelogs that we only do RCs or full releases. + assert not parsed_new_version.is_devrelease + assert not parsed_new_version.is_postrelease + release_branch_name = ( f"release-v{parsed_new_version.major}.{parsed_new_version.minor}" ) @@ -190,12 +195,21 @@ def run(): # Generate changelogs subprocess.run("python3 -m towncrier", shell=True) - # Generate debian changelogs if its not an RC. - if not rc: - subprocess.run( - f'dch -M -v {new_version} "New synapse release {new_version}."', shell=True - ) - subprocess.run('dch -M -r -D stable ""', shell=True) + # Generate debian changelogs + if parsed_new_version.pre is not None: + # If this is an RC then we need to coerce the version string to match + # Debian norms, e.g. 1.39.0rc2 gets converted to 1.39.0~rc2. + base_ver = parsed_new_version.base_version + pre_type, pre_num = parsed_new_version.pre + debian_version = f"{base_ver}~{pre_type}{pre_num}" + else: + debian_version = new_version + + subprocess.run( + f'dch -M -v {debian_version} "New synapse release {debian_version}."', + shell=True, + ) + subprocess.run('dch -M -r -D stable ""', shell=True) # Show the user the changes and ask if they want to edit the change log. 
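The RC-coercion branch added to `scripts-dev/release.py` above can be exercised in isolation; a minimal sketch using the same `packaging.version` API the script relies on:

```python
from packaging import version

def to_debian_version(new_version: str) -> str:
    """Coerce a PEP 440 version string to Debian form, as the release script now does."""
    parsed = version.parse(new_version)
    if parsed.pre is not None:
        # e.g. Version("1.39.0rc2").pre == ("rc", 2)
        pre_type, pre_num = parsed.pre
        return f"{parsed.base_version}~{pre_type}{pre_num}"
    return new_version

assert to_debian_version("1.39.0rc2") == "1.39.0~rc2"
assert to_debian_version("1.39.0") == "1.39.0"
```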
repo.git.add("-u") diff --git a/synapse/__init__.py b/synapse/__init__.py index 01d6bf17f0..5da6c924fc 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ try: except ImportError: pass -__version__ = "1.39.0rc2" +__version__ = "1.39.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when diff --git a/synapse/_scripts/review_recent_signups.py b/synapse/_scripts/review_recent_signups.py index 01dc0c4237..9de913db88 100644 --- a/synapse/_scripts/review_recent_signups.py +++ b/synapse/_scripts/review_recent_signups.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 4caafc0ac9..a986fdb47a 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -128,6 +128,14 @@ class ToDeviceEventTypes: RoomKeyRequest = "m.room_key_request" +class DeviceKeyAlgorithms: + """Spec'd algorithms for the generation of per-device keys""" + + ED25519 = "ed25519" + CURVE25519 = "curve25519" + SIGNED_CURVE25519 = "signed_curve25519" + + class EduTypes: Presence = "m.presence" @@ -198,9 +206,6 @@ class EventContentFields: MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id" # For "marker" events MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion" - MSC2716_MARKER_INSERTION_PREV_EVENTS = ( - "org.matrix.msc2716.marker.insertion_prev_events" - ) class RoomTypes: @@ -224,3 +229,7 @@ class HistoryVisibility: JOINED = "joined" SHARED = "shared" WORLD_READABLE = "world_readable" + + +class ReadReceiptEventFields: + MSC2285_HIDDEN = "org.matrix.msc2285.hidden" diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 697319e52d..bc678efe49 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -73,6 +73,9 @@ class RoomVersion: # MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending # m.room.membership event with membership 'knock'. 
msc2403_knocking = attr.ib(type=bool) + # MSC2716: Adds m.room.power_levels -> content.historical field to control + # whether "insertion", "chunk", "marker" events can be sent + msc2716_historical = attr.ib(type=bool) class RoomVersions: @@ -88,6 +91,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) V2 = RoomVersion( "2", @@ -101,6 +105,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) V3 = RoomVersion( "3", @@ -114,6 +119,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) V4 = RoomVersion( "4", @@ -127,6 +133,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) V5 = RoomVersion( "5", @@ -140,6 +147,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) V6 = RoomVersion( "6", @@ -153,6 +161,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) MSC2176 = RoomVersion( "org.matrix.msc2176", @@ -166,6 +175,7 @@ class RoomVersions: msc2176_redaction_rules=True, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) MSC3083 = RoomVersion( "org.matrix.msc3083.v2", @@ -179,6 +189,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=True, msc2403_knocking=False, + msc2716_historical=False, ) V7 = RoomVersion( "7", @@ -192,6 +203,21 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=True, + msc2716_historical=False, + ) + MSC2716 = RoomVersion( + "org.matrix.msc2716", + RoomDisposition.STABLE, + EventFormatVersions.V3, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2176_redaction_rules=False, + msc3083_join_rules=False, + msc2403_knocking=True, + msc2716_historical=True, ) @@ -207,6 +233,7 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { RoomVersions.MSC2176, RoomVersions.MSC3083, RoomVersions.V7, + RoomVersions.MSC2716, ) } diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 2878d2c140..3234d9ebba 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2019 Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 2d50060ffb..de1bcee0a7 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 2d50060ffb..de1bcee0a7 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 57af28f10a..885454ed44 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 2d50060ffb..de1bcee0a7 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 2d50060ffb..de1bcee0a7 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 2d50060ffb..de1bcee0a7 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index c3d4992518..3b7131af8f 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. 
# diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 920b34d97b..7dae163c1a 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 2d50060ffb..de1bcee0a7 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 96defac1d2..86ad7337a9 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -109,7 +109,7 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process): for name, count in r30_results.items(): stats["r30_users_" + name] = count - r30v2_results = await store.count_r30_users() + r30v2_results = await store.count_r30v2_users() for name, count in r30v2_results.items(): stats["r30v2_users_" + name] = count diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 2d50060ffb..de1bcee0a7 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 2d50060ffb..de1bcee0a7 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index a368efb354..14bde27179 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 040c4504d8..4c60ee8c28 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -33,5 +33,8 @@ class ExperimentalConfig(Config): # MSC2716 (backfill existing history) self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False) + # MSC2285 (hidden read receipts) + self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False) + # MSC3244 (room version capabilities) self.msc3244_enabled: bool = experimental.get("msc3244_enabled", False) diff --git a/synapse/config/logger.py b/synapse/config/logger.py index ad4e6e61c3..dcd3ed1dac 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -71,7 +71,7 @@ handlers: # will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR # logs will still be flushed immediately. buffer: - class: logging.handlers.MemoryHandler + class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler target: file # The capacity is the number of log lines that are buffered before # being written to disk. Increasing this will lead to better @@ -79,6 +79,9 @@ handlers: # be written to disk. capacity: 10 flushLevel: 30 # Flush for WARNING logs as well + # The period of time, in seconds, between forced flushes. + # Messages will not be delayed for longer than this time. + period: 5 # A handler that writes logs to stderr. Unused by default, but can be used # instead of "buffer" and "file" in the logger handlers. 
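Synapse hands this YAML to the standard library's `logging.config.dictConfig`, so the new handler can also be wired up directly from Python. A sketch mirroring the YAML above (assumes Synapse is importable; `homeserver.log` is a placeholder path):

```python
import logging.config

logging.config.dictConfig(
    {
        "version": 1,
        "handlers": {
            "file": {
                "class": "logging.FileHandler",
                "filename": "homeserver.log",  # placeholder path
            },
            "buffer": {
                "class": "synapse.logging.handlers.PeriodicallyFlushingMemoryHandler",
                "target": "file",  # resolved to the handler above by dictConfig
                "capacity": 10,    # buffered log lines before a write
                "flushLevel": 30,  # flush immediately for WARNING and above
                "period": 5,       # force a flush at least every 5 seconds
            },
        },
        "root": {"level": "INFO", "handlers": ["buffer"]},
    }
)
```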
diff --git a/synapse/event_auth.py b/synapse/event_auth.py index cc92d35477..4c92e9a2d4 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -205,6 +205,13 @@ def check( if event.type == EventTypes.Redaction: check_redaction(room_version_obj, event, auth_events) + if ( + event.type == EventTypes.MSC2716_INSERTION + or event.type == EventTypes.MSC2716_CHUNK + or event.type == EventTypes.MSC2716_MARKER + ): + check_historical(room_version_obj, event, auth_events) + logger.debug("Allowing! %s", event) @@ -539,6 +546,37 @@ def check_redaction( raise AuthError(403, "You don't have permission to redact events") +def check_historical( + room_version_obj: RoomVersion, + event: EventBase, + auth_events: StateMap[EventBase], +) -> None: + """Check whether the event sender is allowed to send historical related + events like "insertion", "chunk", and "marker". + + Returns: + None + + Raises: + AuthError if the event sender is not allowed to send historical related events + ("insertion", "chunk", and "marker"). + """ + # Ignore the auth checks in room versions that do not support historical + # events + if not room_version_obj.msc2716_historical: + return + + user_level = get_user_power_level(event.user_id, auth_events) + + historical_level = get_named_level(auth_events, "historical", 100) + + if user_level < historical_level: + raise AuthError( + 403, + 'You don\'t have permission to send historical related events ("insertion", "chunk", and "marker")', + ) + + def _check_power_levels( room_version_obj: RoomVersion, event: EventBase, @@ -654,7 +692,7 @@ def get_user_power_level(user_id: str, auth_events: StateMap[EventBase]) -> int: power_level_event = get_power_level_event(auth_events) if power_level_event: level = power_level_event.content.get("users", {}).get(user_id) - if not level: + if level is None: level = power_level_event.content.get("users_default", 0) if level is None: diff --git a/synapse/events/utils.py b/synapse/events/utils.py index ec96999e4e..a0c07f62f4 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -109,6 +109,8 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict: add_fields("creator") elif event_type == EventTypes.JoinRules: add_fields("join_rule") + if room_version.msc3083_join_rules: + add_fields("allow") elif event_type == EventTypes.PowerLevels: add_fields( "users", @@ -124,6 +126,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict: if room_version.msc2176_redaction_rules: add_fields("invite") + if room_version.msc2716_historical: + add_fields("historical") + elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth: add_fields("aliases") elif event_type == EventTypes.RoomHistoryVisibility: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index dbadf102f2..b7a10da15a 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -22,6 +22,7 @@ from typing import ( Awaitable, Callable, Collection, + Container, Dict, Iterable, List, @@ -513,6 +514,7 @@ class FederationClient(FederationBase): description: str, destinations: Iterable[str], callback: Callable[[str], Awaitable[T]], + failover_errcodes: Optional[Container[str]] = None, failover_on_unknown_endpoint: bool = False, ) -> T: """Try an operation on a series of servers, until it succeeds @@ -533,6 +535,9 @@ next server tried.
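The one-line `if not level:` to `if level is None:` change above is the fix described in changelog #10499; a standalone illustration (made-up user ID) of why the old check misfired:

```python
# An explicit power level of 0 is falsy, so the old "if not level:" check
# discarded it and fell back to users_default; "if level is None:" only
# falls back when the user has no explicit entry at all.
content = {"users": {"@alice:example.org": 0}, "users_default": 50}

def user_level(user_id: str, *, fixed: bool) -> int:
    level = content.get("users", {}).get(user_id)
    needs_fallback = (level is None) if fixed else (not level)
    if needs_fallback:
        level = content.get("users_default", 0)
    return level

assert user_level("@alice:example.org", fixed=False) == 50  # old behaviour: wrong
assert user_level("@alice:example.org", fixed=True) == 0    # new behaviour: correct
```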
Normally the stacktrace is logged but this is suppressed if the exception is an InvalidResponseError. + failover_errcodes: Error codes (specific to this endpoint) which should + cause a failover when received as part of an HTTP 400 error. + failover_on_unknown_endpoint: if True, we will try other servers if it looks like a server doesn't support the endpoint. This is typically useful if the endpoint in question is new or experimental. @@ -544,6 +549,9 @@ SynapseError if the chosen remote server returns a 300/400 code, or no servers were reachable. """ + if failover_errcodes is None: + failover_errcodes = () + for destination in destinations: if destination == self.server_name: continue @@ -558,11 +566,17 @@ synapse_error = e.to_synapse_error() failover = False - # Failover on an internal server error, or if the destination - # doesn't implemented the endpoint for some reason. + # Failover should occur: + # + # * On internal server errors. + # * If the destination responds that it cannot complete the request. + # * If the destination doesn't implement the endpoint for some reason. if 500 <= e.code < 600: failover = True + elif e.code == 400 and synapse_error.errcode in failover_errcodes: + failover = True + elif failover_on_unknown_endpoint and self._is_unknown_endpoint( e, synapse_error ): @@ -678,8 +692,20 @@ return destination, ev, room_version + # MSC3083 defines additional error codes for room joins. Unfortunately + # we do not yet know the room version, so assume these will only be returned + # by valid room versions. + failover_errcodes = ( + (Codes.UNABLE_AUTHORISE_JOIN, Codes.UNABLE_TO_GRANT_JOIN) + if membership == Membership.JOIN + else None + ) + return await self._try_destination_list( - "make_" + membership, destinations, send_request + "make_" + membership, + destinations, + send_request, + failover_errcodes=failover_errcodes, ) async def send_join( @@ -818,7 +844,14 @@ origin=destination, ) + # MSC3083 defines additional error codes for room joins. + failover_errcodes = None if room_version.msc3083_join_rules: + failover_errcodes = ( + Codes.UNABLE_AUTHORISE_JOIN, + Codes.UNABLE_TO_GRANT_JOIN, + ) + # If the join is being authorised via allow rules, we need to send # the /send_join back to the same server that was originally used # with /make_join.
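Condensing the failover rules documented above into a single predicate (a sketch, not the actual helper in `federation_client.py`):

```python
from typing import Container

def should_failover(
    http_code: int,
    errcode: str,
    failover_errcodes: Container[str] = (),
    is_unknown_endpoint: bool = False,
    failover_on_unknown_endpoint: bool = False,
) -> bool:
    # Retry the next destination on any internal server error...
    if 500 <= http_code < 600:
        return True
    # ...on a 400 whose error code is in the endpoint-specific failover list...
    if http_code == 400 and errcode in failover_errcodes:
        return True
    # ...or, optionally, when the remote doesn't seem to implement the endpoint.
    return failover_on_unknown_endpoint and is_unknown_endpoint
```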
@@ -827,7 +860,9 @@ class FederationClient(FederationBase): get_domain_from_id(pdu.content["join_authorised_via_users_server"]) ] - return await self._try_destination_list("send_join", destinations, send_request) + return await self._try_destination_list( + "send_join", destinations, send_request, failover_errcodes=failover_errcodes + ) async def _do_send_join( self, room_version: RoomVersion, destination: str, pdu: EventBase diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 525f3d39b1..6a05a65305 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -15,8 +15,6 @@ import logging from typing import TYPE_CHECKING, Optional -import synapse.state -import synapse.storage import synapse.types from synapse.api.constants import EventTypes, Membership from synapse.api.ratelimiting import Ratelimiter diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index aba095d2e1..8197b60b76 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2748,9 +2748,11 @@ class FederationHandler(BaseHandler): event.event_id, e.event_id, ) - context = await self.state_handler.compute_event_context(e) + missing_auth_event_context = ( + await self.state_handler.compute_event_context(e) + ) await self._auth_and_persist_event( - origin, e, context, auth_events=auth + origin, e, missing_auth_event_context, auth_events=auth ) if e.event_id in event_auth_events: diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 5d49640760..e1c544a3c9 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -21,6 +21,7 @@ from synapse.api.constants import EduTypes, EventTypes, Membership from synapse.api.errors import SynapseError from synapse.events.validator import EventValidator from synapse.handlers.presence import format_user_presence_state +from synapse.handlers.receipts import ReceiptEventSource from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage.roommember import RoomsForUser from synapse.streams.config import PaginationConfig @@ -134,6 +135,8 @@ class InitialSyncHandler(BaseHandler): joined_rooms, to_key=int(now_token.receipt_key), ) + if self.hs.config.experimental.msc2285_enabled: + receipt = ReceiptEventSource.filter_out_hidden(receipt, user_id) tags_by_room = await self.store.get_tags_for_user(user_id) @@ -430,7 +433,9 @@ class InitialSyncHandler(BaseHandler): room_id, to_key=now_token.receipt_key ) if not receipts: - receipts = [] + return [] + if self.hs.config.experimental.msc2285_enabled: + receipts = ReceiptEventSource.filter_out_hidden(receipts, user_id) return receipts presence, receipts, (messages, token) = await make_deferred_yieldable( diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 283483fc2c..b9085bbccb 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -14,9 +14,10 @@ import logging from typing import TYPE_CHECKING, List, Optional, Tuple +from synapse.api.constants import ReadReceiptEventFields from synapse.appservice import ApplicationService from synapse.handlers._base import BaseHandler -from synapse.types import JsonDict, ReadReceipt, get_domain_from_id +from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id if TYPE_CHECKING: from synapse.server import HomeServer @@ -137,7 +138,7 @@ class ReceiptsHandler(BaseHandler): return True async def received_client_receipt( - self, room_id: str, receipt_type: str, user_id: str, 
event_id: str + self, room_id: str, receipt_type: str, user_id: str, event_id: str, hidden: bool ) -> None: """Called when a client tells us a local user has read up to the given event_id in the room. @@ -147,23 +148,67 @@ class ReceiptsHandler(BaseHandler): receipt_type=receipt_type, user_id=user_id, event_ids=[event_id], - data={"ts": int(self.clock.time_msec())}, + data={"ts": int(self.clock.time_msec()), "hidden": hidden}, ) is_new = await self._handle_new_receipts([receipt]) if not is_new: return - if self.federation_sender: + if self.federation_sender and not ( + self.hs.config.experimental.msc2285_enabled and hidden + ): await self.federation_sender.send_read_receipt(receipt) class ReceiptEventSource: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() + self.config = hs.config + + @staticmethod + def filter_out_hidden(events: List[JsonDict], user_id: str) -> List[JsonDict]: + visible_events = [] + + # filter out hidden receipts the user shouldn't see + for event in events: + content = event.get("content", {}) + new_event = event.copy() + new_event["content"] = {} + + for event_id in content.keys(): + event_content = content.get(event_id, {}) + m_read = event_content.get("m.read", {}) + + # If m_read is missing copy over the original event_content as there is nothing to process here + if not m_read: + new_event["content"][event_id] = event_content.copy() + continue + + new_users = {} + for rr_user_id, user_rr in m_read.items(): + hidden = user_rr.get("hidden", None) + if hidden is not True or rr_user_id == user_id: + new_users[rr_user_id] = user_rr.copy() + # If hidden has a value replace hidden with the correct prefixed key + if hidden is not None: + new_users[rr_user_id].pop("hidden") + new_users[rr_user_id][ + ReadReceiptEventFields.MSC2285_HIDDEN + ] = hidden + + # Set new users unless empty + if len(new_users.keys()) > 0: + new_event["content"][event_id] = {"m.read": new_users} + + # Append new_event to visible_events unless empty + if len(new_event["content"].keys()) > 0: + visible_events.append(new_event) + + return visible_events async def get_new_events( - self, from_key: int, room_ids: List[str], **kwargs + self, from_key: int, room_ids: List[str], user: UserID, **kwargs ) -> Tuple[List[JsonDict], int]: from_key = int(from_key) to_key = self.get_current_key() @@ -175,6 +220,9 @@ class ReceiptEventSource: room_ids, from_key=from_key, to_key=to_key ) + if self.config.experimental.msc2285_enabled: + events = ReceiptEventSource.filter_out_hidden(events, user.to_string()) + return (events, to_key) async def get_new_events_as( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 370561e549..b33fe09f77 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -951,6 +951,7 @@ class RoomCreationHandler(BaseHandler): "kick": 50, "redact": 50, "invite": 50, + "historical": 100, } if config["original_invitees_have_ops"]: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 150a4f291e..f30bfcc93c 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1093,6 +1093,10 @@ class SyncHandler: one_time_key_counts: JsonDict = {} unused_fallback_key_types: List[str] = [] if device_id: + # TODO: We should have a way to let clients differentiate between the states of: + # * no change in OTK count since the provided since token + # * the server has zero OTKs left for this device + # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298 one_time_key_counts = await self.store.count_e2e_one_time_keys( 
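The shape of the data that `filter_out_hidden` (above) operates on is easiest to see with a concrete receipt event; the event and user IDs below are invented for illustration:

```python
# Illustrative input for ReceiptEventSource.filter_out_hidden (made-up IDs).
receipt_event = {
    "type": "m.receipt",
    "room_id": "!room:example.org",
    "content": {
        "$event1": {
            "m.read": {
                "@alice:example.org": {"ts": 1},
                "@bob:example.org": {"ts": 2, "hidden": True},
            }
        }
    },
}

# Filtered for @alice, @bob's hidden receipt is stripped entirely:
#   {"$event1": {"m.read": {"@alice:example.org": {"ts": 1}}}}
#
# Filtered for @bob, his own receipt survives, with the internal "hidden"
# flag re-exposed under the MSC2285 field name:
#   {"$event1": {"m.read": {
#       "@alice:example.org": {"ts": 1},
#       "@bob:example.org": {"ts": 2, "org.matrix.msc2285.hidden": True}}}}
```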
user_id, device_id ) diff --git a/synapse/http/client.py b/synapse/http/client.py index 2ac76b15c2..c2ea51ee16 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -847,7 +847,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol): def read_body_with_max_size( response: IResponse, stream: ByteWriteable, max_size: Optional[int] -) -> defer.Deferred: +) -> "defer.Deferred[int]": """ Read a HTTP response body to a file-object. Optionally enforcing a maximum file size. @@ -862,7 +862,7 @@ def read_body_with_max_size( Returns: A Deferred which resolves to the length of the read body. """ - d = defer.Deferred() + d: "defer.Deferred[int]" = defer.Deferred() # If the Content-Length header gives a size larger than the maximum allowed # size, do not bother downloading the body. diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index f7193e60bd..19e987f118 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -14,21 +14,32 @@ import base64 import logging import re -from typing import Optional, Tuple -from urllib.request import getproxies_environment, proxy_bypass_environment +from typing import Any, Dict, Optional, Tuple +from urllib.parse import urlparse +from urllib.request import ( # type: ignore[attr-defined] + getproxies_environment, + proxy_bypass_environment, +) import attr from zope.interface import implementer from twisted.internet import defer from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS +from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint from twisted.python.failure import Failure -from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase +from twisted.web.client import ( + URI, + BrowserLikePolicyForHTTPS, + HTTPConnectionPool, + _AgentBase, +) from twisted.web.error import SchemeNotSupported from twisted.web.http_headers import Headers -from twisted.web.iweb import IAgent, IPolicyForHTTPS +from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint +from synapse.types import ISynapseReactor logger = logging.getLogger(__name__) @@ -63,35 +74,38 @@ class ProxyAgent(_AgentBase): reactor might have some blacklisting applied (i.e. for DNS queries), but we need unblocked access to the proxy. - contextFactory (IPolicyForHTTPS): A factory for TLS contexts, to control the + contextFactory: A factory for TLS contexts, to control the verification parameters of OpenSSL. The default is to use a `BrowserLikePolicyForHTTPS`, so unless you have special requirements you can leave this as-is. - connectTimeout (Optional[float]): The amount of time that this Agent will wait + connectTimeout: The amount of time that this Agent will wait for the peer to accept a connection, in seconds. If 'None', HostnameEndpoint's default (30s) will be used. - This is used for connections to both proxies and destination servers. - bindAddress (bytes): The local address for client sockets to bind to. + bindAddress: The local address for client sockets to bind to. - pool (HTTPConnectionPool|None): connection pool to be used. If None, a + pool: connection pool to be used. If None, a non-persistent pool instance will be created. - use_proxy (bool): Whether proxy settings should be discovered and used + use_proxy: Whether proxy settings should be discovered and used from conventional environment variables. + + Raises: + ValueError if use_proxy is set and the environment variables + contain an invalid proxy specification. 
""" def __init__( self, - reactor, - proxy_reactor=None, + reactor: IReactorCore, + proxy_reactor: Optional[ISynapseReactor] = None, contextFactory: Optional[IPolicyForHTTPS] = None, - connectTimeout=None, - bindAddress=None, - pool=None, - use_proxy=False, + connectTimeout: Optional[float] = None, + bindAddress: Optional[bytes] = None, + pool: Optional[HTTPConnectionPool] = None, + use_proxy: bool = False, ): contextFactory = contextFactory or BrowserLikePolicyForHTTPS() @@ -102,7 +116,7 @@ class ProxyAgent(_AgentBase): else: self.proxy_reactor = proxy_reactor - self._endpoint_kwargs = {} + self._endpoint_kwargs: Dict[str, Any] = {} if connectTimeout is not None: self._endpoint_kwargs["timeout"] = connectTimeout if bindAddress is not None: @@ -117,16 +131,12 @@ class ProxyAgent(_AgentBase): https_proxy = proxies["https"].encode() if "https" in proxies else None no_proxy = proxies["no"] if "no" in proxies else None - # Parse credentials from http and https proxy connection string if present - self.http_proxy_creds, http_proxy = parse_username_password(http_proxy) - self.https_proxy_creds, https_proxy = parse_username_password(https_proxy) - - self.http_proxy_endpoint = _http_proxy_endpoint( - http_proxy, self.proxy_reactor, **self._endpoint_kwargs + self.http_proxy_endpoint, self.http_proxy_creds = _http_proxy_endpoint( + http_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs ) - self.https_proxy_endpoint = _http_proxy_endpoint( - https_proxy, self.proxy_reactor, **self._endpoint_kwargs + self.https_proxy_endpoint, self.https_proxy_creds = _http_proxy_endpoint( + https_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs ) self.no_proxy = no_proxy @@ -134,7 +144,13 @@ class ProxyAgent(_AgentBase): self._policy_for_https = contextFactory self._reactor = reactor - def request(self, method, uri, headers=None, bodyProducer=None): + def request( + self, + method: bytes, + uri: bytes, + headers: Optional[Headers] = None, + bodyProducer: Optional[IBodyProducer] = None, + ) -> defer.Deferred: """ Issue a request to the server indicated by the given uri. @@ -146,16 +162,15 @@ class ProxyAgent(_AgentBase): See also: twisted.web.iweb.IAgent.request Args: - method (bytes): The request method to use, such as `GET`, `POST`, etc + method: The request method to use, such as `GET`, `POST`, etc - uri (bytes): The location of the resource to request. + uri: The location of the resource to request. - headers (Headers|None): Extra headers to send with the request + headers: Extra headers to send with the request - bodyProducer (IBodyProducer|None): An object which can generate bytes to - make up the body of this request (for example, the properly encoded - contents of a file for a file upload). Or, None if the request is to - have no body. + bodyProducer: An object which can generate bytes to make up the body of + this request (for example, the properly encoded contents of a file for + a file upload). Or, None if the request is to have no body. 
Returns: Deferred[IResponse]: completes when the header of the response has @@ -253,70 +268,89 @@ ) -def _http_proxy_endpoint(proxy: Optional[bytes], reactor, **kwargs): +def _http_proxy_endpoint( + proxy: Optional[bytes], + reactor: IReactorCore, + tls_options_factory: IPolicyForHTTPS, + **kwargs, +) -> Tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]: """Parses an http proxy setting and returns an endpoint for the proxy Args: - proxy: the proxy setting in the form: [<username>:<password>@]<host>[:<port>] - Note that compared to other apps, this function currently lacks support - for specifying a protocol schema (i.e. protocol://...). + proxy: the proxy setting in the form: [scheme://][<username>:<password>@]<host>[:<port>] + This currently supports http:// and https:// proxies. + A hostname without scheme is assumed to be http. reactor: reactor to be used to connect to the proxy + tls_options_factory: the TLS options to use when connecting through a https proxy + kwargs: other args to be passed to HostnameEndpoint Returns: - interfaces.IStreamClientEndpoint|None: endpoint to use to connect to the proxy, - or None + a tuple of: + the endpoint to use to connect to the proxy, or None + the ProxyCredentials, or None if no credentials were found + + Raises: + ValueError if proxy has no hostname or an unsupported scheme. """ if proxy is None: - return None + return None, None - # Parse the connection string - host, port = parse_host_port(proxy, default_port=1080) - return HostnameEndpoint(reactor, host, port, **kwargs) + # Note: urlsplit/urlparse cannot be used here as that does not work (for Python + # 3.9+) on scheme-less proxies, e.g. host:port. + scheme, host, port, credentials = parse_proxy(proxy) + proxy_endpoint = HostnameEndpoint(reactor, host, port, **kwargs) -def parse_username_password(proxy: bytes) -> Tuple[Optional[ProxyCredentials], bytes]: - """ - Parses the username and password from a proxy declaration e.g - username:password@hostname:port. + if scheme == b"https": + tls_options = tls_options_factory.creatorForNetloc(host, port) + proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint) - Args: - proxy: The proxy connection string. + return proxy_endpoint, credentials - Returns - An instance of ProxyCredentials and the proxy connection string with any credentials - stripped, i.e u:p@host:port -> host:port. If no credentials were found, the - ProxyCredentials instance is replaced with None. - """ - if proxy and b"@" in proxy: - # We use rsplit here as the password could contain an @ character - credentials, proxy_without_credentials = proxy.rsplit(b"@", 1) - return ProxyCredentials(credentials), proxy_without_credentials - return None, proxy +def parse_proxy( + proxy: bytes, default_scheme: bytes = b"http", default_port: int = 1080 +) -> Tuple[bytes, bytes, int, Optional[ProxyCredentials]]: + """ + Parse a proxy connection string. + Given an HTTP proxy URL, breaks it down into components and checks that it + has a hostname (otherwise it is not useful to us when trying to find a + proxy) and asserts that the URL has a scheme we support. -def parse_host_port(hostport: bytes, default_port: int = None) -> Tuple[bytes, int]: - """ - Parse the hostname and port from a proxy connection byte string. Args: - hostport: The proxy connection string. Must be in the form 'host[:port]'. - default_port: The default port to return if one is not found in `hostport`. + proxy: The proxy connection string.
Must be in the form '[scheme://][<username>:<password>@]host[:port]'. + default_scheme: The default scheme to return if one is not found in `proxy`. Defaults to http + default_port: The default port to return if one is not found in `proxy`. Defaults to 1080 Returns: - A tuple containing the hostname and port. Uses `default_port` if one was not found. + A tuple containing the scheme, hostname, port and ProxyCredentials. + If no credentials were found, the ProxyCredentials instance is replaced with None. + + Raises: + ValueError if proxy has no hostname or an unsupported scheme. """ - if b":" in hostport: - host, port = hostport.rsplit(b":", 1) - try: - port = int(port) - return host, port - except ValueError: - # the thing after the : wasn't a valid port; presumably this is an - # IPv6 address. - pass + # First check if we have a scheme present + # Note: urlsplit/urlparse cannot be used (for Python # 3.9+) on scheme-less proxies, e.g. host:port. + if b"://" not in proxy: + proxy = b"".join([default_scheme, b"://", proxy]) + + url = urlparse(proxy) + + if not url.hostname: + raise ValueError("Proxy URL did not contain a hostname! Please specify one.") + + if url.scheme not in (b"http", b"https"): + raise ValueError( + f"Unknown proxy scheme {url.scheme!s}; only 'http' and 'https' are supported." + ) + + credentials = None + if url.username and url.password: + credentials = ProxyCredentials(b"".join([url.username, b":", url.password])) - return hostport, default_port + return url.scheme, url.hostname, url.port or default_port, credentials diff --git a/synapse/logging/handlers.py b/synapse/logging/handlers.py new file mode 100644 index 0000000000..a6c212f300 --- /dev/null +++ b/synapse/logging/handlers.py @@ -0,0 +1,88 @@ +import logging +import time +from logging import Handler, LogRecord +from logging.handlers import MemoryHandler +from threading import Thread +from typing import Optional + +from twisted.internet.interfaces import IReactorCore + + +class PeriodicallyFlushingMemoryHandler(MemoryHandler): + """ + This is a subclass of MemoryHandler that additionally spawns a background + thread to periodically flush the buffer. + + This prevents messages from being buffered for too long. + + Additionally, all messages will be immediately flushed if the reactor has + not yet been started. + """ + + def __init__( + self, + capacity: int, + flushLevel: int = logging.ERROR, + target: Optional[Handler] = None, + flushOnClose: bool = True, + period: float = 5.0, + reactor: Optional[IReactorCore] = None, + ) -> None: + """ + period: the period between automatic flushes + + reactor: if specified, a custom reactor to use. If not specified, + defaults to the globally-installed reactor. + Log entries will be flushed immediately until this reactor has + started.
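Illustrative expectations for the new `parse_proxy` helper above (a sketch, not a test from the repository): components are `bytes`, a proxy without a scheme is treated as plain `http`, and the default port is 1080:

```python
from synapse.http.proxyagent import parse_proxy

assert parse_proxy(b"proxy.example.com") == (
    b"http", b"proxy.example.com", 1080, None,
)
assert parse_proxy(b"proxy.example.com:8888") == (
    b"http", b"proxy.example.com", 8888, None,
)

scheme, host, port, creds = parse_proxy(b"https://user:pass@proxy.example.com")
assert (scheme, host, port) == (b"https", b"proxy.example.com", 1080)
assert creds is not None  # ProxyCredentials wrapping b"user:pass"
```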
+ """ + super().__init__(capacity, flushLevel, target, flushOnClose) + + self._flush_period: float = period + self._active: bool = True + self._reactor_started = False + + self._flushing_thread: Thread = Thread( + name="PeriodicallyFlushingMemoryHandler flushing thread", + target=self._flush_periodically, + ) + self._flushing_thread.start() + + def on_reactor_running(): + self._reactor_started = True + + reactor_to_use: IReactorCore + if reactor is None: + from twisted.internet import reactor as global_reactor + + reactor_to_use = global_reactor # type: ignore[assignment] + else: + reactor_to_use = reactor + + # call our hook when the reactor start up + reactor_to_use.callWhenRunning(on_reactor_running) + + def shouldFlush(self, record: LogRecord) -> bool: + """ + Before reactor start-up, log everything immediately. + Otherwise, fall back to original behaviour of waiting for the buffer to fill. + """ + + if self._reactor_started: + return super().shouldFlush(record) + else: + return True + + def _flush_periodically(self): + """ + Whilst this handler is active, flush the handler periodically. + """ + + while self._active: + # flush is thread-safe; it acquires and releases the lock internally + self.flush() + time.sleep(self._flush_period) + + def close(self) -> None: + self._active = False + super().close() diff --git a/synapse/notifier.py b/synapse/notifier.py index c5fbebc17d..bbe337949a 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -111,8 +111,9 @@ class _NotifierUserStream: self.last_notified_token = current_token self.last_notified_ms = time_now_ms - with PreserveLoggingContext(): - self.notify_deferred = ObservableDeferred(defer.Deferred()) + self.notify_deferred: ObservableDeferred[StreamToken] = ObservableDeferred( + defer.Deferred() + ) def notify( self, diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 85621f33ef..a1436f3930 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 9d4859798b..3fd2811713 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -285,7 +285,7 @@ class ReplicationDataHandler: # Create a new deferred that times out after N seconds, as we don't want # to wedge here forever. 
- deferred = Deferred() + deferred: "Deferred[None]" = Deferred() deferred = timeout_deferred( deferred, _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS, self._reactor ) @@ -393,6 +393,11 @@ class FederationSenderHandler: # we only want to send on receipts for our own users if not self._is_mine_id(receipt.user_id): continue + if ( + receipt.data.get("hidden", False) + and self._hs.config.experimental.msc2285_enabled + ): + continue receipt_info = ReadReceipt( receipt.room_id, receipt.receipt_type, diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 25ba52c624..502a917588 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -504,7 +504,6 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet): events_to_create = body["events"] - prev_event_ids = prev_events_from_query inherited_depth = await self._inherit_depth_from_prev_ids( prev_events_from_query ) @@ -516,6 +515,10 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet): chunk_id_to_connect_to = chunk_id_from_query base_insertion_event = None if chunk_id_from_query: + # All but the first base insertion event should point at a fake + # event, which causes the HS to ask for the state at the start of + # the chunk later. + prev_event_ids = [fake_prev_event_id] # TODO: Verify the chunk_id_from_query corresponds to an insertion event pass # Otherwise, create an insertion event to act as a starting point. @@ -526,6 +529,8 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet): # an insertion event), in which case we just create a new insertion event # that can then get pointed to by a "marker" event later. else: + prev_event_ids = prev_events_from_query + base_insertion_event_dict = self._create_insertion_event_dict( sender=requester.user.to_string(), room_id=room_id, diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index 5988fa47e5..027f8b81fa 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -14,6 +14,8 @@ import logging +from synapse.api.constants import ReadReceiptEventFields +from synapse.api.errors import Codes, SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request from ._base import client_patterns @@ -37,14 +39,24 @@ class ReadMarkerRestServlet(RestServlet): await self.presence_handler.bump_presence_active_time(requester.user) body = parse_json_object_from_request(request) - read_event_id = body.get("m.read", None) + hidden = body.get(ReadReceiptEventFields.MSC2285_HIDDEN, False) + + if not isinstance(hidden, bool): + raise SynapseError( + 400, + "Param %s must be a boolean, if given" + % ReadReceiptEventFields.MSC2285_HIDDEN, + Codes.BAD_JSON, + ) + if read_event_id: await self.receipts_handler.received_client_receipt( room_id, "m.read", user_id=requester.user.to_string(), event_id=read_event_id, + hidden=hidden, ) read_marker_event_id = body.get("m.fully_read", None) diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index 8cf4aebdbe..4b98979b47 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -14,8 +14,9 @@ import logging -from synapse.api.errors import SynapseError -from synapse.http.servlet import RestServlet +from synapse.api.constants import ReadReceiptEventFields +from synapse.api.errors import Codes, SynapseError +from synapse.http.servlet import RestServlet, parse_json_object_from_request from ._base import 
client_patterns @@ -42,10 +43,25 @@ class ReceiptRestServlet(RestServlet): if receipt_type != "m.read": raise SynapseError(400, "Receipt type must be 'm.read'") + body = parse_json_object_from_request(request) + hidden = body.get(ReadReceiptEventFields.MSC2285_HIDDEN, False) + + if not isinstance(hidden, bool): + raise SynapseError( + 400, + "Param %s must be a boolean, if given" + % ReadReceiptEventFields.MSC2285_HIDDEN, + Codes.BAD_JSON, + ) + await self.presence_handler.bump_presence_active_time(requester.user) await self.receipts_handler.received_client_receipt( - room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id + room_id, + receipt_type, + user_id=requester.user.to_string(), + event_id=event_id, + hidden=hidden, ) return 200, {} diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 4582c274c7..fa2e4e9cba 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -82,6 +82,8 @@ class VersionsRestServlet(RestServlet): "io.element.e2ee_forced.trusted_private": self.e2ee_forced_trusted_private, # Supports the busy presence state described in MSC3026. "org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled, + # Supports receiving hidden read receipts as per MSC2285 + "org.matrix.msc2285": self.config.experimental.msc2285_enabled, }, }, ) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 172212ee3a..0f051d4041 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -58,9 +58,11 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -_charset_match = re.compile(br'<\s*meta[^>]*charset\s*=\s*"?([a-z0-9-]+)"?', flags=re.I) +_charset_match = re.compile( + br'<\s*meta[^>]*charset\s*=\s*"?([a-z0-9_-]+)"?', flags=re.I +) _xml_encoding_match = re.compile( - br'\s*<\s*\?\s*xml[^>]*encoding="([a-z0-9-]+)"', flags=re.I + br'\s*<\s*\?\s*xml[^>]*encoding="([a-z0-9_-]+)"', flags=re.I ) _content_type_match = re.compile(r'.*; *charset="?(.*?)"?(;|$)', flags=re.I) diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 78ae68ec68..1edc96042b 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -21,6 +21,7 @@ from canonicaljson import encode_canonical_json from twisted.enterprise.adbapi import Connection +from synapse.api.constants import DeviceKeyAlgorithms from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import DatabasePool, make_in_list_sql_clause @@ -381,9 +382,15 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore): " GROUP BY algorithm" ) txn.execute(sql, (user_id, device_id)) - result = {} + + # Initially set the key count to 0. This ensures that the client will always + # receive *some count*, even if it's 0. 
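The comment above describes the fix for changelog #10485: the count dict is pre-seeded (as shown just below) so that a device with no remaining signed_curve25519 keys reports an explicit zero instead of omitting the key. A minimal sketch of that counting logic, with `rows` standing in for the database cursor:

```python
SIGNED_CURVE25519 = "signed_curve25519"  # DeviceKeyAlgorithms.SIGNED_CURVE25519

def count_one_time_keys(rows):
    # Pre-seed the count so clients always receive *some* value, even 0.
    result = {SIGNED_CURVE25519: 0}
    # Override with the count of any keys actually found in the database.
    for algorithm, key_count in rows:
        result[algorithm] = key_count
    return result

assert count_one_time_keys([]) == {"signed_curve25519": 0}
assert count_one_time_keys([("signed_curve25519", 4)]) == {"signed_curve25519": 4}
```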
+        result = {DeviceKeyAlgorithms.SIGNED_CURVE25519: 0}
+
+        # Override entries with the count of any keys we pulled from the database
         for algorithm, key_count in txn:
             result[algorithm] = key_count
+
         return result

     return await self.db_pool.runInteraction(
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index d39368c20e..547e43ab98 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -936,15 +936,46 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         # We want to make sure that we do a breadth-first, "depth" ordered
         # search.
-        query = (
-            "SELECT depth, prev_event_id FROM event_edges"
-            " INNER JOIN events"
-            " ON prev_event_id = events.event_id"
-            " WHERE event_edges.event_id = ?"
-            " AND event_edges.is_state = ?"
-            " LIMIT ?"
-        )
+        # Look for the prev_event_id connected to the given event_id
+        query = """
+            SELECT depth, prev_event_id FROM event_edges
+            /* Get the depth of the prev_event_id from the events table */
+            INNER JOIN events
+            ON prev_event_id = events.event_id
+            /* Find an event which matches the given event_id */
+            WHERE event_edges.event_id = ?
+            AND event_edges.is_state = ?
+            LIMIT ?
+        """
+
+        # Look for the "insertion" events connected to the given event_id
+        connected_insertion_event_query = """
+            SELECT e.depth, i.event_id FROM insertion_event_edges AS i
+            /* Get the depth of the insertion event from the events table */
+            INNER JOIN events AS e USING (event_id)
+            /* Find an insertion event which points via prev_events to the given event_id */
+            WHERE i.insertion_prev_event_id = ?
+            LIMIT ?
+        """
+
+        # Find any chunk connections of a given insertion event
+        chunk_connection_query = """
+            SELECT e.depth, c.event_id FROM insertion_events AS i
+            /* Find the chunk that connects to the given insertion event */
+            INNER JOIN chunk_events AS c
+            ON i.next_chunk_id = c.chunk_id
+            /* Get the depth of the chunk start event from the events table */
+            INNER JOIN events AS e USING (event_id)
+            /* Find an insertion event which matches the given event_id */
+            WHERE i.event_id = ?
+            LIMIT ?
+        """
+        # In a PriorityQueue, the lowest valued entries are retrieved first.
+        # We're using depth as the priority in the queue.
+        # Depth is lowest at the oldest-in-time message and highest at the
+        # newest-in-time message. We add events to the queue with a negative depth so that
+        # we process the newest-in-time messages first going backwards in time.
        queue = PriorityQueue()

        for event_id in event_list:
@@ -970,9 +1001,48 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
            event_results.add(event_id)

+            # Try and find any potential historical chunks of message history.
+            #
+            # First we look for an insertion event connected to the current
+            # event (by prev_event). If we find any, we need to go and try to
+            # find any chunk events connected to the insertion event (by
+            # chunk_id). If we find any, we'll add them to the queue and
+            # navigate up the DAG like normal in the next iteration of the loop.
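+            # (For example, an event at depth 10 is queued as (-10, event_id)
+            # and is therefore retrieved before an event at depth 3 queued as
+            # (-3, event_id): deepest, i.e. newest-in-time, first.)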
+            txn.execute(
+                connected_insertion_event_query, (event_id, limit - len(event_results))
+            )
+            connected_insertion_event_id_results = txn.fetchall()
+            logger.debug(
+                "_get_backfill_events: connected_insertion_event_query %s",
+                connected_insertion_event_id_results,
+            )
+            for row in connected_insertion_event_id_results:
+                connected_insertion_event_depth = row[0]
+                connected_insertion_event = row[1]
+                queue.put((-connected_insertion_event_depth, connected_insertion_event))
+
+                # Find any chunk connections for the given insertion event
+                txn.execute(
+                    chunk_connection_query,
+                    (connected_insertion_event, limit - len(event_results)),
+                )
+                chunk_start_event_id_results = txn.fetchall()
+                logger.debug(
+                    "_get_backfill_events: chunk_start_event_id_results %s",
+                    chunk_start_event_id_results,
+                )
+                for row in chunk_start_event_id_results:
+                    if row[1] not in event_results:
+                        queue.put((-row[0], row[1]))
+
+            # Navigate up the DAG by prev_event
            txn.execute(query, (event_id, False, limit - len(event_results)))
+            prev_event_id_results = txn.fetchall()
+            logger.debug(
+                "_get_backfill_events: prev_event_ids %s", prev_event_id_results
+            )

-            for row in txn:
+            for row in prev_event_id_results:
                if row[1] not in event_results:
                    queue.put((-row[0], row[1]))

@@ -1227,12 +1297,15 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
            (count,) = txn.fetchone()

            txn.execute(
-                "SELECT coalesce(min(received_ts), 0) FROM federation_inbound_events_staging"
+                "SELECT min(received_ts) FROM federation_inbound_events_staging"
            )

            (received_ts,) = txn.fetchone()

-            age = self._clock.time_msec() - received_ts
+            # If there is nothing in the staging area, default the age to 0.
+            age = 0
+            if received_ts is not None:
+                age = self._clock.time_msec() - received_ts

            return count, age
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index a396a201d4..86baf397fb 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1502,6 +1502,9 @@ class PersistEventsStore:

        self._handle_event_relations(txn, event)

+        self._handle_insertion_event(txn, event)
+        self._handle_chunk_event(txn, event)
+
        # Store the labels for this event.
        labels = event.content.get(EventContentFields.LABELS)
        if labels:
@@ -1754,6 +1757,94 @@ class PersistEventsStore:
        if rel_type == RelationTypes.REPLACE:
            txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,))

+    def _handle_insertion_event(self, txn: LoggingTransaction, event: EventBase):
+        """Handles keeping track of insertion events and edges/connections.
+        Part of MSC2716.
+
+        Args:
+            txn: The database transaction object
+            event: The event to process
+        """
+
+        if event.type != EventTypes.MSC2716_INSERTION:
+            # Not an insertion event
+            return
+
+        # Skip processing an insertion event if the room version doesn't
+        # support it.
+        room_version = self.store.get_room_version_txn(txn, event.room_id)
+        if not room_version.msc2716_historical:
+            return
+
+        next_chunk_id = event.content.get(EventContentFields.MSC2716_NEXT_CHUNK_ID)
+        if next_chunk_id is None:
+            # Invalid insertion event without next chunk ID
+            return
+
+        logger.debug(
+            "_handle_insertion_event (next_chunk_id=%s) %s", next_chunk_id, event
+        )
+
+        # Keep track of the insertion event and the chunk ID
+        self.db_pool.simple_insert_txn(
+            txn,
+            table="insertion_events",
+            values={
+                "event_id": event.event_id,
+                "room_id": event.room_id,
+                "next_chunk_id": next_chunk_id,
+            },
+        )
+
+        # Insert an edge for every prev_event connection
+        for prev_event_id in event.prev_events:
+            self.db_pool.simple_insert_txn(
+                txn,
+                table="insertion_event_edges",
+                values={
+                    "event_id": event.event_id,
+                    "room_id": event.room_id,
+                    "insertion_prev_event_id": prev_event_id,
+                },
+            )
+
+    def _handle_chunk_event(self, txn: LoggingTransaction, event: EventBase):
+        """Handles inserting the chunk edges/connections between the chunk event
+        and an insertion event. Part of MSC2716.
+
+        Args:
+            txn: The database transaction object
+            event: The event to process
+        """
+
+        if event.type != EventTypes.MSC2716_CHUNK:
+            # Not a chunk event
+            return
+
+        # Skip processing a chunk event if the room version doesn't
+        # support it.
+        room_version = self.store.get_room_version_txn(txn, event.room_id)
+        if not room_version.msc2716_historical:
+            return
+
+        chunk_id = event.content.get(EventContentFields.MSC2716_CHUNK_ID)
+        if chunk_id is None:
+            # Invalid chunk event without a chunk ID
+            return
+
+        logger.debug("_handle_chunk_event chunk_id=%s %s", chunk_id, event)
+
+        # Keep track of the chunk event and the chunk ID it belongs to
+        self.db_pool.simple_insert_txn(
+            txn,
+            table="chunk_events",
+            values={
+                "event_id": event.event_id,
+                "room_id": event.room_id,
+                "chunk_id": chunk_id,
+            },
+        )
+
    def _handle_redaction(self, txn, redacted_event_id):
        """Handles receiving a redaction and checking whether we need to remove
        any redacted relations from the database.
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index 1757064a68..8e22da99ae 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -22,7 +22,7 @@ from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.events import EventBase
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import DatabasePool
+from synapse.storage.database import DatabasePool, LoggingTransaction
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
 from synapse.storage.state import StateFilter
@@ -58,15 +58,32 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):

    async def get_room_version(self, room_id: str) -> RoomVersion:
        """Get the room_version of a given room
-
        Raises:
            NotFoundError: if the room is unknown
+
+            UnsupportedRoomVersionError: if the room uses an unknown room version.
+                Typically this happens if support for the room's version has been
+                removed from Synapse.
+        """
+        return await self.db_pool.runInteraction(
+            "get_room_version_txn",
+            self.get_room_version_txn,
+            room_id,
+        )

+    def get_room_version_txn(
+        self, txn: LoggingTransaction, room_id: str
+    ) -> RoomVersion:
+        """Get the room_version of a given room
+        Args:
+            txn: Transaction object
+            room_id: The room_id of the room you are trying to get the version for
+        Raises:
+            NotFoundError: if the room is unknown

            UnsupportedRoomVersionError: if the room uses an unknown room version.
                Typically this happens if support for the room's version has been
                removed from Synapse.
        """
-        room_version_id = await self.get_room_version_id(room_id)
+        room_version_id = self.get_room_version_id_txn(txn, room_id)
        v = KNOWN_ROOM_VERSIONS.get(room_version_id)

        if not v:
@@ -80,7 +97,20 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
    @cached(max_entries=10000)
    async def get_room_version_id(self, room_id: str) -> str:
        """Get the room_version of a given room
+        Raises:
+            NotFoundError: if the room is unknown
+        """
+        return await self.db_pool.runInteraction(
+            "get_room_version_id_txn",
+            self.get_room_version_id_txn,
+            room_id,
+        )

+    def get_room_version_id_txn(self, txn: LoggingTransaction, room_id: str) -> str:
+        """Get the room_version of a given room
+        Args:
+            txn: Transaction object
+            room_id: The room_id of the room you are trying to get the version for
        Raises:
            NotFoundError: if the room is unknown
        """
@@ -88,24 +118,22 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
        # First we try looking up room version from the database, but for old
        # rooms we might not have added the room version to it yet so we fall
        # back to previous behaviour and look in current state events.
-
+        #
        # We really should have an entry in the rooms table for every room we
        # care about, but let's be a bit paranoid (at least while the background
        # update is happening) to avoid breaking existing rooms.
-        version = await self.db_pool.simple_select_one_onecol(
+        room_version = self.db_pool.simple_select_one_onecol_txn(
+            txn,
            table="rooms",
            keyvalues={"room_id": room_id},
            retcol="room_version",
-            desc="get_room_version",
            allow_none=True,
        )

-        if version is not None:
-            return version
+        if room_version is None:
+            raise NotFoundError("Could not find room_version for %s" % (room_id,))

-        # Retrieve the room's create event
-        create_event = await self.get_create_event_for_room(room_id)
-        return create_event.content.get("room_version", "1")
+        return room_version

    async def get_room_predecessor(self, room_id: str) -> Optional[dict]:
        """Get the predecessor of an upgraded room if it exists.
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index a39877f0d5..0e8270746d 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -170,7 +170,9 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
            end_item = queue[-1]
        else:
            # need to make a new queue item
-            deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)
+            deferred: ObservableDeferred[_PersistResult] = ObservableDeferred(
+                defer.Deferred(), consumeErrors=True
+            )

            end_item = _EventPersistQueueItem(
                events_and_contexts=[],
diff --git a/synapse/storage/schema/main/delta/61/01insertion_event_lookups.sql b/synapse/storage/schema/main/delta/61/01insertion_event_lookups.sql
new file mode 100644
index 0000000000..7d7bafc631
--- /dev/null
+++ b/synapse/storage/schema/main/delta/61/01insertion_event_lookups.sql
@@ -0,0 +1,49 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a table that keeps track of "insertion" events and
+-- their next_chunk_id's so we can navigate to the next chunk of history.
+CREATE TABLE IF NOT EXISTS insertion_events(
+    event_id TEXT NOT NULL,
+    room_id TEXT NOT NULL,
+    next_chunk_id TEXT NOT NULL
+);
+CREATE UNIQUE INDEX IF NOT EXISTS insertion_events_event_id ON insertion_events(event_id);
+CREATE INDEX IF NOT EXISTS insertion_events_next_chunk_id ON insertion_events(next_chunk_id);
+
+-- Add a table that keeps track of all of the events we are inserting between.
+-- We use this when navigating the DAG: when we hit an event which matches
+-- `insertion_prev_event_id`, we should backfill from the "insertion" event and
+-- navigate the historical messages from there.
+CREATE TABLE IF NOT EXISTS insertion_event_edges(
+    event_id TEXT NOT NULL,
+    room_id TEXT NOT NULL,
+    insertion_prev_event_id TEXT NOT NULL
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS insertion_event_edges_event_id ON insertion_event_edges(event_id);
+CREATE INDEX IF NOT EXISTS insertion_event_edges_insertion_room_id ON insertion_event_edges(room_id);
+CREATE INDEX IF NOT EXISTS insertion_event_edges_insertion_prev_event_id ON insertion_event_edges(insertion_prev_event_id);
+
+-- Add a table that keeps track of how each chunk is labeled. The chunks are
+-- connected together based on an insertion event's `next_chunk_id`.
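+-- (For example, an insertion event whose next_chunk_id is 'abcdef' is
+-- continued by the chunk event whose chunk_events.chunk_id is 'abcdef';
+-- 'abcdef' here is an illustrative ID, not a real one.)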
+CREATE TABLE IF NOT EXISTS chunk_events( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + chunk_id TEXT NOT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS chunk_events_event_id ON chunk_events(event_id); +CREATE INDEX IF NOT EXISTS chunk_events_chunk_id ON chunk_events(chunk_id); diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 014db1355b..a3b65aee27 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -23,6 +23,7 @@ from typing import ( Awaitable, Callable, Dict, + Generic, Hashable, Iterable, List, @@ -39,6 +40,7 @@ from twisted.internet import defer from twisted.internet.defer import CancelledError from twisted.internet.interfaces import IReactorTime from twisted.python import failure +from twisted.python.failure import Failure from synapse.logging.context import ( PreserveLoggingContext, @@ -49,8 +51,10 @@ from synapse.util import Clock, unwrapFirstError logger = logging.getLogger(__name__) +_T = TypeVar("_T") -class ObservableDeferred: + +class ObservableDeferred(Generic[_T]): """Wraps a deferred object so that we can add observer deferreds. These observer deferreds do not affect the callback chain of the original deferred. @@ -68,7 +72,7 @@ class ObservableDeferred: __slots__ = ["_deferred", "_observers", "_result"] - def __init__(self, deferred: defer.Deferred, consumeErrors: bool = False): + def __init__(self, deferred: "defer.Deferred[_T]", consumeErrors: bool = False): object.__setattr__(self, "_deferred", deferred) object.__setattr__(self, "_result", None) object.__setattr__(self, "_observers", set()) @@ -113,7 +117,7 @@ class ObservableDeferred: deferred.addCallbacks(callback, errback) - def observe(self) -> defer.Deferred: + def observe(self) -> "defer.Deferred[_T]": """Observe the underlying deferred. This returns a brand new deferred that is resolved when the underlying @@ -121,7 +125,7 @@ class ObservableDeferred: effect the underlying deferred. 
""" if not self._result: - d = defer.Deferred() + d: "defer.Deferred[_T]" = defer.Deferred() def remove(r): self._observers.discard(d) @@ -135,7 +139,7 @@ class ObservableDeferred: success, res = self._result return defer.succeed(res) if success else defer.fail(res) - def observers(self) -> List[defer.Deferred]: + def observers(self) -> "List[defer.Deferred[_T]]": return self._observers def has_called(self) -> bool: @@ -144,7 +148,7 @@ class ObservableDeferred: def has_succeeded(self) -> bool: return self._result is not None and self._result[0] is True - def get_result(self) -> Any: + def get_result(self) -> Union[_T, Failure]: return self._result[1] def __getattr__(self, name: str) -> Any: @@ -415,7 +419,7 @@ class ReadWriteLock: self.key_to_current_writer: Dict[str, defer.Deferred] = {} async def read(self, key: str) -> ContextManager: - new_defer = defer.Deferred() + new_defer: "defer.Deferred[None]" = defer.Deferred() curr_readers = self.key_to_current_readers.setdefault(key, set()) curr_writer = self.key_to_current_writer.get(key, None) @@ -438,7 +442,7 @@ class ReadWriteLock: return _ctx_manager() async def write(self, key: str) -> ContextManager: - new_defer = defer.Deferred() + new_defer: "defer.Deferred[None]" = defer.Deferred() curr_readers = self.key_to_current_readers.get(key, set()) curr_writer = self.key_to_current_writer.get(key, None) @@ -471,10 +475,8 @@ R = TypeVar("R") def timeout_deferred( - deferred: defer.Deferred, - timeout: float, - reactor: IReactorTime, -) -> defer.Deferred: + deferred: "defer.Deferred[_T]", timeout: float, reactor: IReactorTime +) -> "defer.Deferred[_T]": """The in built twisted `Deferred.addTimeout` fails to time out deferreds that have a canceller that throws exceptions. This method creates a new deferred that wraps and times out the given deferred, correctly handling @@ -497,7 +499,7 @@ def timeout_deferred( Returns: A new Deferred, which will errback with defer.TimeoutError on timeout. """ - new_d = defer.Deferred() + new_d: "defer.Deferred[_T]" = defer.Deferred() timed_out = [False] diff --git a/synapse/util/caches/cached_call.py b/synapse/util/caches/cached_call.py index 891bee0b33..e58dd91eda 100644 --- a/synapse/util/caches/cached_call.py +++ b/synapse/util/caches/cached_call.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import enum from typing import Awaitable, Callable, Generic, Optional, TypeVar, Union from twisted.internet.defer import Deferred @@ -22,6 +22,10 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background TV = TypeVar("TV") +class _Sentinel(enum.Enum): + sentinel = object() + + class CachedCall(Generic[TV]): """A wrapper for asynchronous calls whose results should be shared @@ -65,7 +69,7 @@ class CachedCall(Generic[TV]): """ self._callable: Optional[Callable[[], Awaitable[TV]]] = f self._deferred: Optional[Deferred] = None - self._result: Union[None, Failure, TV] = None + self._result: Union[_Sentinel, TV, Failure] = _Sentinel.sentinel async def get(self) -> TV: """Kick off the call if necessary, and return the result""" @@ -78,8 +82,9 @@ class CachedCall(Generic[TV]): self._callable = None # once the deferred completes, store the result. We cannot simply leave the - # result in the deferred, since if it's a Failure, GCing the deferred - # would then log a critical error about unhandled Failures. 
+ # result in the deferred, since `awaiting` a deferred destroys its result. + # (Also, if it's a Failure, GCing the deferred would log a critical error + # about unhandled Failures) def got_result(r): self._result = r @@ -92,13 +97,15 @@ class CachedCall(Generic[TV]): # and any eventual exception may not be reported. # we can now await the deferred, and once it completes, return the result. - await make_deferred_yieldable(self._deferred) + if isinstance(self._result, _Sentinel): + await make_deferred_yieldable(self._deferred) + assert not isinstance(self._result, _Sentinel) + + if isinstance(self._result, Failure): + self._result.raiseException() + raise AssertionError("unexpected return from Failure.raiseException") - # I *think* this is the easiest way to correctly raise a Failure without having - # to gut-wrench into the implementation of Deferred. - d = Deferred() - d.callback(self._result) - return await d + return self._result class RetryOnExceptionCachedCall(Generic[TV]): diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 8c6fafc677..b6456392cd 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -16,7 +16,16 @@ import enum import threading -from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, Union +from typing import ( + Callable, + Generic, + Iterable, + MutableMapping, + Optional, + TypeVar, + Union, + cast, +) from prometheus_client import Gauge @@ -166,7 +175,7 @@ class DeferredCache(Generic[KT, VT]): def set( self, key: KT, - value: defer.Deferred, + value: "defer.Deferred[VT]", callback: Optional[Callable[[], None]] = None, ) -> defer.Deferred: """Adds a new entry to the cache (or updates an existing one). @@ -214,7 +223,7 @@ class DeferredCache(Generic[KT, VT]): if value.called: result = value.result if not isinstance(result, failure.Failure): - self.cache.set(key, result, callbacks) + self.cache.set(key, cast(VT, result), callbacks) return value # otherwise, we'll add an entry to the _pending_deferred_cache for now, diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 1e8e6b1d01..1ca31e41ac 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -413,7 +413,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): # relevant result for that key. 
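            # (Each missing key gets its own typed Deferred below, so callers
            # waiting on different keys can be resolved independently as each
            # per-key result arrives.)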
            deferreds_map = {}
            for arg in missing:
-                deferred = defer.Deferred()
+                deferred: "defer.Deferred[Any]" = defer.Deferred()
                deferreds_map[arg] = deferred
                key = arg_to_cache_key(arg)
                cache.set(key, deferred, callback=invalidate_callback)
diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py
index dfa30a6229..cb08af7385 100644
--- a/synapse/util/versionstring.py
+++ b/synapse/util/versionstring.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 9274ce4c39..e2a5fc018c 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -301,6 +301,49 @@ class PruneEventTestCase(unittest.TestCase):
            room_version=RoomVersions.MSC2176,
        )

+    def test_join_rules(self):
+        """Join rules events have changed behavior starting with MSC3083."""
+        self.run_test(
+            {
+                "type": "m.room.join_rules",
+                "event_id": "$test:domain",
+                "content": {
+                    "join_rule": "invite",
+                    "allow": [],
+                    "other_key": "stripped",
+                },
+            },
+            {
+                "type": "m.room.join_rules",
+                "event_id": "$test:domain",
+                "content": {"join_rule": "invite"},
+                "signatures": {},
+                "unsigned": {},
+            },
+        )
+
+        # After MSC3083, the `allow` key is no longer stripped from join rules events.
+        self.run_test(
+            {
+                "type": "m.room.join_rules",
+                "content": {
+                    "join_rule": "invite",
+                    "allow": [],
+                    "other_key": "stripped",
+                },
+            },
+            {
+                "type": "m.room.join_rules",
+                "content": {
+                    "join_rule": "invite",
+                    "allow": [],
+                },
+                "signatures": {},
+                "unsigned": {},
+            },
+            room_version=RoomVersions.MSC3083,
+        )
+

 class SerializeEventTestCase(unittest.TestCase):
     def serialize(self, ev, fields):
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index e0a24824cc..39e7b1ab25 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -47,12 +47,16 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
            "alg2:k3": {"key": "key3"},
        }

+        # Note that "signed_curve25519" is always returned in key count responses. This is necessary until
+        # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
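+        # (Concretely: the upload below contains only "alg1" and "alg2" keys,
+        # yet the asserted count dict still includes "signed_curve25519": 0.)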
res = self.get_success( self.handler.upload_keys_for_user( local_user, device_id, {"one_time_keys": keys} ) ) - self.assertDictEqual(res, {"one_time_key_counts": {"alg1": 1, "alg2": 2}}) + self.assertDictEqual( + res, {"one_time_key_counts": {"alg1": 1, "alg2": 2, "signed_curve25519": 0}} + ) # we should be able to change the signature without a problem keys["alg2:k2"]["signatures"]["k1"] = "sig2" @@ -61,7 +65,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): local_user, device_id, {"one_time_keys": keys} ) ) - self.assertDictEqual(res, {"one_time_key_counts": {"alg1": 1, "alg2": 2}}) + self.assertDictEqual( + res, {"one_time_key_counts": {"alg1": 1, "alg2": 2, "signed_curve25519": 0}} + ) def test_change_one_time_keys(self): """attempts to change one-time-keys should be rejected""" @@ -79,7 +85,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): local_user, device_id, {"one_time_keys": keys} ) ) - self.assertDictEqual(res, {"one_time_key_counts": {"alg1": 1, "alg2": 2}}) + self.assertDictEqual( + res, {"one_time_key_counts": {"alg1": 1, "alg2": 2, "signed_curve25519": 0}} + ) # Error when changing string key self.get_failure( @@ -89,7 +97,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): SynapseError, ) - # Error when replacing dict key with strin + # Error when replacing dict key with string self.get_failure( self.handler.upload_keys_for_user( local_user, device_id, {"one_time_keys": {"alg2:k3": "key2"}} @@ -131,7 +139,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): local_user, device_id, {"one_time_keys": keys} ) ) - self.assertDictEqual(res, {"one_time_key_counts": {"alg1": 1}}) + self.assertDictEqual( + res, {"one_time_key_counts": {"alg1": 1, "signed_curve25519": 0}} + ) res2 = self.get_success( self.handler.claim_one_time_keys( diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index ba8cf44f46..4140fcefc2 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import List from unittest import TestCase from synapse.api.constants import EventTypes @@ -22,6 +23,7 @@ from synapse.federation.federation_base import event_from_pdu_json from synapse.logging.context import LoggingContext, run_in_background from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.util.stringutils import random_string from tests import unittest @@ -39,6 +41,8 @@ class FederationTestCase(unittest.HomeserverTestCase): hs = self.setup_test_homeserver(federation_http_client=None) self.handler = hs.get_federation_handler() self.store = hs.get_datastore() + self.state_store = hs.get_storage().state + self._event_auth_handler = hs.get_event_auth_handler() return hs def test_exchange_revoked_invite(self): @@ -190,6 +194,133 @@ class FederationTestCase(unittest.HomeserverTestCase): self.assertEqual(sg, sg2) + def test_backfill_floating_outlier_membership_auth(self): + """ + As the local homeserver, check that we can properly process a federated + event from the OTHER_SERVER with auth_events that include a floating + membership event from the OTHER_SERVER. + + Regression test, see #10439. 
+ """ + OTHER_SERVER = "otherserver" + OTHER_USER = "@otheruser:" + OTHER_SERVER + + # create the room + user_id = self.register_user("kermit", "test") + tok = self.login("kermit", "test") + room_id = self.helper.create_room_as( + room_creator=user_id, + is_public=True, + tok=tok, + extra_content={ + "preset": "public_chat", + }, + ) + room_version = self.get_success(self.store.get_room_version(room_id)) + + prev_event_ids = self.get_success(self.store.get_prev_events_for_room(room_id)) + ( + most_recent_prev_event_id, + most_recent_prev_event_depth, + ) = self.get_success(self.store.get_max_depth_of(prev_event_ids)) + # mapping from (type, state_key) -> state_event_id + prev_state_map = self.get_success( + self.state_store.get_state_ids_for_event(most_recent_prev_event_id) + ) + # List of state event ID's + prev_state_ids = list(prev_state_map.values()) + auth_event_ids = prev_state_ids + auth_events = list( + self.get_success(self.store.get_events(auth_event_ids)).values() + ) + + # build a floating outlier member state event + fake_prev_event_id = "$" + random_string(43) + member_event_dict = { + "type": EventTypes.Member, + "content": { + "membership": "join", + }, + "state_key": OTHER_USER, + "room_id": room_id, + "sender": OTHER_USER, + "depth": most_recent_prev_event_depth, + "prev_events": [fake_prev_event_id], + "origin_server_ts": self.clock.time_msec(), + "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}}, + } + builder = self.hs.get_event_builder_factory().for_room_version( + room_version, member_event_dict + ) + member_event = self.get_success( + builder.build( + prev_event_ids=member_event_dict["prev_events"], + auth_event_ids=self._event_auth_handler.compute_auth_events( + builder, + prev_state_map, + for_verification=False, + ), + depth=member_event_dict["depth"], + ) + ) + # Override the signature added from "test" homeserver that we created the event with + member_event.signatures = member_event_dict["signatures"] + + # Add the new member_event to the StateMap + prev_state_map[ + (member_event.type, member_event.state_key) + ] = member_event.event_id + auth_events.append(member_event) + + # build and send an event authed based on the member event + message_event_dict = { + "type": EventTypes.Message, + "content": {}, + "room_id": room_id, + "sender": OTHER_USER, + "depth": most_recent_prev_event_depth, + "prev_events": prev_event_ids.copy(), + "origin_server_ts": self.clock.time_msec(), + "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}}, + } + builder = self.hs.get_event_builder_factory().for_room_version( + room_version, message_event_dict + ) + message_event = self.get_success( + builder.build( + prev_event_ids=message_event_dict["prev_events"], + auth_event_ids=self._event_auth_handler.compute_auth_events( + builder, + prev_state_map, + for_verification=False, + ), + depth=message_event_dict["depth"], + ) + ) + # Override the signature added from "test" homeserver that we created the event with + message_event.signatures = message_event_dict["signatures"] + + # Stub the /event_auth response from the OTHER_SERVER + async def get_event_auth( + destination: str, room_id: str, event_id: str + ) -> List[EventBase]: + return auth_events + + self.handler.federation_client.get_event_auth = get_event_auth + + with LoggingContext("receive_pdu"): + # Fake the OTHER_SERVER federating the message event over to our local homeserver + d = run_in_background( + self.handler.on_receive_pdu, OTHER_SERVER, message_event + ) + 
            self.get_success(d)

+        # Now try and get the events on our local homeserver
+        stored_event = self.get_success(
+            self.store.get_event(message_event.event_id, allow_none=True)
+        )
+        self.assertTrue(stored_event is not None)
+
     @unittest.override_config(
         {"rc_invites": {"per_user": {"per_second": 0.5, "burst_count": 3}}}
     )
diff --git a/tests/handlers/test_receipts.py b/tests/handlers/test_receipts.py
new file mode 100644
index 0000000000..93a9a084b2
--- /dev/null
+++ b/tests/handlers/test_receipts.py
@@ -0,0 +1,294 @@
+# Copyright 2021 Šimon Brandner <simon.bra.ag@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import List
+
+from synapse.api.constants import ReadReceiptEventFields
+from synapse.types import JsonDict
+
+from tests import unittest
+
+
+class ReceiptsTestCase(unittest.HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
+        self.event_source = hs.get_event_sources().sources["receipt"]
+
+    # In the first param of _test_filters_hidden we use "hidden" instead of
+    # ReadReceiptEventFields.MSC2285_HIDDEN. We do this because we're mocking
+    # the data from the database, which doesn't use the prefix.
+
+    def test_filters_out_hidden_receipt(self):
+        self._test_filters_hidden(
+            [
+                {
+                    "content": {
+                        "$1435641916114394fHBLK:matrix.org": {
+                            "m.read": {
+                                "@rikj:jki.re": {
+                                    "ts": 1436451550453,
+                                    "hidden": True,
+                                }
+                            }
+                        }
+                    },
+                    "room_id": "!jEsUZKDJdhlrceRyVU:example.org",
+                    "type": "m.receipt",
+                }
+            ],
+            [],
+        )
+
+    def test_does_not_filter_out_our_hidden_receipt(self):
+        self._test_filters_hidden(
+            [
+                {
+                    "content": {
+                        "$1435641916hfgh4394fHBLK:matrix.org": {
+                            "m.read": {
+                                "@me:server.org": {
+                                    "ts": 1436451550453,
+                                    "hidden": True,
+                                },
+                            }
+                        }
+                    },
+                    "room_id": "!jEsUZKDJdhlrceRyVU:example.org",
+                    "type": "m.receipt",
+                }
+            ],
+            [
+                {
+                    "content": {
+                        "$1435641916hfgh4394fHBLK:matrix.org": {
+                            "m.read": {
+                                "@me:server.org": {
+                                    "ts": 1436451550453,
+                                    ReadReceiptEventFields.MSC2285_HIDDEN: True,
+                                },
+                            }
+                        }
+                    },
+                    "room_id": "!jEsUZKDJdhlrceRyVU:example.org",
+                    "type": "m.receipt",
+                }
+            ],
+        )
+
+    def test_filters_out_hidden_receipt_and_ignores_rest(self):
+        self._test_filters_hidden(
+            [
+                {
+                    "content": {
+                        "$1dgdgrd5641916114394fHBLK:matrix.org": {
+                            "m.read": {
+                                "@rikj:jki.re": {
+                                    "ts": 1436451550453,
+                                    "hidden": True,
+                                },
+                                "@user:jki.re": {
+                                    "ts": 1436451550453,
+                                },
+                            }
+                        }
+                    },
+                    "room_id": "!jEsUZKDJdhlrceRyVU:example.org",
+                    "type": "m.receipt",
+                }
+            ],
+            [
+                {
+                    "content": {
+                        "$1dgdgrd5641916114394fHBLK:matrix.org": {
+                            "m.read": {
+                                "@user:jki.re": {
+                                    "ts": 1436451550453,
+                                }
+                            }
+                        }
+                    },
+                    "room_id": "!jEsUZKDJdhlrceRyVU:example.org",
+                    "type": "m.receipt",
+                }
+            ],
+        )
+
+    def test_filters_out_event_with_only_hidden_receipts_and_ignores_the_rest(self):
+        self._test_filters_hidden(
+            [
+                {
+                    "content": {
+                        "$14356419edgd14394fHBLK:matrix.org": {
+                            "m.read": {
+                                "@rikj:jki.re": {
+                                    "ts": 1436451550453,
+                                    "hidden": True,
+                                },
+                            }
+                        },
+                        "$1435641916114394fHBLK:matrix.org": {
+                            "m.read": {
+                                "@user:jki.re": {
+                                    "ts":
1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + [ + { + "content": { + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + } + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + ) + + def test_handles_missing_content_of_m_read(self): + self._test_filters_hidden( + [ + { + "content": { + "$14356419ggffg114394fHBLK:matrix.org": {"m.read": {}}, + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + [ + { + "content": { + "$14356419ggffg114394fHBLK:matrix.org": {"m.read": {}}, + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + ) + + def test_handles_empty_event(self): + self._test_filters_hidden( + [ + { + "content": { + "$143564gdfg6114394fHBLK:matrix.org": {}, + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + [ + { + "content": { + "$143564gdfg6114394fHBLK:matrix.org": {}, + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + ) + + def test_filters_out_receipt_event_with_only_hidden_receipt_and_ignores_rest(self): + self._test_filters_hidden( + [ + { + "content": { + "$14356419edgd14394fHBLK:matrix.org": { + "m.read": { + "@rikj:jki.re": { + "ts": 1436451550453, + "hidden": True, + }, + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + }, + { + "content": { + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + }, + ], + [ + { + "content": { + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + } + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + ) + + def _test_filters_hidden( + self, events: List[JsonDict], expected_output: List[JsonDict] + ): + """Tests that the _filter_out_hidden returns the expected output""" + filtered_events = self.event_source.filter_out_hidden(events, "@me:server.org") + self.assertEquals(filtered_events, expected_output) diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index 437113929a..e5865c161d 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -14,19 +14,22 @@ import base64 import logging import os -from typing import Optional +from typing import Iterable, Optional from unittest.mock import patch import treq from netaddr import IPSet +from parameterized import parameterized from twisted.internet import interfaces # noqa: F401 +from twisted.internet.endpoints import HostnameEndpoint, _WrapperEndpoint +from twisted.internet.interfaces import IProtocol, IProtocolFactory from twisted.internet.protocol import Factory -from twisted.protocols.tls import TLSMemoryBIOFactory +from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web.http import HTTPChannel from synapse.http.client import 
BlacklistingReactorWrapper -from synapse.http.proxyagent import ProxyAgent +from synapse.http.proxyagent import ProxyAgent, ProxyCredentials, parse_proxy from tests.http import TestServerTLSConnectionFactory, get_test_https_policy from tests.server import FakeTransport, ThreadedMemoryReactorClock @@ -37,33 +40,208 @@ logger = logging.getLogger(__name__) HTTPFactory = Factory.forProtocol(HTTPChannel) +class ProxyParserTests(TestCase): + """ + Values for test + [ + proxy_string, + expected_scheme, + expected_hostname, + expected_port, + expected_credentials, + ] + """ + + @parameterized.expand( + [ + # host + [b"localhost", b"http", b"localhost", 1080, None], + [b"localhost:9988", b"http", b"localhost", 9988, None], + # host+scheme + [b"https://localhost", b"https", b"localhost", 1080, None], + [b"https://localhost:1234", b"https", b"localhost", 1234, None], + # ipv4 + [b"1.2.3.4", b"http", b"1.2.3.4", 1080, None], + [b"1.2.3.4:9988", b"http", b"1.2.3.4", 9988, None], + # ipv4+scheme + [b"https://1.2.3.4", b"https", b"1.2.3.4", 1080, None], + [b"https://1.2.3.4:9988", b"https", b"1.2.3.4", 9988, None], + # ipv6 - without brackets is broken + # [ + # b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + # b"http", + # b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + # 1080, + # None, + # ], + # [ + # b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + # b"http", + # b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + # 1080, + # None, + # ], + # [b"::1", b"http", b"::1", 1080, None], + # [b"::ffff:0.0.0.0", b"http", b"::ffff:0.0.0.0", 1080, None], + # ipv6 - with brackets + [ + b"[2001:0db8:85a3:0000:0000:8a2e:0370:effe]", + b"http", + b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + 1080, + None, + ], + [ + b"[2001:0db8:85a3:0000:0000:8a2e:0370:1234]", + b"http", + b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + 1080, + None, + ], + [b"[::1]", b"http", b"::1", 1080, None], + [b"[::ffff:0.0.0.0]", b"http", b"::ffff:0.0.0.0", 1080, None], + # ipv6+port + [ + b"[2001:0db8:85a3:0000:0000:8a2e:0370:effe]:9988", + b"http", + b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + 9988, + None, + ], + [ + b"[2001:0db8:85a3:0000:0000:8a2e:0370:1234]:9988", + b"http", + b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + 9988, + None, + ], + [b"[::1]:9988", b"http", b"::1", 9988, None], + [b"[::ffff:0.0.0.0]:9988", b"http", b"::ffff:0.0.0.0", 9988, None], + # ipv6+scheme + [ + b"https://[2001:0db8:85a3:0000:0000:8a2e:0370:effe]", + b"https", + b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + 1080, + None, + ], + [ + b"https://[2001:0db8:85a3:0000:0000:8a2e:0370:1234]", + b"https", + b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + 1080, + None, + ], + [b"https://[::1]", b"https", b"::1", 1080, None], + [b"https://[::ffff:0.0.0.0]", b"https", b"::ffff:0.0.0.0", 1080, None], + # ipv6+scheme+port + [ + b"https://[2001:0db8:85a3:0000:0000:8a2e:0370:effe]:9988", + b"https", + b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + 9988, + None, + ], + [ + b"https://[2001:0db8:85a3:0000:0000:8a2e:0370:1234]:9988", + b"https", + b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + 9988, + None, + ], + [b"https://[::1]:9988", b"https", b"::1", 9988, None], + # with credentials + [ + b"https://user:pass@1.2.3.4:9988", + b"https", + b"1.2.3.4", + 9988, + b"user:pass", + ], + [b"user:pass@1.2.3.4:9988", b"http", b"1.2.3.4", 9988, b"user:pass"], + [ + b"https://user:pass@proxy.local:9988", + b"https", + b"proxy.local", + 9988, + b"user:pass", + ], + [ + b"user:pass@proxy.local:9988", + b"http", + b"proxy.local", + 9988, + b"user:pass", + ], + ] + ) + def test_parse_proxy( + 
+        self,
+        proxy_string: bytes,
+        expected_scheme: bytes,
+        expected_hostname: bytes,
+        expected_port: int,
+        expected_credentials: Optional[bytes],
+    ):
+        """
+        Tests that a given proxy URL will be broken into the components.
+        Args:
+            proxy_string: The proxy connection string.
+            expected_scheme: Expected value of proxy scheme.
+            expected_hostname: Expected value of proxy hostname.
+            expected_port: Expected value of proxy port.
+            expected_credentials: Expected value of credentials.
+                Must be in form '<username>:<password>' or None
+        """
+        proxy_cred = None
+        if expected_credentials:
+            proxy_cred = ProxyCredentials(expected_credentials)
+        self.assertEqual(
+            (
+                expected_scheme,
+                expected_hostname,
+                expected_port,
+                proxy_cred,
+            ),
+            parse_proxy(proxy_string),
+        )
+
+
 class MatrixFederationAgentTests(TestCase):
     def setUp(self):
         self.reactor = ThreadedMemoryReactorClock()

     def _make_connection(
-        self, client_factory, server_factory, ssl=False, expected_sni=None
-    ):
+        self,
+        client_factory: IProtocolFactory,
+        server_factory: IProtocolFactory,
+        ssl: bool = False,
+        expected_sni: Optional[bytes] = None,
+        tls_sanlist: Optional[Iterable[bytes]] = None,
+    ) -> IProtocol:
        """Builds a test server, and completes the outgoing client connection
        Args:
-            client_factory (interfaces.IProtocolFactory): the the factory that the
+            client_factory: the factory that the
                application is trying to use to make the outbound connection. We will
                invoke it to build the client Protocol

-            server_factory (interfaces.IProtocolFactory): a factory to build the
+            server_factory: a factory to build the
                server-side protocol

-            ssl (bool): If true, we will expect an ssl connection and wrap
+            ssl: If true, we will expect an ssl connection and wrap
                server_factory with a TLSMemoryBIOFactory

-            expected_sni (bytes|None): the expected SNI value
+            expected_sni: the expected SNI value
+
+            tls_sanlist: list of SAN entries for the TLS cert presented by the server.
+                Defaults to [b'DNS:test.com']

        Returns:
-            IProtocol: the server Protocol returned by server_factory
+            the server Protocol returned by server_factory
        """
        if ssl:
-            server_factory = _wrap_server_factory_for_tls(server_factory)
+            server_factory = _wrap_server_factory_for_tls(server_factory, tls_sanlist)

        server_protocol = server_factory.buildProtocol(None)
@@ -98,22 +276,28 @@ class MatrixFederationAgentTests(TestCase):
        self.assertEqual(
            server_name,
            expected_sni,
-            "Expected SNI %s but got %s" % (expected_sni, server_name),
+            f"Expected SNI {expected_sni!s} but got {server_name!s}",
        )

        return http_protocol

-    def _test_request_direct_connection(self, agent, scheme, hostname, path):
+    def _test_request_direct_connection(
+        self,
+        agent: ProxyAgent,
+        scheme: bytes,
+        hostname: bytes,
+        path: bytes,
+    ):
        """Runs a test case for a direct connection not going through a proxy.

        Args:
-            agent (ProxyAgent): the proxy agent being tested
+            agent: the proxy agent being tested

-            scheme (bytes): expected to be either "http" or "https"
+            scheme: expected to be either "http" or "https"

-            hostname (bytes): the hostname to connect to in the test
+            hostname: the hostname to connect to in the test

-            path (bytes): the path to connect to in the test
+            path: the path to connect to in the test
        """
        is_https = scheme == b"https"
@@ -208,7 +392,7 @@ class MatrixFederationAgentTests(TestCase):
        """
        Tests that requests can be made through a proxy.
""" - self._do_http_request_via_proxy(auth_credentials=None) + self._do_http_request_via_proxy(ssl=False, auth_credentials=None) @patch.dict( os.environ, @@ -218,12 +402,28 @@ class MatrixFederationAgentTests(TestCase): """ Tests that authenticated requests can be made through a proxy. """ - self._do_http_request_via_proxy(auth_credentials="bob:pinkponies") + self._do_http_request_via_proxy(ssl=False, auth_credentials=b"bob:pinkponies") + + @patch.dict( + os.environ, {"http_proxy": "https://proxy.com:8888", "no_proxy": "unused.com"} + ) + def test_http_request_via_https_proxy(self): + self._do_http_request_via_proxy(ssl=True, auth_credentials=None) + + @patch.dict( + os.environ, + { + "http_proxy": "https://bob:pinkponies@proxy.com:8888", + "no_proxy": "unused.com", + }, + ) + def test_http_request_via_https_proxy_with_auth(self): + self._do_http_request_via_proxy(ssl=True, auth_credentials=b"bob:pinkponies") @patch.dict(os.environ, {"https_proxy": "proxy.com", "no_proxy": "unused.com"}) def test_https_request_via_proxy(self): """Tests that TLS-encrypted requests can be made through a proxy""" - self._do_https_request_via_proxy(auth_credentials=None) + self._do_https_request_via_proxy(ssl=False, auth_credentials=None) @patch.dict( os.environ, @@ -231,16 +431,40 @@ class MatrixFederationAgentTests(TestCase): ) def test_https_request_via_proxy_with_auth(self): """Tests that authenticated, TLS-encrypted requests can be made through a proxy""" - self._do_https_request_via_proxy(auth_credentials="bob:pinkponies") + self._do_https_request_via_proxy(ssl=False, auth_credentials=b"bob:pinkponies") + + @patch.dict( + os.environ, {"https_proxy": "https://proxy.com", "no_proxy": "unused.com"} + ) + def test_https_request_via_https_proxy(self): + """Tests that TLS-encrypted requests can be made through a proxy""" + self._do_https_request_via_proxy(ssl=True, auth_credentials=None) + + @patch.dict( + os.environ, + {"https_proxy": "https://bob:pinkponies@proxy.com", "no_proxy": "unused.com"}, + ) + def test_https_request_via_https_proxy_with_auth(self): + """Tests that authenticated, TLS-encrypted requests can be made through a proxy""" + self._do_https_request_via_proxy(ssl=True, auth_credentials=b"bob:pinkponies") def _do_http_request_via_proxy( self, - auth_credentials: Optional[str] = None, + ssl: bool = False, + auth_credentials: Optional[bytes] = None, ): + """Send a http request via an agent and check that it is correctly received at + the proxy. The proxy can use either http or https. + Args: + ssl: True if we expect the request to connect via https to proxy + auth_credentials: credentials to authenticate at proxy """ - Tests that requests can be made through a proxy. 
- """ - agent = ProxyAgent(self.reactor, use_proxy=True) + if ssl: + agent = ProxyAgent( + self.reactor, use_proxy=True, contextFactory=get_test_https_policy() + ) + else: + agent = ProxyAgent(self.reactor, use_proxy=True) self.reactor.lookups["proxy.com"] = "1.2.3.5" d = agent.request(b"GET", b"http://test.com") @@ -254,7 +478,11 @@ class MatrixFederationAgentTests(TestCase): # make a test server, and wire up the client http_server = self._make_connection( - client_factory, _get_test_protocol_factory() + client_factory, + _get_test_protocol_factory(), + ssl=ssl, + tls_sanlist=[b"DNS:proxy.com"] if ssl else None, + expected_sni=b"proxy.com" if ssl else None, ) # the FakeTransport is async, so we need to pump the reactor @@ -272,7 +500,7 @@ class MatrixFederationAgentTests(TestCase): if auth_credentials is not None: # Compute the correct header value for Proxy-Authorization - encoded_credentials = base64.b64encode(b"bob:pinkponies") + encoded_credentials = base64.b64encode(auth_credentials) expected_header_value = b"Basic " + encoded_credentials # Validate the header's value @@ -295,8 +523,15 @@ class MatrixFederationAgentTests(TestCase): def _do_https_request_via_proxy( self, - auth_credentials: Optional[str] = None, + ssl: bool = False, + auth_credentials: Optional[bytes] = None, ): + """Send a https request via an agent and check that it is correctly received at + the proxy and client. The proxy can use either http or https. + Args: + ssl: True if we expect the request to connect via https to proxy + auth_credentials: credentials to authenticate at proxy + """ agent = ProxyAgent( self.reactor, contextFactory=get_test_https_policy(), @@ -313,18 +548,15 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(host, "1.2.3.5") self.assertEqual(port, 1080) - # make a test HTTP server, and wire up the client + # make a test server to act as the proxy, and wire up the client proxy_server = self._make_connection( - client_factory, _get_test_protocol_factory() + client_factory, + _get_test_protocol_factory(), + ssl=ssl, + tls_sanlist=[b"DNS:proxy.com"] if ssl else None, + expected_sni=b"proxy.com" if ssl else None, ) - - # fish the transports back out so that we can do the old switcheroo - s2c_transport = proxy_server.transport - client_protocol = s2c_transport.other - c2s_transport = client_protocol.transport - - # the FakeTransport is async, so we need to pump the reactor - self.reactor.advance(0) + assert isinstance(proxy_server, HTTPChannel) # now there should be a pending CONNECT request self.assertEqual(len(proxy_server.requests), 1) @@ -340,7 +572,7 @@ class MatrixFederationAgentTests(TestCase): if auth_credentials is not None: # Compute the correct header value for Proxy-Authorization - encoded_credentials = base64.b64encode(b"bob:pinkponies") + encoded_credentials = base64.b64encode(auth_credentials) expected_header_value = b"Basic " + encoded_credentials # Validate the header's value @@ -352,31 +584,49 @@ class MatrixFederationAgentTests(TestCase): # tell the proxy server not to close the connection proxy_server.persistent = True - # this just stops the http Request trying to do a chunked response - # request.setHeader(b"Content-Length", b"0") request.finish() - # now we can replace the proxy channel with a new, SSL-wrapped HTTP channel - ssl_factory = _wrap_server_factory_for_tls(_get_test_protocol_factory()) - ssl_protocol = ssl_factory.buildProtocol(None) - http_server = ssl_protocol.wrappedProtocol + # now we make another test server to act as the upstream HTTP server. 
+ server_ssl_protocol = _wrap_server_factory_for_tls( + _get_test_protocol_factory() + ).buildProtocol(None) - ssl_protocol.makeConnection( - FakeTransport(client_protocol, self.reactor, ssl_protocol) - ) - c2s_transport.other = ssl_protocol + # Tell the HTTP server to send outgoing traffic back via the proxy's transport. + proxy_server_transport = proxy_server.transport + server_ssl_protocol.makeConnection(proxy_server_transport) + + # ... and replace the protocol on the proxy's transport with the + # TLSMemoryBIOProtocol for the test server, so that incoming traffic + # to the proxy gets sent over to the HTTP(s) server. + # + # This needs a bit of gut-wrenching, which is different depending on whether + # the proxy is using TLS or not. + # + # (an alternative, possibly more elegant, approach would be to use a custom + # Protocol to implement the proxy, which starts out by forwarding to an + # HTTPChannel (to implement the CONNECT command) and can then be switched + # into a mode where it forwards its traffic to another Protocol.) + if ssl: + assert isinstance(proxy_server_transport, TLSMemoryBIOProtocol) + proxy_server_transport.wrappedProtocol = server_ssl_protocol + else: + assert isinstance(proxy_server_transport, FakeTransport) + client_protocol = proxy_server_transport.other + c2s_transport = client_protocol.transport + c2s_transport.other = server_ssl_protocol self.reactor.advance(0) - server_name = ssl_protocol._tlsConnection.get_servername() + server_name = server_ssl_protocol._tlsConnection.get_servername() expected_sni = b"test.com" self.assertEqual( server_name, expected_sni, - "Expected SNI %s but got %s" % (expected_sni, server_name), + f"Expected SNI {expected_sni!s} but got {server_name!s}", ) # now there should be a pending request + http_server = server_ssl_protocol.wrappedProtocol self.assertEqual(len(http_server.requests), 1) request = http_server.requests[0] @@ -510,7 +760,7 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual( server_name, expected_sni, - "Expected SNI %s but got %s" % (expected_sni, server_name), + f"Expected SNI {expected_sni!s} but got {server_name!s}", ) # now there should be a pending request @@ -529,16 +779,48 @@ class MatrixFederationAgentTests(TestCase): body = self.successResultOf(treq.content(resp)) self.assertEqual(body, b"result") + @patch.dict(os.environ, {"http_proxy": "proxy.com:8888"}) + def test_proxy_with_no_scheme(self): + http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) + self.assertIsInstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) + self.assertEqual(http_proxy_agent.http_proxy_endpoint._hostStr, "proxy.com") + self.assertEqual(http_proxy_agent.http_proxy_endpoint._port, 8888) + + @patch.dict(os.environ, {"http_proxy": "socks://proxy.com:8888"}) + def test_proxy_with_unsupported_scheme(self): + with self.assertRaises(ValueError): + ProxyAgent(self.reactor, use_proxy=True) + + @patch.dict(os.environ, {"http_proxy": "http://proxy.com:8888"}) + def test_proxy_with_http_scheme(self): + http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) + self.assertIsInstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) + self.assertEqual(http_proxy_agent.http_proxy_endpoint._hostStr, "proxy.com") + self.assertEqual(http_proxy_agent.http_proxy_endpoint._port, 8888) + + @patch.dict(os.environ, {"http_proxy": "https://proxy.com:8888"}) + def test_proxy_with_https_scheme(self): + https_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) + 
self.assertIsInstance(https_proxy_agent.http_proxy_endpoint, _WrapperEndpoint) + self.assertEqual( + https_proxy_agent.http_proxy_endpoint._wrappedEndpoint._hostStr, "proxy.com" + ) + self.assertEqual( + https_proxy_agent.http_proxy_endpoint._wrappedEndpoint._port, 8888 + ) + -def _wrap_server_factory_for_tls(factory, sanlist=None): +def _wrap_server_factory_for_tls( + factory: IProtocolFactory, sanlist: Iterable[bytes] = None +) -> IProtocolFactory: """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory The resultant factory will create a TLS server which presents a certificate signed by our test CA, valid for the domains in `sanlist` Args: - factory (interfaces.IProtocolFactory): protocol factory to wrap - sanlist (iterable[bytes]): list of domains the cert should be valid for + factory: protocol factory to wrap + sanlist: list of domains the cert should be valid for Returns: interfaces.IProtocolFactory @@ -552,7 +834,7 @@ def _wrap_server_factory_for_tls(factory, sanlist=None): ) -def _get_test_protocol_factory(): +def _get_test_protocol_factory() -> IProtocolFactory: """Get a protocol Factory which will build an HTTPChannel Returns: @@ -566,6 +848,6 @@ def _get_test_protocol_factory(): return server_factory -def _log_request(request): +def _log_request(request: str): """Implements Factory.log, which is expected by Request.finish""" - logger.info("Completed request %s", request) + logger.info(f"Completed request {request}") diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index cdca3a3e23..f6ae9ae181 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -15,9 +15,14 @@ import json import synapse.rest.admin -from synapse.api.constants import EventContentFields, EventTypes, RelationTypes +from synapse.api.constants import ( + EventContentFields, + EventTypes, + ReadReceiptEventFields, + RelationTypes, +) from synapse.rest.client.v1 import login, room -from synapse.rest.client.v2_alpha import knock, read_marker, sync +from synapse.rest.client.v2_alpha import knock, read_marker, receipts, sync from tests import unittest from tests.federation.transport.test_knocking import ( @@ -368,6 +373,76 @@ class SyncKnockTestCase( ) +class ReadReceiptsTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + receipts.register_servlets, + room.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.url = "/sync?since=%s" + self.next_batch = "s0" + + # Register the first user + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + # Create the room + self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + + # Register the second user + self.user2 = self.register_user("kermit2", "monkey") + self.tok2 = self.login("kermit2", "monkey") + + # Join the second user + self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) + + @override_config({"experimental_features": {"msc2285_enabled": True}}) + def test_hidden_read_receipts(self): + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a read receipt to tell the server the first user's message was read + body = json.dumps({ReadReceiptEventFields.MSC2285_HIDDEN: True}).encode("utf8") + channel = self.make_request( + "POST", + "/rooms/%s/receipt/m.read/%s" % (self.room_id, res["event_id"]), + body, + 
access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + + # Test that the first user can't see the other user's hidden read receipt + self.assertEqual(self._get_read_receipt(), None) + + def _get_read_receipt(self): + """Syncs and returns the read receipt.""" + + # Checks if event is a read receipt + def is_read_receipt(event): + return event["type"] == "m.receipt" + + # Sync + channel = self.make_request( + "GET", + self.url % self.next_batch, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200) + + # Store the next batch for the next request. + self.next_batch = channel.json_body["next_batch"] + + # Return the read receipt + ephemeral_events = channel.json_body["rooms"]["join"][self.room_id][ + "ephemeral" + ]["events"] + return next(filter(is_read_receipt, ephemeral_events), None) + + class UnreadMessagesTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, @@ -375,6 +450,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): read_marker.register_servlets, room.register_servlets, sync.register_servlets, + receipts.register_servlets, ] def prepare(self, reactor, clock, hs): @@ -448,6 +524,23 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): # Check that the unread counter is back to 0. self._check_unread_count(0) + # Check that hidden read receipts don't break unread counts + res = self.helper.send(self.room_id, "hello", tok=self.tok2) + self._check_unread_count(1) + + # Send a read receipt to tell the server we've read the latest event. + body = json.dumps({ReadReceiptEventFields.MSC2285_HIDDEN: True}).encode("utf8") + channel = self.make_request( + "POST", + "/rooms/%s/receipt/m.read/%s" % (self.room_id, res["event_id"]), + body, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check that the unread counter is back to 0. + self._check_unread_count(0) + # Check that room name changes increase the unread counter. self.helper.send_state( self.room_id, diff --git a/tests/test_preview.py b/tests/test_preview.py index cac3d81ac1..48e792b55b 100644 --- a/tests/test_preview.py +++ b/tests/test_preview.py @@ -325,6 +325,19 @@ class MediaEncodingTestCase(unittest.TestCase): ) self.assertEqual(encoding, "ascii") + def test_meta_charset_underscores(self): + """A character encoding contains underscore.""" + encoding = get_html_media_encoding( + b""" + <html> + <head><meta charset="Shift_JIS"> + </head> + </html> + """, + "text/html", + ) + self.assertEqual(encoding, "Shift_JIS") + def test_xml_encoding(self): """A character encoding is found via the meta tag.""" encoding = get_html_media_encoding( |