diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py
index be4a289ec1..6b5af2dbb6 100644
--- a/tests/rest/client/test_media.py
+++ b/tests/rest/client/test_media.py
@@ -19,31 +19,54 @@
#
#
import base64
+import io
import json
import os
import re
-from typing import Any, Dict, Optional, Sequence, Tuple, Type
+from typing import Any, BinaryIO, ClassVar, Dict, List, Optional, Sequence, Tuple, Type
+from unittest.mock import MagicMock, Mock, patch
+from urllib import parse
from urllib.parse import quote, urlencode
+from parameterized import parameterized_class
+
+from twisted.internet import defer
from twisted.internet._resolver import HostResolution
from twisted.internet.address import IPv4Address, IPv6Address
+from twisted.internet.defer import Deferred
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import IAddress, IResolutionReceiver
+from twisted.python.failure import Failure
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor
+from twisted.web.http_headers import Headers
+from twisted.web.iweb import UNKNOWN_LENGTH, IResponse
from twisted.web.resource import Resource
+from synapse.api.errors import HttpResponseException
+from synapse.api.ratelimiting import Ratelimiter
from synapse.config.oembed import OEmbedEndpointConfig
+from synapse.http.client import MultipartResponse
+from synapse.http.types import QueryParams
+from synapse.logging.context import make_deferred_yieldable
from synapse.media._base import FileInfo
from synapse.media.url_previewer import IMAGE_CACHE_EXPIRY_MS
from synapse.rest import admin
from synapse.rest.client import login, media
from synapse.server import HomeServer
-from synapse.types import JsonDict
+from synapse.types import JsonDict, UserID
from synapse.util import Clock
from synapse.util.stringutils import parse_and_validate_mxc_uri
from tests import unittest
-from tests.server import FakeTransport, ThreadedMemoryReactorClock
+from tests.media.test_media_storage import (
+ SVG,
+ TestImage,
+ empty_file,
+ small_lossless_webp,
+ small_png,
+ small_png_with_transparency,
+)
+from tests.server import FakeChannel, FakeTransport, ThreadedMemoryReactorClock
from tests.test_utils import SMALL_PNG
from tests.unittest import override_config
@@ -1607,3 +1630,583 @@ class UnstableMediaConfigTest(unittest.HomeserverTestCase):
self.assertEqual(
channel.json_body["m.upload.size"], self.hs.config.media.max_upload_size
)
+
+
+class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ media.register_servlets,
+ login.register_servlets,
+ admin.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ config = self.default_config()
+
+ self.storage_path = self.mktemp()
+ self.media_store_path = self.mktemp()
+ os.mkdir(self.storage_path)
+ os.mkdir(self.media_store_path)
+ config["media_store_path"] = self.media_store_path
+
+ provider_config = {
+ "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+ "store_local": True,
+ "store_synchronous": False,
+ "store_remote": True,
+ "config": {"directory": self.storage_path},
+ }
+
+ config["media_storage_providers"] = [provider_config]
+
+ return self.setup_test_homeserver(config=config)
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.repo = hs.get_media_repository()
+ self.client = hs.get_federation_http_client()
+ self.store = hs.get_datastores().main
+ self.user = self.register_user("user", "pass")
+ self.tok = self.login("user", "pass")
+
+    # mock out actually reading the file body
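+    # (no real multipart body is streamed in these tests - the mocked responses
+    # declare the lengths that feed the download ratelimiter)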
+ def read_multipart_response_30MiB(*args: Any, **kwargs: Any) -> Deferred:
+ d: Deferred = defer.Deferred()
+ d.callback(MultipartResponse(b"{}", 31457280, b"img/png", None))
+ return d
+
+ def read_multipart_response_50MiB(*args: Any, **kwargs: Any) -> Deferred:
+ d: Deferred = defer.Deferred()
+ d.callback(MultipartResponse(b"{}", 31457280, b"img/png", None))
+ return d
+
+ @patch(
+ "synapse.http.matrixfederationclient.read_multipart_response",
+ read_multipart_response_30MiB,
+ )
+ def test_download_ratelimit_default(self) -> None:
+ """
+        Test remote media download ratelimiting against the default configuration - a
+        500MiB burst bucket and an 87KiB/second drain rate
+ """
+
+ # mock out actually sending the request, returns a 30MiB response
+ async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+ resp = MagicMock(spec=IResponse)
+ resp.code = 200
+ resp.length = 31457280
+ resp.headers = Headers(
+ {"Content-Type": ["multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p"]}
+ )
+ resp.phrase = b"OK"
+ return resp
+
+ self.client._send_request = _send_request # type: ignore
+
+ # first request should go through
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/v1/media/download/remote.org/abc",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ assert channel.code == 200
+
+        # the next 15 should also go through (16 x 30MiB = 480MiB, still under
+        # the 500MiB burst)
+ for i in range(15):
+ channel2 = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/media/download/remote.org/abc{i}",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ assert channel2.code == 200
+
+        # the 17th will hit the ratelimit - another 30MiB would overflow the
+        # 500MiB burst
+ channel3 = self.make_request(
+ "GET",
+ "/_matrix/client/v1/media/download/remote.org/abcd",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ assert channel3.code == 429
+
+ # however, a request from a different IP will go through
+ channel4 = self.make_request(
+ "GET",
+ "/_matrix/client/v1/media/download/remote.org/abcde",
+ shorthand=False,
+ client_ip="187.233.230.159",
+ access_token=self.tok,
+ )
+ assert channel4.code == 200
+
+        # at 87KiB/s it should take about 2 minutes for enough to drain from the
+        # bucket that another 30MiB download is authorized - the last download was
+        # blocked at 503,316,480. The next download will be authorized when the bucket
+        # hits 492,830,720 (524,288,000 total capacity - 31,457,280 download size), so
+        # 503,316,480 - 492,830,720 ~= 10,485,760 needs to drain before another
+        # download will be authorized, which will take ~2 minutes (10,485,760/89,088/60)
+ self.reactor.pump([2.0 * 60.0])
+
+ # enough has drained and next request goes through
+ channel5 = self.make_request(
+ "GET",
+ "/_matrix/client/v1/media/download/remote.org/abcdef",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ assert channel5.code == 200
+
+ @override_config(
+ {
+ "remote_media_download_per_second": "50M",
+ "remote_media_download_burst_count": "50M",
+ }
+ )
+ @patch(
+ "synapse.http.matrixfederationclient.read_multipart_response",
+ read_multipart_response_50MiB,
+ )
+ def test_download_rate_limit_config(self) -> None:
+ """
+ Test that download rate limit config options are correctly picked up and applied
+ """
+
+ async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+ resp = MagicMock(spec=IResponse)
+ resp.code = 200
+ resp.length = 52428800
+ resp.headers = Headers(
+ {"Content-Type": ["multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p"]}
+ )
+ resp.phrase = b"OK"
+ return resp
+
+ self.client._send_request = _send_request # type: ignore
+
+ # first request should go through
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/v1/media/download/remote.org/abc",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ assert channel.code == 200
+
+        # an immediate second request should fail - the bucket is already full
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/v1/media/download/remote.org/abcd",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ assert channel.code == 429
+
+ # advance half a second
+ self.reactor.pump([0.5])
+
+        # the request still fails - only about half of the 50MiB has drained
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/v1/media/download/remote.org/abcde",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ assert channel.code == 429
+
+ # advance another half second
+ self.reactor.pump([0.5])
+
+        # enough has drained from the bucket and the request is successful
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/v1/media/download/remote.org/abcdef",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ assert channel.code == 200
+
+ @patch(
+ "synapse.http.matrixfederationclient.read_multipart_response",
+ read_multipart_response_30MiB,
+ )
+ def test_download_ratelimit_max_size_sub(self) -> None:
+ """
+        Test that if no `Content-Length` is provided, the default max size is applied instead
+ """
+
+ # mock out actually sending the request
+ async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+ resp = MagicMock(spec=IResponse)
+ resp.code = 200
+ resp.length = UNKNOWN_LENGTH
+ resp.headers = Headers(
+ {"Content-Type": ["multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p"]}
+ )
+ resp.phrase = b"OK"
+ return resp
+
+ self.client._send_request = _send_request # type: ignore
+
+        # ten requests should go through, each counted at the default max size
+        # (500MiB burst / 50MiB max size = 10)
+ for i in range(10):
+ channel2 = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/media/download/remote.org/abc{i}",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ assert channel2.code == 200
+
+        # the eleventh will hit the ratelimit
+ channel3 = self.make_request(
+ "GET",
+ "/_matrix/client/v1/media/download/remote.org/abcd",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ assert channel3.code == 429
+
+ def test_file_download(self) -> None:
+ content = io.BytesIO(b"file_to_stream")
+ content_uri = self.get_success(
+ self.repo.create_content(
+ "text/plain",
+ "test_upload",
+ content,
+                14,  # len(b"file_to_stream")
+ UserID.from_string("@user_id:whatever.org"),
+ )
+ )
+ # test with a text file
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/media/download/test/{content_uri.media_id}",
+ shorthand=False,
+ access_token=self.tok,
+ )
+ self.pump()
+ self.assertEqual(200, channel.code)
+
+
+test_images = [
+ small_png,
+ small_png_with_transparency,
+ small_lossless_webp,
+ empty_file,
+ SVG,
+]
+input_values = [(x,) for x in test_images]
+
+
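+# `parameterized_class` below generates a separate test class per image in
+# `test_images`, binding each to the `test_image` class attribute, so every
+# test in `DownloadTestCase` runs once per image.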
+@parameterized_class(("test_image",), input_values)
+class DownloadTestCase(unittest.HomeserverTestCase):
+ test_image: ClassVar[TestImage]
+ servlets = [
+ media.register_servlets,
+ login.register_servlets,
+ admin.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ self.fetches: List[
+ Tuple[
+ "Deferred[Any]",
+ str,
+ str,
+ Optional[QueryParams],
+ ]
+ ] = []
+
+ def federation_get_file(
+ destination: str,
+ path: str,
+ output_stream: BinaryIO,
+ download_ratelimiter: Ratelimiter,
+ ip_address: Any,
+ max_size: int,
+ args: Optional[QueryParams] = None,
+ retry_on_dns_fail: bool = True,
+ ignore_backoff: bool = False,
+ follow_redirects: bool = False,
+ ) -> "Deferred[Tuple[int, Dict[bytes, List[bytes]], bytes]]":
+ """A mock for MatrixFederationHttpClient.federation_get_file."""
+
+ def write_to(
+ r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]]
+ ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
+ data, response = r
+ output_stream.write(data)
+ return response
+
+ def write_err(f: Failure) -> Failure:
+ f.trap(HttpResponseException)
+ output_stream.write(f.value.response)
+ return f
+
+ d: Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]]] = (
+ Deferred()
+ )
+ self.fetches.append((d, destination, path, args))
+ # Note that this callback changes the value held by d.
+ d_after_callback = d.addCallbacks(write_to, write_err)
+ return make_deferred_yieldable(d_after_callback)
+
+ def get_file(
+ destination: str,
+ path: str,
+ output_stream: BinaryIO,
+ download_ratelimiter: Ratelimiter,
+ ip_address: Any,
+ max_size: int,
+ args: Optional[QueryParams] = None,
+ retry_on_dns_fail: bool = True,
+ ignore_backoff: bool = False,
+ follow_redirects: bool = False,
+ ) -> "Deferred[Tuple[int, Dict[bytes, List[bytes]]]]":
+ """A mock for MatrixFederationHttpClient.get_file."""
+
+ def write_to(
+ r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]
+ ) -> Tuple[int, Dict[bytes, List[bytes]]]:
+ data, response = r
+ output_stream.write(data)
+ return response
+
+ def write_err(f: Failure) -> Failure:
+ f.trap(HttpResponseException)
+ output_stream.write(f.value.response)
+ return f
+
+ d: Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]] = Deferred()
+ self.fetches.append((d, destination, path, args))
+ # Note that this callback changes the value held by d.
+ d_after_callback = d.addCallbacks(write_to, write_err)
+ return make_deferred_yieldable(d_after_callback)
+
+ # Mock out the homeserver's MatrixFederationHttpClient
+ client = Mock()
+ client.federation_get_file = federation_get_file
+ client.get_file = get_file
+
+ self.storage_path = self.mktemp()
+ self.media_store_path = self.mktemp()
+ os.mkdir(self.storage_path)
+ os.mkdir(self.media_store_path)
+
+ config = self.default_config()
+ config["media_store_path"] = self.media_store_path
+ config["max_image_pixels"] = 2000000
+
+ provider_config = {
+ "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+ "store_local": True,
+ "store_synchronous": False,
+ "store_remote": True,
+ "config": {"directory": self.storage_path},
+ }
+ config["media_storage_providers"] = [provider_config]
+ config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}
+
+ hs = self.setup_test_homeserver(config=config, federation_http_client=client)
+
+ return hs
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.media_repo = hs.get_media_repository()
+
+ self.remote = "example.com"
+ self.media_id = "12345"
+
+ self.user = self.register_user("user", "pass")
+ self.tok = self.login("user", "pass")
+
+ def _req(
+ self, content_disposition: Optional[bytes], include_content_type: bool = True
+ ) -> FakeChannel:
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/media/download/{self.remote}/{self.media_id}",
+ shorthand=False,
+ await_result=False,
+ access_token=self.tok,
+ )
+ self.pump()
+
+ # We've made one fetch, to example.com, using the federation media URL
+ self.assertEqual(len(self.fetches), 1)
+ self.assertEqual(self.fetches[0][1], "example.com")
+ self.assertEqual(
+ self.fetches[0][2], "/_matrix/federation/v1/media/download/" + self.media_id
+ )
+ self.assertEqual(
+ self.fetches[0][3],
+ {"timeout_ms": "20000"},
+ )
+
+ headers = {
+ b"Content-Length": [b"%d" % (len(self.test_image.data))],
+ }
+
+ if include_content_type:
+ headers[b"Content-Type"] = [self.test_image.content_type]
+
+ if content_disposition:
+ headers[b"Content-Disposition"] = [content_disposition]
+
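+        # Resolve the pending fetch: the mocked `federation_get_file` expects a
+        # (body bytes, (length, headers, multipart json)) tuple and writes the
+        # body to the output stream.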
+ self.fetches[0][0].callback(
+ (self.test_image.data, (len(self.test_image.data), headers, b"{}"))
+ )
+
+ self.pump()
+ self.assertEqual(channel.code, 200)
+
+ return channel
+
+ def test_handle_missing_content_type(self) -> None:
+ channel = self._req(
+ b"attachment; filename=out" + self.test_image.extension,
+ include_content_type=False,
+ )
+ headers = channel.headers
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ headers.getRawHeaders(b"Content-Type"), [b"application/octet-stream"]
+ )
+
+ def test_disposition_filename_ascii(self) -> None:
+ """
+ If the filename is filename=<ascii> then Synapse will decode it as an
+ ASCII string, and use filename= in the response.
+ """
+ channel = self._req(b"attachment; filename=out" + self.test_image.extension)
+
+ headers = channel.headers
+ self.assertEqual(
+ headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
+ )
+ self.assertEqual(
+ headers.getRawHeaders(b"Content-Disposition"),
+ [
+ (b"inline" if self.test_image.is_inline else b"attachment")
+ + b"; filename=out"
+ + self.test_image.extension
+ ],
+ )
+
+ def test_disposition_filenamestar_utf8escaped(self) -> None:
+ """
+ If the filename is filename=*utf8''<utf8 escaped> then Synapse will
+ correctly decode it as the UTF-8 string, and use filename* in the
+ response.
+ """
+ filename = parse.quote("\u2603".encode()).encode("ascii")
+ channel = self._req(
+ b"attachment; filename*=utf-8''" + filename + self.test_image.extension
+ )
+
+ headers = channel.headers
+ self.assertEqual(
+ headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
+ )
+ self.assertEqual(
+ headers.getRawHeaders(b"Content-Disposition"),
+ [
+ (b"inline" if self.test_image.is_inline else b"attachment")
+ + b"; filename*=utf-8''"
+ + filename
+ + self.test_image.extension
+ ],
+ )
+
+ def test_disposition_none(self) -> None:
+ """
+ If there is no filename, Content-Disposition should only
+ be a disposition type.
+ """
+ channel = self._req(None)
+
+ headers = channel.headers
+ self.assertEqual(
+ headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
+ )
+ self.assertEqual(
+ headers.getRawHeaders(b"Content-Disposition"),
+ [b"inline" if self.test_image.is_inline else b"attachment"],
+ )
+
+ def test_x_robots_tag_header(self) -> None:
+ """
+ Tests that the `X-Robots-Tag` header is present, which informs web crawlers
+ to not index, archive, or follow links in media.
+ """
+ channel = self._req(b"attachment; filename=out" + self.test_image.extension)
+
+ headers = channel.headers
+ self.assertEqual(
+ headers.getRawHeaders(b"X-Robots-Tag"),
+ [b"noindex, nofollow, noarchive, noimageindex"],
+ )
+
+ def test_cross_origin_resource_policy_header(self) -> None:
+ """
+ Test that the Cross-Origin-Resource-Policy header is set to "cross-origin"
+ allowing web clients to embed media from the downloads API.
+ """
+ channel = self._req(b"attachment; filename=out" + self.test_image.extension)
+
+ headers = channel.headers
+
+ self.assertEqual(
+ headers.getRawHeaders(b"Cross-Origin-Resource-Policy"),
+ [b"cross-origin"],
+ )
+
+ def test_unknown_federation_endpoint(self) -> None:
+ """
+        Test that if the download request to the remote federation endpoint returns
+        a 404, we fall back to the _matrix/media endpoint
+ """
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/media/download/{self.remote}/{self.media_id}",
+ shorthand=False,
+ await_result=False,
+ access_token=self.tok,
+ )
+ self.pump()
+
+        # We've made one fetch, to example.com, using the federation media URL
+ self.assertEqual(len(self.fetches), 1)
+ self.assertEqual(self.fetches[0][1], "example.com")
+ self.assertEqual(
+ self.fetches[0][2], f"/_matrix/federation/v1/media/download/{self.media_id}"
+ )
+
+ # The result which says the endpoint is unknown.
+ unknown_endpoint = b'{"errcode":"M_UNRECOGNIZED","error":"Unknown request"}'
+ self.fetches[0][0].errback(
+ HttpResponseException(404, "NOT FOUND", unknown_endpoint)
+ )
+
+ self.pump()
+
+ # There should now be another request to the _matrix/media/v3/download URL.
+ self.assertEqual(len(self.fetches), 2)
+ self.assertEqual(self.fetches[1][1], "example.com")
+ self.assertEqual(
+ self.fetches[1][2],
+ f"/_matrix/media/v3/download/example.com/{self.media_id}",
+ )
+
+ headers = {
+ b"Content-Length": [b"%d" % (len(self.test_image.data))],
+ }
+
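+        # The fallback request goes through the `get_file` mock, which expects a
+        # (body bytes, (length, headers)) tuple - no multipart json element.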
+ self.fetches[1][0].callback(
+ (self.test_image.data, (len(self.test_image.data), headers))
+ )
+
+ self.pump()
+ self.assertEqual(channel.code, 200)
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index 12c11f342c..966c622e14 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -31,12 +31,13 @@ from synapse.api.constants import (
AccountDataTypes,
EventContentFields,
EventTypes,
+ HistoryVisibility,
ReceiptTypes,
RelationTypes,
)
from synapse.rest.client import devices, knock, login, read_marker, receipts, room, sync
from synapse.server import HomeServer
-from synapse.types import JsonDict, RoomStreamToken, StreamKeyType
+from synapse.types import JsonDict, RoomStreamToken, StreamKeyType, StreamToken, UserID
from synapse.util import Clock
from tests import unittest
@@ -1326,7 +1327,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
def test_sync_list(self) -> None:
"""
- Test that room IDs show up in the Sliding Sync lists
+ Test that room IDs show up in the Sliding Sync `lists`
"""
alice_user_id = self.register_user("alice", "correcthorse")
alice_access_token = self.login(alice_user_id, "correcthorse")
@@ -1425,15 +1426,13 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
channel.await_result(timeout_ms=200)
self.assertEqual(channel.code, 200, channel.json_body)
- # We expect the `next_pos` in the result to be the same as what we requested
+ # We expect the next `pos` in the result to be the same as what we requested
# with because we weren't able to find anything new yet.
- self.assertEqual(
- channel.json_body["next_pos"], future_position_token_serialized
- )
+ self.assertEqual(channel.json_body["pos"], future_position_token_serialized)
def test_filter_list(self) -> None:
"""
- Test that filters apply to lists
+ Test that filters apply to `lists`
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
@@ -1564,7 +1563,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
def test_sort_list(self) -> None:
"""
- Test that the lists are sorted by `stream_ordering`
+ Test that the `lists` are sorted by `stream_ordering`
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
@@ -1618,3 +1617,1067 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
],
channel.json_body["lists"]["foo-list"],
)
+
+ def test_sliced_windows(self) -> None:
+ """
+ Test that the `lists` `ranges` are sliced correctly. Both sides of each range
+ are inclusive.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ _room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+ room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+ room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+ # Make the Sliding Sync request for a single room
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 0]],
+ "required_state": [
+ ["m.room.join_rules", ""],
+ ["m.room.history_visibility", ""],
+ ["m.space.child", "*"],
+ ],
+ "timeline_limit": 1,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Make sure it has the foo-list we requested
+ self.assertListEqual(
+ list(channel.json_body["lists"].keys()),
+ ["foo-list"],
+ channel.json_body["lists"].keys(),
+ )
+ # Make sure the list is sorted in the way we expect
+ self.assertListEqual(
+ list(channel.json_body["lists"]["foo-list"]["ops"]),
+ [
+ {
+ "op": "SYNC",
+ "range": [0, 0],
+ "room_ids": [room_id3],
+ }
+ ],
+ channel.json_body["lists"]["foo-list"],
+ )
+
+ # Make the Sliding Sync request for the first two rooms
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ ["m.room.join_rules", ""],
+ ["m.room.history_visibility", ""],
+ ["m.space.child", "*"],
+ ],
+ "timeline_limit": 1,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Make sure it has the foo-list we requested
+ self.assertListEqual(
+ list(channel.json_body["lists"].keys()),
+ ["foo-list"],
+ channel.json_body["lists"].keys(),
+ )
+ # Make sure the list is sorted in the way we expect
+ self.assertListEqual(
+ list(channel.json_body["lists"]["foo-list"]["ops"]),
+ [
+ {
+ "op": "SYNC",
+ "range": [0, 1],
+ "room_ids": [room_id3, room_id2],
+ }
+ ],
+ channel.json_body["lists"]["foo-list"],
+ )
+
+ def test_rooms_limited_initial_sync(self) -> None:
+ """
+ Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit`
+ on initial sync.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity1", tok=user2_tok)
+ self.helper.send(room_id1, "activity2", tok=user2_tok)
+ event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok)
+ event_pos3 = self.get_success(
+ self.store.get_position_for_event(event_response3["event_id"])
+ )
+ event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok)
+ event_pos4 = self.get_success(
+ self.store.get_position_for_event(event_response4["event_id"])
+ )
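+        # (positions recorded above so we can assert where `prev_batch` lands below)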
+ event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok)
+ user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 3,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # We expect to saturate the `timeline_limit` (there are more than 3 messages in the room)
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["limited"],
+ True,
+ channel.json_body["rooms"][room_id1],
+ )
+ # Check to make sure the latest events are returned
+ self.assertEqual(
+ [
+ event["event_id"]
+ for event in channel.json_body["rooms"][room_id1]["timeline"]
+ ],
+ [
+ event_response4["event_id"],
+ event_response5["event_id"],
+ user1_join_response["event_id"],
+ ],
+ channel.json_body["rooms"][room_id1]["timeline"],
+ )
+
+ # Check to make sure the `prev_batch` points at the right place
+ prev_batch_token = self.get_success(
+ StreamToken.from_string(
+ self.store, channel.json_body["rooms"][room_id1]["prev_batch"]
+ )
+ )
+ prev_batch_room_stream_token_serialized = self.get_success(
+ prev_batch_token.room_key.to_string(self.store)
+ )
+ # If we use the `prev_batch` token to look backwards, we should see `event3`
+ # next so make sure the token encompasses it
+ self.assertEqual(
+ event_pos3.persisted_after(prev_batch_token.room_key),
+ False,
+ f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be >= event_pos3={self.get_success(event_pos3.to_room_stream_token().to_string(self.store))}",
+ )
+ # If we use the `prev_batch` token to look backwards, we shouldn't see `event4`
+ # anymore since it was just returned in this response.
+ self.assertEqual(
+ event_pos4.persisted_after(prev_batch_token.room_key),
+ True,
+ f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be < event_pos4={self.get_success(event_pos4.to_room_stream_token().to_string(self.store))}",
+ )
+
+ # With no `from_token` (initial sync), it's all historical since there is no
+ # "live" range
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["num_live"],
+ 0,
+ channel.json_body["rooms"][room_id1],
+ )
+
+ def test_rooms_not_limited_initial_sync(self) -> None:
+ """
+ Test that we mark `rooms` as `limited=False` when there are no more events to
+ paginate to.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity1", tok=user2_tok)
+ self.helper.send(room_id1, "activity2", tok=user2_tok)
+ self.helper.send(room_id1, "activity3", tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ # Make the Sliding Sync request
+ timeline_limit = 100
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": timeline_limit,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # The timeline should be `limited=False` because we have all of the events (no
+ # more to paginate to)
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["limited"],
+ False,
+ channel.json_body["rooms"][room_id1],
+ )
+ expected_number_of_events = 9
+ # We're just looking to make sure we got all of the events before hitting the `timeline_limit`
+ self.assertEqual(
+ len(channel.json_body["rooms"][room_id1]["timeline"]),
+ expected_number_of_events,
+ channel.json_body["rooms"][room_id1]["timeline"],
+ )
+ self.assertLessEqual(expected_number_of_events, timeline_limit)
+
+ # With no `from_token` (initial sync), it's all historical since there is no
+ # "live" token range.
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["num_live"],
+ 0,
+ channel.json_body["rooms"][room_id1],
+ )
+
+ def test_rooms_incremental_sync(self) -> None:
+ """
+ Test `rooms` data during an incremental sync after an initial sync.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.send(room_id1, "activity before initial sync1", tok=user2_tok)
+
+ # Make an initial Sliding Sync request to grab a token. This is also a sanity
+ # check that we can go from initial to incremental sync.
+ sync_params = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 3,
+ }
+ }
+ }
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ sync_params,
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+ next_pos = channel.json_body["pos"]
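+        # `pos` is the position token to hand back via the `?pos=` query parameter
+        # on the next (incremental) request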
+
+ # Send some events but don't send enough to saturate the `timeline_limit`.
+ # We want to later test that we only get the new events since the `next_pos`
+ event_response2 = self.helper.send(room_id1, "activity after2", tok=user2_tok)
+ event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
+
+ # Make an incremental Sliding Sync request (what we're trying to test)
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint + f"?pos={next_pos}",
+ sync_params,
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We only expect to see the new events since the last sync, which aren't
+        # enough to fill up the `timeline_limit`.
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["limited"],
+ False,
+ f'Our `timeline_limit` was {sync_params["lists"]["foo-list"]["timeline_limit"]} '
+ + f'and {len(channel.json_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
+ + str(channel.json_body["rooms"][room_id1]),
+ )
+ # Check to make sure the latest events are returned
+ self.assertEqual(
+ [
+ event["event_id"]
+ for event in channel.json_body["rooms"][room_id1]["timeline"]
+ ],
+ [
+ event_response2["event_id"],
+ event_response3["event_id"],
+ ],
+ channel.json_body["rooms"][room_id1]["timeline"],
+ )
+
+ # All events are "live"
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["num_live"],
+ 2,
+ channel.json_body["rooms"][room_id1],
+ )
+
+ def test_rooms_newly_joined_incremental_sync(self) -> None:
+ """
+        Test that when we make an incremental sync with a `newly_joined` room, we are
+        able to see some historical events before the `from_token`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity before token1", tok=user2_tok)
+ event_response2 = self.helper.send(
+ room_id1, "activity before token2", tok=user2_tok
+ )
+
+ from_token = self.event_sources.get_current_token()
+
+ # Join the room after the `from_token` which will make us consider this room as
+ # `newly_joined`.
+ user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Send some events after the token but don't send enough to saturate the
+        # `timeline_limit`; historical events should fill out the rest below.
+ event_response3 = self.helper.send(
+ room_id1, "activity after token3", tok=user2_tok
+ )
+ event_response4 = self.helper.send(
+ room_id1, "activity after token4", tok=user2_tok
+ )
+
+ # The `timeline_limit` is set to 4 so we can at least see one historical event
+ # before the `from_token`. We should see historical events because this is a
+ # `newly_joined` room.
+ timeline_limit = 4
+ # Make an incremental Sliding Sync request (what we're trying to test)
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint
+ + f"?pos={self.get_success(from_token.to_string(self.store))}",
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": timeline_limit,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # We should see the new events and the rest should be filled with historical
+ # events which will make us `limited=True` since there are more to paginate to.
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["limited"],
+ True,
+ f"Our `timeline_limit` was {timeline_limit} "
+ + f'and {len(channel.json_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
+ + str(channel.json_body["rooms"][room_id1]),
+ )
+ # Check to make sure that the "live" and historical events are returned
+ self.assertEqual(
+ [
+ event["event_id"]
+ for event in channel.json_body["rooms"][room_id1]["timeline"]
+ ],
+ [
+ event_response2["event_id"],
+ user1_join_response["event_id"],
+ event_response3["event_id"],
+ event_response4["event_id"],
+ ],
+ channel.json_body["rooms"][room_id1]["timeline"],
+ )
+
+ # Only events after the `from_token` are "live" (join, event3, event4)
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["num_live"],
+ 3,
+ channel.json_body["rooms"][room_id1],
+ )
+
+ def test_rooms_invite_shared_history_initial_sync(self) -> None:
+ """
+ Test that `rooms` we are invited to have some stripped `invite_state` during an
+ initial sync.
+
+        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+        but we also shouldn't see any timeline events because the history visibility is
+        `shared` and we haven't joined the room yet.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user1 = UserID.from_string(user1_id)
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+ user2 = UserID.from_string(user2_id)
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Ensure we're testing with a room with `shared` history visibility, which
+        # means history is only visible once you actually join the room.
+ history_visibility_response = self.helper.get_state(
+ room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+ )
+ self.assertEqual(
+ history_visibility_response.get("history_visibility"),
+ HistoryVisibility.SHARED,
+ )
+
+ self.helper.send(room_id1, "activity before1", tok=user2_tok)
+ self.helper.send(room_id1, "activity before2", tok=user2_tok)
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity after3", tok=user2_tok)
+ self.helper.send(room_id1, "activity after4", tok=user2_tok)
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 3,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # `timeline` is omitted for `invite` rooms with `stripped_state`
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("timeline"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("num_live"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("limited"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("prev_batch"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # We should have some `stripped_state` so the potential joiner can identify the
+ # room (we don't care about the order).
+ self.assertCountEqual(
+ channel.json_body["rooms"][room_id1]["invite_state"],
+ [
+ {
+ "content": {"creator": user2_id, "room_version": "10"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.create",
+ },
+ {
+ "content": {"join_rule": "public"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.join_rules",
+ },
+ {
+ "content": {"displayname": user2.localpart, "membership": "join"},
+ "sender": user2_id,
+ "state_key": user2_id,
+ "type": "m.room.member",
+ },
+ {
+ "content": {"displayname": user1.localpart, "membership": "invite"},
+ "sender": user2_id,
+ "state_key": user1_id,
+ "type": "m.room.member",
+ },
+ ],
+ channel.json_body["rooms"][room_id1]["invite_state"],
+ )
+
+ def test_rooms_invite_shared_history_incremental_sync(self) -> None:
+ """
+ Test that `rooms` we are invited to have some stripped `invite_state` during an
+ incremental sync.
+
+        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+        but we also shouldn't see any timeline events because the history visibility is
+        `shared` and we haven't joined the room yet.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user1 = UserID.from_string(user1_id)
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+ user2 = UserID.from_string(user2_id)
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Ensure we're testing with a room with `shared` history visibility, which
+        # means history is only visible once you actually join the room.
+ history_visibility_response = self.helper.get_state(
+ room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+ )
+ self.assertEqual(
+ history_visibility_response.get("history_visibility"),
+ HistoryVisibility.SHARED,
+ )
+
+ self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
+ self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
+ self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
+
+ from_token = self.event_sources.get_current_token()
+
+ self.helper.send(room_id1, "activity after token5", tok=user2_tok)
+ self.helper.send(room_id1, "activity after toekn6", tok=user2_tok)
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint
+ + f"?pos={self.get_success(from_token.to_string(self.store))}",
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 3,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # `timeline` is omitted for `invite` rooms with `stripped_state`
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("timeline"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("num_live"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("limited"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("prev_batch"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # We should have some `stripped_state` so the potential joiner can identify the
+ # room (we don't care about the order).
+ self.assertCountEqual(
+ channel.json_body["rooms"][room_id1]["invite_state"],
+ [
+ {
+ "content": {"creator": user2_id, "room_version": "10"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.create",
+ },
+ {
+ "content": {"join_rule": "public"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.join_rules",
+ },
+ {
+ "content": {"displayname": user2.localpart, "membership": "join"},
+ "sender": user2_id,
+ "state_key": user2_id,
+ "type": "m.room.member",
+ },
+ {
+ "content": {"displayname": user1.localpart, "membership": "invite"},
+ "sender": user2_id,
+ "state_key": user1_id,
+ "type": "m.room.member",
+ },
+ ],
+ channel.json_body["rooms"][room_id1]["invite_state"],
+ )
+
+ def test_rooms_invite_world_readable_history_initial_sync(self) -> None:
+ """
+ Test that `rooms` we are invited to have some stripped `invite_state` during an
+ initial sync.
+
+ This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+ but depending on the semantics we decide, we could potentially see some
+ historical events before/after the `from_token` because the history is
+ `world_readable`. Same situation for events after the `from_token` if the
+ history visibility was set to `invited`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user1 = UserID.from_string(user1_id)
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+ user2 = UserID.from_string(user2_id)
+
+ room_id1 = self.helper.create_room_as(
+ user2_id,
+ tok=user2_tok,
+ extra_content={
+ "preset": "public_chat",
+ "initial_state": [
+ {
+ "content": {
+ "history_visibility": HistoryVisibility.WORLD_READABLE
+ },
+ "state_key": "",
+ "type": EventTypes.RoomHistoryVisibility,
+ }
+ ],
+ },
+ )
+ # Ensure we're testing with a room with `world_readable` history visibility
+ # which means events are visible to anyone even without membership.
+ history_visibility_response = self.helper.get_state(
+ room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+ )
+ self.assertEqual(
+ history_visibility_response.get("history_visibility"),
+ HistoryVisibility.WORLD_READABLE,
+ )
+
+ self.helper.send(room_id1, "activity before1", tok=user2_tok)
+ self.helper.send(room_id1, "activity before2", tok=user2_tok)
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity after3", tok=user2_tok)
+ self.helper.send(room_id1, "activity after4", tok=user2_tok)
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ # Large enough to see the latest events and before the invite
+ "timeline_limit": 4,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # `timeline` is omitted for `invite` rooms with `stripped_state`
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("timeline"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("num_live"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("limited"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("prev_batch"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # We should have some `stripped_state` so the potential joiner can identify the
+ # room (we don't care about the order).
+ self.assertCountEqual(
+ channel.json_body["rooms"][room_id1]["invite_state"],
+ [
+ {
+ "content": {"creator": user2_id, "room_version": "10"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.create",
+ },
+ {
+ "content": {"join_rule": "public"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.join_rules",
+ },
+ {
+ "content": {"displayname": user2.localpart, "membership": "join"},
+ "sender": user2_id,
+ "state_key": user2_id,
+ "type": "m.room.member",
+ },
+ {
+ "content": {"displayname": user1.localpart, "membership": "invite"},
+ "sender": user2_id,
+ "state_key": user1_id,
+ "type": "m.room.member",
+ },
+ ],
+ channel.json_body["rooms"][room_id1]["invite_state"],
+ )
+
+ def test_rooms_invite_world_readable_history_incremental_sync(self) -> None:
+ """
+ Test that `rooms` we are invited to have some stripped `invite_state` during an
+ incremental sync.
+
+ This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+ but depending on the semantics we decide, we could potentially see some
+ historical events before/after the `from_token` because the history is
+ `world_readable`. Same situation for events after the `from_token` if the
+ history visibility was set to `invited`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user1 = UserID.from_string(user1_id)
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+ user2 = UserID.from_string(user2_id)
+
+ room_id1 = self.helper.create_room_as(
+ user2_id,
+ tok=user2_tok,
+ extra_content={
+ "preset": "public_chat",
+ "initial_state": [
+ {
+ "content": {
+ "history_visibility": HistoryVisibility.WORLD_READABLE
+ },
+ "state_key": "",
+ "type": EventTypes.RoomHistoryVisibility,
+ }
+ ],
+ },
+ )
+ # Ensure we're testing with a room with `world_readable` history visibility
+ # which means events are visible to anyone even without membership.
+ history_visibility_response = self.helper.get_state(
+ room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+ )
+ self.assertEqual(
+ history_visibility_response.get("history_visibility"),
+ HistoryVisibility.WORLD_READABLE,
+ )
+
+ self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
+ self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
+ self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
+
+ from_token = self.event_sources.get_current_token()
+
+ self.helper.send(room_id1, "activity after token5", tok=user2_tok)
+ self.helper.send(room_id1, "activity after toekn6", tok=user2_tok)
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint
+ + f"?pos={self.get_success(from_token.to_string(self.store))}",
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ # Large enough to see the latest events and before the invite
+ "timeline_limit": 4,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # `timeline` is omitted for `invite` rooms with `stripped_state`
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("timeline"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("num_live"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("limited"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ channel.json_body["rooms"][room_id1].get("prev_batch"),
+ channel.json_body["rooms"][room_id1],
+ )
+ # We should have some `stripped_state` so the potential joiner can identify the
+ # room (we don't care about the order).
+ self.assertCountEqual(
+ channel.json_body["rooms"][room_id1]["invite_state"],
+ [
+ {
+ "content": {"creator": user2_id, "room_version": "10"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.create",
+ },
+ {
+ "content": {"join_rule": "public"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.join_rules",
+ },
+ {
+ "content": {"displayname": user2.localpart, "membership": "join"},
+ "sender": user2_id,
+ "state_key": user2_id,
+ "type": "m.room.member",
+ },
+ {
+ "content": {"displayname": user1.localpart, "membership": "invite"},
+ "sender": user2_id,
+ "state_key": user1_id,
+ "type": "m.room.member",
+ },
+ ],
+ channel.json_body["rooms"][room_id1]["invite_state"],
+ )
+
+ def test_rooms_ban_initial_sync(self) -> None:
+ """
+        Test that `rooms` we are banned from in an initial sync only allow us to see
+        timeline events up to the ban event.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity before1", tok=user2_tok)
+ self.helper.send(room_id1, "activity before2", tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
+ event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
+ user1_ban_response = self.helper.ban(
+ room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+
+ self.helper.send(room_id1, "activity after5", tok=user2_tok)
+ self.helper.send(room_id1, "activity after6", tok=user2_tok)
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint,
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 3,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # We should see events before the ban but not after
+ self.assertEqual(
+ [
+ event["event_id"]
+ for event in channel.json_body["rooms"][room_id1]["timeline"]
+ ],
+ [
+ event_response3["event_id"],
+ event_response4["event_id"],
+ user1_ban_response["event_id"],
+ ],
+ channel.json_body["rooms"][room_id1]["timeline"],
+ )
+ # No "live" events in an initial sync (no `from_token` to define the "live"
+ # range)
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["num_live"],
+ 0,
+ channel.json_body["rooms"][room_id1],
+ )
+ # There are more events to paginate to
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["limited"],
+ True,
+ channel.json_body["rooms"][room_id1],
+ )
+
+ def test_rooms_ban_incremental_sync1(self) -> None:
+ """
+        Test that `rooms` we are banned from during the next incremental sync only
+        allow us to see timeline events up to the ban event.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity before1", tok=user2_tok)
+ self.helper.send(room_id1, "activity before2", tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ from_token = self.event_sources.get_current_token()
+
+ event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
+ event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
+ # The ban is within the token range (between the `from_token` and the sliding
+ # sync request)
+ user1_ban_response = self.helper.ban(
+ room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+
+ self.helper.send(room_id1, "activity after5", tok=user2_tok)
+ self.helper.send(room_id1, "activity after6", tok=user2_tok)
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint
+ + f"?pos={self.get_success(from_token.to_string(self.store))}",
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 4,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # We should see events before the ban but not after
+ self.assertEqual(
+ [
+ event["event_id"]
+ for event in channel.json_body["rooms"][room_id1]["timeline"]
+ ],
+ [
+ event_response3["event_id"],
+ event_response4["event_id"],
+ user1_ban_response["event_id"],
+ ],
+ channel.json_body["rooms"][room_id1]["timeline"],
+ )
+        # All events in the incremental sync are "live"
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["num_live"],
+ 3,
+ channel.json_body["rooms"][room_id1],
+ )
+        # There aren't any more events to paginate to in this range
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["limited"],
+ False,
+ channel.json_body["rooms"][room_id1],
+ )
+
+ def test_rooms_ban_incremental_sync2(self) -> None:
+ """
+ Test that `rooms` we are banned from before the incremental sync don't return
+ any events in the timeline.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity before1", tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ self.helper.send(room_id1, "activity after2", tok=user2_tok)
+ # The ban is before we get our `from_token`
+ self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+ self.helper.send(room_id1, "activity after3", tok=user2_tok)
+
+ from_token = self.event_sources.get_current_token()
+
+ self.helper.send(room_id1, "activity after4", tok=user2_tok)
+
+ # Make the Sliding Sync request
+ channel = self.make_request(
+ "POST",
+ self.sync_endpoint
+ + f"?pos={self.get_success(from_token.to_string(self.store))}",
+ {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 4,
+ }
+ }
+ },
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Nothing to see for this banned user in the room in the token range
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["timeline"],
+ [],
+ channel.json_body["rooms"][room_id1]["timeline"],
+ )
+ # No events returned in the timeline so nothing is "live"
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["num_live"],
+ 0,
+ channel.json_body["rooms"][room_id1],
+ )
+        # There aren't any more events to paginate to in this range
+ self.assertEqual(
+ channel.json_body["rooms"][room_id1]["limited"],
+ False,
+ channel.json_body["rooms"][room_id1],
+ )
diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index f0ba40a1f1..e43140720d 100644
--- a/tests/rest/client/utils.py
+++ b/tests/rest/client/utils.py
@@ -261,9 +261,9 @@ class RestHelper:
targ: str,
expect_code: int = HTTPStatus.OK,
tok: Optional[str] = None,
- ) -> None:
+ ) -> JsonDict:
"""A convenience helper: `change_membership` with `membership` preset to "ban"."""
- self.change_membership(
+ return self.change_membership(
room=room,
src=src,
targ=targ,
|