diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index 46d022092e..a7e6cdd66a 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -422,6 +422,18 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
"exclusive_as_user", "password", self.exclusive_as_user_device_id
)
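+
+        # Register two further users whose IDs fall within the exclusive
+        # namespaces used by the "many users" to-device test below.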
+ self.exclusive_as_user_2_device_id = "exclusive_as_device_2"
+ self.exclusive_as_user_2 = self.register_user("exclusive_as_user_2", "password")
+ self.exclusive_as_user_2_token = self.login(
+ "exclusive_as_user_2", "password", self.exclusive_as_user_2_device_id
+ )
+
+ self.exclusive_as_user_3_device_id = "exclusive_as_device_3"
+ self.exclusive_as_user_3 = self.register_user("exclusive_as_user_3", "password")
+ self.exclusive_as_user_3_token = self.login(
+ "exclusive_as_user_3", "password", self.exclusive_as_user_3_device_id
+ )
+
def _notify_interested_services(self) -> None:
# This is normally set in `notify_interested_services` but we need to call the
# internal async version so the reactor gets pushed to completion.
@@ -849,6 +861,119 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
for count in service_id_to_message_count.values():
self.assertEqual(count, number_of_messages)
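+
+    # MSC2409 (sending to-device messages to application services) is
+    # experimental, so it must be explicitly enabled for these tests.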
+ @unittest.override_config(
+ {"experimental_features": {"msc2409_to_device_messages_enabled": True}}
+ )
+ def test_application_services_receive_local_to_device_for_many_users(self) -> None:
+ """
+ Test that when a user sends a to-device message to many users
+ in an application service's user namespace, the
+ application service will receive all of them.
+ """
+ interested_appservice = self._register_application_service(
+ namespaces={
+ ApplicationService.NS_USERS: [
+ {
+ "regex": "@exclusive_as_user:.+",
+ "exclusive": True,
+ },
+ {
+ "regex": "@exclusive_as_user_2:.+",
+ "exclusive": True,
+ },
+ {
+ "regex": "@exclusive_as_user_3:.+",
+ "exclusive": True,
+ },
+ ],
+ },
+ )
+
+ # Have local_user send a to-device message to exclusive_as_users
+ message_content = {"some_key": "some really interesting value"}
+ chan = self.make_request(
+ "PUT",
+ "/_matrix/client/r0/sendToDevice/m.room_key_request/3",
+ content={
+ "messages": {
+ self.exclusive_as_user: {
+ self.exclusive_as_user_device_id: message_content
+ },
+ self.exclusive_as_user_2: {
+ self.exclusive_as_user_2_device_id: message_content
+ },
+ self.exclusive_as_user_3: {
+ self.exclusive_as_user_3_device_id: message_content
+ },
+ }
+ },
+ access_token=self.local_user_token,
+ )
+ self.assertEqual(chan.code, 200, chan.result)
+
+        # Have each exclusive_as user send a to-device message back to local_user
+ for user_token in [
+ self.exclusive_as_user_token,
+ self.exclusive_as_user_2_token,
+ self.exclusive_as_user_3_token,
+ ]:
+ chan = self.make_request(
+ "PUT",
+ "/_matrix/client/r0/sendToDevice/m.room_key_request/4",
+ content={
+ "messages": {
+ self.local_user: {self.local_user_device_id: message_content}
+ }
+ },
+ access_token=user_token,
+ )
+ self.assertEqual(chan.code, 200, chan.result)
+
+        # Check that our application service - which is interested in the
+        # exclusive_as users - received the to-device messages as part of an AS
+        # transaction.
+        #
+        # Only the local_user -> exclusive_as_user to-device messages should
+        # have been forwarded to the AS; the replies sent back to local_user
+        # should not.
+ self.send_mock.assert_called_once()
+ (
+ service,
+ _events,
+ _ephemeral,
+ to_device_messages,
+ _otks,
+ _fbks,
+ _device_list_summary,
+ ) = self.send_mock.call_args[0]
+
+        # Assert that the transaction was sent to our interested application service
+ self.assertEqual(service, interested_appservice)
+
+ # Assert expected number of messages
+ self.assertEqual(len(to_device_messages), 3)
+
+ for device_msg in to_device_messages:
+ self.assertEqual(device_msg["type"], "m.room_key_request")
+ self.assertEqual(device_msg["sender"], self.local_user)
+ self.assertEqual(device_msg["content"], message_content)
+
+ self.assertEqual(to_device_messages[0]["to_user_id"], self.exclusive_as_user)
+ self.assertEqual(
+ to_device_messages[0]["to_device_id"],
+ self.exclusive_as_user_device_id,
+ )
+
+ self.assertEqual(to_device_messages[1]["to_user_id"], self.exclusive_as_user_2)
+ self.assertEqual(
+ to_device_messages[1]["to_device_id"],
+ self.exclusive_as_user_2_device_id,
+ )
+
+ self.assertEqual(to_device_messages[2]["to_user_id"], self.exclusive_as_user_3)
+ self.assertEqual(
+ to_device_messages[2]["to_device_id"],
+ self.exclusive_as_user_3_device_id,
+ )
+
def _register_application_service(
self,
namespaces: Optional[Dict[str, Iterable[Dict]]] = None,
diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py
index 8582b1cd1e..13e2cd153a 100644
--- a/tests/handlers/test_cas.py
+++ b/tests/handlers/test_cas.py
@@ -197,6 +197,23 @@ class CasHandlerTestCase(HomeserverTestCase):
auth_provider_session_id=None,
)
+
+    @override_config({"cas_config": {"enable_registration": False}})
+ def test_map_cas_user_does_not_register_new_user(self) -> None:
+ """Ensures new users are not registered if the enabled registration flag is disabled."""
+
+ # stub out the auth handler
+ auth_handler = self.hs.get_auth_handler()
+ auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign]
+
+ cas_response = CasResponse("test_user", {})
+ request = _mock_request()
+ self.get_success(
+ self.handler._handle_cas_response(request, cas_response, "redirect_uri", "")
+ )
+
+        # check that the auth handler was not called, as expected
+ auth_handler.complete_sso_login.assert_not_called()
+
def _mock_request() -> Mock:
"""Returns a mock which will stand in as a SynapseRequest"""
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 55a4f95ef3..79d327499b 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -30,6 +30,7 @@ from synapse.server import HomeServer
from synapse.storage.databases.main.appservice import _make_exclusive_regex
from synapse.types import JsonDict, create_requester
from synapse.util import Clock
+from synapse.util.task_scheduler import TaskScheduler
from tests import unittest
from tests.unittest import override_config
@@ -49,6 +50,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
assert isinstance(handler, DeviceHandler)
self.handler = handler
self.store = hs.get_datastores().main
+ self.device_message_handler = hs.get_device_message_handler()
return hs
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
@@ -211,6 +213,51 @@ class DeviceTestCase(unittest.HomeserverTestCase):
)
self.assertIsNone(res)
+
+    def test_delete_device_and_big_device_inbox(self) -> None:
+ """Check that deleting a big device inbox is staged and batched asynchronously."""
+ DEVICE_ID = "abc"
+ sender = "@sender:" + self.hs.hostname
+ receiver = "@receiver:" + self.hs.hostname
+ self._record_user(sender, DEVICE_ID, DEVICE_ID)
+ self._record_user(receiver, DEVICE_ID, DEVICE_ID)
+
+        # queue more messages in the inbox than a single delete batch will
+        # handle, so that the remaining 10 have to be cleaned up by a scheduled
+        # background task
+ requester = create_requester(sender, device_id=DEVICE_ID)
+ for i in range(DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT + 10):
+ self.get_success(
+ self.device_message_handler.send_device_message(
+ requester, "message_type", {receiver: {"*": {"val": i}}}
+ )
+ )
+
+ # delete the device
+ self.get_success(self.handler.delete_devices(receiver, [DEVICE_ID]))
+
+        # up to DEVICE_MSGS_DELETE_BATCH_LIMIT messages should be deleted
+        # straight away
+ res = self.get_success(
+ self.store.db_pool.simple_select_list(
+ table="device_inbox",
+ keyvalues={"user_id": receiver},
+ retcols=("user_id", "device_id", "stream_id"),
+ desc="get_device_id_from_device_inbox",
+ )
+ )
+ self.assertEqual(10, len(res))
+
+        # wait for the task scheduler to do a second delete pass
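+        # (SCHEDULE_INTERVAL_MS is in milliseconds, while reactor.advance
+        # expects seconds, hence the division by 1000.)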
+        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS / 1000)
+
+ # remaining messages should now be deleted
+ res = self.get_success(
+ self.store.db_pool.simple_select_list(
+ table="device_inbox",
+ keyvalues={"user_id": receiver},
+ retcols=("user_id", "device_id", "stream_id"),
+ desc="get_device_id_from_device_inbox",
+ )
+ )
+ self.assertEqual(0, len(res))
+
def test_update_device(self) -> None:
self._record_users()
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 21d63ab1f2..4fc0742413 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -262,7 +262,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
if (ev.type, ev.state_key)
in {("m.room.create", ""), ("m.room.member", remote_server_user_id)}
]
- for _ in range(0, 8):
+ for _ in range(8):
event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index 88a16193a3..638787b029 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -21,11 +21,12 @@ from signedjson.key import generate_signing_key
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventTypes, Membership, PresenceState
-from synapse.api.presence import UserPresenceState
+from synapse.api.presence import UserDevicePresenceState, UserPresenceState
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events.builder import EventBuilder
from synapse.federation.sender import FederationSender
from synapse.handlers.presence import (
+ BUSY_ONLINE_TIMEOUT,
EXTERNAL_PROCESS_EXPIRY,
FEDERATION_PING_INTERVAL,
FEDERATION_TIMEOUT,
@@ -352,6 +353,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
def test_idle_timer(self) -> None:
user_id = "@foo:bar"
+ device_id = "dev-1"
status_msg = "I'm here!"
now = 5000000
@@ -362,8 +364,21 @@ class PresenceTimeoutTestCase(unittest.TestCase):
last_user_sync_ts=now,
status_msg=status_msg,
)
+ device_state = UserDevicePresenceState(
+ user_id=user_id,
+ device_id=device_id,
+ state=state.state,
+ last_active_ts=state.last_active_ts,
+ last_sync_ts=state.last_user_sync_ts,
+ )
- new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
+ new_state = handle_timeout(
+ state,
+ is_mine=True,
+ syncing_device_ids=set(),
+ user_devices={device_id: device_state},
+ now=now,
+ )
self.assertIsNotNone(new_state)
assert new_state is not None
@@ -376,6 +391,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
presence state into unavailable.
"""
user_id = "@foo:bar"
+ device_id = "dev-1"
status_msg = "I'm here!"
now = 5000000
@@ -386,8 +402,21 @@ class PresenceTimeoutTestCase(unittest.TestCase):
last_user_sync_ts=now,
status_msg=status_msg,
)
+ device_state = UserDevicePresenceState(
+ user_id=user_id,
+ device_id=device_id,
+ state=state.state,
+ last_active_ts=state.last_active_ts,
+ last_sync_ts=state.last_user_sync_ts,
+ )
- new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
+ new_state = handle_timeout(
+ state,
+ is_mine=True,
+ syncing_device_ids=set(),
+ user_devices={device_id: device_state},
+ now=now,
+ )
self.assertIsNotNone(new_state)
assert new_state is not None
@@ -396,6 +425,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
def test_sync_timeout(self) -> None:
user_id = "@foo:bar"
+ device_id = "dev-1"
status_msg = "I'm here!"
now = 5000000
@@ -406,8 +436,21 @@ class PresenceTimeoutTestCase(unittest.TestCase):
last_user_sync_ts=now - SYNC_ONLINE_TIMEOUT - 1,
status_msg=status_msg,
)
+ device_state = UserDevicePresenceState(
+ user_id=user_id,
+ device_id=device_id,
+ state=state.state,
+ last_active_ts=state.last_active_ts,
+ last_sync_ts=state.last_user_sync_ts,
+ )
- new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
+ new_state = handle_timeout(
+ state,
+ is_mine=True,
+ syncing_device_ids=set(),
+ user_devices={device_id: device_state},
+ now=now,
+ )
self.assertIsNotNone(new_state)
assert new_state is not None
@@ -416,6 +459,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
def test_sync_online(self) -> None:
user_id = "@foo:bar"
+ device_id = "dev-1"
status_msg = "I'm here!"
now = 5000000
@@ -426,9 +470,20 @@ class PresenceTimeoutTestCase(unittest.TestCase):
last_user_sync_ts=now - SYNC_ONLINE_TIMEOUT - 1,
status_msg=status_msg,
)
+ device_state = UserDevicePresenceState(
+ user_id=user_id,
+ device_id=device_id,
+ state=state.state,
+ last_active_ts=state.last_active_ts,
+ last_sync_ts=state.last_user_sync_ts,
+ )
new_state = handle_timeout(
- state, is_mine=True, syncing_user_ids={user_id}, now=now
+ state,
+ is_mine=True,
+ syncing_device_ids={(user_id, device_id)},
+ user_devices={device_id: device_state},
+ now=now,
)
self.assertIsNotNone(new_state)
@@ -438,6 +493,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
def test_federation_ping(self) -> None:
user_id = "@foo:bar"
+ device_id = "dev-1"
status_msg = "I'm here!"
now = 5000000
@@ -449,14 +505,28 @@ class PresenceTimeoutTestCase(unittest.TestCase):
last_federation_update_ts=now - FEDERATION_PING_INTERVAL - 1,
status_msg=status_msg,
)
+ device_state = UserDevicePresenceState(
+ user_id=user_id,
+ device_id=device_id,
+ state=state.state,
+ last_active_ts=state.last_active_ts,
+ last_sync_ts=state.last_user_sync_ts,
+ )
- new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
+ new_state = handle_timeout(
+ state,
+ is_mine=True,
+ syncing_device_ids=set(),
+ user_devices={device_id: device_state},
+ now=now,
+ )
self.assertIsNotNone(new_state)
self.assertEqual(state, new_state)
def test_no_timeout(self) -> None:
user_id = "@foo:bar"
+ device_id = "dev-1"
now = 5000000
state = UserPresenceState.default(user_id)
@@ -466,8 +536,21 @@ class PresenceTimeoutTestCase(unittest.TestCase):
last_user_sync_ts=now,
last_federation_update_ts=now,
)
+ device_state = UserDevicePresenceState(
+ user_id=user_id,
+ device_id=device_id,
+ state=state.state,
+ last_active_ts=state.last_active_ts,
+ last_sync_ts=state.last_user_sync_ts,
+ )
- new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
+ new_state = handle_timeout(
+ state,
+ is_mine=True,
+ syncing_device_ids=set(),
+ user_devices={device_id: device_state},
+ now=now,
+ )
self.assertIsNone(new_state)
@@ -485,8 +568,9 @@ class PresenceTimeoutTestCase(unittest.TestCase):
status_msg=status_msg,
)
+        # Note that this is a remote user, so we do not have their device information.
new_state = handle_timeout(
- state, is_mine=False, syncing_user_ids=set(), now=now
+ state, is_mine=False, syncing_device_ids=set(), user_devices={}, now=now
)
self.assertIsNotNone(new_state)
@@ -496,6 +580,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
def test_last_active(self) -> None:
user_id = "@foo:bar"
+ device_id = "dev-1"
status_msg = "I'm here!"
now = 5000000
@@ -507,8 +592,21 @@ class PresenceTimeoutTestCase(unittest.TestCase):
last_federation_update_ts=now,
status_msg=status_msg,
)
+ device_state = UserDevicePresenceState(
+ user_id=user_id,
+ device_id=device_id,
+ state=state.state,
+ last_active_ts=state.last_active_ts,
+ last_sync_ts=state.last_user_sync_ts,
+ )
- new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
+ new_state = handle_timeout(
+ state,
+ is_mine=True,
+ syncing_device_ids=set(),
+ user_devices={device_id: device_state},
+ now=now,
+ )
self.assertIsNotNone(new_state)
self.assertEqual(state, new_state)
@@ -579,7 +677,7 @@ class PresenceHandlerInitTestCase(unittest.HomeserverTestCase):
[
(PresenceState.BUSY, PresenceState.BUSY),
(PresenceState.ONLINE, PresenceState.ONLINE),
- (PresenceState.UNAVAILABLE, PresenceState.UNAVAILABLE),
+ (PresenceState.UNAVAILABLE, PresenceState.ONLINE),
# Offline syncs don't update the state.
(PresenceState.OFFLINE, PresenceState.ONLINE),
]
@@ -800,6 +898,486 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase):
# we should now be online
self.assertEqual(state.state, PresenceState.ONLINE)
+
+    @parameterized.expand(
+        # A list of tuples of five strings and a boolean:
+        #
+        # * The presence state of device 1.
+        # * The presence state of device 2.
+        # * The expected user presence state after both devices have synced.
+        # * The expected user presence state after device 1 has idled.
+        # * The expected user presence state after device 2 has idled.
+        # * True to use workers, False for a monolith.
+ [
+ (*cases, workers)
+ for workers in (False, True)
+ for cases in [
+ # If both devices have the same state, online should eventually idle.
+ # Otherwise, the state doesn't change.
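+                # (The expected states below assume the priority ordering
+                # busy > online > unavailable > offline when combining the
+                # per-device states into a single user state.)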
+ (
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ PresenceState.UNAVAILABLE,
+ ),
+ (
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ ),
+ (
+ PresenceState.OFFLINE,
+ PresenceState.OFFLINE,
+ PresenceState.OFFLINE,
+ PresenceState.OFFLINE,
+ PresenceState.OFFLINE,
+ ),
+                # If the second device has a "lower" state the user should fall
+                # back to it, except for "busy", which overrides everything else.
+ (
+ PresenceState.BUSY,
+ PresenceState.ONLINE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.BUSY,
+ PresenceState.UNAVAILABLE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.BUSY,
+ PresenceState.OFFLINE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.ONLINE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.ONLINE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ ),
+ (
+ PresenceState.ONLINE,
+ PresenceState.OFFLINE,
+ PresenceState.ONLINE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ ),
+ (
+ PresenceState.UNAVAILABLE,
+ PresenceState.OFFLINE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ ),
+ # If the second device has a "higher" state it should override.
+ (
+ PresenceState.ONLINE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.UNAVAILABLE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.OFFLINE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.UNAVAILABLE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ PresenceState.UNAVAILABLE,
+ ),
+ (
+ PresenceState.OFFLINE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ PresenceState.UNAVAILABLE,
+ ),
+ (
+ PresenceState.OFFLINE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ ),
+ ]
+ ],
+ name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[5] else 'monolith'}",
+ )
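+    # MSC3026 adds the experimental "busy" presence state, which some of the
+    # cases above use, so it must be explicitly enabled.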
+ @unittest.override_config({"experimental_features": {"msc3026_enabled": True}})
+ def test_set_presence_from_syncing_multi_device(
+ self,
+ dev_1_state: str,
+ dev_2_state: str,
+ expected_state_1: str,
+ expected_state_2: str,
+ expected_state_3: str,
+ test_with_workers: bool,
+ ) -> None:
+ """
+ Test the behaviour of multiple devices syncing at the same time.
+
+ Roughly the user's presence state should be set to the "highest" priority
+ of all the devices. When a device then goes offline its state should be
+ discarded and the next highest should win.
+
+        Note that these tests use the idle timer (and don't close the syncs); it
+        is unlikely that a *single* sync would last this long, but it is close
+        enough to a client continually syncing with that state.
+ """
+ user_id = f"@test:{self.hs.config.server.server_name}"
+
+ # By default, we call /sync against the main process.
+ worker_presence_handler = self.presence_handler
+ if test_with_workers:
+ # Create a worker and use it to handle /sync traffic instead.
+ # This is used to test that presence changes get replicated from workers
+ # to the main process correctly.
+ worker_to_sync_against = self.make_worker_hs(
+ "synapse.app.generic_worker", {"worker_name": "synchrotron"}
+ )
+ worker_presence_handler = worker_to_sync_against.get_presence_handler()
+
+ # 1. Sync with the first device.
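+        # An "offline" device should not mark the user as syncing, so
+        # affect_presence is only set for non-offline states (matching how the
+        # sync endpoint treats set_presence=offline).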
+ self.get_success(
+ worker_presence_handler.user_syncing(
+ user_id,
+ "dev-1",
+ affect_presence=dev_1_state != PresenceState.OFFLINE,
+ presence_state=dev_1_state,
+ ),
+ by=0.01,
+ )
+
+ # 2. Wait half the idle timer.
+ self.reactor.advance(IDLE_TIMER / 1000 / 2)
+ self.reactor.pump([0.1])
+
+ # 3. Sync with the second device.
+ self.get_success(
+ worker_presence_handler.user_syncing(
+ user_id,
+ "dev-2",
+ affect_presence=dev_2_state != PresenceState.OFFLINE,
+ presence_state=dev_2_state,
+ ),
+ by=0.01,
+ )
+
+ # 4. Assert the expected presence state.
+ state = self.get_success(
+ self.presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, expected_state_1)
+ if test_with_workers:
+ state = self.get_success(
+ worker_presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, expected_state_1)
+
+        # When testing with workers, make another sync (for any *different*
+        # user) to keep the worker's process information from expiring.
+        #
+        # This is due to EXTERNAL_PROCESS_EXPIRY being equivalent to IDLE_TIMER.
+ if test_with_workers:
+ with self.get_success(
+ worker_presence_handler.user_syncing(
+ f"@other-user:{self.hs.config.server.server_name}",
+ "dev-3",
+ affect_presence=True,
+ presence_state=PresenceState.ONLINE,
+ ),
+ by=0.01,
+ ):
+ pass
+
+        # 5. Advance through the rest of the idle timer so that the first
+        # device is discarded, then pump so that the _handle_timeouts function
+        # gets called.
+ self.reactor.advance(IDLE_TIMER / 1000 / 2)
+ self.reactor.pump([0.01])
+
+ # 6. Assert the expected presence state.
+ state = self.get_success(
+ self.presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, expected_state_2)
+ if test_with_workers:
+ state = self.get_success(
+ worker_presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, expected_state_2)
+
+        # 7. Advance by another half of the idle timer so that the second
+        # device is discarded too, then pump so that the _handle_timeouts
+        # function gets called.
+ self.reactor.advance(IDLE_TIMER / 1000 / 2)
+ self.reactor.pump([0.1])
+
+        # 8. The devices are still "syncing" (the sync context managers were
+        # never closed), so they may idle rather than go offline.
+ state = self.get_success(
+ self.presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, expected_state_3)
+ if test_with_workers:
+ state = self.get_success(
+ worker_presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, expected_state_3)
+
+    @parameterized.expand(
+        # A list of tuples of four strings and a boolean:
+        #
+        # * The presence state of device 1.
+        # * The presence state of device 2.
+        # * The expected user presence state after both devices have synced.
+        # * The expected user presence state after device 1 has stopped syncing.
+        # * True to use workers, False for a monolith.
+ [
+ (*cases, workers)
+ for workers in (False, True)
+ for cases in [
+ # If both devices have the same state, nothing exciting should happen.
+ (
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ ),
+ (
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ ),
+ (
+ PresenceState.OFFLINE,
+ PresenceState.OFFLINE,
+ PresenceState.OFFLINE,
+ PresenceState.OFFLINE,
+ ),
+                # If the second device has a "lower" state the user should fall
+                # back to it, except for "busy", which overrides everything else.
+ (
+ PresenceState.BUSY,
+ PresenceState.ONLINE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.BUSY,
+ PresenceState.UNAVAILABLE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.BUSY,
+ PresenceState.OFFLINE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.ONLINE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.ONLINE,
+ PresenceState.UNAVAILABLE,
+ ),
+ (
+ PresenceState.ONLINE,
+ PresenceState.OFFLINE,
+ PresenceState.ONLINE,
+ PresenceState.OFFLINE,
+ ),
+ (
+ PresenceState.UNAVAILABLE,
+ PresenceState.OFFLINE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.OFFLINE,
+ ),
+ # If the second device has a "higher" state it should override.
+ (
+ PresenceState.ONLINE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.UNAVAILABLE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.OFFLINE,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ PresenceState.BUSY,
+ ),
+ (
+ PresenceState.UNAVAILABLE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ ),
+ (
+ PresenceState.OFFLINE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ PresenceState.ONLINE,
+ ),
+ (
+ PresenceState.OFFLINE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ PresenceState.UNAVAILABLE,
+ ),
+ ]
+ ],
+ name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[4] else 'monolith'}",
+ )
+ @unittest.override_config({"experimental_features": {"msc3026_enabled": True}})
+ def test_set_presence_from_non_syncing_multi_device(
+ self,
+ dev_1_state: str,
+ dev_2_state: str,
+ expected_state_1: str,
+ expected_state_2: str,
+ test_with_workers: bool,
+ ) -> None:
+ """
+        Test the behaviour of multiple devices syncing and then stopping syncing.
+
+ Roughly the user's presence state should be set to the "highest" priority
+ of all the devices. When a device then goes offline its state should be
+ discarded and the next highest should win.
+
+        Unlike the tests above, these syncs are explicitly closed, so it is the
+        sync timeout (rather than the idle timer) that discards a device's
+        state.
+ """
+ user_id = f"@test:{self.hs.config.server.server_name}"
+
+ # By default, we call /sync against the main process.
+ worker_presence_handler = self.presence_handler
+ if test_with_workers:
+ # Create a worker and use it to handle /sync traffic instead.
+ # This is used to test that presence changes get replicated from workers
+ # to the main process correctly.
+ worker_to_sync_against = self.make_worker_hs(
+ "synapse.app.generic_worker", {"worker_name": "synchrotron"}
+ )
+ worker_presence_handler = worker_to_sync_against.get_presence_handler()
+
+ # 1. Sync with the first device.
+ sync_1 = self.get_success(
+ worker_presence_handler.user_syncing(
+ user_id,
+ "dev-1",
+ affect_presence=dev_1_state != PresenceState.OFFLINE,
+ presence_state=dev_1_state,
+ ),
+ by=0.1,
+ )
+
+ # 2. Sync with the second device.
+ sync_2 = self.get_success(
+ worker_presence_handler.user_syncing(
+ user_id,
+ "dev-2",
+ affect_presence=dev_2_state != PresenceState.OFFLINE,
+ presence_state=dev_2_state,
+ ),
+ by=0.1,
+ )
+
+ # 3. Assert the expected presence state.
+ state = self.get_success(
+ self.presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, expected_state_1)
+ if test_with_workers:
+ state = self.get_success(
+ worker_presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, expected_state_1)
+
+ # 4. Disconnect the first device.
+ with sync_1:
+ pass
+
+        # 5. Advance past the sync timeout so that the first device is
+        # discarded, then pump so that the _handle_timeouts function gets
+        # called.
+ self.reactor.advance(SYNC_ONLINE_TIMEOUT / 1000)
+ self.reactor.pump([5])
+
+ # 6. Assert the expected presence state.
+ state = self.get_success(
+ self.presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, expected_state_2)
+ if test_with_workers:
+ state = self.get_success(
+ worker_presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, expected_state_2)
+
+ # 7. Disconnect the second device.
+ with sync_2:
+ pass
+
+        # 8. Advance past the relevant timeout so that the second device is
+        # discarded, then pump so that the _handle_timeouts function gets
+        # called.
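+        # A device that was "busy" is retained for BUSY_ONLINE_TIMEOUT rather
+        # than the normal sync timeout, so advance by whichever applies here.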
+ if dev_1_state == PresenceState.BUSY or dev_2_state == PresenceState.BUSY:
+ timeout = BUSY_ONLINE_TIMEOUT
+ else:
+ timeout = SYNC_ONLINE_TIMEOUT
+ self.reactor.advance(timeout / 1000)
+ self.reactor.pump([5])
+
+        # 9. There are no more syncing devices, so the user should be offline.
+ state = self.get_success(
+ self.presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, PresenceState.OFFLINE)
+ if test_with_workers:
+ state = self.get_success(
+ worker_presence_handler.get_state(UserID.from_string(user_id))
+ )
+ self.assertEqual(state.state, PresenceState.OFFLINE)
+
def test_set_presence_from_syncing_keeps_status(self) -> None:
"""Test that presence set by syncing retains status message"""
status_msg = "I'm here!"
|