diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index c5ac169644..901e2310b7 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -45,7 +45,6 @@ from synapse.types import (
     JsonDict,
     StreamKeyType,
     StreamToken,
-    UserID,
     get_domain_from_id,
     get_verify_key_from_cross_signing_key,
 )
@@ -324,8 +323,6 @@ class DeviceHandler(DeviceWorkerHandler):
             self.device_list_updater.incoming_device_list_update,
         )
 
-        hs.get_distributor().observe("user_left_room", self.user_left_room)
-
         # Whether `_handle_new_device_update_async` is currently processing.
         self._handle_new_device_update_is_processing = False
 
@@ -569,14 +566,6 @@ class DeviceHandler(DeviceWorkerHandler):
             StreamKeyType.DEVICE_LIST, position, users=[from_user_id]
         )
 
-    async def user_left_room(self, user: UserID, room_id: str) -> None:
-        user_id = user.to_string()
-        room_ids = await self.store.get_rooms_for_user(user_id)
-        if not room_ids:
-            # We no longer share rooms with this user, so we'll no longer
-            # receive device updates. Mark this in DB.
-            await self.store.mark_remote_user_device_list_as_unsubscribed(user_id)
-
     async def store_dehydrated_device(
         self,
         user_id: str,
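
For context: the deleted `user_left_room` observer was the eager half of this bookkeeping. Whenever a user left a room, it checked whether any shared rooms remained and, if none did, marked the remote user's cached device list as unsubscribed. Below is a minimal sketch of that now-removed pattern; the two store calls match the deleted hook, but the stub store and driver code are hypothetical.

    import asyncio
    from typing import Dict, Set


    class StubStore:
        """In-memory stand-in for the Synapse data store (hypothetical)."""

        def __init__(self, rooms_for_user: Dict[str, Set[str]]) -> None:
            self.rooms_for_user = rooms_for_user
            self.unsubscribed: Set[str] = set()

        async def get_rooms_for_user(self, user_id: str) -> Set[str]:
            return self.rooms_for_user.get(user_id, set())

        async def mark_remote_user_device_list_as_unsubscribed(
            self, user_id: str
        ) -> None:
            self.unsubscribed.add(user_id)


    async def user_left_room(store: StubStore, user_id: str, room_id: str) -> None:
        # The membership change has already been persisted by the time the
        # observer fires, so we only check whether any shared rooms remain.
        if not await store.get_rooms_for_user(user_id):
            # We no longer share rooms with this user, so we'll no longer
            # receive device updates; mark the cached list as dead.
            await store.mark_remote_user_device_list_as_unsubscribed(user_id)


    async def main() -> None:
        # Bob has just left the last room he shared with us.
        store = StubStore({"@bob:remote.example": set()})
        await user_left_room(store, "@bob:remote.example", "!room:example")
        assert store.unsubscribed == {"@bob:remote.example"}


    asyncio.run(main())

As the comment in the new e2e_keys.py block below concedes, this kind of tracking can be buggy or racy and leave stale caches behind; the next hunk adds a query-time backstop for exactly that case.
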
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index ec81639c78..8eed63ccf3 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -175,6 +175,32 @@ class E2eKeysHandler:
                 user_ids_not_in_cache,
                 remote_results,
             ) = await self.store.get_user_devices_from_cache(query_list)
+
+            # Check that the homeserver still shares a room with all cached users.
+            # Note that this check may be slightly racy when a remote user leaves a
+            # room after we have fetched their cached device list. In the worst case
+            # we will do extra federation queries for devices that we had cached.
+            cached_users = set(remote_results.keys())
+            valid_cached_users = (
+                await self.store.get_users_server_still_shares_room_with(
+                    remote_results.keys()
+                )
+            )
+            invalid_cached_users = cached_users - valid_cached_users
+            if invalid_cached_users:
+                # Fix up results. If we get here, there is either a bug in device
+                # list tracking, or we hit the race mentioned above.
+                user_ids_not_in_cache.update(invalid_cached_users)
+                for invalid_user_id in invalid_cached_users:
+                    remote_results.pop(invalid_user_id)
+                # This log message may be removed if it turns out to be almost
+                # entirely triggered by races.
+                logger.error(
+                    "Devices for %s were cached, but the server no longer shares "
+                    "any rooms with them. The cached device lists are stale.",
+                    invalid_cached_users,
+                )
+
             for user_id, devices in remote_results.items():
                 user_devices = results.setdefault(user_id, {})
                 for device_id, device in devices.items():
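
The added block makes cache validity a query-time invariant: any cached device list belonging to a user the server no longer shares a room with is discarded, and that user is re-queried over federation. Below is a self-contained sketch of the filtering step; only the name `get_users_server_still_shares_room_with` matches the real store call, and the in-memory store and driver code are hypothetical.

    import asyncio
    from typing import Dict, Iterable, Set


    class InMemoryStore:
        """Hypothetical stand-in exposing only the method used above."""

        def __init__(self, still_shared: Set[str]) -> None:
            self._still_shared = still_shared

        async def get_users_server_still_shares_room_with(
            self, user_ids: Iterable[str]
        ) -> Set[str]:
            # The real implementation consults the room membership tables.
            return {u for u in user_ids if u in self._still_shared}


    async def drop_stale_cached_devices(
        store: InMemoryStore,
        remote_results: Dict[str, Dict[str, dict]],
        user_ids_not_in_cache: Set[str],
    ) -> None:
        # Discard cached device lists for users we no longer share a room
        # with, and requeue those users for a federation query instead.
        cached_users = set(remote_results)
        valid = await store.get_users_server_still_shares_room_with(cached_users)
        for stale_user in cached_users - valid:
            user_ids_not_in_cache.add(stale_user)
            remote_results.pop(stale_user)


    async def main() -> None:
        store = InMemoryStore(still_shared={"@alice:remote.example"})
        remote_results = {
            "@alice:remote.example": {"DEVICE_A": {}},
            "@bob:remote.example": {"DEVICE_B": {}},  # no rooms shared any more
        }
        not_in_cache: Set[str] = set()
        await drop_stale_cached_devices(store, remote_results, not_in_cache)
        assert set(remote_results) == {"@alice:remote.example"}
        assert not_in_cache == {"@bob:remote.example"}


    asyncio.run(main())

Mutating `remote_results` and `user_ids_not_in_cache` in place mirrors the diff, which fixes up the two collections returned by `get_user_devices_from_cache` before the `for user_id, devices in remote_results.items()` loop consumes them.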