diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index fbafbbee6b..6e15028b0a 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -81,6 +81,8 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+INVALID_USERNAME_OR_PASSWORD = "Invalid username or password"
+
def convert_client_dict_legacy_fields_to_identifier(
submission: JsonDict,
@@ -1215,7 +1217,9 @@ class AuthHandler:
await self._failed_login_attempts_ratelimiter.can_do_action(
None, (medium, address)
)
- raise LoginError(403, "", errcode=Codes.FORBIDDEN)
+ raise LoginError(
+ 403, msg=INVALID_USERNAME_OR_PASSWORD, errcode=Codes.FORBIDDEN
+ )
identifier_dict = {"type": "m.id.user", "user": user_id}
@@ -1341,7 +1345,7 @@ class AuthHandler:
# We raise a 403 here, but note that if we're doing user-interactive
# login, it turns all LoginErrors into a 401 anyway.
- raise LoginError(403, "Invalid password", errcode=Codes.FORBIDDEN)
+ raise LoginError(403, msg=INVALID_USERNAME_OR_PASSWORD, errcode=Codes.FORBIDDEN)
async def check_password_provider_3pid(
self, medium: str, address: str, password: str
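The auth.py changes above unify the two login-failure paths behind one constant. If "unknown user" and "wrong password" produce distinguishable errors, a client can probe which usernames exist on the server. Below is a minimal sketch of that idea, not Synapse's actual control flow: the credential table is invented, passwords are compared in plain text for brevity (real Synapse hashes them and consults the database), and LoginError is a simplified stand-in for synapse.api.errors.LoginError.

    from typing import Optional

    INVALID_USERNAME_OR_PASSWORD = "Invalid username or password"

    class LoginError(Exception):
        """Simplified stand-in for synapse.api.errors.LoginError."""

        def __init__(self, code: int, msg: str, errcode: str) -> None:
            super().__init__(msg)
            self.code = code
            self.errcode = errcode

    # Hypothetical credential table, for the sketch only.
    _USERS = {"@alice:example.com": "correct horse battery staple"}

    def check_login(username: str, password: str) -> str:
        stored: Optional[str] = _USERS.get(username)
        # Unknown user and wrong password raise the *same* error, so a
        # caller cannot tell which of the two checks failed.
        if stored is None or stored != password:
            raise LoginError(
                403, msg=INVALID_USERNAME_OR_PASSWORD, errcode="M_FORBIDDEN"
            )
        return username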
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index a0cbeedc30..b79c551703 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -398,35 +398,6 @@ class DeviceHandler(DeviceWorkerHandler):
await self.delete_devices(user_id, user_devices)
@trace
- async def delete_device(self, user_id: str, device_id: str) -> None:
- """Delete the given device
-
- Args:
- user_id: The user to delete the device from.
- device_id: The device to delete.
- """
-
- try:
- await self.store.delete_device(user_id, device_id)
- except errors.StoreError as e:
- if e.code == 404:
- # no match
- set_tag("error", True)
- log_kv(
- {"reason": "User doesn't have device id.", "device_id": device_id}
- )
- else:
- raise
-
- await self._auth_handler.delete_access_tokens_for_user(
- user_id, device_id=device_id
- )
-
- await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id)
-
- await self.notify_device_update(user_id, [device_id])
-
- @trace
async def delete_all_devices_for_user(
self, user_id: str, except_device_id: Optional[str] = None
) -> None:
@@ -591,7 +562,7 @@ class DeviceHandler(DeviceWorkerHandler):
user_id, device_id, device_data
)
if old_device_id is not None:
- await self.delete_device(user_id, old_device_id)
+ await self.delete_devices(user_id, [old_device_id])
return device_id
async def get_dehydrated_device(
@@ -638,7 +609,7 @@ class DeviceHandler(DeviceWorkerHandler):
await self.store.update_device(user_id, device_id, old_device["display_name"])
-# can't call self.delete_device because that will clobber the
-# access token so call the storage layer directly
+# can't call self.delete_devices because that will clobber the
+# access token, so call the storage layer directly
- await self.store.delete_device(user_id, old_device_id)
+ await self.store.delete_devices(user_id, [old_device_id])
await self.store.delete_e2e_keys_by_device(
user_id=user_id, device_id=old_device_id
)
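The device.py changes drop the single-device `delete_device` helper so every caller goes through the batched `delete_devices`, with former call sites wrapping the device ID in a one-element list. A rough sketch of that call-site migration, under the assumption of a fake storage layer; `FakeStore` and `DeviceHandlerSketch` are illustrative only, not Synapse's API surface.

    import asyncio
    from typing import Iterable, List

    class FakeStore:
        """Illustrative stand-in for the storage layer, not Synapse's API."""

        async def delete_devices(
            self, user_id: str, device_ids: Iterable[str]
        ) -> None:
            for device_id in device_ids:
                print(f"deleting device {device_id} for {user_id}")

    class DeviceHandlerSketch:
        def __init__(self) -> None:
            self.store = FakeStore()

        async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
            # One batched call replaces N single-device calls; in the real
            # handler, access-token and e2e-key cleanup hangs off this too.
            await self.store.delete_devices(user_id, device_ids)

    async def main() -> None:
        handler = DeviceHandlerSketch()
        # What used to be delete_device(user_id, old_device_id) becomes:
        await handler.delete_devices("@alice:example.com", ["OLDDEVICE"])

    asyncio.run(main())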
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 6310f0ef27..1e5694244a 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -545,6 +545,7 @@ class FederationHandler:
if ret.partial_state:
# TODO(faster_joins): roll this back if we don't manage to start the
# background resync (eg process_remote_join fails)
+ # https://github.com/matrix-org/synapse/issues/12998
await self.store.store_partial_state_room(room_id, ret.servers_in_room)
max_stream_id = await self._federation_event_handler.process_remote_join(
@@ -1498,14 +1499,17 @@ class FederationHandler:
# TODO(faster_joins): do we need to lock to avoid races? What happens if other
# worker processes kick off a resync in parallel? Perhaps we should just elect
# a single worker to do the resync.
+ # https://github.com/matrix-org/synapse/issues/12994
#
# TODO(faster_joins): what happens if we leave the room during a resync? if we
# really leave, that might mean we have difficulty getting the room state over
# federation.
+ # https://github.com/matrix-org/synapse/issues/12802
#
# TODO(faster_joins): we need some way of prioritising which homeservers in
# `other_destinations` to try first, otherwise we'll spend ages trying dead
# homeservers for large rooms.
+ # https://github.com/matrix-org/synapse/issues/12999
if initial_destination is None and len(other_destinations) == 0:
raise ValueError(
@@ -1535,9 +1539,11 @@ class FederationHandler:
# all the events are updated, so we can update current state and
# clear the lazy-loading flag.
logger.info("Updating current state for %s", room_id)
+ # TODO(faster_joins): support workers
+ # https://github.com/matrix-org/synapse/issues/12994
assert (
self._storage_controllers.persistence is not None
- ), "TODO(faster_joins): support for workers"
+ ), "worker-mode deployments not currently supported here"
await self._storage_controllers.persistence.update_current_state(
room_id
)
@@ -1551,6 +1557,8 @@ class FederationHandler:
)
# TODO(faster_joins) update room stats and user directory?
+ # https://github.com/matrix-org/synapse/issues/12814
+ # https://github.com/matrix-org/synapse/issues/12815
return
# we raced against more events arriving with partial state. Go round
@@ -1558,6 +1566,8 @@ class FederationHandler:
# TODO(faster_joins): there is still a race here, whereby incoming events which raced
# with us will fail to be persisted after the call to `clear_partial_state_room` due to
# having partial state.
+ # https://github.com/matrix-org/synapse/issues/12988
+ #
continue
events = await self.store.get_events_as_list(
@@ -1580,6 +1590,7 @@ class FederationHandler:
# indefinitely is also not the right thing to do if we can
# reach all homeservers and they all claim they don't have
# the state we want.
+ # https://github.com/matrix-org/synapse/issues/13000
logger.error(
"Failed to get state for %s at %s from %s because %s, "
"giving up!",
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 9488fef297..6c9e6a00b5 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -532,6 +532,7 @@ class FederationEventHandler:
#
# TODO(faster_joins): we probably need to be more intelligent, and
# exclude partial-state prev_events from consideration
+ # https://github.com/matrix-org/synapse/issues/13001
logger.warning(
"%s still has partial state: can't de-partial-state it yet",
event.event_id,
@@ -777,6 +778,7 @@ class FederationEventHandler:
state_ids = await self._resolve_state_at_missing_prevs(origin, event)
# TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does
# not return partial state
+ # https://github.com/matrix-org/synapse/issues/13002
await self._process_received_pdu(
origin, event, state_ids=state_ids, backfilled=backfilled
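The first federation_event.py hunk notes that prev_events which are themselves still partial-state should be excluded when deciding whether an event can be de-partial-stated (issue 13001). A toy sketch of that filtering, with invented event IDs and a hypothetical bookkeeping dict; nothing here reflects Synapse's actual data structures.

    from typing import Dict, List

    # Hypothetical bookkeeping: event_id -> still partial-state?
    partial_state: Dict[str, bool] = {"$a": False, "$b": True, "$c": False}

    def usable_prev_events(prev_event_ids: List[str]) -> List[str]:
        # Treat unknown events as partial-state too, to stay on the safe side.
        return [e for e in prev_event_ids if not partial_state.get(e, True)]

    print(usable_prev_events(["$a", "$b", "$c"]))  # ['$a', '$c']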
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index c8bbcfd8c2..9b17939163 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -1102,6 +1102,7 @@ class EventCreationHandler:
#
# TODO(faster_joins): figure out how this works, and make sure that the
# old state is complete.
+ # https://github.com/matrix-org/synapse/issues/13003
metadata = await self.store.get_metadata_for_events(state_event_ids)
state_map_for_event: MutableStateMap[str] = {}
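For context on the message.py hunk above: Synapse's `StateMap`/`MutableStateMap` types are dictionaries keyed by `(event type, state_key)` pairs mapping to event IDs, which is the shape `state_map_for_event` is about to be filled with from the fetched metadata. A toy sketch of that shape, with invented metadata values:

    from typing import Dict, Tuple

    # Invented metadata for illustration: event_id -> (event type, state_key).
    metadata: Dict[str, Tuple[str, str]] = {
        "$create": ("m.room.create", ""),
        "$power": ("m.room.power_levels", ""),
    }

    # Synapse's StateMap is keyed by (event type, state_key) pairs.
    state_map_for_event: Dict[Tuple[str, str], str] = {}
    for event_id, (event_type, state_key) in metadata.items():
        state_map_for_event[(event_type, state_key)] = event_id

    print(state_map_for_event)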
|