diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index f2989cc4a2..5bf8e86387 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -100,6 +100,7 @@ class AdminHandler:
user_info_dict["avatar_url"] = profile.avatar_url
user_info_dict["threepids"] = threepids
user_info_dict["external_ids"] = external_ids
+ user_info_dict["erased"] = await self.store.is_user_erased(user.to_string())
return user_info_dict
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index f5f0e0e7a7..8b9ef25d29 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -38,6 +38,7 @@ from typing import (
import attr
import bcrypt
import unpaddedbase64
+from prometheus_client import Counter
from twisted.internet.defer import CancelledError
from twisted.web.server import Request
@@ -48,6 +49,7 @@ from synapse.api.errors import (
Codes,
InteractiveAuthIncompleteError,
LoginError,
+ NotFoundError,
StoreError,
SynapseError,
UserDeactivatedError,
@@ -63,10 +65,14 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.databases.main.registration import (
+ LoginTokenExpired,
+ LoginTokenLookupResult,
+ LoginTokenReused,
+)
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
-from synapse.util.macaroons import LoginTokenAttributes
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email
@@ -80,6 +86,12 @@ logger = logging.getLogger(__name__)
INVALID_USERNAME_OR_PASSWORD = "Invalid username or password"
+invalid_login_token_counter = Counter(
+ "synapse_user_login_invalid_login_tokens",
+ "Counts the number of rejected m.login.token on /login",
+ ["reason"],
+)
+
def convert_client_dict_legacy_fields_to_identifier(
submission: JsonDict,
@@ -883,6 +895,25 @@ class AuthHandler:
return True
+ async def create_login_token_for_user_id(
+ self,
+ user_id: str,
+ duration_ms: int = (2 * 60 * 1000),
+ auth_provider_id: Optional[str] = None,
+ auth_provider_session_id: Optional[str] = None,
+ ) -> str:
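+ """Create a short-term login token for the given user, valid for
+ `duration_ms` milliseconds (two minutes by default), persist it to the
+ database, and return the opaque token string.
+ """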
+ login_token = self.generate_login_token()
+ now = self._clock.time_msec()
+ expiry_ts = now + duration_ms
+ await self.store.add_login_token_to_user(
+ user_id=user_id,
+ token=login_token,
+ expiry_ts=expiry_ts,
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+ return login_token
+
async def create_refresh_token_for_user_id(
self,
user_id: str,
@@ -1401,6 +1432,18 @@ class AuthHandler:
return None
return user_id
+ def generate_login_token(self) -> str:
+ """Generates an opaque string, for use as an short-term login token"""
+
+ # we use the following format for login tokens:
+ # syl_<random string>_<base62 crc check>
+
+ random_string = stringutils.random_string(20)
+ base = f"syl_{random_string}"
+
+ crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
+ return f"{base}_{crc}"
+
def generate_access_token(self, for_user: UserID) -> str:
"""Generates an opaque string, for use as an access token"""
@@ -1427,16 +1470,17 @@ class AuthHandler:
crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
return f"{base}_{crc}"
- async def validate_short_term_login_token(
- self, login_token: str
- ) -> LoginTokenAttributes:
+ async def consume_login_token(self, login_token: str) -> LoginTokenLookupResult:
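+ """Look up and invalidate a single-use login token, counting each
+ rejection per reason in `invalid_login_token_counter` and surfacing all
+ failures to the caller as a generic 403 "Invalid login token" error.
+ """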
try:
- res = self.macaroon_gen.verify_short_term_login_token(login_token)
- except Exception:
- raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
+ return await self.store.consume_login_token(login_token)
+ except LoginTokenExpired:
+ invalid_login_token_counter.labels("expired").inc()
+ except LoginTokenReused:
+ invalid_login_token_counter.labels("reused").inc()
+ except NotFoundError:
+ invalid_login_token_counter.labels("not found").inc()
- await self.auth_blocking.check_auth_blocking(res.user_id)
- return res
+ raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
async def delete_access_token(self, access_token: str) -> None:
"""Invalidate a single access token
@@ -1711,7 +1755,7 @@ class AuthHandler:
)
# Create a login token
- login_token = self.macaroon_gen.generate_short_term_login_token(
+ login_token = await self.create_login_token_for_user_id(
registered_user_id,
auth_provider_id=auth_provider_id,
auth_provider_session_id=auth_provider_session_id,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 275a37a575..5fc3b8bc8c 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -442,6 +442,15 @@ class FederationHandler:
# appropriate stuff.
# TODO: We can probably do something more intelligent here.
return True
+ except NotRetryingDestination as e:
+ logger.info("_maybe_backfill_inner: %s", e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "_maybe_backfill_inner: Not attempting to backfill from %s because the homeserver is not on our federation whitelist",
+ dom,
+ )
+ continue
except (SynapseError, InvalidResponseError) as e:
logger.info("Failed to backfill from %s because %s", dom, e)
continue
@@ -477,15 +486,9 @@ class FederationHandler:
logger.info("Failed to backfill from %s because %s", dom, e)
continue
- except NotRetryingDestination as e:
- logger.info(str(e))
- continue
except RequestSendFailed as e:
logger.info("Failed to get backfill from %s because %s", dom, e)
continue
- except FederationDeniedError as e:
- logger.info(e)
- continue
except Exception as e:
logger.exception("Failed to backfill from %s because %s", dom, e)
continue
@@ -1017,7 +1020,9 @@ class FederationHandler:
context = EventContext.for_outlier(self._storage_controllers)
- await self._bulk_push_rule_evaluator.action_for_event_by_user(event, context)
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ [(event, context)]
+ )
try:
await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 06e41b5cc0..9ca5df7c78 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -58,7 +58,7 @@ from synapse.event_auth import (
)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
-from synapse.federation.federation_client import InvalidResponseError
+from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
from synapse.logging.context import nested_logging_context
from synapse.logging.opentracing import (
SynapseTags,
@@ -1517,8 +1517,8 @@ class FederationEventHandler:
)
async def backfill_event_id(
- self, destination: str, room_id: str, event_id: str
- ) -> EventBase:
+ self, destinations: List[str], room_id: str, event_id: str
+ ) -> PulledPduInfo:
"""Backfill a single event and persist it as a non-outlier which means
we also pull in all of the state and auth events necessary for it.
@@ -1530,24 +1530,21 @@ class FederationEventHandler:
Raises:
FederationError if we are unable to find the event from the destination
"""
- logger.info(
- "backfill_event_id: event_id=%s from destination=%s", event_id, destination
- )
+ logger.info("backfill_event_id: event_id=%s", event_id)
room_version = await self._store.get_room_version(room_id)
- event_from_response = await self._federation_client.get_pdu(
- [destination],
+ pulled_pdu_info = await self._federation_client.get_pdu(
+ destinations,
event_id,
room_version,
)
- if not event_from_response:
+ if not pulled_pdu_info:
raise FederationError(
"ERROR",
404,
- "Unable to find event_id=%s from destination=%s to backfill."
- % (event_id, destination),
+ f"Unable to find event_id={event_id} from remote servers to backfill.",
affected=event_id,
)
@@ -1555,13 +1552,13 @@ class FederationEventHandler:
# and auth events to de-outlier it. This also sets up the necessary
# `state_groups` for the event.
await self._process_pulled_events(
- destination,
- [event_from_response],
+ pulled_pdu_info.pull_origin,
+ [pulled_pdu_info.pdu],
# Prevent notifications going to clients
backfilled=True,
)
- return event_from_response
+ return pulled_pdu_info
@trace
@tag_args
@@ -1584,19 +1581,19 @@ class FederationEventHandler:
async def get_event(event_id: str) -> None:
with nested_logging_context(event_id):
try:
- event = await self._federation_client.get_pdu(
+ pulled_pdu_info = await self._federation_client.get_pdu(
[destination],
event_id,
room_version,
)
- if event is None:
+ if pulled_pdu_info is None:
logger.warning(
"Server %s didn't return event %s",
destination,
event_id,
)
return
- events.append(event)
+ events.append(pulled_pdu_info.pdu)
except Exception as e:
logger.warning(
@@ -2171,8 +2168,8 @@ class FederationEventHandler:
min_depth,
)
else:
- await self._bulk_push_rule_evaluator.action_for_event_by_user(
- event, context
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ [(event, context)]
)
try:
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 15b828dd74..468900a07f 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -1433,17 +1433,9 @@ class EventCreationHandler:
a room that has been un-partial stated.
"""
- for event, context in events_and_context:
- # Skip push notification actions for historical messages
- # because we don't want to notify people about old history back in time.
- # The historical messages also do not have the proper `context.current_state_ids`
- # and `state_groups` because they have `prev_events` that aren't persisted yet
- # (historical messages persisted in reverse-chronological order).
- if not event.internal_metadata.is_historical():
- with opentracing.start_active_span("calculate_push_actions"):
- await self._bulk_push_rule_evaluator.action_for_event_by_user(
- event, context
- )
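+ # Push actions are now calculated for the whole batch in a single call;
+ # the bulk evaluator is expected to do the historical-event filtering
+ # that previously lived here.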
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ events_and_context
+ )
try:
# If we're a worker we need to hit out to the master.
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index d7a8226900..9759daf043 100644
--- a/synapse/handlers/oidc.py
+++ b/synapse/handlers/oidc.py
@@ -275,6 +275,7 @@ class OidcProvider:
provider: OidcProviderConfig,
):
self._store = hs.get_datastores().main
+ self._clock = hs.get_clock()
self._macaroon_generaton = macaroon_generator
@@ -673,6 +674,13 @@ class OidcProvider:
Returns:
The decoded claims in the ID token.
"""
+ id_token = token.get("id_token")
+ logger.debug("Attempting to decode JWT id_token %r", id_token)
+
+ # This has theoretically already been checked by the caller, so even though
+ # assertions may not be enabled in production, it is mainly here to appease mypy
+ assert id_token is not None
+
metadata = await self.load_metadata()
claims_params = {
"nonce": nonce,
@@ -688,9 +696,6 @@ class OidcProvider:
claim_options = {"iss": {"values": [metadata["issuer"]]}}
- id_token = token["id_token"]
- logger.debug("Attempting to decode JWT id_token %r", id_token)
-
# Try to decode the keys in cache first, then retry by forcing the keys
# to be reloaded
jwk_set = await self.load_jwks()
@@ -715,7 +720,9 @@ class OidcProvider:
logger.debug("Decoded id_token JWT %r; validating", claims)
- claims.validate(leeway=120) # allows 2 min of clock skew
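+ # Validate against the homeserver's clock rather than the system clock,
+ # e.g. so that tests which mock the clock behave consistently.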
+ claims.validate(
+ now=self._clock.time(), leeway=120
+ ) # allows 2 min of clock skew
return claims
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index d8ff5289b5..4bf9a047a3 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -307,7 +307,11 @@ class ProfileHandler:
if not self.max_avatar_size and not self.allowed_avatar_mimetypes:
return True
- server_name, _, media_id = parse_and_validate_mxc_uri(mxc)
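+ # The MXC URI's server name may include an explicit port; reassemble it
+ # here so the comparison against our own server name below is correct.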
+ host, port, media_id = parse_and_validate_mxc_uri(mxc)
+ if port is not None:
+ server_name = host + ":" + str(port)
+ else:
+ server_name = host
if server_name == self.server_name:
media_info = await self.store.get_local_media(media_id)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index d74b675adc..f10cfca073 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -49,7 +49,6 @@ from synapse.api.constants import (
from synapse.api.errors import (
AuthError,
Codes,
- HttpResponseException,
LimitExceededError,
NotFoundError,
StoreError,
@@ -60,7 +59,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_and_fixup_power_levels_contents
-from synapse.federation.federation_client import InvalidResponseError
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
from synapse.rest.admin._base import assert_user_is_admin
@@ -1058,9 +1056,6 @@ class RoomCreationHandler:
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
depth = 1
- # the last event sent/persisted to the db
- last_sent_event_id: Optional[str] = None
-
# the most recently created event
prev_event: List[str] = []
# a map of event types, state keys -> event_ids. We collect these mappings this as events are
@@ -1105,26 +1100,6 @@ class RoomCreationHandler:
return new_event, new_context
- async def send(
- event: EventBase,
- context: synapse.events.snapshot.EventContext,
- creator: Requester,
- ) -> int:
- nonlocal last_sent_event_id
-
- ev = await self.event_creation_handler.handle_new_client_event(
- requester=creator,
- events_and_context=[(event, context)],
- ratelimit=False,
- ignore_shadow_ban=True,
- )
-
- last_sent_event_id = ev.event_id
-
- # we know it was persisted, so must have a stream ordering
- assert ev.internal_metadata.stream_ordering
- return ev.internal_metadata.stream_ordering
-
try:
config = self._presets_dict[preset_config]
except KeyError:
@@ -1138,10 +1113,14 @@ class RoomCreationHandler:
)
logger.debug("Sending %s in new room", EventTypes.Member)
- await send(creation_event, creation_context, creator)
+ ev = await self.event_creation_handler.handle_new_client_event(
+ requester=creator,
+ events_and_context=[(creation_event, creation_context)],
+ ratelimit=False,
+ ignore_shadow_ban=True,
+ )
+ last_sent_event_id = ev.event_id
- # Room create event must exist at this point
- assert last_sent_event_id is not None
member_event_id, _ = await self.room_member_handler.update_membership(
creator,
creator.user,
@@ -1160,6 +1139,7 @@ class RoomCreationHandler:
depth += 1
state_map[(EventTypes.Member, creator.user.to_string())] = member_event_id
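+ # Collect the remaining initial state events (power levels included) so
+ # they can be sent and persisted as a single batch below.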
+ events_to_send = []
# We treat the power levels override specially as this needs to be one
# of the first events that get sent into a room.
pl_content = initial_state.pop((EventTypes.PowerLevels, ""), None)
@@ -1168,7 +1148,7 @@ class RoomCreationHandler:
EventTypes.PowerLevels, pl_content, False
)
current_state_group = power_context._state_group
- await send(power_event, power_context, creator)
+ events_to_send.append((power_event, power_context))
else:
power_level_content: JsonDict = {
"users": {creator_id: 100},
@@ -1217,9 +1197,8 @@ class RoomCreationHandler:
False,
)
current_state_group = pl_context._state_group
- await send(pl_event, pl_context, creator)
+ events_to_send.append((pl_event, pl_context))
- events_to_send = []
if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state:
room_alias_event, room_alias_context = await create_event(
EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True
@@ -1497,7 +1476,12 @@ class TimestampLookupHandler:
Raises:
SynapseError if unable to find any event locally in the given direction
"""
-
+ logger.debug(
+ "get_event_for_timestamp(room_id=%s, timestamp=%s, direction=%s) Finding closest event...",
+ room_id,
+ timestamp,
+ direction,
+ )
local_event_id = await self.store.get_event_id_for_timestamp(
room_id, timestamp, direction
)
@@ -1549,85 +1533,54 @@ class TimestampLookupHandler:
)
)
- # Loop through each homeserver candidate until we get a succesful response
- for domain in likely_domains:
- # We don't want to ask our own server for information we don't have
- if domain == self.server_name:
- continue
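+ # The federation client now iterates over the candidate destinations
+ # itself, so the per-domain loop and error handling have moved into it.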
+ remote_response = await self.federation_client.timestamp_to_event(
+ destinations=likely_domains,
+ room_id=room_id,
+ timestamp=timestamp,
+ direction=direction,
+ )
+ if remote_response is not None:
+ logger.debug(
+ "get_event_for_timestamp: remote_response=%s",
+ remote_response,
+ )
- try:
- remote_response = await self.federation_client.timestamp_to_event(
- domain, room_id, timestamp, direction
- )
- logger.debug(
- "get_event_for_timestamp: response from domain(%s)=%s",
- domain,
- remote_response,
- )
+ remote_event_id = remote_response.event_id
+ remote_origin_server_ts = remote_response.origin_server_ts
- remote_event_id = remote_response.event_id
- remote_origin_server_ts = remote_response.origin_server_ts
-
- # Backfill this event so we can get a pagination token for
- # it with `/context` and paginate `/messages` from this
- # point.
- #
- # TODO: The requested timestamp may lie in a part of the
- # event graph that the remote server *also* didn't have,
- # in which case they will have returned another event
- # which may be nowhere near the requested timestamp. In
- # the future, we may need to reconcile that gap and ask
- # other homeservers, and/or extend `/timestamp_to_event`
- # to return events on *both* sides of the timestamp to
- # help reconcile the gap faster.
- remote_event = (
- await self.federation_event_handler.backfill_event_id(
- domain, room_id, remote_event_id
- )
- )
+ # Backfill this event so we can get a pagination token for
+ # it with `/context` and paginate `/messages` from this
+ # point.
+ pulled_pdu_info = await self.federation_event_handler.backfill_event_id(
+ likely_domains, room_id, remote_event_id
+ )
+ remote_event = pulled_pdu_info.pdu
- # XXX: When we see that the remote server is not trustworthy,
- # maybe we should not ask them first in the future.
- if remote_origin_server_ts != remote_event.origin_server_ts:
- logger.info(
- "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.",
- domain,
- remote_event_id,
- remote_origin_server_ts,
- remote_event.origin_server_ts,
- )
-
- # Only return the remote event if it's closer than the local event
- if not local_event or (
- abs(remote_event.origin_server_ts - timestamp)
- < abs(local_event.origin_server_ts - timestamp)
- ):
- logger.info(
- "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
- remote_event_id,
- remote_event.origin_server_ts,
- timestamp,
- local_event.event_id if local_event else None,
- local_event.origin_server_ts if local_event else None,
- )
- return remote_event_id, remote_origin_server_ts
- except (HttpResponseException, InvalidResponseError) as ex:
- # Let's not put a high priority on some other homeserver
- # failing to respond or giving a random response
- logger.debug(
- "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s",
- domain,
- type(ex).__name__,
- ex,
- ex.args,
+ # XXX: When we see that the remote server is not trustworthy,
+ # maybe we should not ask them first in the future.
+ if remote_origin_server_ts != remote_event.origin_server_ts:
+ logger.info(
+ "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.",
+ pulled_pdu_info.pull_origin,
+ remote_event_id,
+ remote_origin_server_ts,
+ remote_event.origin_server_ts,
)
- except Exception:
- # But we do want to see some exceptions in our code
- logger.warning(
- "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception",
- domain,
- exc_info=True,
+
+ # Only return the remote event if it's closer than the local event
+ if not local_event or (
+ abs(remote_event.origin_server_ts - timestamp)
+ < abs(local_event.origin_server_ts - timestamp)
+ ):
+ logger.info(
+ "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
+ remote_event_id,
+ remote_event.origin_server_ts,
+ timestamp,
+ local_event.event_id if local_event else None,
+ local_event.origin_server_ts if local_event else None,
)
+ return remote_event_id, remote_origin_server_ts
# To appease mypy, we have to add both of these conditions to check for
# `None`. We only expect `local_event` to be `None` when
|