diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py
index 6425f851ea..bcb1cba362 100644
--- a/synapse/util/caches/deferred_cache.py
+++ b/synapse/util/caches/deferred_cache.py
@@ -395,8 +395,8 @@ class DeferredCache(Generic[KT, VT]):
# _pending_deferred_cache.pop should either return a CacheEntry, or, in the
# case of a TreeCache, a dict of keys to cache entries. Either way calling
# iterate_tree_cache_entry on it will do the right thing.
- for entry in iterate_tree_cache_entry(entry):
- for cb in entry.get_invalidation_callbacks(key):
+ for iter_entry in iterate_tree_cache_entry(entry):
+ for cb in iter_entry.get_invalidation_callbacks(key):
cb()
def invalidate_all(self) -> None:
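Editor's note: the comment in this hunk explains that `_pending_deferred_cache.pop` can hand back either a single `CacheEntry` or, for a `TreeCache`, a dict of entries, and that `iterate_tree_cache_entry` flattens both shapes. Below is a self-contained sketch of that behaviour and of why the loop variable is renamed; the types are stand-ins, not Synapse's real `treecache` helpers.

```python
# Illustrative stand-ins only: Synapse's real helpers live in
# synapse/util/caches/treecache.py and deferred_cache.py.
from typing import Callable, Dict, Iterator, List, Union


class FakeCacheEntry:
    """Stands in for the CacheEntry popped from _pending_deferred_cache."""

    def __init__(self, callbacks: List[Callable[[], None]]) -> None:
        self._callbacks = callbacks

    def get_invalidation_callbacks(self, key: object) -> List[Callable[[], None]]:
        return self._callbacks


PoppedEntry = Union[FakeCacheEntry, Dict[object, FakeCacheEntry]]


def iterate_tree_cache_entry(entry: PoppedEntry) -> Iterator[FakeCacheEntry]:
    # A plain pop yields one entry; a TreeCache pop yields a dict of entries.
    # Either way the caller sees a flat iterator of entries.
    if isinstance(entry, dict):
        yield from entry.values()
    else:
        yield entry


def invalidate(entry: PoppedEntry, key: object) -> None:
    # Naming the inner variable `iter_entry`, as the hunk above now does,
    # keeps the popped `entry` from being shadowed inside its own loop.
    for iter_entry in iterate_tree_cache_entry(entry):
        for cb in iter_entry.get_invalidation_callbacks(key):
            cb()
```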
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 0391966462..75428d19ba 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import enum
import functools
import inspect
import logging
@@ -146,109 +145,6 @@ class _CacheDescriptorBase:
)
-class _LruCachedFunction(Generic[F]):
- cache: LruCache[CacheKey, Any]
- __call__: F
-
-
-def lru_cache(
- *, max_entries: int = 1000, cache_context: bool = False
-) -> Callable[[F], _LruCachedFunction[F]]:
- """A method decorator that applies a memoizing cache around the function.
-
- This is more-or-less a drop-in equivalent to functools.lru_cache, although note
- that the signature is slightly different.
-
- The main differences with functools.lru_cache are:
- (a) the size of the cache can be controlled via the cache_factor mechanism
- (b) the wrapped function can request a "cache_context" which provides a
- callback mechanism to indicate that the result is no longer valid
- (c) prometheus metrics are exposed automatically.
-
- The function should take zero or more arguments, which are used as the key for the
- cache. Single-argument functions use that argument as the cache key; otherwise the
- arguments are built into a tuple.
-
- Cached functions can be "chained" (i.e. a cached function can call other cached
- functions and get appropriately invalidated when they called caches are
- invalidated) by adding a special "cache_context" argument to the function
- and passing that as a kwarg to all caches called. For example:
-
- @lru_cache(cache_context=True)
- def foo(self, key, cache_context):
- r1 = self.bar1(key, on_invalidate=cache_context.invalidate)
- r2 = self.bar2(key, on_invalidate=cache_context.invalidate)
- return r1 + r2
-
- The wrapped function also has a 'cache' property which offers direct access to the
- underlying LruCache.
- """
-
- def func(orig: F) -> _LruCachedFunction[F]:
- desc = LruCacheDescriptor(
- orig,
- max_entries=max_entries,
- cache_context=cache_context,
- )
- return cast(_LruCachedFunction[F], desc)
-
- return func
-
-
-class LruCacheDescriptor(_CacheDescriptorBase):
- """Helper for @lru_cache"""
-
- class _Sentinel(enum.Enum):
- sentinel = object()
-
- def __init__(
- self,
- orig: Callable[..., Any],
- max_entries: int = 1000,
- cache_context: bool = False,
- ):
- super().__init__(
- orig, num_args=None, uncached_args=None, cache_context=cache_context
- )
- self.max_entries = max_entries
-
- def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]:
- cache: LruCache[CacheKey, Any] = LruCache(
- cache_name=self.name,
- max_size=self.max_entries,
- )
-
- get_cache_key = self.cache_key_builder
- sentinel = LruCacheDescriptor._Sentinel.sentinel
-
- @functools.wraps(self.orig)
- def _wrapped(*args: Any, **kwargs: Any) -> Any:
- invalidate_callback = kwargs.pop("on_invalidate", None)
- callbacks = (invalidate_callback,) if invalidate_callback else ()
-
- cache_key = get_cache_key(args, kwargs)
-
- ret = cache.get(cache_key, default=sentinel, callbacks=callbacks)
- if ret != sentinel:
- return ret
-
- # Add our own `cache_context` to argument list if the wrapped function
- # has asked for one
- if self.add_cache_context:
- kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key)
-
- ret2 = self.orig(obj, *args, **kwargs)
- cache.set(cache_key, ret2, callbacks=callbacks)
-
- return ret2
-
- wrapped = cast(CachedFunction, _wrapped)
- wrapped.cache = cache
- obj.__dict__[self.name] = wrapped
-
- return wrapped
-
-
class DeferredCacheDescriptor(_CacheDescriptorBase):
"""A method decorator that applies a memoizing cache around the function.
@@ -432,7 +328,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase):
num_args = cached_method.num_args
if num_args != self.num_args:
- raise Exception(
+ raise TypeError(
"Number of args (%s) does not match underlying cache_method_name=%s (%s)."
% (self.num_args, self.cached_method_name, num_args)
)
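Editor's note: the removed `@lru_cache` docstring was the only place that spelled out the `cache_context` chaining pattern. For reference, here is a minimal sketch of the same pattern using the async `@cached` descriptor that remains in this module, assuming it keeps the `cache_context=True` / `on_invalidate=` interface the docstring describes; `ExampleRoomStore` and its methods are made up.

```python
from synapse.util.caches.descriptors import cached


class ExampleRoomStore:
    @cached()
    async def get_room_version(self, room_id: str) -> str:
        # Stand-in for a real database lookup.
        return "10"

    @cached(cache_context=True)
    async def get_room_summary(self, room_id: str, cache_context) -> str:
        # Passing the outer cache's invalidation callback to the inner cache
        # "chains" them: invalidating get_room_version for this room_id also
        # invalidates the cached get_room_summary result.
        version = await self.get_room_version(
            room_id, on_invalidate=cache_context.invalidate
        )
        return f"room {room_id} (room version {version})"
```

The `Exception` → `TypeError` change in the last hunk is independent of the removal: a mismatched `num_args` is a programming error in how the list descriptor is declared, so the more specific exception type is appropriate.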
diff --git a/synapse/util/macaroons.py b/synapse/util/macaroons.py
index df77edcce2..5df03d3ddc 100644
--- a/synapse/util/macaroons.py
+++ b/synapse/util/macaroons.py
@@ -24,7 +24,7 @@ from typing_extensions import Literal
from synapse.util import Clock, stringutils
-MacaroonType = Literal["access", "delete_pusher", "session", "login"]
+MacaroonType = Literal["access", "delete_pusher", "session"]
def get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str:
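Editor's note: dropping "login" from `MacaroonType` means a type checker will now reject any attempt to mint a login macaroon through the shared base-macaroon helper. A tiny sketch of how the `Literal` enforces that; the stub below is illustrative, not the real `_generate_base_macaroon`.

```python
from typing_extensions import Literal

MacaroonType = Literal["access", "delete_pusher", "session"]


def generate_base_macaroon_stub(macaroon_type: MacaroonType) -> str:
    # The real helper constructs a pymacaroons.Macaroon with the appropriate
    # caveats; a plain string stands in for it here.
    return f"macaroon of type {macaroon_type}"


generate_base_macaroon_stub("session")  # accepted
generate_base_macaroon_stub("login")  # rejected by mypy: not a valid MacaroonType
```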
@@ -111,19 +111,6 @@ class OidcSessionData:
"""The session ID of the ongoing UI Auth ("" if this is a login)"""
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class LoginTokenAttributes:
- """Data we store in a short-term login token"""
-
- user_id: str
-
- auth_provider_id: str
- """The SSO Identity Provider that the user authenticated with, to get this token."""
-
- auth_provider_session_id: Optional[str]
- """The session ID advertised by the SSO Identity Provider."""
-
-
class MacaroonGenerator:
def __init__(self, clock: Clock, location: str, secret_key: bytes):
self._clock = clock
@@ -165,35 +152,6 @@ class MacaroonGenerator:
macaroon.add_first_party_caveat(f"pushkey = {pushkey}")
return macaroon.serialize()
- def generate_short_term_login_token(
- self,
- user_id: str,
- auth_provider_id: str,
- auth_provider_session_id: Optional[str] = None,
- duration_in_ms: int = (2 * 60 * 1000),
- ) -> str:
- """Generate a short-term login token used during SSO logins
-
- Args:
- user_id: The user for which the token is valid.
- auth_provider_id: The SSO IdP the user used.
- auth_provider_session_id: The session ID got during login from the SSO IdP.
-
- Returns:
- A signed token valid for using as a ``m.login.token`` token.
- """
- now = self._clock.time_msec()
- expiry = now + duration_in_ms
- macaroon = self._generate_base_macaroon("login")
- macaroon.add_first_party_caveat(f"user_id = {user_id}")
- macaroon.add_first_party_caveat(f"time < {expiry}")
- macaroon.add_first_party_caveat(f"auth_provider_id = {auth_provider_id}")
- if auth_provider_session_id is not None:
- macaroon.add_first_party_caveat(
- f"auth_provider_session_id = {auth_provider_session_id}"
- )
- return macaroon.serialize()
-
def generate_oidc_session_token(
self,
state: str,
@@ -233,49 +191,6 @@ class MacaroonGenerator:
return macaroon.serialize()
- def verify_short_term_login_token(self, token: str) -> LoginTokenAttributes:
- """Verify a short-term-login macaroon
-
- Checks that the given token is a valid, unexpired short-term-login token
- minted by this server.
-
- Args:
- token: The login token to verify.
-
- Returns:
- A set of attributes carried by this token, including the
- ``user_id`` and informations about the SSO IDP used during that
- login.
-
- Raises:
- MacaroonVerificationFailedException if the verification failed
- """
- macaroon = pymacaroons.Macaroon.deserialize(token)
-
- v = self._base_verifier("login")
- v.satisfy_general(lambda c: c.startswith("user_id = "))
- v.satisfy_general(lambda c: c.startswith("auth_provider_id = "))
- v.satisfy_general(lambda c: c.startswith("auth_provider_session_id = "))
- satisfy_expiry(v, self._clock.time_msec)
- v.verify(macaroon, self._secret_key)
-
- user_id = get_value_from_macaroon(macaroon, "user_id")
- auth_provider_id = get_value_from_macaroon(macaroon, "auth_provider_id")
-
- auth_provider_session_id: Optional[str] = None
- try:
- auth_provider_session_id = get_value_from_macaroon(
- macaroon, "auth_provider_session_id"
- )
- except MacaroonVerificationFailedException:
- pass
-
- return LoginTokenAttributes(
- user_id=user_id,
- auth_provider_id=auth_provider_id,
- auth_provider_session_id=auth_provider_session_id,
- )
-
def verify_guest_token(self, token: str) -> str:
"""Verify a guest access token macaroon
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index d0a69ff843..dcc037b982 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -51,7 +51,7 @@ class NotRetryingDestination(Exception):
destination: the domain in question
"""
- msg = "Not retrying server %s." % (destination,)
+ msg = f"Not retrying server {destination} because we tried it recently retry_last_ts={retry_last_ts} and we won't check for another retry_interval={retry_interval}ms."
super().__init__(msg)
self.retry_last_ts = retry_last_ts
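Editor's note: for callers, nothing changes except that the exception message now carries the timing details. A hedged sketch of typical consuming code follows; the wrapper and its `do_request` parameter are hypothetical, and only `retry_last_ts` is visible as an attribute in the hunk above.

```python
import logging
from typing import Awaitable, Callable

from synapse.util.retryutils import NotRetryingDestination

logger = logging.getLogger(__name__)


async def send_if_not_backing_off(
    destination: str, do_request: Callable[[str], Awaitable[None]]
) -> None:
    """Run `do_request(destination)`, tolerating destinations we are backing off from."""
    try:
        await do_request(destination)
    except NotRetryingDestination as e:
        # The reworded message already includes retry_last_ts and the retry
        # interval, so logging the exception itself is usually enough.
        logger.debug("Skipping %s: %s", destination, e)
```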