diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 4f028078fa..a9cc322d7e 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -775,9 +775,12 @@ class Auth(object):
)
@defer.inlineCallbacks
- def check_auth_blocking(self):
+ def check_auth_blocking(self, user_id=None):
"""Checks if the user should be rejected for some external reason,
such as monthly active user limiting or global disable flag
+
+ Args:
+            user_id(str): If present, the user is first checked for membership of the
+                existing MAU cohort; cohort members are never blocked by the limit
"""
if self.hs.config.hs_disabled:
raise AuthError(
@@ -786,6 +789,12 @@ class Auth(object):
admin_email=self.hs.config.admin_email,
)
if self.hs.config.limit_usage_by_mau is True:
+            # Users already in the MAU cohort are never blocked
+            if user_id:
+                timestamp = yield self.store.user_last_seen_monthly_active(user_id)
+                if timestamp:
+                    return
+            # Otherwise, bail if there is no room left in the MAU bucket
current_mau = yield self.store.get_monthly_active_count()
if current_mau >= self.hs.config.max_mau_value:
raise AuthError(
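
The hunk above makes check_auth_blocking() skip the MAU limit for a caller who is
already counted in the current cohort. As a minimal sketch, the decision reduces to
the standalone function below; should_block, is_existing_mau_user, current_mau and
max_mau are illustrative names, not Synapse APIs.

    def should_block(limit_usage_by_mau, is_existing_mau_user, current_mau, max_mau):
        """Hedged sketch: return True if a request should be rejected on MAU grounds."""
        if not limit_usage_by_mau:
            return False
        if is_existing_mau_user:
            # Users already counted in this month's cohort are never blocked.
            return False
        # New users are only admitted while there is room in the MAU bucket.
        return current_mau >= max_mau
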
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 6393a9674b..3b21a04a5d 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -191,6 +191,7 @@ class SyncHandler(object):
self.clock = hs.get_clock()
self.response_cache = ResponseCache(hs, "sync")
self.state = hs.get_state_handler()
+ self.auth = hs.get_auth()
# ExpiringCache((User, Device)) -> LruCache(state_key => event_id)
self.lazy_loaded_members_cache = ExpiringCache(
@@ -198,19 +199,27 @@ class SyncHandler(object):
max_len=0, expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE,
)
+ @defer.inlineCallbacks
def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
full_state=False):
"""Get the sync for a client if we have new data for it now. Otherwise
wait for new data to arrive on the server. If the timeout expires, then
return an empty sync result.
Returns:
- A Deferred SyncResult.
+ Deferred[SyncResult]
"""
- return self.response_cache.wrap(
+        # Check that MAU limits have not been exceeded. Users already in the
+        # MAU cohort are never blocked; a user not in the cohort by this point
+        # will almost certainly be blocked.
+ user_id = sync_config.user.to_string()
+ yield self.auth.check_auth_blocking(user_id)
+
+ res = yield self.response_cache.wrap(
sync_config.request_key,
self._wait_for_sync_for_user,
sync_config, since_token, timeout, full_state,
)
+ defer.returnValue(res)
@defer.inlineCallbacks
def _wait_for_sync_for_user(self, sync_config, since_token, timeout,
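
Because wait_for_sync_for_user() now yields on check_auth_blocking() before touching
the response cache, it has to become an inlineCallbacks generator and pass its result
back via defer.returnValue (a bare `return value` is not allowed in a generator on
Python 2). A small sketch of that Twisted pattern, under the assumption that `check`
and `wrapped` both return Deferreds; guarded_call is an illustrative name.

    from twisted.internet import defer

    @defer.inlineCallbacks
    def guarded_call(check, wrapped, *args):
        # The check may raise (e.g. AuthError), aborting before the wrapped call runs.
        yield check()
        result = yield wrapped(*args)
        defer.returnValue(result)
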
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index d47dcef3a0..7b36accdb6 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -64,23 +64,27 @@ class MonthlyActiveUsersStore(SQLBaseStore):
Deferred[]
"""
def _reap_users(txn):
+ # Purge stale users
thirty_days_ago = (
int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
)
- # Purge stale users
-
- # questionmarks is a hack to overcome sqlite not supporting
- # tuples in 'WHERE IN %s'
- questionmarks = '?' * len(self.reserved_users)
query_args = [thirty_days_ago]
- query_args.extend(self.reserved_users)
-
- sql = """
- DELETE FROM monthly_active_users
- WHERE timestamp < ?
- AND user_id NOT IN ({})
- """.format(','.join(questionmarks))
+ base_sql = "DELETE FROM monthly_active_users WHERE timestamp < ?"
+
+ # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres
+ # when len(reserved_users) == 0. Works fine on sqlite.
+ if len(self.reserved_users) > 0:
+ # questionmarks is a hack to overcome sqlite not supporting
+ # tuples in 'WHERE IN %s'
+ questionmarks = '?' * len(self.reserved_users)
+
+ query_args.extend(self.reserved_users)
+ sql = base_sql + """ AND user_id NOT IN ({})""".format(
+ ','.join(questionmarks)
+ )
+ else:
+ sql = base_sql
txn.execute(sql, query_args)
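
For reference, a standalone sketch of the statement this branch builds, with made-up
reserved users: when the reserved list is non-empty the placeholders expand into a
NOT IN list, and when it is empty the clause is omitted entirely, since Postgres
rejects an empty "NOT IN ()" while sqlite tolerates it.

    reserved_users = ["@support:hs", "@bot:hs", "@notices:hs"]
    base_sql = "DELETE FROM monthly_active_users WHERE timestamp < ?"
    if reserved_users:
        sql = base_sql + " AND user_id NOT IN ({})".format(
            ",".join("?" * len(reserved_users))
        )
    else:
        sql = base_sql
    # sql is now:
    #   DELETE FROM monthly_active_users WHERE timestamp < ?
    #     AND user_id NOT IN (?,?,?)
    # and query_args would be [thirty_days_ago, "@support:hs", "@bot:hs", "@notices:hs"]
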
@@ -93,16 +97,24 @@ class MonthlyActiveUsersStore(SQLBaseStore):
# negative LIMIT values. So there is no way to write it that both can
# support
query_args = [self.hs.config.max_mau_value]
- query_args.extend(self.reserved_users)
- sql = """
+
+ base_sql = """
DELETE FROM monthly_active_users
WHERE user_id NOT IN (
SELECT user_id FROM monthly_active_users
ORDER BY timestamp DESC
LIMIT ?
)
- AND user_id NOT IN ({})
- """.format(','.join(questionmarks))
+ """
+ # Need if/else since 'AND user_id NOT IN ({})' fails on Postgres
+ # when len(reserved_users) == 0. Works fine on sqlite.
+            if len(self.reserved_users) > 0:
+                questionmarks = '?' * len(self.reserved_users)
+                query_args.extend(self.reserved_users)
+                sql = base_sql + """ AND user_id NOT IN ({})""".format(
+                    ','.join(questionmarks)
+                )
+ else:
+ sql = base_sql
txn.execute(sql, query_args)
yield self.runInteraction("reap_monthly_active_users", _reap_users)
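
The second statement trims the table down to the newest max_mau_value rows while
always exempting reserved users. A hedged illustration of how it renders with two
reserved users (values are made up); the LIMIT ? subselect is used because, per the
comment above, there is no OFFSET form that both sqlite and Postgres accept.

    query_args = [50, "@support:hs", "@bot:hs"]  # max_mau_value, then reserved users
    sql = (
        "DELETE FROM monthly_active_users "
        "WHERE user_id NOT IN ("
        " SELECT user_id FROM monthly_active_users"
        " ORDER BY timestamp DESC LIMIT ?"
        ") AND user_id NOT IN (?,?)"
    )
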
@@ -113,7 +125,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
# is racy.
# Have resolved to invalidate the whole cache for now and do
# something about it if and when the perf becomes significant
- self._user_last_seen_monthly_active.invalidate_all()
+ self.user_last_seen_monthly_active.invalidate_all()
self.get_monthly_active_count.invalidate_all()
@cached(num_args=0)
@@ -152,11 +164,11 @@ class MonthlyActiveUsersStore(SQLBaseStore):
lock=False,
)
if is_insert:
- self._user_last_seen_monthly_active.invalidate((user_id,))
+ self.user_last_seen_monthly_active.invalidate((user_id,))
self.get_monthly_active_count.invalidate(())
@cached(num_args=1)
- def _user_last_seen_monthly_active(self, user_id):
+ def user_last_seen_monthly_active(self, user_id):
"""
Checks if a given user is part of the monthly active user group
Arguments:
@@ -173,7 +185,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
},
retcol="timestamp",
allow_none=True,
- desc="_user_last_seen_monthly_active",
+ desc="user_last_seen_monthly_active",
))
@defer.inlineCallbacks
@@ -185,7 +197,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
user_id(str): the user_id to query
"""
if self.hs.config.limit_usage_by_mau:
- last_seen_timestamp = yield self._user_last_seen_monthly_active(user_id)
+ last_seen_timestamp = yield self.user_last_seen_monthly_active(user_id)
now = self.hs.get_clock().time_msec()
# We want to reduce to the total number of db writes, and are happy
|