Diffstat (limited to 'synapse/storage/_base.py')
-rw-r--r--  synapse/storage/_base.py  87
1 file changed, 53 insertions(+), 34 deletions(-)
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py

index 3fe827cd43..29589853c6 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -16,6 +16,7 @@
 # limitations under the License.
 import itertools
 import logging
+import random
 import sys
 import threading
 import time
@@ -37,6 +38,14 @@ from synapse.util.caches.descriptors import Cache
 from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
 from synapse.util.stringutils import exception_to_unicode

+# import a function which will return a monotonic time, in seconds
+try:
+    # on python 3, use time.monotonic, since time.clock can go backwards
+    from time import monotonic as monotonic_time
+except ImportError:
+    # ... but python 2 doesn't have it
+    from time import clock as monotonic_time
+
 logger = logging.getLogger(__name__)

 try:
@@ -166,22 +175,22 @@ class PerformanceCounters(object):
         self.current_counters = {}
         self.previous_counters = {}

-    def update(self, key, start_time, end_time=None):
-        if end_time is None:
-            end_time = time.time()
-        duration = end_time - start_time
+    def update(self, key, duration_secs):
         count, cum_time = self.current_counters.get(key, (0, 0))
         count += 1
-        cum_time += duration
+        cum_time += duration_secs
         self.current_counters[key] = (count, cum_time)
-        return end_time

-    def interval(self, interval_duration, limit=3):
+    def interval(self, interval_duration_secs, limit=3):
         counters = []
         for name, (count, cum_time) in iteritems(self.current_counters):
             prev_count, prev_time = self.previous_counters.get(name, (0, 0))
             counters.append(
-                ((cum_time - prev_time) / interval_duration, count - prev_count, name)
+                (
+                    (cum_time - prev_time) / interval_duration_secs,
+                    count - prev_count,
+                    name,
+                )
             )

         self.previous_counters = dict(self.current_counters)
@@ -212,7 +221,6 @@ class SQLBaseStore(object):
         # is running in mainline, and we have some nice monitoring frontends
         # to watch it
         self._txn_perf_counters = PerformanceCounters()
-        self._get_event_counters = PerformanceCounters()

         self._get_event_cache = Cache(
             "*getEvent*", keylen=3, max_entries=hs.config.event_cache_size
@@ -247,6 +255,8 @@ class SQLBaseStore(object):
             self._check_safe_to_upsert,
         )

+        self.rand = random.SystemRandom()
+
         if self._account_validity.enabled:
             self._clock.call_later(
                 0.0,
@@ -296,33 +306,46 @@ class SQLBaseStore(object):

         def select_users_with_no_expiration_date_txn(txn):
             """Retrieves the list of registered users with no expiration date from the
-            database.
+            database, filtering out deactivated users.
             """
             sql = (
                 "SELECT users.name FROM users"
                 " LEFT JOIN account_validity ON (users.name = account_validity.user_id)"
-                " WHERE account_validity.user_id is NULL;"
+                " WHERE account_validity.user_id is NULL AND users.deactivated = 0;"
             )
             txn.execute(sql, [])

             res = self.cursor_to_dict(txn)
             if res:
                 for user in res:
-                    self.set_expiration_date_for_user_txn(txn, user["name"])
+                    self.set_expiration_date_for_user_txn(
+                        txn, user["name"], use_delta=True
+                    )

         yield self.runInteraction(
             "get_users_with_no_expiration_date",
             select_users_with_no_expiration_date_txn,
         )

-    def set_expiration_date_for_user_txn(self, txn, user_id):
+    def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False):
         """Sets an expiration date to the account with the given user ID.

         Args:
             user_id (str): User ID to set an expiration date for.
+            use_delta (bool): If set to False, the expiration date for the user will be
+                now + validity period. If set to True, this expiration date will be a
+                random value in the [now + period - d ; now + period] range, d being a
+                delta equal to 10% of the validity period.
         """
         now_ms = self._clock.time_msec()
         expiration_ts = now_ms + self._account_validity.period
+
+        if use_delta:
+            expiration_ts = self.rand.randrange(
+                expiration_ts - self._account_validity.startup_job_max_delta,
+                expiration_ts,
+            )
+
         self._simple_insert_txn(
             txn,
             "account_validity",
@@ -334,32 +357,24 @@ class SQLBaseStore(object):
         )

     def start_profiling(self):
-        self._previous_loop_ts = self._clock.time_msec()
+        self._previous_loop_ts = monotonic_time()

         def loop():
             curr = self._current_txn_total_time
             prev = self._previous_txn_total_time
             self._previous_txn_total_time = curr

-            time_now = self._clock.time_msec()
+            time_now = monotonic_time()
             time_then = self._previous_loop_ts
             self._previous_loop_ts = time_now

-            ratio = (curr - prev) / (time_now - time_then)
-
-            top_three_counters = self._txn_perf_counters.interval(
-                time_now - time_then, limit=3
-            )
+            duration = time_now - time_then
+            ratio = (curr - prev) / duration

-            top_3_event_counters = self._get_event_counters.interval(
-                time_now - time_then, limit=3
-            )
+            top_three_counters = self._txn_perf_counters.interval(duration, limit=3)

             perf_logger.info(
-                "Total database time: %.3f%% {%s} {%s}",
-                ratio * 100,
-                top_three_counters,
-                top_3_event_counters,
+                "Total database time: %.3f%% {%s}", ratio * 100, top_three_counters
             )

         self._clock.looping_call(loop, 10000)
@@ -367,7 +382,7 @@ class SQLBaseStore(object):
     def _new_transaction(
         self, conn, desc, after_callbacks, exception_callbacks, func, *args, **kwargs
     ):
-        start = time.time()
+        start = monotonic_time()
         txn_id = self._TXN_ID

         # We don't really need these to be unique, so lets stop it from
@@ -433,7 +448,7 @@ class SQLBaseStore(object):
                 logger.debug("[TXN FAIL] {%s} %s", name, e)
                 raise
         finally:
-            end = time.time()
+            end = monotonic_time()
             duration = end - start

             LoggingContext.current_context().add_database_transaction(duration)
@@ -441,7 +456,7 @@ class SQLBaseStore(object):
             transaction_logger.debug("[TXN END] {%s} %f sec", name, duration)

             self._current_txn_total_time += duration
-            self._txn_perf_counters.update(desc, start, end)
+            self._txn_perf_counters.update(desc, duration)
             sql_txn_timer.labels(desc).observe(duration)

     @defer.inlineCallbacks
@@ -507,11 +522,11 @@ class SQLBaseStore(object):
             )
             parent_context = None

-        start_time = time.time()
+        start_time = monotonic_time()

         def inner_func(conn, *args, **kwargs):
             with LoggingContext("runWithConnection", parent_context) as context:
-                sched_duration_sec = time.time() - start_time
+                sched_duration_sec = monotonic_time() - start_time
                 sql_scheduling_timer.observe(sched_duration_sec)
                 context.add_database_scheduled(sched_duration_sec)

@@ -570,6 +585,10 @@ class SQLBaseStore(object):
         Args:
             table : string giving the table name
             values : dict of new column names and values for them
+            or_ignore : bool stating whether an exception should be raised
+                when a conflicting row already exists. If True, False will be
+                returned by the function instead
+            desc : string giving a description of the transaction

         Returns:
             bool: Whether the row was inserted or not. Only useful when
@@ -1210,8 +1229,8 @@ class SQLBaseStore(object):
         )

         txn.execute(select_sql, list(keyvalues.values()))
-
         row = txn.fetchone()
+
         if not row:
             if allow_none:
                 return None
@@ -1645,7 +1664,7 @@ def db_to_json(db_content):
     # Decode it to a Unicode string before feeding it to json.loads, so we
     # consistenty get a Unicode-containing object out.
     if isinstance(db_content, (bytes, bytearray)):
-        db_content = db_content.decode('utf8')
+        db_content = db_content.decode("utf8")

     try:
         return json.loads(db_content)
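The timing rework above is self-contained enough to sketch outside the patch. Below is a minimal, runnable illustration of the new contract: callers time work with the monotonic clock and hand `update` a plain duration, instead of passing `(start_time, end_time)` wall-clock values. `MiniCounters` is an illustrative stand-in for synapse's `PerformanceCounters`, not its code; the sort-and-truncate step in `interval` is an assumption, since that part of the method falls outside the hunk.

```python
import time

# same fallback as the patch: time.monotonic on Python 3, time.clock on Python 2
try:
    from time import monotonic as monotonic_time
except ImportError:
    from time import clock as monotonic_time


class MiniCounters(object):
    """Illustrative stand-in for PerformanceCounters with the patched signatures."""

    def __init__(self):
        self.current_counters = {}
        self.previous_counters = {}

    def update(self, key, duration_secs):
        # callers time the work themselves and pass a plain duration
        count, cum_time = self.current_counters.get(key, (0, 0))
        self.current_counters[key] = (count + 1, cum_time + duration_secs)

    def interval(self, interval_duration_secs, limit=3):
        # (share of interval, call count, name) per key since the last snapshot
        counters = []
        for name, (count, cum_time) in self.current_counters.items():
            prev_count, prev_time = self.previous_counters.get(name, (0, 0))
            counters.append(
                (
                    (cum_time - prev_time) / interval_duration_secs,
                    count - prev_count,
                    name,
                )
            )
        self.previous_counters = dict(self.current_counters)
        counters.sort(reverse=True)  # assumed: keep the busiest `limit` keys
        return counters[:limit]


counters = MiniCounters()
loop_start = monotonic_time()

start = monotonic_time()
time.sleep(0.01)  # stand-in for a database transaction
counters.update("some_txn", monotonic_time() - start)

print(counters.interval(monotonic_time() - loop_start, limit=3))
```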
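The `use_delta` jitter added to `set_expiration_date_for_user_txn` can likewise be demonstrated on its own. In this sketch, `pick_expiration_ts`, `period_ms`, and `max_delta_ms` are hypothetical names for illustration, with `max_delta_ms` playing the role of `startup_job_max_delta` (10% of the validity period, per the new docstring):

```python
import random

rand = random.SystemRandom()  # as in the patch: OS-backed, not the global PRNG


def pick_expiration_ts(now_ms, period_ms, max_delta_ms, use_delta=False):
    # hypothetical free-function version of the expiration logic above
    expiration_ts = now_ms + period_ms
    if use_delta:
        # randrange(a, b) draws uniformly from [a, b), i.e. the last
        # max_delta_ms of the validity period, so accounts given expiration
        # dates in one startup batch do not all expire at the same instant
        expiration_ts = rand.randrange(expiration_ts - max_delta_ms, expiration_ts)
    return expiration_ts


period_ms = 30 * 24 * 60 * 60 * 1000  # e.g. a 30-day validity period, in ms
max_delta_ms = period_ms // 10        # 10% of the period
print(pick_expiration_ts(1500000000000, period_ms, max_delta_ms, use_delta=True))
```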