author    Mark Haines <mark.haines@matrix.org>  2015-02-10 14:50:53 +0000
committer Mark Haines <mark.haines@matrix.org>  2015-02-10 14:50:53 +0000
commit    d7c7efb691bd726ec3e8879e289546fbcfd7dabd (patch)
tree      ca344b20f5da7967212caf6f35c46534f9d27f84
parent    Convert directory paths to absolute paths before daemonizing (diff)
download  synapse-d7c7efb691bd726ec3e8879e289546fbcfd7dabd.tar.xz
Add performance counters for different stages of loading events
-rw-r--r--  synapse/storage/_base.py  84
1 file changed, 60 insertions, 24 deletions
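
For context, a minimal usage sketch of the PerformanceCounters class this patch introduces (not part of the patch; the stage names and the toy workload below are illustrative only): each stage is bracketed with update(), which returns the stage's end time so it can be fed straight back in as the next stage's start time, and interval() reports the busiest stages as a share of the elapsed wall-clock interval, measured in milliseconds to match the rest of this file.

    import json
    import time

    from synapse.storage._base import PerformanceCounters  # class added by this patch

    counters = PerformanceCounters()

    loop_start = time.time() * 1000
    for _ in range(1000):
        start = time.time() * 1000
        event_dict = json.loads('{"type": "m.room.message", "content": {}}')
        # update() returns the end time, which doubles as the next stage's start time
        start = counters.update("decode_json", start)
        frozen = tuple(sorted(event_dict.items()))
        start = counters.update("build_event", start)

    elapsed_ms = time.time() * 1000 - loop_start
    # Prints something like: "decode_json(1000): 45.123%, build_event(1000): 30.456%"
    print(counters.interval(elapsed_ms, limit=3))
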
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 5ddd410607..c79399fe5e 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -77,6 +77,43 @@ class LoggingTransaction(object):
             sql_logger.debug("[SQL time] {%s} %f", self.name, end - start)
 
 
+class PerformanceCounters(object):
+    def __init__(self):
+        self.current_counters = {}
+        self.previous_counters = {}
+
+    def update(self, key, start_time, end_time=None):
+        if end_time is None:
+            end_time = time.time() * 1000
+        duration = end_time - start_time
+        count, cum_time = self.current_counters.get(key, (0, 0))
+        count += 1
+        cum_time += duration
+        self.current_counters[key] = (count, cum_time)
+        return end_time
+
+    def interval(self, interval_duration, limit=3):
+        counters = []
+        for name, (count, cum_time) in self.current_counters.items():
+            prev_count, prev_time = self.previous_counters.get(name, (0, 0))
+            counters.append((
+                (cum_time - prev_time) / interval_duration,
+                count - prev_count,
+                name
+            ))
+
+        self.previous_counters = dict(self.current_counters)
+
+        counters.sort(reverse=True)
+
+        top_n_counters = ", ".join(
+            "%s(%d): %.3f%%" % (name, count, 100 * ratio)
+            for ratio, count, name in counters[:limit]
+        )
+
+        return top_n_counters
+
+
 class SQLBaseStore(object):
     _TXN_ID = 0
 
@@ -88,8 +125,8 @@ class SQLBaseStore(object):
         self._previous_txn_total_time = 0
         self._current_txn_total_time = 0
         self._previous_loop_ts = 0
-        self._txn_perf_counters = {}
-        self._previous_txn_perf_counters = {}
+        self._txn_perf_counters = PerformanceCounters()
+        self._get_event_counters = PerformanceCounters()
 
     def start_profiling(self):
         self._previous_loop_ts = self._clock.time_msec()
@@ -105,23 +142,12 @@ class SQLBaseStore(object):
 
             ratio = (curr - prev)/(time_now - time_then)
 
-            txn_counters = []
-            for name, (count, cum_time) in self._txn_perf_counters.items():
-                prev_count, prev_time = self._previous_txn_perf_counters.get(
-                    name, (0,0)
-                )
-                txn_counters.append((
-                    (cum_time - prev_time) / (time_now - time_then),
-                    count - prev_count,
-                    name
-                ))
-
-            self._previous_txn_perf_counters = dict(self._txn_perf_counters)
-
-            txn_counters.sort(reverse=True)
-            top_three_counters = ", ".join(
-                "%s(%d): %.3f%%" % (name, count, 100 * ratio)
-                for ratio, count, name in txn_counters[:3]
+            top_three_counters = self._txn_perf_counters.interval(
+                time_now - time_then, limit=3
+            )
+
+            top_3_event_counters = self._get_event_counters.interval(
+                time_now - time_then, limit=3
             )
 
             logger.info(
@@ -162,11 +188,7 @@ class SQLBaseStore(object):
                     )
 
                     self._current_txn_total_time += end - start
-
-                    count, cum_time = self._txn_perf_counters.get(desc, (0,0))
-                    count += 1
-                    cum_time += end - start
-                    self._txn_perf_counters[desc] = (count, cum_time)
+                    self._txn_perf_counters.update(desc, start, end)
 
         with PreserveLoggingContext():
             result = yield self._db_pool.runInteraction(
@@ -566,6 +588,8 @@ class SQLBaseStore(object):
             "LIMIT 1 "
         )
 
+        start_time = time.time() * 1000
+
         txn.execute(sql, (event_id,))
 
         res = txn.fetchone()
@@ -575,6 +599,8 @@ class SQLBaseStore(object):
 
         internal_metadata, js, redacted, rejected_reason = res
 
+        self._get_event_counters.update("select_event", start_time)
+
         if allow_rejected or not rejected_reason:
             return self._get_event_from_row_txn(
                 txn, internal_metadata, js, redacted,
@@ -586,10 +612,18 @@ class SQLBaseStore(object):
 
     def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted,
                                 check_redacted=True, get_prev_content=False):
+
+        start_time = time.time() * 1000
+        update_counter = self._get_event_counters.update
+
         d = json.loads(js)
+        start_time = update_counter("decode_json", start_time)
+
         internal_metadata = json.loads(internal_metadata)
+        start_time = update_counter("decode_internal", start_time)
 
         ev = FrozenEvent(d, internal_metadata_dict=internal_metadata)
+        start_time = update_counter("build_frozen_event", start_time)
 
         if check_redacted and redacted:
             ev = prune_event(ev)
@@ -605,6 +639,7 @@ class SQLBaseStore(object):
 
             if because:
                 ev.unsigned["redacted_because"] = because
+            start_time = update_counter("redact_event", start_time)
 
         if get_prev_content and "replaces_state" in ev.unsigned:
             prev = self._get_event_txn(
@@ -614,6 +649,7 @@ class SQLBaseStore(object):
             )
             if prev:
                 ev.unsigned["prev_content"] = prev.get_dict()["content"]
+            start_time = update_counter("get_prev_content", start_time)
 
         return ev