Diffstat (limited to 'synapse')
-rw-r--r--  synapse/metrics/__init__.py   8
-rw-r--r--  synapse/metrics/metric.py    24
-rw-r--r--  synapse/storage/_base.py     14
3 files changed, 25 insertions, 21 deletions
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 1acaa3fd09..c161c17e9f 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -19,7 +19,9 @@ from __future__ import absolute_import
 import logging
 from resource import getrusage, getpagesize, RUSAGE_SELF
 
-from .metric import CounterMetric, CallbackMetric, TimerMetric, CacheMetric
+from .metric import (
+    CounterMetric, CallbackMetric, DistributionMetric, CacheMetric
+)
 
 
 logger = logging.getLogger(__name__)
@@ -59,8 +61,8 @@ class Metrics(object):
     def register_callback(self, *args, **kwargs):
         return self._register(CallbackMetric, *args, **kwargs)
 
-    def register_timer(self, *args, **kwargs):
-        return self._register(TimerMetric, *args, **kwargs)
+    def register_distribution(self, *args, **kwargs):
+        return self._register(DistributionMetric, *args, **kwargs)
 
     def register_cache(self, *args, **kwargs):
         return self._register(CacheMetric, *args, **kwargs)
diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py
index 6b7d3358bc..45d2752a20 100644
--- a/synapse/metrics/metric.py
+++ b/synapse/metrics/metric.py
@@ -101,35 +101,37 @@ class CallbackMetric(BaseMetric):
                 for k in sorted(value.keys())]
 
 
-class TimerMetric(CounterMetric):
-    """A combination of an event counter and a time accumulator, which counts
-    both the number of events and how long each one takes.
+class DistributionMetric(CounterMetric):
+    """A combination of an event counter and an accumulator, which counts
+    both the number of events and accumulates the total value. Typically this
+    could be used to keep track of method-running times, or other distributions
+    of values that occur in discrete occurances.
 
     TODO(paul): Try to export some heatmap-style stats?
     """
 
     def __init__(self, *args, **kwargs):
-        super(TimerMetric, self).__init__(*args, **kwargs)
+        super(DistributionMetric, self).__init__(*args, **kwargs)
 
-        self.times = {}
+        self.totals = {}
 
         # Scalar metrics are never empty
         if self.is_scalar():
-            self.times[()] = 0
+            self.totals[()] = 0
 
-    def inc_time(self, msec, *values):
+    def inc_by(self, inc, *values):
         self.inc(*values)
 
-        if values not in self.times:
-            self.times[values] = msec
+        if values not in self.totals:
+            self.totals[values] = inc
         else:
-            self.times[values] += msec
+            self.totals[values] += inc
 
     def render_item(self, k):
         keystr = self._render_key(k)
 
         return ["%s:count%s %d" % (self.name, keystr, self.counts[k]),
-                "%s:msec%s %d" % (self.name, keystr, self.times[k])]
+                "%s:total%s %d" % (self.name, keystr, self.totals[k])]
 
 
 class CacheMetric(object):
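
For reference, a minimal sketch of how the renamed API is intended to be used. The module name, metric name, and values here are illustrative only and are not part of this commit; the calls themselves (get_metrics_for, register_distribution, inc_by) are the ones exercised by the storage changes below.

    import synapse.metrics

    # "example.module" is a made-up metrics namespace, used only for illustration.
    metrics = synapse.metrics.get_metrics_for("example.module")

    # Counts events per label and keeps a running total of the supplied values.
    request_timer = metrics.register_distribution("requests", labels=["verb"])

    # Record one event whose value (here, elapsed milliseconds) is 12;
    # both the :count and :total series for the ("GET",) key are updated.
    request_timer.inc_by(12, "GET")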
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 2708d3c5b6..104e8e3cf6 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -38,9 +38,9 @@ transaction_logger = logging.getLogger("synapse.storage.txn")
 
 metrics = synapse.metrics.get_metrics_for("synapse.storage")
 
-sql_query_timer = metrics.register_timer("queries", labels=["verb"])
-sql_txn_timer = metrics.register_timer("transactions", labels=["desc"])
-sql_getevents_timer = metrics.register_timer("getEvents", labels=["desc"])
+sql_query_timer = metrics.register_distribution("queries", labels=["verb"])
+sql_txn_timer = metrics.register_distribution("transactions", labels=["desc"])
+sql_getevents_timer = metrics.register_distribution("getEvents", labels=["desc"])
 
 caches_by_name = {}
 cache_counter = metrics.register_cache(
@@ -143,7 +143,7 @@ class LoggingTransaction(object):
         finally:
             msecs = (time.time() * 1000) - start
             sql_logger.debug("[SQL time] {%s} %f", self.name, msecs)
-            sql_query_timer.inc_time(msecs, sql.split()[0])
+            sql_query_timer.inc_by(msecs, sql.split()[0])
 
 
 class PerformanceCounters(object):
@@ -268,7 +268,7 @@ class SQLBaseStore(object):
                     self._current_txn_total_time += end - start
                     self._txn_perf_counters.update(desc, start, end)
 
-                    sql_txn_timer.inc_time(self._current_txn_total_time, desc)
+                    sql_txn_timer.inc_by(self._current_txn_total_time, desc)
 
         with PreserveLoggingContext():
             result = yield self._db_pool.runInteraction(
@@ -672,7 +672,7 @@ class SQLBaseStore(object):
 
         def update_counter(desc, last_time):
             curr_time = self._get_event_counters.update(desc, last_time)
-            sql_getevents_timer.inc_time(curr_time - last_time, desc)
+            sql_getevents_timer.inc_by(curr_time - last_time, desc)
             return curr_time
 
         cache = self._get_event_cache.setdefault(event_id, {})
@@ -727,7 +727,7 @@ class SQLBaseStore(object):
 
         def update_counter(desc, last_time):
             curr_time = self._get_event_counters.update(desc, last_time)
-            sql_getevents_timer.inc_time(curr_time - last_time, desc)
+            sql_getevents_timer.inc_by(curr_time - last_time, desc)
             return curr_time
 
         d = json.loads(js)
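
The storage changes above all follow the same pattern: measure elapsed wall-clock milliseconds and accumulate them with inc_by(). A hedged sketch of that pattern outside of LoggingTransaction, with illustrative names:

    import time

    import synapse.metrics

    metrics = synapse.metrics.get_metrics_for("example.module")
    work_timer = metrics.register_distribution("work", labels=["desc"])

    def timed(desc, fn, *args, **kwargs):
        # Mirror LoggingTransaction.execute(): time the call in milliseconds
        # and feed the duration into the distribution keyed by `desc`.
        start = time.time() * 1000
        try:
            return fn(*args, **kwargs)
        finally:
            work_timer.inc_by((time.time() * 1000) - start, desc)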