Diffstat (limited to 'synapse/storage')
-rw-r--r--  synapse/storage/_base.py  | 17
-rw-r--r--  synapse/storage/events.py | 28
2 files changed, 18 insertions(+), 27 deletions(-)
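
This commit moves synapse/storage off the homegrown synapse.metrics
registration helpers and onto prometheus_client metric objects. A minimal
sketch of the prometheus_client API that the hunks below adopt (the names
here are illustrative, not taken from the commit):

    from prometheus_client import Counter, Histogram

    # Histogram(name, documentation, labelnames); this commit passes ""
    # as the documentation string throughout.
    query_timer = Histogram("example_query_time", "", ["verb"])
    query_timer.labels("SELECT").observe(12.5)  # record one sample

    events = Counter("example_persisted_events", "")
    events.inc(3)  # counters are monotonic; inc() defaults to +1
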
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 2262776ab2..d1b625dc30 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -18,8 +18,8 @@ from synapse.api.errors import StoreError
 from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
 from synapse.util.caches.descriptors import Cache
 from synapse.storage.engines import PostgresEngine
-import synapse.metrics
 
+from prometheus_client import Histogram
 
 from twisted.internet import defer
 
@@ -34,13 +34,10 @@ sql_logger = logging.getLogger("synapse.storage.SQL")
 transaction_logger = logging.getLogger("synapse.storage.txn")
 perf_logger = logging.getLogger("synapse.storage.TIME")
 
+sql_scheduling_timer = Histogram("synapse_storage_schedule_time", "")
 
-metrics = synapse.metrics.get_metrics_for("synapse.storage")
-
-sql_scheduling_timer = metrics.register_distribution("schedule_time")
-
-sql_query_timer = metrics.register_distribution("query_time", labels=["verb"])
-sql_txn_timer = metrics.register_distribution("transaction_time", labels=["desc"])
+sql_query_timer = Histogram("synapse_storage_query_time", "", ["verb"])
+sql_txn_timer = Histogram("synapse_storage_transaction_time", "", ["desc"])
 
 
 class LoggingTransaction(object):
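
One consequence of the switch, sketched below with hypothetical metric
names: prometheus_client metrics add themselves to the process-global
default REGISTRY when constructed, which is why the per-module
get_metrics_for("synapse.storage") lookup removed above is no longer
needed.

    from prometheus_client import REGISTRY, Histogram

    timer = Histogram("example_schedule_time", "")
    timer.observe(0.004)

    # The sample is already exported via the default registry:
    assert REGISTRY.get_sample_value("example_schedule_time_sum") == 0.004
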
@@ -117,7 +114,7 @@ class LoggingTransaction(object):
         finally:
             msecs = (time.time() * 1000) - start
             sql_logger.debug("[SQL time] {%s} %f", self.name, msecs)
-            sql_query_timer.inc_by(msecs, sql.split()[0])
+            sql_query_timer.labels(sql.split()[0]).observe(msecs)
 
 
 class PerformanceCounters(object):
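
One caveat about the new call: observe() here is fed milliseconds, but
prometheus_client's default Histogram buckets run from 0.005 to 10.0 and
assume durations in seconds, so millisecond-scale samples will pile up in
the top buckets. A sketch, with hypothetical bucket values, of how explicit
buckets could preserve the resolution:

    from prometheus_client import Histogram

    ms_timer = Histogram(
        "example_query_time_ms", "", ["verb"],
        buckets=[1, 5, 10, 50, 100, 500, 1000, 5000],  # milliseconds
    )
    ms_timer.labels("SELECT").observe(12.5)
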
@@ -287,7 +284,7 @@ class SQLBaseStore(object):
 
             self._current_txn_total_time += duration
             self._txn_perf_counters.update(desc, start, end)
-            sql_txn_timer.inc_by(duration, desc)
+            sql_txn_timer.labels(desc).observe(duration)
 
     @defer.inlineCallbacks
     def runInteraction(self, desc, func, *args, **kwargs):
@@ -349,7 +346,7 @@ class SQLBaseStore(object):
         def inner_func(conn, *args, **kwargs):
             with LoggingContext("runWithConnection") as context:
                 sched_duration_ms = time.time() * 1000 - start_time
-                sql_scheduling_timer.inc_by(sched_duration_ms)
+                sql_scheduling_timer.observe(sched_duration_ms)
                 current_context.add_database_scheduled(sched_duration_ms)
 
                 if self.database_engine.is_connection_closed(conn):
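
An alternative the commit does not take, shown only as a sketch:
Histogram.time() measures a block directly as a context manager, recording
the elapsed time in seconds rather than computing milliseconds by hand.

    from prometheus_client import Histogram

    sched_timer = Histogram("example_schedule_seconds", "")

    def do_scheduled_work():
        pass  # hypothetical stand-in for the scheduled database work

    with sched_timer.time():  # observes the block's duration on exit
        do_scheduled_work()
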
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 05cde96afc..96b48cfdbb 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -40,30 +40,24 @@ import synapse.metrics
 from synapse.events import EventBase    # noqa: F401
 from synapse.events.snapshot import EventContext   # noqa: F401
 
-logger = logging.getLogger(__name__)
+from prometheus_client import Counter
 
+logger = logging.getLogger(__name__)
 
-metrics = synapse.metrics.get_metrics_for(__name__)
-persist_event_counter = metrics.register_counter("persisted_events")
-event_counter = metrics.register_counter(
-    "persisted_events_sep", labels=["type", "origin_type", "origin_entity"]
-)
+persist_event_counter = Counter("synapse_storage_events_persisted_events", "")
+event_counter = Counter("synapse_storage_events_persisted_events_sep", "", ["type", "origin_type", "origin_entity"])
 
 # The number of times we are recalculating the current state
-state_delta_counter = metrics.register_counter(
-    "state_delta",
-)
+state_delta_counter = Counter("synapse_storage_events_state_delta", "")
+
 # The number of times we are recalculating state when there is only a
 # single forward extremity
-state_delta_single_event_counter = metrics.register_counter(
-    "state_delta_single_event",
-)
+state_delta_single_event_counter = Counter("synapse_storage_events_state_delta_single_event", "")
+
 # The number of times we are recalculating state when we could have reasonably
 # calculated the delta when we calculated the state for an event we were
 # persisting.
-state_delta_reuse_delta_counter = metrics.register_counter(
-    "state_delta_reuse_delta",
-)
+state_delta_reuse_delta_counter = Counter("synapse_storage_events_state_delta_reuse_delta", "")
 
 
 def encode_json(json_object):
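
Because metric names live in the process-global default registry,
constructing a second metric with an already-registered name raises
ValueError, so Counters like the ones above have to remain module-level
singletons. A small sketch with a hypothetical name:

    from prometheus_client import Counter

    c1 = Counter("example_state_delta", "")
    try:
        c2 = Counter("example_state_delta", "")  # same name again
    except ValueError:
        pass  # duplicated timeseries in the default REGISTRY
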
@@ -445,7 +439,7 @@ class EventsStore(EventsWorkerStore):
                     state_delta_for_room=state_delta_for_room,
                     new_forward_extremeties=new_forward_extremeties,
                 )
-                persist_event_counter.inc_by(len(chunk))
+                persist_event_counter.inc(len(chunk))
                 synapse.metrics.event_persisted_position.set(
                     chunk[-1][0].internal_metadata.stream_ordering,
                 )
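
The old inc_by(n) maps directly onto inc(n): Counter.inc() takes an
optional amount defaulting to 1, so the call above adds the whole chunk in
one step, and a negative amount raises ValueError since counters only go
up. A tiny sketch with an illustrative name:

    from prometheus_client import Counter

    persisted = Counter("example_persisted", "")
    persisted.inc()    # +1
    persisted.inc(42)  # +42, e.g. one batch of persisted events
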
@@ -460,7 +454,7 @@ class EventsStore(EventsWorkerStore):
                         origin_type = "remote"
                         origin_entity = get_domain_from_id(event.sender)
 
-                    event_counter.inc(event.type, origin_type, origin_entity)
+                    event_counter.labels(event.type, origin_type, origin_entity).inc()
 
                 for room_id, new_state in current_state_for_room.iteritems():
                     self.get_current_state_ids.prefill(
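
The labelled form reverses the old argument order: label values now go to
labels(), which accepts them positionally in declaration order (as above)
or as keyword arguments. A sketch with illustrative values:

    from prometheus_client import Counter

    events = Counter("example_events_sep", "",
                     ["type", "origin_type", "origin_entity"])
    events.labels("m.room.message", "remote", "example.org").inc()
    events.labels(type="m.room.message", origin_type="local",
                  origin_entity="@alice:example.org").inc()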