From 18e144fe088bda9e28697062cda62dca1e03724b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 19:55:03 +0100 Subject: Move RequestMetrics to its own file This is useful in its own right, because server.py is full of stuff; but more importantly, I want to do some refactoring that will cause a circular reference as it is. --- synapse/http/request_metrics.py | 147 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) create mode 100644 synapse/http/request_metrics.py (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py new file mode 100644 index 0000000000..4a843a36a7 --- /dev/null +++ b/synapse/http/request_metrics.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +import synapse.metrics +from synapse.util.logcontext import LoggingContext + +logger = logging.getLogger(__name__) + +metrics = synapse.metrics.get_metrics_for("synapse.http.server") + +# total number of responses served, split by method/servlet/tag +response_count = metrics.register_counter( + "response_count", + labels=["method", "servlet", "tag"], + alternative_names=( + # the following are all deprecated aliases for the same metric + metrics.name_prefix + x for x in ( + "_requests", + "_response_time:count", + "_response_ru_utime:count", + "_response_ru_stime:count", + "_response_db_txn_count:count", + "_response_db_txn_duration:count", + ) + ) +) + +requests_counter = metrics.register_counter( + "requests_received", + labels=["method", "servlet", ], +) + +outgoing_responses_counter = metrics.register_counter( + "responses", + labels=["method", "code"], +) + +response_timer = metrics.register_counter( + "response_time_seconds", + labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_time:total", + ), +) + +response_ru_utime = metrics.register_counter( + "response_ru_utime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_utime:total", + ), +) + +response_ru_stime = metrics.register_counter( + "response_ru_stime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_stime:total", + ), +) + +response_db_txn_count = metrics.register_counter( + "response_db_txn_count", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_count:total", + ), +) + +# seconds spent waiting for db txns, excluding scheduling time, when processing +# this request +response_db_txn_duration = metrics.register_counter( + "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_duration:total", + ), +) + +# seconds spent waiting for a db connection, when processing this request +response_db_sched_duration = metrics.register_counter( +
"response_db_sched_duration_seconds", labels=["method", "servlet", "tag"] +) + +# size in bytes of the response written +response_size = metrics.register_counter( + "response_size", labels=["method", "servlet", "tag"] +) + + +class RequestMetrics(object): + def start(self, clock, name): + self.start = clock.time_msec() + self.start_context = LoggingContext.current_context() + self.name = name + + def stop(self, clock, request): + context = LoggingContext.current_context() + + tag = "" + if context: + tag = context.tag + + if context != self.start_context: + logger.warn( + "Context have unexpectedly changed %r, %r", + context, self.start_context + ) + return + + response_count.inc(request.method, self.name, tag) + + response_timer.inc_by( + clock.time_msec() - self.start, request.method, + self.name, tag + ) + + ru_utime, ru_stime = context.get_resource_usage() + + response_ru_utime.inc_by( + ru_utime, request.method, self.name, tag + ) + response_ru_stime.inc_by( + ru_stime, request.method, self.name, tag + ) + response_db_txn_count.inc_by( + context.db_txn_count, request.method, self.name, tag + ) + response_db_txn_duration.inc_by( + context.db_txn_duration_ms / 1000., request.method, self.name, tag + ) + response_db_sched_duration.inc_by( + context.db_sched_duration_ms / 1000., request.method, self.name, tag + ) + + response_size.inc_by(request.sentLength, request.method, self.name, tag) -- cgit 1.4.1 From 09b29f9c4a941ae5721b1f5e296156dfe92e3395 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:03:11 +0100 Subject: Make RequestMetrics take a raw time rather than a clock ... which is going to make it easier to move around. --- synapse/http/request_metrics.py | 8 ++++---- synapse/http/server.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 4a843a36a7..4e8a5f5306 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -100,12 +100,12 @@ response_size = metrics.register_counter( class RequestMetrics(object): - def start(self, clock, name): - self.start = clock.time_msec() + def start(self, time_msec, name): + self.start = time_msec self.start_context = LoggingContext.current_context() self.name = name - def stop(self, clock, request): + def stop(self, time_msec, request): context = LoggingContext.current_context() tag = "" @@ -122,7 +122,7 @@ class RequestMetrics(object): response_count.inc(request.method, self.name, tag) response_timer.inc_by( - clock.time_msec() - self.start, request.method, + time_msec - self.start, request.method, self.name, tag ) diff --git a/synapse/http/server.py b/synapse/http/server.py index 369f7bbdd1..b16c9c17f6 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -87,7 +87,7 @@ def wrap_request_handler(request_handler, include_metrics=False): # JsonResource (or a subclass), and JsonResource._async_render # will update it once it picks a servlet. 
servlet_name = self.__class__.__name__ - request_metrics.start(self.clock, name=servlet_name) + request_metrics.start(self.clock.time_msec(), name=servlet_name) with request.processing(): try: @@ -138,7 +138,7 @@ def wrap_request_handler(request_handler, include_metrics=False): finally: try: request_metrics.stop( - self.clock, request + self.clock.time_msec(), request ) except Exception as e: logger.warn("Failed to stop metrics: %r", e) -- cgit 1.4.1 From 49e5a613f1ac53498b31150f78332fc64932e61b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:44:22 +0100 Subject: Move outgoing_responses_counter handling to RequestMetrics it's much neater there. --- synapse/http/request_metrics.py | 2 ++ synapse/http/server.py | 4 ---- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 4e8a5f5306..8c850bf23f 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -119,6 +119,8 @@ class RequestMetrics(object): ) return + outgoing_responses_counter.inc(request.method, str(request.code)) + response_count.inc(request.method, self.name, tag) response_timer.inc_by( diff --git a/synapse/http/server.py b/synapse/http/server.py index 1d05b53873..200b2c4837 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -20,7 +20,6 @@ from synapse.api.errors import ( ) from synapse.http.request_metrics import ( requests_counter, - outgoing_responses_counter, ) from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.caches import intern_dict @@ -112,7 +111,6 @@ def wrap_request_handler(request_handler, include_metrics=False): ) else: logger.exception(e) - outgoing_responses_counter.inc(request.method, str(code)) respond_with_json( request, code, cs_exception(e), send_cors=True, pretty_print=_request_user_agent_is_curl(request), @@ -274,8 +272,6 @@ class JsonResource(HttpServer, resource.Resource): def _send_response(self, request, code, response_json_object, response_code_message=None): - outgoing_responses_counter.inc(request.method, str(code)) - # TODO: Only enable CORS for the requests that need it. respond_with_json( request, code, response_json_object, -- cgit 1.4.1 From dfa70adc33b8f058acae8a4f4f61a83f9fab47a7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 21 May 2018 16:03:39 +0100 Subject: Add in flight request metrics This tracks CPU and DB usage while requests are in flight, rather than when we write the response. --- synapse/http/request_metrics.py | 152 +++++++++++++++++++++++++++++++++++++++- synapse/http/site.py | 4 +- 2 files changed, 154 insertions(+), 2 deletions(-) (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 8c850bf23f..f1e9b41fde 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -98,14 +98,76 @@ response_size = metrics.register_counter( "response_size", labels=["method", "servlet", "tag"] ) +# In flight metrics are incremented while the requests are in flight, rather +# than when the response was written. 
+ +in_flight_requests_ru_utime = metrics.register_counter( + "in_flight_requests_ru_utime_seconds", labels=["method", "servlet"], +) + +in_flight_requests_ru_stime = metrics.register_counter( + "in_flight_requests_ru_stime_seconds", labels=["method", "servlet"], +) + +in_flight_requests_db_txn_count = metrics.register_counter( + "in_flight_requests_db_txn_count", labels=["method", "servlet"], +) + +# seconds spent waiting for db txns, excluding scheduling time, when processing +# this request +in_flight_requests_db_txn_duration = metrics.register_counter( + "in_flight_requests_db_txn_duration_seconds", labels=["method", "servlet"], +) + +# seconds spent waiting for a db connection, when processing this request +in_flight_requests_db_sched_duration = metrics.register_counter( + "in_flight_requests_db_sched_duration_seconds", labels=["method", "servlet"] +) + +_in_flight_requests_count = metrics.register_gauge( + "in_flight_requests_count", labels=["method", "servlet"] +) + + +# The set of all in flight requests. +_in_flight_requests = set() + + +def _collect_in_flight(): + """Called just before metrics are collected, so we use it to update all + the in flight request metrics + """ + + # Map from (method, name) -> int, the number of in flight requests of that + # type + counts = {} + + for rm in _in_flight_requests: + rm.update_metrics() + key = (rm.method, rm.name,) + counts[key] = counts.get(key, 0) + 1 + + for (method, name), count in counts.iteritems(): + _in_flight_requests_count.set(count, method, name) + + +metrics.register_collector(_collect_in_flight) + class RequestMetrics(object): - def start(self, time_msec, name): + def start(self, time_msec, name, method): self.start = time_msec self.start_context = LoggingContext.current_context() self.name = name + self.method = method + + self._request_stats = _RequestStats.from_context(self.start_context) + + _in_flight_requests.add(self) def stop(self, time_msec, request): + _in_flight_requests.discard(self) + context = LoggingContext.current_context() tag = "" @@ -147,3 +209,91 @@ class RequestMetrics(object): ) response_size.inc_by(request.sentLength, request.method, self.name, tag) + + # We always call this at the end to ensure that we update the metrics + # regardless of whether /metrics was called while the request was in + # flight. + self.update_metrics() + + def update_metrics(self): + """Updates the in flight metrics with values from this request. + """ + + diff = self._request_stats.update() + + in_flight_requests_ru_utime.inc_by( + diff.ru_utime, self.method, self.name, + ) + + in_flight_requests_ru_stime.inc_by( + diff.ru_stime, self.method, self.name, + ) + + in_flight_requests_db_txn_count.inc_by( + diff.db_txn_count, self.method, self.name, + ) + + in_flight_requests_db_txn_duration.inc_by( + diff.db_txn_duration_ms / 1000., self.method, self.name, + ) + + in_flight_requests_db_sched_duration.inc_by( + diff.db_sched_duration_ms / 1000., self.method, self.name, + ) + + +class _RequestStats(object): + """Keeps track of various metrics for an in flight request.
+ """ + + __slots__ = [ + "context", "ru_utime", "ru_stime", + "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", + ] + + def __init__(self, context, ru_utime, ru_stime, db_txn_count, + db_txn_duration_ms, db_sched_duration_ms): + self.context = context + self.ru_utime = ru_utime + self.ru_stime = ru_stime + self.db_txn_count = db_txn_count + self.db_txn_duration_ms = db_txn_duration_ms + self.db_sched_duration_ms = db_sched_duration_ms + + @staticmethod + def from_context(context): + ru_utime, ru_stime = context.get_resource_usage() + + return _RequestStats( + context, + ru_utime, ru_stime, + context.db_txn_count, + context.db_txn_duration_ms, + context.db_sched_duration_ms, + ) + + def update(self): + """Updates the current values and returns the difference between the + old and new values. + + Returns: + _RequestStats: The difference between the old and new values + """ + new = _RequestStats.from_context(self.context) + + diff = _RequestStats( + self.context, + new.ru_utime - self.ru_utime, + new.ru_stime - self.ru_stime, + new.db_txn_count - self.db_txn_count, + new.db_txn_duration_ms - self.db_txn_duration_ms, + new.db_sched_duration_ms - self.db_sched_duration_ms, + ) + + self.ru_utime = new.ru_utime + self.ru_stime = new.ru_stime + self.db_txn_count = new.db_txn_count + self.db_txn_duration_ms = new.db_txn_duration_ms + self.db_sched_duration_ms = new.db_sched_duration_ms + + return diff diff --git a/synapse/http/site.py b/synapse/http/site.py index 202a990508..b608504225 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -85,7 +85,9 @@ class SynapseRequest(Request): def _started_processing(self, servlet_name): self.start_time = int(time.time() * 1000) self.request_metrics = RequestMetrics() - self.request_metrics.start(self.start_time, name=servlet_name) + self.request_metrics.start( + self.start_time, name=servlet_name, method=self.method, + ) self.site.access_logger.info( "%s - %s - Received request: %s %s", -- cgit 1.4.1 From fcc525b0b705703fead3d703c0df62a264ff8ce8 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 21 May 2018 19:48:57 -0500 Subject: rest of the changes --- synapse/http/request_metrics.py | 106 ++++------------- synapse/http/server.py | 4 +- synapse/push/bulk_push_rule_evaluator.py | 28 ++--- synapse/replication/tcp/resource.py | 30 +++-- synapse/storage/_base.py | 17 ++- synapse/storage/events.py | 28 ++--- tests/metrics/__init__.py | 0 tests/metrics/test_metric.py | 192 ------------------------------- 8 files changed, 68 insertions(+), 337 deletions(-) delete mode 100644 tests/metrics/__init__.py delete mode 100644 tests/metrics/test_metric.py (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 8c850bf23f..34a730d5bc 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -16,86 +16,38 @@ import logging -import synapse.metrics +from prometheus_client.core import Counter, Histogram + from synapse.util.logcontext import LoggingContext logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for("synapse.http.server") # total number of responses served, split by method/servlet/tag -response_count = metrics.register_counter( - "response_count", - labels=["method", "servlet", "tag"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_requests", - "_response_time:count", - "_response_ru_utime:count", - "_response_ru_stime:count", - 
"_response_db_txn_count:count", - "_response_db_txn_duration:count", - ) - ) -) +response_count = Counter("synapse_http_server_response_count", "", ["method", "servlet", "tag"]) -requests_counter = metrics.register_counter( - "requests_received", - labels=["method", "servlet", ], -) +requests_counter = Counter("synapse_http_server_requests_received", "", ["method", "servlet"]) -outgoing_responses_counter = metrics.register_counter( - "responses", - labels=["method", "code"], -) +outgoing_responses_counter = Counter("synapse_http_server_responses", "", ["method", "code"]) -response_timer = metrics.register_counter( - "response_time_seconds", - labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_time:total", - ), -) +response_timer = Histogram("synapse_http_server_response_time_seconds", "", ["method", "servlet", "tag"]) -response_ru_utime = metrics.register_counter( - "response_ru_utime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_utime:total", - ), -) +response_ru_utime = Counter("synapse_http_server_response_ru_utime_seconds", "", ["method", "servlet", "tag"]) -response_ru_stime = metrics.register_counter( - "response_ru_stime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_stime:total", - ), -) +response_ru_stime = Counter("synapse_http_server_response_ru_stime_seconds", "", ["method", "servlet", "tag"]) -response_db_txn_count = metrics.register_counter( - "response_db_txn_count", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_count:total", - ), -) +response_db_txn_count = Counter("synapse_http_server_response_db_txn_count", "", ["method", "servlet", "tag"]) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request -response_db_txn_duration = metrics.register_counter( - "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_duration:total", - ), -) +response_db_txn_duration = Counter("synapse_http_server_response_db_txn_duration_seconds", "", ["method", "servlet", "tag"]) # seconds spent waiting for a db connection, when processing this request -response_db_sched_duration = metrics.register_counter( - "response_db_sched_duration_seconds", labels=["method", "servlet", "tag"] +response_db_sched_duration = Counter("synapse_http_request_response_db_sched_duration_seconds", "", ["method", "servlet", "tag"] ) # size in bytes of the response written -response_size = metrics.register_counter( - "response_size", labels=["method", "servlet", "tag"] +response_size = Counter("synapse_http_request_response_size", "", ["method", "servlet", "tag"] ) @@ -119,31 +71,19 @@ class RequestMetrics(object): ) return - outgoing_responses_counter.inc(request.method, str(request.code)) + outgoing_responses_counter.labels(request.method, str(request.code)).inc() - response_count.inc(request.method, self.name, tag) + response_count.labels(request.method, self.name, tag).inc() - response_timer.inc_by( - time_msec - self.start, request.method, - self.name, tag - ) + response_timer.labels(request.method, self.name, tag).observe(time_msec - self.start) ru_utime, ru_stime = context.get_resource_usage() - response_ru_utime.inc_by( - ru_utime, request.method, self.name, tag - ) - response_ru_stime.inc_by( - ru_stime, request.method, self.name, tag - ) - response_db_txn_count.inc_by( - 
context.db_txn_count, request.method, self.name, tag - ) - response_db_txn_duration.inc_by( - context.db_txn_duration_ms / 1000., request.method, self.name, tag - ) - response_db_sched_duration.inc_by( - context.db_sched_duration_ms / 1000., request.method, self.name, tag - ) - - response_size.inc_by(request.sentLength, request.method, self.name, tag) + response_ru_utime.labels(request.method, self.name, tag).inc(ru_utime) + response_ru_stime.labels(request.method, self.name, tag).inc(ru_stime) + response_db_txn_count.labels(request.method, self.name, tag).inc(context.db_txn_count) + response_db_txn_duration.labels(request.method, self.name, tag).inc(context.db_txn_duration_ms / 1000.) + response_db_sched_duration.labels(request.method, self.name, tag).inc( + context.db_sched_duration_ms / 1000.) + + response_size.labels(request.method, self.name, tag).inc(request.sentLength) diff --git a/synapse/http/server.py b/synapse/http/server.py index b6e2ae14a2..f72d986288 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -138,8 +138,8 @@ def wrap_request_handler_with_logging(h): # dispatching to the handler, so that the handler # can update the servlet name in the request # metrics - requests_counter.inc(request.method, - request.request_metrics.name) + requests_counter.labels(request.method, + request.request_metrics.name).inc() yield d return wrapped_request_handler diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 7c680659b6..6fcca5e260 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -22,35 +22,29 @@ from .push_rule_evaluator import PushRuleEvaluatorForEvent from synapse.event_auth import get_user_power_level from synapse.api.constants import EventTypes, Membership -from synapse.metrics import get_metrics_for -from synapse.util.caches import metrics as cache_metrics +from synapse.util.caches import register_cache from synapse.util.caches.descriptors import cached from synapse.util.async import Linearizer from synapse.state import POWER_KEY from collections import namedtuple - +from prometheus_client import Counter logger = logging.getLogger(__name__) rules_by_room = {} -push_metrics = get_metrics_for(__name__) -push_rules_invalidation_counter = push_metrics.register_counter( - "push_rules_invalidation_counter" -) -push_rules_state_size_counter = push_metrics.register_counter( - "push_rules_state_size_counter" -) +push_rules_invalidation_counter = Counter("synapse_push_bulk_push_role_evaluator_push_rules_invalidation_counter", "") +push_rules_state_size_counter = Counter("synapse_push_bulk_push_role_evaluator_push_rules_state_size_counter", "") # Measures whether we use the fast path of using state deltas, or if we have to # recalculate from scratch -push_rules_delta_state_cache_metric = cache_metrics.register_cache( +push_rules_delta_state_cache_metric = register_cache( "cache", - size_callback=lambda: 0, # Meaningless size, as this isn't a cache that stores values - cache_name="push_rules_delta_state_cache_metric", + "push_rules_delta_state_cache_metric", + cache=[], # Meaningless size, as this isn't a cache that stores values ) @@ -64,10 +58,10 @@ class BulkPushRuleEvaluator(object): self.store = hs.get_datastore() self.auth = hs.get_auth() - self.room_push_rule_cache_metrics = cache_metrics.register_cache( + self.room_push_rule_cache_metrics = register_cache( "cache", - size_callback=lambda: 0, # There's not good value for this - cache_name="room_push_rule_cache", + 
"room_push_rule_cache", + cache=[], # Meaningless size, as this isn't a cache that stores values ) @defer.inlineCallbacks @@ -309,7 +303,7 @@ class RulesForRoom(object): current_state_ids = context.current_state_ids push_rules_delta_state_cache_metric.inc_misses() - push_rules_state_size_counter.inc_by(len(current_state_ids)) + push_rules_state_size_counter.inc(len(current_state_ids)) logger.debug( "Looking for member changes in %r %r", state_group, current_state_ids diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index a41af4fd6c..0e6b1957c6 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -22,20 +22,19 @@ from .streams import STREAMS_MAP, FederationStream from .protocol import ServerReplicationStreamProtocol from synapse.util.metrics import Measure, measure_func +from synapse.metrics import LaterGauge import logging -import synapse.metrics +from prometheus_client import Counter -metrics = synapse.metrics.get_metrics_for(__name__) -stream_updates_counter = metrics.register_counter( - "stream_updates", labels=["stream_name"] +stream_updates_counter = Counter("synapse_replication_tcp_resource_stream_updates", "", ["stream_name"] ) -user_sync_counter = metrics.register_counter("user_sync") -federation_ack_counter = metrics.register_counter("federation_ack") -remove_pusher_counter = metrics.register_counter("remove_pusher") -invalidate_cache_counter = metrics.register_counter("invalidate_cache") -user_ip_cache_counter = metrics.register_counter("user_ip_cache") +user_sync_counter = Counter("synapse_replication_tcp_resource_user_sync", "") +federation_ack_counter = Counter("synapse_replication_tcp_resource_federation_ack", "") +remove_pusher_counter = Counter("synapse_replication_tcp_resource_remove_pusher", "") +invalidate_cache_counter = Counter("synapse_replication_tcp_resource_invalidate_cache", "") +user_ip_cache_counter = Counter("synapse_replication_tcp_resource_user_ip_cache", "") logger = logging.getLogger(__name__) @@ -73,7 +72,8 @@ class ReplicationStreamer(object): # Current connections. self.connections = [] - metrics.register_callback("total_connections", lambda: len(self.connections)) + l = LaterGauge("synapse_replication_tcp_resource_total_connections", "", [], lambda: len(self.connections)) + l.register() # List of streams that clients can subscribe to. # We only support federation stream if federation sending hase been @@ -85,17 +85,15 @@ class ReplicationStreamer(object): self.streams_by_name = {stream.NAME: stream for stream in self.streams} - metrics.register_callback( - "connections_per_stream", + LaterGauge( + "synapse_replication_tcp_resource_connections_per_stream", "", ["stream_name"], lambda: { (stream_name,): len([ conn for conn in self.connections if stream_name in conn.replication_streams ]) for stream_name in self.streams_by_name - }, - labels=["stream_name"], - ) + }).register() self.federation_sender = None if not hs.config.send_federation: @@ -175,7 +173,7 @@ class ReplicationStreamer(object): logger.info( "Streaming: %s -> %s", stream.NAME, updates[-1][0] ) - stream_updates_counter.inc_by(len(updates), stream.NAME) + stream_updates_counter.labels(stream.NAME).inc(len(updates)) # Some streams return multiple rows with the same stream IDs, # we need to make sure they get sent out in batches. 
We do diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 2262776ab2..d1b625dc30 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -18,8 +18,8 @@ from synapse.api.errors import StoreError from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.caches.descriptors import Cache from synapse.storage.engines import PostgresEngine -import synapse.metrics +from prometheus_client import Histogram from twisted.internet import defer @@ -34,13 +34,10 @@ sql_logger = logging.getLogger("synapse.storage.SQL") transaction_logger = logging.getLogger("synapse.storage.txn") perf_logger = logging.getLogger("synapse.storage.TIME") +sql_scheduling_timer = Histogram("synapse_storage_schedule_time", "") -metrics = synapse.metrics.get_metrics_for("synapse.storage") - -sql_scheduling_timer = metrics.register_distribution("schedule_time") - -sql_query_timer = metrics.register_distribution("query_time", labels=["verb"]) -sql_txn_timer = metrics.register_distribution("transaction_time", labels=["desc"]) +sql_query_timer = Histogram("synapse_storage_query_time", "", ["verb"]) +sql_txn_timer = Histogram("synapse_storage_transaction_time", "", ["desc"]) class LoggingTransaction(object): @@ -117,7 +114,7 @@ class LoggingTransaction(object): finally: msecs = (time.time() * 1000) - start sql_logger.debug("[SQL time] {%s} %f", self.name, msecs) - sql_query_timer.inc_by(msecs, sql.split()[0]) + sql_query_timer.labels(sql.split()[0]).observe(msecs) class PerformanceCounters(object): @@ -287,7 +284,7 @@ class SQLBaseStore(object): self._current_txn_total_time += duration self._txn_perf_counters.update(desc, start, end) - sql_txn_timer.inc_by(duration, desc) + sql_txn_timer.labels(desc).observe(duration) @defer.inlineCallbacks def runInteraction(self, desc, func, *args, **kwargs): @@ -349,7 +346,7 @@ class SQLBaseStore(object): def inner_func(conn, *args, **kwargs): with LoggingContext("runWithConnection") as context: sched_duration_ms = time.time() * 1000 - start_time - sql_scheduling_timer.inc_by(sched_duration_ms) + sql_scheduling_timer.observe(sched_duration_ms) current_context.add_database_scheduled(sched_duration_ms) if self.database_engine.is_connection_closed(conn): diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 05cde96afc..96b48cfdbb 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -40,30 +40,24 @@ import synapse.metrics from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 -logger = logging.getLogger(__name__) +from prometheus_client import Counter +logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) -persist_event_counter = metrics.register_counter("persisted_events") -event_counter = metrics.register_counter( - "persisted_events_sep", labels=["type", "origin_type", "origin_entity"] -) +persist_event_counter = Counter("synapse_storage_events_persisted_events", "") +event_counter = Counter("synapse_storage_events_persisted_events_sep", "", ["type", "origin_type", "origin_entity"]) # The number of times we are recalculating the current state -state_delta_counter = metrics.register_counter( - "state_delta", -) +state_delta_counter = Counter("synapse_storage_events_state_delta", "") + # The number of times we are recalculating state when there is only a # single forward extremity -state_delta_single_event_counter = metrics.register_counter( - "state_delta_single_event", -) 
+state_delta_single_event_counter = Counter("synapse_storage_events_state_delta_single_event", "") + # The number of times we are recalculating state when we could have reasonably # calculated the delta when we calculated the state for an event we were # persisting. -state_delta_reuse_delta_counter = metrics.register_counter( - "state_delta_reuse_delta", -) +state_delta_reuse_delta_counter = Counter("synapse_storage_events_state_delta_reuse_delta", "") def encode_json(json_object): @@ -445,7 +439,7 @@ class EventsStore(EventsWorkerStore): state_delta_for_room=state_delta_for_room, new_forward_extremeties=new_forward_extremeties, ) - persist_event_counter.inc_by(len(chunk)) + persist_event_counter.inc(len(chunk)) synapse.metrics.event_persisted_position.set( chunk[-1][0].internal_metadata.stream_ordering, ) @@ -460,7 +454,7 @@ class EventsStore(EventsWorkerStore): origin_type = "remote" origin_entity = get_domain_from_id(event.sender) - event_counter.inc(event.type, origin_type, origin_entity) + event_counter.labels(event.type, origin_type, origin_entity).inc() for room_id, new_state in current_state_for_room.iteritems(): self.get_current_state_ids.prefill( diff --git a/tests/metrics/__init__.py b/tests/metrics/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/metrics/test_metric.py b/tests/metrics/test_metric.py deleted file mode 100644 index 069c0be762..0000000000 --- a/tests/metrics/test_metric.py +++ /dev/null @@ -1,192 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
- -from tests import unittest - -from synapse.metrics.metric import ( - CounterMetric, CallbackMetric, DistributionMetric, CacheMetric, - _escape_label_value, -) - - -class CounterMetricTestCase(unittest.TestCase): - - def test_scalar(self): - counter = CounterMetric("scalar") - - self.assertEquals(counter.render(), [ - 'scalar 0', - ]) - - counter.inc() - - self.assertEquals(counter.render(), [ - 'scalar 1', - ]) - - counter.inc_by(2) - - self.assertEquals(counter.render(), [ - 'scalar 3' - ]) - - def test_vector(self): - counter = CounterMetric("vector", labels=["method"]) - - # Empty counter doesn't yet know what values it has - self.assertEquals(counter.render(), []) - - counter.inc("GET") - - self.assertEquals(counter.render(), [ - 'vector{method="GET"} 1', - ]) - - counter.inc("GET") - counter.inc("PUT") - - self.assertEquals(counter.render(), [ - 'vector{method="GET"} 2', - 'vector{method="PUT"} 1', - ]) - - -class CallbackMetricTestCase(unittest.TestCase): - - def test_scalar(self): - d = dict() - - metric = CallbackMetric("size", lambda: len(d)) - - self.assertEquals(metric.render(), [ - 'size 0', - ]) - - d["key"] = "value" - - self.assertEquals(metric.render(), [ - 'size 1', - ]) - - def test_vector(self): - vals = dict() - - metric = CallbackMetric("values", lambda: vals, labels=["type"]) - - self.assertEquals(metric.render(), []) - - # Keys have to be tuples, even if they're 1-element - vals[("foo",)] = 1 - vals[("bar",)] = 2 - - self.assertEquals(metric.render(), [ - 'values{type="bar"} 2', - 'values{type="foo"} 1', - ]) - - -class DistributionMetricTestCase(unittest.TestCase): - - def test_scalar(self): - metric = DistributionMetric("thing") - - self.assertEquals(metric.render(), [ - 'thing:count 0', - 'thing:total 0', - ]) - - metric.inc_by(500) - - self.assertEquals(metric.render(), [ - 'thing:count 1', - 'thing:total 500', - ]) - - def test_vector(self): - metric = DistributionMetric("queries", labels=["verb"]) - - self.assertEquals(metric.render(), []) - - metric.inc_by(300, "SELECT") - metric.inc_by(200, "SELECT") - metric.inc_by(800, "INSERT") - - self.assertEquals(metric.render(), [ - 'queries:count{verb="INSERT"} 1', - 'queries:count{verb="SELECT"} 2', - 'queries:total{verb="INSERT"} 800', - 'queries:total{verb="SELECT"} 500', - ]) - - -class CacheMetricTestCase(unittest.TestCase): - - def test_cache(self): - d = dict() - - metric = CacheMetric("cache", lambda: len(d), "cache_name") - - self.assertEquals(metric.render(), [ - 'cache:hits{name="cache_name"} 0', - 'cache:total{name="cache_name"} 0', - 'cache:size{name="cache_name"} 0', - 'cache:evicted_size{name="cache_name"} 0', - ]) - - metric.inc_misses() - d["key"] = "value" - - self.assertEquals(metric.render(), [ - 'cache:hits{name="cache_name"} 0', - 'cache:total{name="cache_name"} 1', - 'cache:size{name="cache_name"} 1', - 'cache:evicted_size{name="cache_name"} 0', - ]) - - metric.inc_hits() - - self.assertEquals(metric.render(), [ - 'cache:hits{name="cache_name"} 1', - 'cache:total{name="cache_name"} 2', - 'cache:size{name="cache_name"} 1', - 'cache:evicted_size{name="cache_name"} 0', - ]) - - metric.inc_evictions(2) - - self.assertEquals(metric.render(), [ - 'cache:hits{name="cache_name"} 1', - 'cache:total{name="cache_name"} 2', - 'cache:size{name="cache_name"} 1', - 'cache:evicted_size{name="cache_name"} 2', - ]) - - -class LabelValueEscapeTestCase(unittest.TestCase): - def test_simple(self): - string = "safjhsdlifhyskljfksdfh" - self.assertEqual(string, _escape_label_value(string)) - - def 
test_escape(self): - self.assertEqual( - "abc\\\"def\\nghi\\\\", - _escape_label_value("abc\"def\nghi\\"), - ) - - def test_sequence_of_escapes(self): - self.assertEqual( - "abc\\\"def\\nghi\\\\\\n", - _escape_label_value("abc\"def\nghi\\\n"), - ) -- cgit 1.4.1 From fb2806b1868baaa24d0d6e21d10f2c1f6177717e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 22 May 2018 09:31:53 +0100 Subject: Move in_flight_requests_count to be a callback metric --- synapse/http/request_metrics.py | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index f1e9b41fde..8cb23f4d64 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -124,12 +124,8 @@ in_flight_requests_db_sched_duration = metrics.register_counter( "in_flight_requests_db_sched_duration_seconds", labels=["method", "servlet"] ) -_in_flight_requests_count = metrics.register_gauge( - "in_flight_requests_count", labels=["method", "servlet"] -) - -# The set of all in flight requests. +# The set of all in flight requests, set[RequestMetrics] _in_flight_requests = set() @@ -138,20 +134,35 @@ def _collect_in_flight(): the in flight request metrics """ + for rm in _in_flight_requests: + rm.update_metrics() + + +metrics.register_collector(_collect_in_flight) + + +def _get_in_flight_counts(): + """Returns a count of all in flight requests by (method, server_name) + + Returns: + dict[tuple[str, str], int] + """ + # Map from (method, name) -> int, the number of in flight requests of that # type counts = {} - for rm in _in_flight_requests: - rm.update_metrics() key = (rm.method, rm.name,) counts[key] = counts.get(key, 0) + 1 - for (method, name), count in counts.iteritems(): - _in_flight_requests_count.set(count, method, name) + return counts -metrics.register_collector(_collect_in_flight) +metrics.register_callback( + "in_flight_requests_count", + _get_in_flight_counts, + labels=["method", "servlet"] +) class RequestMetrics(object): -- cgit 1.4.1 From c435b0b44172efe485ae1401deb8adca2e774d3c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 22 May 2018 09:34:26 +0100 Subject: Don't store context --- synapse/http/request_metrics.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 8cb23f4d64..5917c83958 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -230,7 +230,7 @@ class RequestMetrics(object): """Updates the in flight metrics with values from this request. 
""" - diff = self._request_stats.update() + diff = self._request_stats.update(self.start_context) in_flight_requests_ru_utime.inc_by( diff.ru_utime, self.method, self.name, @@ -258,13 +258,12 @@ class _RequestStats(object): """ __slots__ = [ - "context", "ru_utime", "ru_stime", + "ru_utime", "ru_stime", "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", ] - def __init__(self, context, ru_utime, ru_stime, db_txn_count, + def __init__(self, ru_utime, ru_stime, db_txn_count, db_txn_duration_ms, db_sched_duration_ms): - self.context = context self.ru_utime = ru_utime self.ru_stime = ru_stime self.db_txn_count = db_txn_count @@ -276,24 +275,22 @@ class _RequestStats(object): ru_utime, ru_stime = context.get_resource_usage() return _RequestStats( - context, ru_utime, ru_stime, context.db_txn_count, context.db_txn_duration_ms, context.db_sched_duration_ms, ) - def update(self): + def update(self, context): """Updates the current values and returns the difference between the old and new values. Returns: _RequestStats: The difference between the old and new values """ - new = _RequestStats.from_context(self.context) + new = _RequestStats.from_context(context) diff = _RequestStats( - self.context, new.ru_utime - self.ru_utime, new.ru_stime - self.ru_stime, new.db_txn_count - self.db_txn_count, -- cgit 1.4.1 From 8f5a688d420c8f6b51826c561da9094b76fbea1e Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 22 May 2018 10:56:03 -0500 Subject: cleanups, self-registration --- synapse/handlers/presence.py | 7 ++++--- synapse/http/request_metrics.py | 15 +++------------ synapse/replication/tcp/resource.py | 9 +++++---- 3 files changed, 12 insertions(+), 19 deletions(-) (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index aca822c46a..4ee87d5714 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -142,8 +142,9 @@ class PresenceHandler(object): } LaterGauge( - "user_to_current_state_size", "", [], lambda: len(self.user_to_current_state) - ).register() + "synapse_handlers_presence_user_to_current_state_size", "", [], + lambda: len(self.user_to_current_state) + ) now = self.clock.time_msec() for state in active_presence: @@ -212,7 +213,7 @@ class PresenceHandler(object): 60 * 1000, ) - LaterGauge("wheel_timer_size", "", [], lambda: len(self.wheel_timer)).register() + LaterGauge("synapse_handlers_presence_wheel_timer_size", "", [], lambda: len(self.wheel_timer)) @defer.inlineCallbacks def _on_shutdown(self): diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 0984870e7e..e7f1bfc4ae 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -71,24 +71,14 @@ in_flight_requests_db_sched_duration = Counter("synapse_http_request_in_flight_r _in_flight_requests = set() -def _collect_in_flight(): - """Called just before metrics are collected, so we use it to update all - the in flight request metrics - """ - - for rm in _in_flight_requests: - rm.update_metrics() - - -metrics.register_collector(_collect_in_flight) - - def _get_in_flight_counts(): """Returns a count of all in flight requests by (method, server_name) Returns: dict[tuple[str, str], int] """ + for rm in _in_flight_requests: + rm.update_metrics() # Map from (method, name) -> int, the number of in flight requests of that # type @@ -99,6 +89,7 @@ def _get_in_flight_counts(): return counts + LaterGauge( "synapse_http_request_metrics_in_flight_requests_count", "", ["method", "servlet"], diff 
--git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index ed251c79ac..1ca30bc31a 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -73,8 +73,8 @@ class ReplicationStreamer(object): # Current connections. self.connections = [] - l = LaterGauge("synapse_replication_tcp_resource_total_connections", "", [], lambda: len(self.connections)) - l.register() + LaterGauge("synapse_replication_tcp_resource_total_connections", "", [], + lambda: len(self.connections)) # List of streams that clients can subscribe to. # We only support federation stream if federation sending hase been @@ -87,14 +87,15 @@ class ReplicationStreamer(object): self.streams_by_name = {stream.NAME: stream for stream in self.streams} LaterGauge( - "synapse_replication_tcp_resource_connections_per_stream", "", ["stream_name"], + "synapse_replication_tcp_resource_connections_per_stream", "", + ["stream_name"], lambda: { (stream_name,): len([ conn for conn in self.connections if stream_name in conn.replication_streams ]) for stream_name in self.streams_by_name - }).register() + }) self.federation_sender = None if not hs.config.send_federation: -- cgit 1.4.1 From 071206304d088aac8bd0e2fff600141dae1d09b6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 22 May 2018 16:54:22 -0500 Subject: cleanup pep8 errors --- synapse/federation/federation_server.py | 5 +- synapse/federation/transaction_queue.py | 22 ++++-- synapse/http/request_metrics.py | 114 ++++++++++++++++++++++++-------- synapse/notifier.py | 3 +- synapse/util/caches/__init__.py | 7 +- 5 files changed, 114 insertions(+), 37 deletions(-) (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8211273006..2d420a58a2 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -46,10 +46,13 @@ received_pdus_counter = Counter("synapse_federation_server_received_pdus", "") received_edus_counter = Counter("synapse_federation_server_received_edus", "") -received_queries_counter = Counter("synapse_federation_server_received_queries", "", ["type"]) +received_queries_counter = Counter( + "synapse_federation_server_received_queries", "", ["type"] +) class FederationServer(FederationBase): + def __init__(self, hs): super(FederationServer, self).__init__(hs) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 2049351fdd..53442688c8 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -28,7 +28,10 @@ from synapse.handlers.presence import format_user_presence_state, get_interested import synapse.metrics from synapse.metrics import LaterGauge from synapse.metrics import ( - sent_edus_counter, sent_transactions_counter, events_processed_counter) + sent_edus_counter, + sent_transactions_counter, + events_processed_counter, +) from prometheus_client import Counter @@ -37,7 +40,9 @@ import logging logger = logging.getLogger(__name__) -sent_pdus_destination_dist = Counter("synapse_federation_client_sent_pdu_destinations", "") +sent_pdus_destination_dist = Counter( + "synapse_federation_client_sent_pdu_destinations", "" +) class TransactionQueue(object): @@ -64,7 +69,10 @@ class TransactionQueue(object): # done self.pending_transactions = {} - LaterGauge("synapse_federation_client_pending_destinations", "", [], + LaterGauge( + "synapse_federation_client_pending_destinations", + "", + [], lambda: 
len(self.pending_transactions), ) @@ -89,11 +97,15 @@ class TransactionQueue(object): self.pending_edus_keyed_by_dest = edus_keyed = {} LaterGauge( - "synapse_federation_client_pending_pdus", "", [], + "synapse_federation_client_pending_pdus", + "", + [], lambda: sum(map(len, pdus.values())), ) LaterGauge( - "synapse_federation_client_pending_edus", "", [], + "synapse_federation_client_pending_edus", + "", + [], lambda: ( sum(map(len, edus.values())) + sum(map(len, presence.values())) diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index e7f1bfc4ae..7f11b5c5a4 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -25,47 +25,87 @@ logger = logging.getLogger(__name__) # total number of responses served, split by method/servlet/tag -response_count = Counter("synapse_http_server_response_count", "", ["method", "servlet", "tag"]) +response_count = Counter( + "synapse_http_server_response_count", "", ["method", "servlet", "tag"] +) -requests_counter = Counter("synapse_http_server_requests_received", "", ["method", "servlet"]) +requests_counter = Counter( + "synapse_http_server_requests_received", "", ["method", "servlet"] +) -outgoing_responses_counter = Counter("synapse_http_server_responses", "", ["method", "code"]) +outgoing_responses_counter = Counter( + "synapse_http_server_responses", "", ["method", "code"] +) -response_timer = Histogram("synapse_http_server_response_time_seconds", "", ["method", "servlet", "tag"]) +response_timer = Histogram( + "synapse_http_server_response_time_seconds", "", ["method", "servlet", "tag"] +) -response_ru_utime = Counter("synapse_http_server_response_ru_utime_seconds", "", ["method", "servlet", "tag"]) +response_ru_utime = Counter( + "synapse_http_server_response_ru_utime_seconds", "", ["method", "servlet", "tag"] +) -response_ru_stime = Counter("synapse_http_server_response_ru_stime_seconds", "", ["method", "servlet", "tag"]) +response_ru_stime = Counter( + "synapse_http_server_response_ru_stime_seconds", "", ["method", "servlet", "tag"] +) -response_db_txn_count = Counter("synapse_http_server_response_db_txn_count", "", ["method", "servlet", "tag"]) +response_db_txn_count = Counter( + "synapse_http_server_response_db_txn_count", "", ["method", "servlet", "tag"] +) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request -response_db_txn_duration = Counter("synapse_http_server_response_db_txn_duration_seconds", "", ["method", "servlet", "tag"]) +response_db_txn_duration = Counter( + "synapse_http_server_response_db_txn_duration_seconds", + "", + ["method", "servlet", "tag"], +) # seconds spent waiting for a db connection, when processing this request -response_db_sched_duration = Counter("synapse_http_request_response_db_sched_duration_seconds", "", ["method", "servlet", "tag"] +response_db_sched_duration = Counter( + "synapse_http_request_response_db_sched_duration_seconds", + "", + ["method", "servlet", "tag"], ) # size in bytes of the response written -response_size = Counter("synapse_http_request_response_size", "", ["method", "servlet", "tag"] +response_size = Counter( + "synapse_http_request_response_size", "", ["method", "servlet", "tag"] ) # In flight metrics are incremented while the requests are in flight, rather # than when the response was written. 
-in_flight_requests_ru_utime = Counter("synapse_http_request_in_flight_requests_ru_utime_seconds", "", ["method", "servlet"]) +in_flight_requests_ru_utime = Counter( + "synapse_http_request_in_flight_requests_ru_utime_seconds", + "", + ["method", "servlet"], +) -in_flight_requests_ru_stime = Counter("synapse_http_request_in_flight_requests_ru_stime_seconds", "", ["method", "servlet"]) +in_flight_requests_ru_stime = Counter( + "synapse_http_request_in_flight_requests_ru_stime_seconds", + "", + ["method", "servlet"], +) -in_flight_requests_db_txn_count = Counter("synapse_http_request_in_flight_requests_db_txn_count", "", ["method", "servlet"]) +in_flight_requests_db_txn_count = Counter( + "synapse_http_request_in_flight_requests_db_txn_count", "", ["method", "servlet"] +) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request -in_flight_requests_db_txn_duration = Counter("synapse_http_request_in_flight_requests_db_txn_duration_seconds", "", ["method", "servlet"]) +in_flight_requests_db_txn_duration = Counter( + "synapse_http_request_in_flight_requests_db_txn_duration_seconds", + "", + ["method", "servlet"], +) # seconds spent waiting for a db connection, when processing this request -in_flight_requests_db_sched_duration = Counter("synapse_http_request_in_flight_requests_db_sched_duration_seconds", "", ["method", "servlet"]) +in_flight_requests_db_sched_duration = Counter( + "synapse_http_request_in_flight_requests_db_sched_duration_seconds", + "", + ["method", "servlet"], +) # The set of all in flight requests, set[RequestMetrics] _in_flight_requests = set() @@ -91,9 +131,10 @@ def _get_in_flight_counts(): LaterGauge( - "synapse_http_request_metrics_in_flight_requests_count", "", + "synapse_http_request_metrics_in_flight_requests_count", + "", ["method", "servlet"], - _get_in_flight_counts + _get_in_flight_counts, ) @@ -128,16 +169,23 @@ class RequestMetrics(object): response_count.labels(request.method, self.name, tag).inc() - response_timer.labels(request.method, self.name, tag).observe(time_msec - self.start) + response_timer.labels(request.method, self.name, tag).observe( + time_msec - self.start + ) ru_utime, ru_stime = context.get_resource_usage() response_ru_utime.labels(request.method, self.name, tag).inc(ru_utime) response_ru_stime.labels(request.method, self.name, tag).inc(ru_stime) - response_db_txn_count.labels(request.method, self.name, tag).inc(context.db_txn_count) - response_db_txn_duration.labels(request.method, self.name, tag).inc(context.db_txn_duration_ms / 1000.) + response_db_txn_count.labels(request.method, self.name, tag).inc( + context.db_txn_count + ) + response_db_txn_duration.labels(request.method, self.name, tag).inc( + context.db_txn_duration_ms / 1000. + ) response_db_sched_duration.labels(request.method, self.name, tag).inc( - context.db_sched_duration_ms / 1000.) + context.db_sched_duration_ms / 1000. + ) response_size.labels(request.method, self.name, tag).inc(request.sentLength) @@ -154,11 +202,17 @@ class RequestMetrics(object): in_flight_requests_ru_utime.labels(self.method, self.name).inc(diff.ru_utime) in_flight_requests_ru_stime.labels(self.method, self.name).inc(diff.ru_stime) - in_flight_requests_db_txn_count.labels(self.method, self.name).inc(diff.db_txn_count) + in_flight_requests_db_txn_count.labels(self.method, self.name).inc( + diff.db_txn_count + ) - in_flight_requests_db_txn_duration.labels(self.method, self.name).inc(diff.db_txn_duration_ms / 1000.) 
+ in_flight_requests_db_txn_duration.labels(self.method, self.name).inc( + diff.db_txn_duration_ms / 1000. + ) - in_flight_requests_db_sched_duration.labels(self.method, self.name).inc(diff.db_sched_duration_ms / 1000.) + in_flight_requests_db_sched_duration.labels(self.method, self.name).inc( + diff.db_sched_duration_ms / 1000. + ) class _RequestStats(object): @@ -166,12 +220,16 @@ class _RequestStats(object): """ __slots__ = [ - "ru_utime", "ru_stime", - "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", + "ru_utime", + "ru_stime", + "db_txn_count", + "db_txn_duration_ms", + "db_sched_duration_ms", ] - def __init__(self, ru_utime, ru_stime, db_txn_count, - db_txn_duration_ms, db_sched_duration_ms): + def __init__( + self, ru_utime, ru_stime, db_txn_count, db_txn_duration_ms, db_sched_duration_ms + ): self.ru_utime = ru_utime self.ru_stime = ru_stime self.db_txn_count = db_txn_count diff --git a/synapse/notifier.py b/synapse/notifier.py index 40cc553918..6dce20a284 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -40,7 +40,8 @@ logger = logging.getLogger(__name__) notified_events_counter = Counter("synapse_notifier_notified_events", "") -users_woken_by_stream_counter = Counter("synapse_notifier_users_woken_by_stream", "", ["stream"]) +users_woken_by_stream_counter = Counter( + "synapse_notifier_users_woken_by_stream", "", ["stream"]) # TODO(paul): Should be shared somewhere diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 1c511a7072..e0c22df249 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -29,13 +29,16 @@ cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"]) response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"]) response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"]) -response_cache_evicted = Gauge("synapse_util_caches_response_cache:evicted_size", "", ["name"]) +response_cache_evicted = Gauge( + "synapse_util_caches_response_cache:evicted_size", "", ["name"] +) response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["name"]) + def register_cache(cache_type, cache_name, cache): # Check if the metric is already registered. Unregister it, if so. 
- metric_name = "cache_%s_%s" % (cache_type, cache_name,) + metric_name = "cache_%s_%s" % (cache_type, cache_name) if metric_name in collectors_by_name.keys(): REGISTRY.unregister(collectors_by_name[metric_name]) -- cgit 1.4.1 From e987079037eb89a3c5c04fe64d843a6f9e6c1718 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 23 May 2018 13:03:51 -0500 Subject: fixes --- synapse/federation/transaction_queue.py | 8 ++++---- synapse/http/request_metrics.py | 14 +++++++------- synapse/push/bulk_push_rule_evaluator.py | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 53442688c8..69312ec233 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -41,7 +41,7 @@ import logging logger = logging.getLogger(__name__) sent_pdus_destination_dist = Counter( - "synapse_federation_client_sent_pdu_destinations", "" + "synapse_federation_transaction_queue_sent_pdu_destinations", "" ) @@ -70,7 +70,7 @@ class TransactionQueue(object): self.pending_transactions = {} LaterGauge( - "synapse_federation_client_pending_destinations", + "synapse_federation_transaction_queue_pending_destinations", "", [], lambda: len(self.pending_transactions), @@ -97,13 +97,13 @@ class TransactionQueue(object): self.pending_edus_keyed_by_dest = edus_keyed = {} LaterGauge( - "synapse_federation_client_pending_pdus", + "synapse_federation_transaction_queue_pending_pdus", "", [], lambda: sum(map(len, pdus.values())), ) LaterGauge( - "synapse_federation_client_pending_edus", + "synapse_federation_transaction_queue_pending_edus", "", [], lambda: ( diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 7f11b5c5a4..e7df494333 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -63,46 +63,46 @@ response_db_txn_duration = Counter( # seconds spent waiting for a db connection, when processing this request response_db_sched_duration = Counter( - "synapse_http_request_response_db_sched_duration_seconds", + "synapse_http_server_response_db_sched_duration_seconds", "", ["method", "servlet", "tag"], ) # size in bytes of the response written response_size = Counter( - "synapse_http_request_response_size", "", ["method", "servlet", "tag"] + "synapse_http_server_response_size", "", ["method", "servlet", "tag"] ) # In flight metrics are incremented while the requests are in flight, rather # than when the response was written. 
in_flight_requests_ru_utime = Counter( - "synapse_http_request_in_flight_requests_ru_utime_seconds", + "synapse_http_server_in_flight_requests_ru_utime_seconds", "", ["method", "servlet"], ) in_flight_requests_ru_stime = Counter( - "synapse_http_request_in_flight_requests_ru_stime_seconds", + "synapse_http_server_in_flight_requests_ru_stime_seconds", "", ["method", "servlet"], ) in_flight_requests_db_txn_count = Counter( - "synapse_http_request_in_flight_requests_db_txn_count", "", ["method", "servlet"] + "synapse_http_server_in_flight_requests_db_txn_count", "", ["method", "servlet"] ) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request in_flight_requests_db_txn_duration = Counter( - "synapse_http_request_in_flight_requests_db_txn_duration_seconds", + "synapse_http_server_in_flight_requests_db_txn_duration_seconds", "", ["method", "servlet"], ) # seconds spent waiting for a db connection, when processing this request in_flight_requests_db_sched_duration = Counter( - "synapse_http_request_in_flight_requests_db_sched_duration_seconds", + "synapse_http_server_in_flight_requests_db_sched_duration_seconds", "", ["method", "servlet"], ) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index b0053e7f3f..da1cde417f 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -37,9 +37,9 @@ rules_by_room = {} push_rules_invalidation_counter = Counter( - "synapse_push_bulk_push_role_evaluator_push_rules_invalidation_counter", "") + "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", "") push_rules_state_size_counter = Counter( - "synapse_push_bulk_push_role_evaluator_push_rules_state_size_counter", "") + "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter", "") # Measures whether we use the fast path of using state deltas, or if we have to # recalculate from scratch -- cgit 1.4.1 From a2eb5db4a066148da5057b8ee844c7e4f5d888bc Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 28 May 2018 19:10:27 +1000 Subject: update metrics to be in seconds --- synapse/http/request_metrics.py | 6 +++--- synapse/http/site.py | 20 ++++++++++---------- synapse/metrics/__init__.py | 19 ++++++++++--------- synapse/storage/_base.py | 30 +++++++++++++++--------------- 4 files changed, 38 insertions(+), 37 deletions(-) (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index e7df494333..af3067b4bb 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -38,15 +38,15 @@ outgoing_responses_counter = Counter( ) response_timer = Histogram( - "synapse_http_server_response_time_seconds", "", ["method", "servlet", "tag"] + "synapse_http_server_response_time_seconds", "sec", ["method", "servlet", "tag"] ) response_ru_utime = Counter( - "synapse_http_server_response_ru_utime_seconds", "", ["method", "servlet", "tag"] + "synapse_http_server_response_ru_utime_seconds", "sec", ["method", "servlet", "tag"] ) response_ru_stime = Counter( - "synapse_http_server_response_ru_stime_seconds", "", ["method", "servlet", "tag"] + "synapse_http_server_response_ru_stime_seconds", "sec", ["method", "servlet", "tag"] ) response_db_txn_count = Counter( diff --git a/synapse/http/site.py b/synapse/http/site.py index 23c1b76922..60299657b9 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -83,7 +83,7 @@ class SynapseRequest(Request): return Request.render(self, 
resrc)
 
     def _started_processing(self, servlet_name):
-        self.start_time = int(time.time() * 1000)
+        self.start_time = time.time()
         self.request_metrics = RequestMetrics()
         self.request_metrics.start(
             self.start_time, name=servlet_name, method=self.method,
@@ -102,26 +102,26 @@ class SynapseRequest(Request):
             context = LoggingContext.current_context()
             ru_utime, ru_stime = context.get_resource_usage()
             db_txn_count = context.db_txn_count
-            db_txn_duration_ms = context.db_txn_duration_ms
-            db_sched_duration_ms = context.db_sched_duration_ms
+            db_txn_duration_sec = context.db_txn_duration_sec
+            db_sched_duration_sec = context.db_sched_duration_sec
         except Exception:
             ru_utime, ru_stime = (0, 0)
-            db_txn_count, db_txn_duration_ms = (0, 0)
+            db_txn_count, db_txn_duration_sec, db_sched_duration_sec = (0, 0, 0)
 
-        end_time = int(time.time() * 1000)
+        end_time = time.time()
 
         self.site.access_logger.info(
             "%s - %s - {%s}"
-            " Processed request: %dms (%dms, %dms) (%dms/%dms/%d)"
+            " Processed request: %.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
             " %sB %s \"%s %s %s\" \"%s\"",
             self.getClientIP(),
             self.site.site_tag,
             self.authenticated_entity,
             end_time - self.start_time,
-            int(ru_utime * 1000),
-            int(ru_stime * 1000),
-            db_sched_duration_ms,
-            db_txn_duration_ms,
+            ru_utime,
+            ru_stime,
+            db_sched_duration_sec,
+            db_txn_duration_sec,
             int(db_txn_count),
             self.sentLength,
             self.code,
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index e33ed6c9be..0c557a43f6 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -122,10 +122,10 @@ REGISTRY.register(CPUMetrics())
 gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"])
 gc_time = Histogram(
     "python_gc_time",
-    "Time taken to GC (ms)",
+    "Time taken to GC (sec)",
     ["gen"],
-    buckets=[2.5, 5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 7500, 15000,
-             30000, 45000, 60000],
+    buckets=[0.0025, 0.005, 0.01, 0.025, 0.05, 0.10, 0.25, 0.50, 1.00, 2.50,
+             5.00, 7.50, 15.00, 30.00, 45.00, 60.00],
 )
 
 
@@ -147,8 +147,9 @@ REGISTRY.register(GCCounts())
 
 tick_time = Histogram(
     "python_twisted_reactor_tick_time",
-    "Tick time of the Twisted reactor (ms)",
-    buckets=[1, 2, 5, 10, 50, 100, 250, 500, 1000, 2000],
+    "Tick time of the Twisted reactor (sec)",
+    buckets=[0.001, 0.002, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2,
+             0.5, 1, 2, 5],
 )
 pending_calls_metric = Histogram(
     "python_twisted_reactor_pending_calls",
@@ -202,9 +203,9 @@ def runUntilCurrentTimer(func):
             num_pending += 1
 
         num_pending += len(reactor.threadCallQueue)
-        start = time.time() * 1000
+        start = time.time()
         ret = func(*args, **kwargs)
-        end = time.time() * 1000
+        end = time.time()
 
         # record the amount of wallclock time spent running pending calls.
# This is a proxy for the actual amount of time between reactor polls, @@ -225,9 +226,9 @@ def runUntilCurrentTimer(func): if threshold[i] < counts[i]: logger.info("Collecting gc %d", i) - start = time.time() * 1000 + start = time.time() unreachable = gc.collect(i) - end = time.time() * 1000 + end = time.time() gc_time.labels(i).observe(end - start) gc_unreachable.labels(i).set(unreachable) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index d963af5c89..22d6257a9f 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -42,10 +42,10 @@ sql_logger = logging.getLogger("synapse.storage.SQL") transaction_logger = logging.getLogger("synapse.storage.txn") perf_logger = logging.getLogger("synapse.storage.TIME") -sql_scheduling_timer = Histogram("synapse_storage_schedule_time", "") +sql_scheduling_timer = Histogram("synapse_storage_schedule_time", "sec") -sql_query_timer = Histogram("synapse_storage_query_time", "", ["verb"]) -sql_txn_timer = Histogram("synapse_storage_transaction_time", "", ["desc"]) +sql_query_timer = Histogram("synapse_storage_query_time", "sec", ["verb"]) +sql_txn_timer = Histogram("synapse_storage_transaction_time", "sec", ["desc"]) class LoggingTransaction(object): @@ -110,7 +110,7 @@ class LoggingTransaction(object): # Don't let logging failures stop SQL from working pass - start = time.time() * 1000 + start = time.time() try: return func( @@ -120,9 +120,9 @@ class LoggingTransaction(object): logger.debug("[SQL FAIL] {%s} %s", self.name, e) raise finally: - msecs = (time.time() * 1000) - start - sql_logger.debug("[SQL time] {%s} %f", self.name, msecs) - sql_query_timer.labels(sql.split()[0]).observe(msecs) + secs = time.time() - start + sql_logger.debug("[SQL time] {%s} %f sec", self.name, secs) + sql_query_timer.labels(sql.split()[0]).observe(secs) class PerformanceCounters(object): @@ -132,7 +132,7 @@ class PerformanceCounters(object): def update(self, key, start_time, end_time=None): if end_time is None: - end_time = time.time() * 1000 + end_time = time.time() duration = end_time - start_time count, cum_time = self.current_counters.get(key, (0, 0)) count += 1 @@ -222,7 +222,7 @@ class SQLBaseStore(object): def _new_transaction(self, conn, desc, after_callbacks, exception_callbacks, logging_context, func, *args, **kwargs): - start = time.time() * 1000 + start = time.time() txn_id = self._TXN_ID # We don't really need these to be unique, so lets stop it from @@ -282,13 +282,13 @@ class SQLBaseStore(object): logger.debug("[TXN FAIL] {%s} %s", name, e) raise finally: - end = time.time() * 1000 + end = time.time() duration = end - start if logging_context is not None: logging_context.add_database_transaction(duration) - transaction_logger.debug("[TXN END] {%s} %f", name, duration) + transaction_logger.debug("[TXN END] {%s} %f sec", name, duration) self._current_txn_total_time += duration self._txn_perf_counters.update(desc, start, end) @@ -349,13 +349,13 @@ class SQLBaseStore(object): """ current_context = LoggingContext.current_context() - start_time = time.time() * 1000 + start_time = time.time() def inner_func(conn, *args, **kwargs): with LoggingContext("runWithConnection") as context: - sched_duration_ms = time.time() * 1000 - start_time - sql_scheduling_timer.observe(sched_duration_ms) - current_context.add_database_scheduled(sched_duration_ms) + sched_duration_sec = time.time() - start_time + sql_scheduling_timer.observe(sched_duration_sec) + current_context.add_database_scheduled(sched_duration_sec) if 
self.database_engine.is_connection_closed(conn): logger.debug("Reconnecting closed database connection") -- cgit 1.4.1 From 3ef5cd74a6bff9b33144cb834782e0402e6eb152 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 28 May 2018 19:39:27 +1000 Subject: update to more consistently use seconds in any metrics or logging --- synapse/http/request_metrics.py | 38 +++++++++++++++++++------------------- synapse/util/logcontext.py | 24 ++++++++++++------------ synapse/util/logutils.py | 6 +++--- synapse/util/metrics.py | 8 ++++---- 4 files changed, 38 insertions(+), 38 deletions(-) (limited to 'synapse/http/request_metrics.py') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index af3067b4bb..dc06f6c443 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -139,8 +139,8 @@ LaterGauge( class RequestMetrics(object): - def start(self, time_msec, name, method): - self.start = time_msec + def start(self, time_sec, name, method): + self.start = time_sec self.start_context = LoggingContext.current_context() self.name = name self.method = method @@ -149,7 +149,7 @@ class RequestMetrics(object): _in_flight_requests.add(self) - def stop(self, time_msec, request): + def stop(self, time_sec, request): _in_flight_requests.discard(self) context = LoggingContext.current_context() @@ -170,7 +170,7 @@ class RequestMetrics(object): response_count.labels(request.method, self.name, tag).inc() response_timer.labels(request.method, self.name, tag).observe( - time_msec - self.start + time_sec - self.start ) ru_utime, ru_stime = context.get_resource_usage() @@ -181,10 +181,10 @@ class RequestMetrics(object): context.db_txn_count ) response_db_txn_duration.labels(request.method, self.name, tag).inc( - context.db_txn_duration_ms / 1000. + context.db_txn_duration_sec ) response_db_sched_duration.labels(request.method, self.name, tag).inc( - context.db_sched_duration_ms / 1000. + context.db_sched_duration_sec ) response_size.labels(request.method, self.name, tag).inc(request.sentLength) @@ -207,11 +207,11 @@ class RequestMetrics(object): ) in_flight_requests_db_txn_duration.labels(self.method, self.name).inc( - diff.db_txn_duration_ms / 1000. + diff.db_txn_duration_sec ) in_flight_requests_db_sched_duration.labels(self.method, self.name).inc( - diff.db_sched_duration_ms / 1000. 
+ diff.db_sched_duration_sec ) @@ -223,18 +223,18 @@ class _RequestStats(object): "ru_utime", "ru_stime", "db_txn_count", - "db_txn_duration_ms", - "db_sched_duration_ms", + "db_txn_duration_sec", + "db_sched_duration_sec", ] def __init__( - self, ru_utime, ru_stime, db_txn_count, db_txn_duration_ms, db_sched_duration_ms + self, ru_utime, ru_stime, db_txn_count, db_txn_duration_sec, db_sched_duration_sec ): self.ru_utime = ru_utime self.ru_stime = ru_stime self.db_txn_count = db_txn_count - self.db_txn_duration_ms = db_txn_duration_ms - self.db_sched_duration_ms = db_sched_duration_ms + self.db_txn_duration_sec = db_txn_duration_sec + self.db_sched_duration_sec = db_sched_duration_sec @staticmethod def from_context(context): @@ -243,8 +243,8 @@ class _RequestStats(object): return _RequestStats( ru_utime, ru_stime, context.db_txn_count, - context.db_txn_duration_ms, - context.db_sched_duration_ms, + context.db_txn_duration_sec, + context.db_sched_duration_sec, ) def update(self, context): @@ -260,14 +260,14 @@ class _RequestStats(object): new.ru_utime - self.ru_utime, new.ru_stime - self.ru_stime, new.db_txn_count - self.db_txn_count, - new.db_txn_duration_ms - self.db_txn_duration_ms, - new.db_sched_duration_ms - self.db_sched_duration_ms, + new.db_txn_duration_sec - self.db_txn_duration_sec, + new.db_sched_duration_sec - self.db_sched_duration_sec, ) self.ru_utime = new.ru_utime self.ru_stime = new.ru_stime self.db_txn_count = new.db_txn_count - self.db_txn_duration_ms = new.db_txn_duration_ms - self.db_sched_duration_ms = new.db_sched_duration_ms + self.db_txn_duration_sec = new.db_txn_duration_sec + self.db_sched_duration_sec = new.db_sched_duration_sec return diff diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 914f616312..a58c723403 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -59,7 +59,7 @@ class LoggingContext(object): __slots__ = [ "previous_context", "name", "ru_stime", "ru_utime", - "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", + "db_txn_count", "db_txn_duration_sec", "db_sched_duration_sec", "usage_start", "main_thread", "alive", "request", "tag", @@ -84,10 +84,10 @@ class LoggingContext(object): def stop(self): pass - def add_database_transaction(self, duration_ms): + def add_database_transaction(self, duration_sec): pass - def add_database_scheduled(self, sched_ms): + def add_database_scheduled(self, sched_sec): pass def __nonzero__(self): @@ -103,11 +103,11 @@ class LoggingContext(object): self.ru_utime = 0. self.db_txn_count = 0 - # ms spent waiting for db txns, excluding scheduling time - self.db_txn_duration_ms = 0 + # sec spent waiting for db txns, excluding scheduling time + self.db_txn_duration_sec = 0 - # ms spent waiting for db txns to be scheduled - self.db_sched_duration_ms = 0 + # sec spent waiting for db txns to be scheduled + self.db_sched_duration_sec = 0 # If alive has the thread resource usage when the logcontext last # became active. 
@@ -230,18 +230,18 @@ class LoggingContext(object): return ru_utime, ru_stime - def add_database_transaction(self, duration_ms): + def add_database_transaction(self, duration_sec): self.db_txn_count += 1 - self.db_txn_duration_ms += duration_ms + self.db_txn_duration_sec += duration_sec - def add_database_scheduled(self, sched_ms): + def add_database_scheduled(self, sched_sec): """Record a use of the database pool Args: - sched_ms (int): number of milliseconds it took us to get a + sched_sec (float): number of seconds it took us to get a connection """ - self.db_sched_duration_ms += sched_ms + self.db_sched_duration_sec += sched_sec class LoggingContextFilter(logging.Filter): diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py index 3a83828d25..03249c5dc8 100644 --- a/synapse/util/logutils.py +++ b/synapse/util/logutils.py @@ -96,7 +96,7 @@ def time_function(f): id = _TIME_FUNC_ID _TIME_FUNC_ID += 1 - start = time.clock() * 1000 + start = time.clock() try: _log_debug_as_f( @@ -107,10 +107,10 @@ def time_function(f): r = f(*args, **kwargs) finally: - end = time.clock() * 1000 + end = time.clock() _log_debug_as_f( f, - "[FUNC END] {%s-%d} %f", + "[FUNC END] {%s-%d} %.3f sec", (func_name, id, end - start,), ) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 424fdcb036..23fc0ca20d 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -74,7 +74,7 @@ class Measure(object): self.created_context = False def __enter__(self): - self.start = self.clock.time_msec() + self.start = self.clock.time() self.start_context = LoggingContext.current_context() if not self.start_context: self.start_context = LoggingContext("Measure") @@ -90,7 +90,7 @@ class Measure(object): if isinstance(exc_type, Exception) or not self.start_context: return - duration = self.clock.time_msec() - self.start + duration = self.clock.time() - self.start block_counter.labels(self.name).inc() block_timer.labels(self.name).inc(duration) @@ -114,9 +114,9 @@ class Measure(object): block_ru_stime.labels(self.name).inc(ru_stime - self.ru_stime) block_db_txn_count.labels(self.name).inc(context.db_txn_count - self.db_txn_count) block_db_txn_duration.labels(self.name).inc( - (context.db_txn_duration_ms - self.db_txn_duration_ms) / 1000.) + context.db_txn_duration_sec - self.db_txn_duration_sec) block_db_sched_duration.labels(self.name).inc( - (context.db_sched_duration_ms - self.db_sched_duration_ms) / 1000.) + context.db_sched_duration_sec - self.db_sched_duration_sec) if self.created_context: self.start_context.__exit__(exc_type, exc_val, exc_tb) -- cgit 1.4.1
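Taken together, the last two commits leave every duration in these code paths as a float number of seconds, taken straight from time.time(), so values can be handed to the prometheus_client metrics without any * 1000 or / 1000. conversion at the boundaries. A minimal sketch of the resulting pattern, using illustrative metric names rather than the real ones from synapse/http/request_metrics.py:

    import time

    from prometheus_client import Counter, Histogram

    # Illustrative names only; the real metrics live in request_metrics.py.
    response_timer = Histogram(
        "example_response_time_seconds", "sec", ["method", "servlet", "tag"]
    )
    response_db_txn_duration = Counter(
        "example_response_db_txn_duration_seconds", "sec", ["method", "servlet", "tag"]
    )

    def record_request(method, servlet, tag, start_sec, db_txn_duration_sec):
        # Both values are already in seconds, so they reach the metrics unchanged.
        response_timer.labels(method, servlet, tag).observe(time.time() - start_sec)
        response_db_txn_duration.labels(method, servlet, tag).inc(db_txn_duration_sec)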
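Changing a histogram's unit from milliseconds to seconds also means dividing its bucket boundaries by the same factor, which is what the gc_time change above does. The conversion is mechanical:

    # gc_time's old millisecond buckets, scaled down to the new second buckets.
    ms_buckets = [2.5, 5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 7500,
                  15000, 30000, 45000, 60000]
    sec_buckets = [b / 1000.0 for b in ms_buckets]
    assert sec_buckets == [0.0025, 0.005, 0.01, 0.025, 0.05, 0.10, 0.25, 0.50,
                           1.00, 2.50, 5.00, 7.50, 15.00, 30.00, 45.00, 60.00]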
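The in-flight counters that several of these hunks touch depend on the snapshot-and-diff idea in _RequestStats.update(): take a fresh snapshot of the logcontext's resource usage, increment the counters by the difference from the previous snapshot, and make the fresh snapshot the new baseline, so work is counted exactly once even while the request is still running. A stripped-down sketch of that mechanism (field set reduced for brevity; not the actual class):

    class UsageSnapshot(object):
        __slots__ = ["ru_utime", "ru_stime", "db_txn_count"]

        def __init__(self, ru_utime, ru_stime, db_txn_count):
            self.ru_utime = ru_utime
            self.ru_stime = ru_stime
            self.db_txn_count = db_txn_count

        def update(self, new):
            # Compute what happened since the last snapshot...
            diff = UsageSnapshot(
                new.ru_utime - self.ru_utime,
                new.ru_stime - self.ru_stime,
                new.db_txn_count - self.db_txn_count,
            )
            # ...then advance the baseline so the next diff starts from here.
            self.ru_utime = new.ru_utime
            self.ru_stime = new.ru_stime
            self.db_txn_count = new.db_txn_count
            return diff

    base = UsageSnapshot(1.0, 0.5, 3)
    delta = base.update(UsageSnapshot(1.5, 0.75, 5))
    assert (delta.ru_utime, delta.db_txn_count) == (0.5, 2)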
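One more detail worth noting: the register_cache() hunk in synapse/util/caches/__init__.py relies on a guard that matters with prometheus_client, since registering two collectors under the same name raises ValueError; any stale collector must be unregistered first. A rough sketch of that guard, assuming a module-level collectors_by_name dict as in the diff (the helper name here is hypothetical):

    from prometheus_client import REGISTRY, Gauge

    collectors_by_name = {}  # assumed module-level map of name -> collector

    def register_cache_gauge(cache_type, cache_name):
        # Hypothetical helper mirroring the guard in register_cache().
        metric_name = "cache_%s_%s" % (cache_type, cache_name)
        # Registering a duplicate name raises ValueError, so drop the old one.
        if metric_name in collectors_by_name:
            REGISTRY.unregister(collectors_by_name[metric_name])
        gauge = Gauge(metric_name, "")
        collectors_by_name[metric_name] = gauge
        return gauge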