From 926ba76e23ea9d55638baff541cdfaeb9e01ac47 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 15 Mar 2018 23:43:31 +0000
Subject: Replace ujson with simplejson

---
 synapse/http/server.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'synapse/http')

diff --git a/synapse/http/server.py b/synapse/http/server.py
index 25466cd292..f1e9002e4d 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -36,7 +36,7 @@ from twisted.web.util import redirectTo
 import collections
 import logging
 import urllib
-import ujson
+import simplejson
 
 logger = logging.getLogger(__name__)
 
@@ -370,8 +370,7 @@ def respond_with_json(request, code, json_object, send_cors=False,
     if canonical_json or synapse.events.USE_FROZEN_DICTS:
         json_bytes = encode_canonical_json(json_object)
     else:
-        # ujson doesn't like frozen_dicts.
-        json_bytes = ujson.dumps(json_object, ensure_ascii=False)
+        json_bytes = simplejson.dumps(json_object)
 
     return respond_with_json_bytes(
         request, code, json_bytes,
-- 
cgit 1.5.1

From a8ce159be43560e9aea8f3be65110eea49d1f50e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 15 Mar 2018 23:38:43 +0000
Subject: Replace some ujson with simplejson to make it work

---
 synapse/http/server.py               | 3 ++-
 synapse/rest/client/v2_alpha/sync.py | 2 +-
 synapse/storage/events.py            | 2 +-
 synapse/storage/events_worker.py     | 2 +-
 4 files changed, 5 insertions(+), 4 deletions(-)

(limited to 'synapse/http')

diff --git a/synapse/http/server.py b/synapse/http/server.py
index 4b567215c8..3c7a0ef97a 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -38,6 +38,7 @@ import collections
 import logging
 import urllib
 import ujson
+import simplejson
 
 logger = logging.getLogger(__name__)
 
@@ -462,7 +463,7 @@ def respond_with_json(request, code, json_object, send_cors=False,
         json_bytes = encode_canonical_json(json_object)
     else:
         # ujson doesn't like frozen_dicts.
-        json_bytes = ujson.dumps(json_object, ensure_ascii=False)
+        json_bytes = simplejson.dumps(json_object)
 
     return respond_with_json_bytes(
         request, code, json_bytes,
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index a0a8e4b8e4..eb91c0b293 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -33,7 +33,7 @@ from ._base import set_timeline_upper_limit
 import itertools
 import logging
 
-import ujson as json
+import simplejson as json
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 3890878170..9fc65229fd 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -38,7 +38,7 @@ from functools import wraps
 import synapse.metrics
 
 import logging
-import ujson as json
+import simplejson as json
 
 # these are only included to make the type annotations work
 from synapse.events import EventBase  # noqa: F401
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index 86c3b48ad4..2e23dd78ba 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -28,7 +28,7 @@ from synapse.api.errors import SynapseError
 from collections import namedtuple
 
 import logging
-import ujson as json
+import simplejson as json
 
 # these are only included to make the type annotations work
 from synapse.events import EventBase  # noqa: F401
-- 
cgit 1.5.1

From 1c41b05c8c98f0b9157c791b5b8ebf5f9fe85acf Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 21 Mar 2018 17:46:26 +0000
Subject: Add Cache-Control headers to all JSON APIs

It is especially important that sync requests don't get cached: if a sync
returns the same token it was given, the client will call sync again with
the same parameters. If the previous response was cached it will be reused,
leaving the client tight-looping, making the same request repeatedly and
never making any progress.

In general, clients expect up-to-date data when requesting APIs, so it's
safer to apply a blanket no-cache policy than to whitelist only the APIs
that we know will break if they are cached.
---
 synapse/http/server.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'synapse/http')

diff --git a/synapse/http/server.py b/synapse/http/server.py
index 1551db239d..f19c068ef6 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -488,6 +488,7 @@ def respond_with_json_bytes(request, code, json_bytes, send_cors=False,
     request.setHeader(b"Content-Type", b"application/json")
     request.setHeader(b"Server", version_string)
     request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
+    request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
 
     if send_cors:
         set_cors_headers(request)
-- 
cgit 1.5.1

From 72251d1b979db0bc96e5d95ac70b8e1cd78cde7c Mon Sep 17 00:00:00 2001
From: Silke
Date: Tue, 20 Mar 2018 10:40:16 +0100
Subject: Remove address resolution of hosts in SRV records

Signed-off-by: Silke Hofstra
---
 synapse/http/endpoint.py | 103 ++++-------------------------------------------
 tests/test_dns.py        |  29 +------------
 2 files changed, 10 insertions(+), 122 deletions(-)

(limited to 'synapse/http')

diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py
index 87639b9151..00572c2897 100644
--- a/synapse/http/endpoint.py
+++ b/synapse/http/endpoint.py
@@ -12,8 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
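# A minimal sketch of the Cache-Control change above, assuming only the
# Twisted Request API already used in these patches: any JSON handler can
# mark its response uncacheable the same way respond_with_json_bytes now does.
def set_no_cache_headers(request):
    # "no-store" stops caches persisting the response at all, and
    # "must-revalidate" forbids serving a stale copy, so a polling /sync
    # client always sees fresh data instead of re-reading a cached token.
    request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")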
-import socket
-
 from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
 from twisted.internet import defer, reactor
 from twisted.internet.error import ConnectError
@@ -33,7 +31,7 @@ SERVER_CACHE = {}
 
 # our record of an individual server which can be tried to reach a destination.
 #
-# "host" is actually a dotted-quad or ipv6 address string. Except when there's
+# "host" is the hostname acquired from the SRV record. Except when there's
 # no SRV record, in which case it is the original hostname.
 _Server = collections.namedtuple(
     "_Server", "priority weight host port expires"
 )
@@ -297,20 +295,13 @@ def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=t
 
         payload = answer.payload
 
-        hosts = yield _get_hosts_for_srv_record(
-            dns_client, str(payload.target)
-        )
-
-        for (ip, ttl) in hosts:
-            host_ttl = min(answer.ttl, ttl)
-
-            servers.append(_Server(
-                host=ip,
-                port=int(payload.port),
-                priority=int(payload.priority),
-                weight=int(payload.weight),
-                expires=int(clock.time()) + host_ttl,
-            ))
+        servers.append(_Server(
+            host=str(payload.target),
+            port=int(payload.port),
+            priority=int(payload.priority),
+            weight=int(payload.weight),
+            expires=int(clock.time()) + answer.ttl,
+        ))
 
     servers.sort()
     cache[service_name] = list(servers)
@@ -328,81 +319,3 @@ def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=t
             raise e
 
     defer.returnValue(servers)
-
-
-@defer.inlineCallbacks
-def _get_hosts_for_srv_record(dns_client, host):
-    """Look up each of the hosts in a SRV record
-
-    Args:
-        dns_client (twisted.names.dns.IResolver):
-        host (basestring): host to look up
-
-    Returns:
-        Deferred[list[(str, int)]]: a list of (host, ttl) pairs
-
-    """
-    ip4_servers = []
-    ip6_servers = []
-
-    def cb(res):
-        # lookupAddress and lookupIP6Address return a three-tuple
-        # giving the answer, authority, and additional sections of the
-        # response.
-        #
-        # we only care about the answers.
-
-        return res[0]
-
-    def eb(res, record_type):
-        if res.check(DNSNameError):
-            return []
-        logger.warn("Error looking up %s for %s: %s", record_type, host, res)
-        return res
-
-    # no logcontexts here, so we can safely fire these off and gatherResults
-    d1 = dns_client.lookupAddress(host).addCallbacks(
-        cb, eb, errbackArgs=("A", ))
-    d2 = dns_client.lookupIPV6Address(host).addCallbacks(
-        cb, eb, errbackArgs=("AAAA", ))
-    results = yield defer.DeferredList(
-        [d1, d2], consumeErrors=True)
-
-    # if all of the lookups failed, raise an exception rather than blowing out
-    # the cache with an empty result.
-    if results and all(s == defer.FAILURE for (s, _) in results):
-        defer.returnValue(results[0][1])
-
-    for (success, result) in results:
-        if success == defer.FAILURE:
-            continue
-
-        for answer in result:
-            if not answer.payload:
-                continue
-
-            try:
-                if answer.type == dns.A:
-                    ip = answer.payload.dottedQuad()
-                    ip4_servers.append((ip, answer.ttl))
-                elif answer.type == dns.AAAA:
-                    ip = socket.inet_ntop(
-                        socket.AF_INET6, answer.payload.address,
-                    )
-                    ip6_servers.append((ip, answer.ttl))
-                else:
-                    # the most likely candidate here is a CNAME record.
-                    # rfc2782 says srvs may not point to aliases.
-                    logger.warn(
-                        "Ignoring unexpected DNS record type %s for %s",
-                        answer.type, host,
-                    )
-                    continue
-            except Exception as e:
-                logger.warn("Ignoring invalid DNS response for %s: %s",
-                            host, e)
-                continue
-
-    # keep the ipv4 results before the ipv6 results, mostly to match historical
-    # behaviour.
-    defer.returnValue(ip4_servers + ip6_servers)
diff --git a/tests/test_dns.py b/tests/test_dns.py
index d08b0f4333..af607d626f 100644
--- a/tests/test_dns.py
+++ b/tests/test_dns.py
@@ -33,8 +33,6 @@ class DnsTestCase(unittest.TestCase):
 
         service_name = "test_service.example.com"
         host_name = "example.com"
-        ip_address = "127.0.0.1"
-        ip6_address = "::1"
 
         answer_srv = dns.RRHeader(
             type=dns.SRV,
             payload=dns.Record_SRV(
                 target=host_name,
             )
         )
 
-        answer_a = dns.RRHeader(
-            type=dns.A,
-            payload=dns.Record_A(
-                address=ip_address,
-            )
-        )
-
-        answer_aaaa = dns.RRHeader(
-            type=dns.AAAA,
-            payload=dns.Record_AAAA(
-                address=ip6_address,
-            )
-        )
-
         dns_client_mock.lookupService.return_value = defer.succeed(
             ([answer_srv], None, None),
         )
-        dns_client_mock.lookupAddress.return_value = defer.succeed(
-            ([answer_a], None, None),
-        )
-        dns_client_mock.lookupIPV6Address.return_value = defer.succeed(
-            ([answer_aaaa], None, None),
-        )
 
         cache = {}
 
@@ -74,13 +52,10 @@ class DnsTestCase(unittest.TestCase):
         )
 
         dns_client_mock.lookupService.assert_called_once_with(service_name)
-        dns_client_mock.lookupAddress.assert_called_once_with(host_name)
-        dns_client_mock.lookupIPV6Address.assert_called_once_with(host_name)
 
-        self.assertEquals(len(servers), 2)
+        self.assertEquals(len(servers), 1)
         self.assertEquals(servers, cache[service_name])
-        self.assertEquals(servers[0].host, ip_address)
-        self.assertEquals(servers[1].host, ip6_address)
+        self.assertEquals(servers[0].host, host_name)
 
     @defer.inlineCallbacks
     def test_from_cache_expired_and_dns_fail(self):
-- 
cgit 1.5.1

From 616835187702a0c6f16042e3efb452e1ee3e7826 Mon Sep 17 00:00:00 2001
From: Adrian Tschira
Date: Tue, 3 Apr 2018 20:41:21 +0200
Subject: Add b prefixes to some strings that are bytes in py3

This has no effect on python2

Signed-off-by: Adrian Tschira
---
 synapse/api/auth.py                | 10 +++++-----
 synapse/app/frontend_proxy.py      |  2 +-
 synapse/http/server.py             |  4 ++--
 synapse/http/site.py               |  6 +++---
 synapse/rest/client/v1/register.py |  4 ++--
 tests/utils.py                     |  2 +-
 6 files changed, 14 insertions(+), 14 deletions(-)

(limited to 'synapse/http')

diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index ac0a3655a5..f17fda6315 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -204,8 +204,8 @@ class Auth(object):
 
             ip_addr = self.hs.get_ip_from_request(request)
             user_agent = request.requestHeaders.getRawHeaders(
-                "User-Agent",
-                default=[""]
+                b"User-Agent",
+                default=[b""]
             )[0]
             if user and access_token and ip_addr:
                 self.store.insert_client_ip(
@@ -672,7 +672,7 @@ def has_access_token(request):
         bool: False if no access_token was given, True otherwise.
     """
     query_params = request.args.get("access_token")
-    auth_headers = request.requestHeaders.getRawHeaders("Authorization")
+    auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
     return bool(query_params) or bool(auth_headers)
 
 
@@ -692,8 +692,8 @@ def get_access_token_from_request(request, token_not_found_http_status=401):
         AuthError: If there isn't an access_token in the request.
""" - auth_headers = request.requestHeaders.getRawHeaders("Authorization") - query_params = request.args.get("access_token") + auth_headers = request.requestHeaders.getRawHeaders(b"Authorization") + query_params = request.args.get(b"access_token") if auth_headers: # Try the get the access_token from a "Authorization: Bearer" # header diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index de889357c3..b349e3e3ce 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -90,7 +90,7 @@ class KeyUploadServlet(RestServlet): # They're actually trying to upload something, proxy to main synapse. # Pass through the auth headers, if any, in case the access token # is there. - auth_headers = request.requestHeaders.getRawHeaders("Authorization", []) + auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", []) headers = { "Authorization": auth_headers, } diff --git a/synapse/http/server.py b/synapse/http/server.py index f19c068ef6..d979e76639 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -324,7 +324,7 @@ class JsonResource(HttpServer, resource.Resource): register_paths, so will return (possibly via Deferred) either None, or a tuple of (http code, response body). """ - if request.method == "OPTIONS": + if request.method == b"OPTIONS": return _options_handler, {} # Loop through all the registered callbacks to check if the method @@ -536,7 +536,7 @@ def finish_request(request): def _request_user_agent_is_curl(request): user_agents = request.requestHeaders.getRawHeaders( - "User-Agent", default=[] + b"User-Agent", default=[] ) for user_agent in user_agents: if "curl" in user_agent: diff --git a/synapse/http/site.py b/synapse/http/site.py index e422c8dfae..c8b46e1af2 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -20,7 +20,7 @@ import logging import re import time -ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') +ACCESS_TOKEN_RE = re.compile(br'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') class SynapseRequest(Request): @@ -43,12 +43,12 @@ class SynapseRequest(Request): def get_redacted_uri(self): return ACCESS_TOKEN_RE.sub( - r'\1\3', + br'\1\3', self.uri ) def get_user_agent(self): - return self.requestHeaders.getRawHeaders("User-Agent", [None])[-1] + return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] def started_processing(self): self.site.access_logger.info( diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index 5c5fa8f7ab..8a82097178 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -348,9 +348,9 @@ class RegisterRestServlet(ClientV1RestServlet): admin = register_json.get("admin", None) # Its important to check as we use null bytes as HMAC field separators - if "\x00" in user: + if b"\x00" in user: raise SynapseError(400, "Invalid user") - if "\x00" in password: + if b"\x00" in password: raise SynapseError(400, "Invalid password") # str() because otherwise hmac complains that 'unicode' does not diff --git a/tests/utils.py b/tests/utils.py index 8efd3a3475..f15317d27b 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -212,7 +212,7 @@ class MockHttpResource(HttpServer): headers = {} if federation_auth: - headers["Authorization"] = ["X-Matrix origin=test,key=,sig="] + headers[b"Authorization"] = ["X-Matrix origin=test,key=,sig="] mock_request.requestHeaders.getRawHeaders = mock_getRawHeaders(headers) # return the right path if the event requires it -- cgit 1.5.1 From 
518f6de0881378b1fa356e21256436491d43c93c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 4 Apr 2018 19:46:28 +0100 Subject: Remove redundant metrics which were deprecated in 0.27.0. --- CHANGES.rst | 9 +++++++++ UPGRADE.rst | 9 ++++++++- docs/metrics-howto.rst | 11 +++++++++++ synapse/http/server.py | 26 -------------------------- synapse/util/metrics.py | 25 ------------------------- 5 files changed, 28 insertions(+), 52 deletions(-) (limited to 'synapse/http') diff --git a/CHANGES.rst b/CHANGES.rst index 38372381ac..5fbad54427 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,12 @@ +Changes in synapse v0.28.0 (2018-xx-xx) +======================================= + +As previously advised, this release removes a number of redundant Prometheus +metrics. Administrators may need to update their dashboards and alerting rules +to use the updated metric names, if they have not already done so. See +`docs/metrics-howto.rst `_ +for more details. + Changes in synapse v0.27.2 (2018-03-26) ======================================= diff --git a/UPGRADE.rst b/UPGRADE.rst index f6bb1070b1..39a16b1c0c 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -52,7 +52,7 @@ Upgrading to $NEXT_VERSION ==================== This release expands the anonymous usage stats sent if the opt-in -``report_stats`` configuration is set to ``true``. We now capture RSS memory +``report_stats`` configuration is set to ``true``. We now capture RSS memory and cpu use at a very coarse level. This requires administrators to install the optional ``psutil`` python module. @@ -60,6 +60,13 @@ We would appreciate it if you could assist by ensuring this module is available and ``report_stats`` is enabled. This will let us see if performance changes to synapse are having an impact to the general community. +This release also removes a number of redundant Prometheus metrics. +Administrators may need to update their dashboards and alerting rules to use +the updated metric names, if they have not already done so. See +`docs/metrics-howto.rst `_ +for more details. + + Upgrading to v0.15.0 ==================== diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst index 8acc479bc3..5e2d7c52ec 100644 --- a/docs/metrics-howto.rst +++ b/docs/metrics-howto.rst @@ -34,6 +34,17 @@ How to monitor Synapse metrics using Prometheus Restart prometheus. +Deprecated metrics removed in 0.28.0 +------------------------------------ + +Synapse 0.28.0 removes all of the metrics deprecated by 0.27.0, which are those +listed under "Old name" below. This has been done to reduce the bandwidth used +by gathering metrics and the storage requirements for the Prometheus server, as +well as reducing CPU overhead for both Synapse and Prometheus. + +Administrators should update any alerts or monitoring dashboards to use the +"New name" listed below. 
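# A rough sketch, in terms of the synapse.metrics registration API shown in
# the hunks below, of the deprecated alias mechanism being removed: each
# counter was exported under its new Prometheus name plus an old-style alias.
response_timer = metrics.register_counter(
    "response_time_seconds", labels=["method", "servlet", "tag"],
    alternative_names=(
        metrics.name_prefix + "_response_time:total",  # pre-0.27.0 alias
    ),
)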
+
 Block and response metrics renamed for 0.27.0
 ---------------------------------------------
 
diff --git a/synapse/http/server.py b/synapse/http/server.py
index f19c068ef6..02c7e46f08 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -47,17 +47,6 @@ metrics = synapse.metrics.get_metrics_for(__name__)
 
 response_count = metrics.register_counter(
     "response_count", labels=["method", "servlet", "tag"],
-    alternative_names=(
-        # the following are all deprecated aliases for the same metric
-        metrics.name_prefix + x for x in (
-            "_requests",
-            "_response_time:count",
-            "_response_ru_utime:count",
-            "_response_ru_stime:count",
-            "_response_db_txn_count:count",
-            "_response_db_txn_duration:count",
-        )
-    )
 )
 
 requests_counter = metrics.register_counter(
@@ -73,39 +62,24 @@ outgoing_responses_counter = metrics.register_counter(
 
 response_timer = metrics.register_counter(
     "response_time_seconds", labels=["method", "servlet", "tag"],
-    alternative_names=(
-        metrics.name_prefix + "_response_time:total",
-    ),
 )
 
 response_ru_utime = metrics.register_counter(
     "response_ru_utime_seconds", labels=["method", "servlet", "tag"],
-    alternative_names=(
-        metrics.name_prefix + "_response_ru_utime:total",
-    ),
 )
 
 response_ru_stime = metrics.register_counter(
     "response_ru_stime_seconds", labels=["method", "servlet", "tag"],
-    alternative_names=(
-        metrics.name_prefix + "_response_ru_stime:total",
-    ),
 )
 
 response_db_txn_count = metrics.register_counter(
     "response_db_txn_count", labels=["method", "servlet", "tag"],
-    alternative_names=(
-        metrics.name_prefix + "_response_db_txn_count:total",
-    ),
 )
 
 # seconds spent waiting for db txns, excluding scheduling time, when processing
 # this request
 response_db_txn_duration = metrics.register_counter(
     "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"],
-    alternative_names=(
-        metrics.name_prefix + "_response_db_txn_duration:total",
-    ),
 )
 
 # seconds spent waiting for a db connection, when processing this request
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index e4b5687a4b..c3d8237e8f 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -31,53 +31,28 @@ metrics = synapse.metrics.get_metrics_for(__name__)
 
 block_counter = metrics.register_counter(
     "block_count", labels=["block_name"],
-    alternative_names=(
-        # the following are all deprecated aliases for the same metric
-        metrics.name_prefix + x for x in (
-            "_block_timer:count",
-            "_block_ru_utime:count",
-            "_block_ru_stime:count",
-            "_block_db_txn_count:count",
-            "_block_db_txn_duration:count",
-        )
-    )
 )
 
 block_timer = metrics.register_counter(
     "block_time_seconds", labels=["block_name"],
-    alternative_names=(
-        metrics.name_prefix + "_block_timer:total",
-    ),
 )
 
 block_ru_utime = metrics.register_counter(
     "block_ru_utime_seconds", labels=["block_name"],
-    alternative_names=(
-        metrics.name_prefix + "_block_ru_utime:total",
-    ),
 )
 
 block_ru_stime = metrics.register_counter(
     "block_ru_stime_seconds", labels=["block_name"],
-    alternative_names=(
-        metrics.name_prefix + "_block_ru_stime:total",
-    ),
 )
 
 block_db_txn_count = metrics.register_counter(
     "block_db_txn_count", labels=["block_name"],
-    alternative_names=(
-        metrics.name_prefix + "_block_db_txn_count:total",
-    ),
 )
 
 # seconds spent waiting for db txns, excluding scheduling time, in this block
 block_db_txn_duration = metrics.register_counter(
     "block_db_txn_duration_seconds", labels=["block_name"],
-    alternative_names=(
-        metrics.name_prefix + "_block_db_txn_duration:total",
-    ),
 )
 
 # seconds spent waiting for a db connection, in this block
-- 
cgit 1.5.1

From 1d71f484d4ec00fd41e3ef195622d0d5dba6d372 Mon Sep 17 00:00:00 2001
From: Krombel
Date: Fri, 6 Apr 2018 12:54:09 +0200
Subject: use PUT instead of POST for federating groups/m.join_policy

---
 synapse/federation/transport/client.py | 2 +-
 synapse/federation/transport/server.py | 2 +-
 synapse/http/matrixfederationclient.py | 6 +++++-
 3 files changed, 7 insertions(+), 3 deletions(-)

(limited to 'synapse/http')

diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 1fe162d55b..3beab47832 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -864,7 +864,7 @@ class TransportLayerClient(object):
         """
         path = PREFIX + "/groups/%s/settings/m.join_policy" % (group_id,)
 
-        return self.client.post_json(
+        return self.client.put_json(
             destination=destination,
             path=path,
             args={"requester_user_id": requester_user_id},
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 3658ca75f3..b98e30459c 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -1131,7 +1131,7 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
     PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy$"
 
     @defer.inlineCallbacks
-    def on_POST(self, origin, content, query, group_id):
+    def on_PUT(self, origin, content, query, group_id):
         requester_user_id = parse_string_from_args(query, "requester_user_id")
         if get_domain_from_id(requester_user_id) != origin:
             raise SynapseError(403, "requester_user_id doesn't match origin")
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 9145405cb0..60a29081e8 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -286,7 +286,8 @@ class MatrixFederationHttpClient(object):
             headers_dict[b"Authorization"] = auth_headers
 
     @defer.inlineCallbacks
-    def put_json(self, destination, path, data={}, json_data_callback=None,
+    def put_json(self, destination, path, args={}, data={},
+                 json_data_callback=None,
                  long_retries=False, timeout=None,
                  ignore_backoff=False,
                  backoff_on_404=False):
@@ -296,6 +297,7 @@ class MatrixFederationHttpClient(object):
             destination (str): The remote server to send the HTTP request
                 to.
             path (str): The HTTP path.
+            args (dict): query params
             data (dict): A dict containing the data that will be used as
                 the request body. This will be encoded as JSON.
             json_data_callback (callable): A callable returning the dict to
@@ -342,6 +344,7 @@ class MatrixFederationHttpClient(object):
             path,
             body_callback=body_callback,
             headers_dict={"Content-Type": ["application/json"]},
+            query_bytes=encode_query_args(args),
             long_retries=long_retries,
             timeout=timeout,
             ignore_backoff=ignore_backoff,
@@ -373,6 +376,7 @@ class MatrixFederationHttpClient(object):
                 giving up. None indicates no timeout.
             ignore_backoff (bool): true to ignore the historical backoff data and
                 try the request anyway.
+            args (dict): query params
 
         Returns:
             Deferred: Succeeds when we get a 2xx HTTP response. The
                 result will be the decoded JSON body.
-- 
cgit 1.5.1

From 7b824f147590c6519ca8889e6d406928de7f1e6e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 6 Apr 2018 13:20:05 +0100
Subject: Add response size metrics

---
 synapse/http/server.py | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'synapse/http')

diff --git a/synapse/http/server.py b/synapse/http/server.py
index 02c7e46f08..ac75206ef5 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -87,6 +87,11 @@ response_db_sched_duration = metrics.register_counter(
     "response_db_sched_duration_seconds", labels=["method", "servlet", "tag"]
 )
 
+# size in bytes of the response written
+response_size = metrics.register_counter(
+    "response_size", labels=["method", "servlet", "tag"]
+)
+
 _next_request_id = 0
 
 
@@ -400,6 +405,8 @@ class RequestMetrics(object):
             context.db_sched_duration_ms / 1000., request.method, self.name, tag
         )
 
+        response_size.inc_by(request.sentLength, request.method, self.name, tag)
+
 
 class RootRedirect(resource.Resource):
     """Redirects the root '/' path to another path."""
-- 
cgit 1.5.1

From 13decdbf96981782616a3ee1826fce1213a1bc89 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 9 Apr 2018 12:58:37 +0100
Subject: Revert "Merge pull request #3066 from matrix-org/rav/remove_redundant_metrics"

We aren't ready to release this yet, so I'm reverting it for now.

This reverts commit d1679a4ed7947b0814e0f2af9b888a16c588f1a1, reversing
changes made to e089100c6231541c446e37e157dec8feed02d283.
---
 CHANGES.rst             |  9 ---------
 UPGRADE.rst             |  9 +--------
 docs/metrics-howto.rst  | 11 -----------
 synapse/http/server.py  | 26 ++++++++++++++++++++++++++
 synapse/util/metrics.py | 25 +++++++++++++++++++++++++
 5 files changed, 52 insertions(+), 28 deletions(-)

(limited to 'synapse/http')

diff --git a/CHANGES.rst b/CHANGES.rst
index 5fbad54427..38372381ac 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,12 +1,3 @@
-Changes in synapse v0.28.0 (2018-xx-xx)
-=======================================
-
-As previously advised, this release removes a number of redundant Prometheus
-metrics. Administrators may need to update their dashboards and alerting rules
-to use the updated metric names, if they have not already done so. See
-`docs/metrics-howto.rst `_
-for more details.
-
 Changes in synapse v0.27.2 (2018-03-26)
 =======================================
 
diff --git a/UPGRADE.rst b/UPGRADE.rst
index 39a16b1c0c..f6bb1070b1 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -52,7 +52,7 @@ Upgrading to $NEXT_VERSION
 ====================
 
 This release expands the anonymous usage stats sent if the opt-in
-``report_stats`` configuration is set to ``true``. We now capture RSS memory
+``report_stats`` configuration is set to ``true``. We now capture RSS memory 
 and cpu use at a very coarse level. This requires administrators to install
 the optional ``psutil`` python module.
 
@@ -60,13 +60,6 @@ We would appreciate it if you could assist by ensuring this module is available
 and ``report_stats`` is enabled. This will let us see if performance changes to
 synapse are having an impact to the general community.
 
-This release also removes a number of redundant Prometheus metrics.
-Administrators may need to update their dashboards and alerting rules to use
-the updated metric names, if they have not already done so. See
-`docs/metrics-howto.rst `_
-for more details.
-
-
 Upgrading to v0.15.0
 ====================
 
diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst
index 5e2d7c52ec..8acc479bc3 100644
--- a/docs/metrics-howto.rst
+++ b/docs/metrics-howto.rst
@@ -34,17 +34,6 @@ How to monitor Synapse metrics using Prometheus
 
    Restart prometheus.
 
-Deprecated metrics removed in 0.28.0
-------------------------------------
-
-Synapse 0.28.0 removes all of the metrics deprecated by 0.27.0, which are those
-listed under "Old name" below. This has been done to reduce the bandwidth used
-by gathering metrics and the storage requirements for the Prometheus server, as
-well as reducing CPU overhead for both Synapse and Prometheus.
-
-Administrators should update any alerts or monitoring dashboards to use the
-"New name" listed below.
-
 Block and response metrics renamed for 0.27.0
 ---------------------------------------------
 
diff --git a/synapse/http/server.py b/synapse/http/server.py
index ac75206ef5..64e083ebfc 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -47,6 +47,17 @@ metrics = synapse.metrics.get_metrics_for(__name__)
 
 response_count = metrics.register_counter(
     "response_count", labels=["method", "servlet", "tag"],
+    alternative_names=(
+        # the following are all deprecated aliases for the same metric
+        metrics.name_prefix + x for x in (
+            "_requests",
+            "_response_time:count",
+            "_response_ru_utime:count",
+            "_response_ru_stime:count",
+            "_response_db_txn_count:count",
+            "_response_db_txn_duration:count",
+        )
+    )
 )
 
 requests_counter = metrics.register_counter(
@@ -62,24 +73,39 @@ outgoing_responses_counter = metrics.register_counter(
 
 response_timer = metrics.register_counter(
     "response_time_seconds", labels=["method", "servlet", "tag"],
+    alternative_names=(
+        metrics.name_prefix + "_response_time:total",
+    ),
 )
 
 response_ru_utime = metrics.register_counter(
     "response_ru_utime_seconds", labels=["method", "servlet", "tag"],
+    alternative_names=(
+        metrics.name_prefix + "_response_ru_utime:total",
+    ),
 )
 
 response_ru_stime = metrics.register_counter(
     "response_ru_stime_seconds", labels=["method", "servlet", "tag"],
+    alternative_names=(
+        metrics.name_prefix + "_response_ru_stime:total",
+    ),
 )
 
 response_db_txn_count = metrics.register_counter(
     "response_db_txn_count", labels=["method", "servlet", "tag"],
+    alternative_names=(
+        metrics.name_prefix + "_response_db_txn_count:total",
+    ),
 )
 
 # seconds spent waiting for db txns, excluding scheduling time, when processing
 # this request
 response_db_txn_duration = metrics.register_counter(
     "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"],
+    alternative_names=(
+        metrics.name_prefix + "_response_db_txn_duration:total",
+    ),
 )
 
 # seconds spent waiting for a db connection, when processing this request
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index c3d8237e8f..e4b5687a4b 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -31,28 +31,53 @@ metrics = synapse.metrics.get_metrics_for(__name__)
 
 block_counter = metrics.register_counter(
     "block_count", labels=["block_name"],
+    alternative_names=(
+        # the following are all deprecated aliases for the same metric
+        metrics.name_prefix + x for x in (
+            "_block_timer:count",
+            "_block_ru_utime:count",
+            "_block_ru_stime:count",
+            "_block_db_txn_count:count",
+            "_block_db_txn_duration:count",
+        )
+    )
 )
 
 block_timer = metrics.register_counter(
     "block_time_seconds", labels=["block_name"],
+    alternative_names=(
+        metrics.name_prefix + "_block_timer:total",
+    ),
 )
 
 block_ru_utime = metrics.register_counter(
     "block_ru_utime_seconds", labels=["block_name"],
+    alternative_names=(
+        metrics.name_prefix + "_block_ru_utime:total",
+    ),
 )
 
 block_ru_stime = metrics.register_counter(
     "block_ru_stime_seconds", labels=["block_name"],
+    alternative_names=(
+        metrics.name_prefix + "_block_ru_stime:total",
+    ),
 )
 
 block_db_txn_count = metrics.register_counter(
     "block_db_txn_count", labels=["block_name"],
+    alternative_names=(
+        metrics.name_prefix + "_block_db_txn_count:total",
+    ),
 )
 
 # seconds spent waiting for db txns, excluding scheduling time, in this block
 block_db_txn_duration = metrics.register_counter(
     "block_db_txn_duration_seconds", labels=["block_name"],
+    alternative_names=(
+        metrics.name_prefix + "_block_db_txn_duration:total",
+    ),
 )
 
 # seconds spent waiting for a db connection, in this block
-- 
cgit 1.5.1

From 2a3c33ff03aa88317c30da43cd3773c2789f0fcf Mon Sep 17 00:00:00 2001
From: Adrian Tschira
Date: Sun, 15 Apr 2018 17:15:16 +0200
Subject: Use six.moves.urlparse

The imports were shuffled around a bunch in py3

Signed-off-by: Adrian Tschira
---
 synapse/config/appservice.py              |  4 ++--
 synapse/http/matrixfederationclient.py    |  3 +--
 synapse/rest/client/v1/login.py           |  2 +-
 synapse/rest/client/v1/room.py            |  9 +++++----
 synapse/rest/media/v1/_base.py            |  2 +-
 synapse/rest/media/v1/media_repository.py |  2 +-
 tests/rest/client/v1/test_rooms.py        | 14 +++++++-------
 tests/utils.py                            |  5 ++---
 8 files changed, 20 insertions(+), 21 deletions(-)

(limited to 'synapse/http')

diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index 9a2359b6fd..277305e184 100644
--- a/synapse/config/appservice.py
+++ b/synapse/config/appservice.py
@@ -17,11 +17,11 @@ from ._base import Config, ConfigError
 from synapse.appservice import ApplicationService
 from synapse.types import UserID
 
-import urllib
 import yaml
 import logging
 
 from six import string_types
+from six.moves.urllib import parse as urlparse
 
 logger = logging.getLogger(__name__)
 
@@ -105,7 +105,7 @@ def _load_appservice(hostname, as_info, config_filename):
         )
 
     localpart = as_info["sender_localpart"]
-    if urllib.quote(localpart) != localpart:
+    if urlparse.quote(localpart) != localpart:
         raise ValueError(
             "sender_localpart needs characters which are not URL encoded."
         )
 
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 60a29081e8..c2e5610f50 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -38,8 +38,7 @@ import logging
 import random
 import sys
 import urllib
-import urlparse
-
+from six.moves.urllib import parse as urlparse
 
 logger = logging.getLogger(__name__)
 outbound_logger = logging.getLogger("synapse.http.outbound")
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 45844aa2d2..34df5be4e9 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -25,7 +25,7 @@ from .base import ClientV1RestServlet, client_path_patterns
 
 import simplejson as json
 import urllib
-import urlparse
+from six.moves.urllib import parse as urlparse
 
 import logging
 from saml2 import BINDING_HTTP_POST
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 2ad0e5943b..fcf9c9ab44 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -28,8 +28,9 @@ from synapse.http.servlet import (
     parse_json_object_from_request, parse_string, parse_integer
 )
 
+from six.moves.urllib import parse as urlparse
+
 import logging
-import urllib
 import simplejson as json
 
 logger = logging.getLogger(__name__)
@@ -433,7 +434,7 @@ class RoomMessageListRestServlet(ClientV1RestServlet):
         as_client_event = "raw" not in request.args
         filter_bytes = request.args.get("filter", None)
         if filter_bytes:
-            filter_json = urllib.unquote(filter_bytes[-1]).decode("UTF-8")
+            filter_json = urlparse.unquote(filter_bytes[-1]).decode("UTF-8")
             event_filter = Filter(json.loads(filter_json))
         else:
             event_filter = None
@@ -718,8 +719,8 @@ class RoomTypingRestServlet(ClientV1RestServlet):
     def on_PUT(self, request, room_id, user_id):
         requester = yield self.auth.get_user_by_req(request)
 
-        room_id = urllib.unquote(room_id)
-        target_user = UserID.from_string(urllib.unquote(user_id))
+        room_id = urlparse.unquote(room_id)
+        target_user = UserID.from_string(urlparse.unquote(user_id))
 
         content = parse_json_object_from_request(request)
 
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index e7ac01da01..d9c4af9389 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -28,7 +28,7 @@ import os
 
 import logging
 import urllib
-import urlparse
+from six.moves.urllib import parse as urlparse
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index bb79599379..9800ce7581 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -47,7 +47,7 @@ import shutil
 
 import cgi
 import logging
-import urlparse
+from six.moves.urllib import parse as urlparse
 
 logger = logging.getLogger(__name__)
 
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index 7e8966a1a8..d763400eaf 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -24,7 +24,7 @@ from synapse.api.constants import Membership
 from synapse.types import UserID
 
 import json
-import urllib
+from six.moves.urllib import parse as urlparse
 
 from ....utils import MockHttpResource, setup_test_homeserver
 from .utils import RestTestCase
@@ -766,7 +766,7 @@ class RoomMemberStateTestCase(RestTestCase):
     @defer.inlineCallbacks
     def test_rooms_members_self(self):
         path = "/rooms/%s/state/m.room.member/%s" % (
-            urllib.quote(self.room_id), self.user_id
+            urlparse.quote(self.room_id), self.user_id
         )
 
         # valid join message (NOOP since we made the room)
@@ -786,7 +786,7 @@ class RoomMemberStateTestCase(RestTestCase):
     def test_rooms_members_other(self):
         self.other_id = "@zzsid1:red"
         path = "/rooms/%s/state/m.room.member/%s" % (
-            urllib.quote(self.room_id), self.other_id
+            urlparse.quote(self.room_id), self.other_id
         )
 
         # valid invite message
@@ -802,7 +802,7 @@ class RoomMemberStateTestCase(RestTestCase):
     def test_rooms_members_other_custom_keys(self):
         self.other_id = "@zzsid1:red"
         path = "/rooms/%s/state/m.room.member/%s" % (
-            urllib.quote(self.room_id), self.other_id
+            urlparse.quote(self.room_id), self.other_id
         )
 
         # valid invite message with custom key
@@ -859,7 +859,7 @@ class RoomMessagesTestCase(RestTestCase):
     @defer.inlineCallbacks
     def test_invalid_puts(self):
         path = "/rooms/%s/send/m.room.message/mid1" % (
-            urllib.quote(self.room_id))
+            urlparse.quote(self.room_id))
         # missing keys or invalid json
         (code, response) = yield self.mock_resource.trigger(
             "PUT", path, '{}'
         )
@@ -894,7 +894,7 @@ class RoomMessagesTestCase(RestTestCase):
     @defer.inlineCallbacks
     def test_rooms_messages_sent(self):
         path = "/rooms/%s/send/m.room.message/mid1" % (
-            urllib.quote(self.room_id))
+            urlparse.quote(self.room_id))
 
         content = '{"body":"test","msgtype":{"type":"a"}}'
         (code, response) = yield self.mock_resource.trigger("PUT", path, content)
@@ -911,7 +911,7 @@ class RoomMessagesTestCase(RestTestCase):
 
         # m.text message type
         path = "/rooms/%s/send/m.room.message/mid2" % (
-            urllib.quote(self.room_id))
+            urlparse.quote(self.room_id))
         content = '{"body":"test2","msgtype":"m.text"}'
         (code, response) = yield self.mock_resource.trigger("PUT", path, content)
         self.assertEquals(200, code, msg=str(response))
diff --git a/tests/utils.py b/tests/utils.py
index f15317d27b..9e815d8643 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -15,8 +15,7 @@
 
 import hashlib
 from inspect import getcallargs
-import urllib
-import urlparse
+from six.moves.urllib import parse as urlparse
 
 from mock import Mock, patch
 from twisted.internet import defer, reactor
@@ -234,7 +233,7 @@ class MockHttpResource(HttpServer):
             if matcher:
                 try:
                     args = [
-                        urllib.unquote(u).decode("UTF-8")
+                        urlparse.unquote(u).decode("UTF-8")
                         for u in matcher.groups()
                     ]
-- 
cgit 1.5.1

From 1ea904b9f09ea6a9df7792f61029d0250602d46b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 23 Apr 2018 00:53:18 +0100
Subject: Use deferred.addTimeout instead of time_bound_deferred

This doesn't feel like a wheel we need to reinvent.

---
 synapse/http/__init__.py               | 22 +++++++++++++
 synapse/http/client.py                 | 20 +++++-------
 synapse/http/matrixfederationclient.py | 35 ++++++++++-----------
 synapse/notifier.py                    | 23 +++++++-------
 synapse/util/__init__.py               | 56 ----------------------------------
 tests/util/test_clock.py               | 33 --------------------
 6 files changed, 59 insertions(+), 130 deletions(-)
 delete mode 100644 tests/util/test_clock.py

(limited to 'synapse/http')

diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py
index bfebb0f644..20e568bc43 100644
--- a/synapse/http/__init__.py
+++ b/synapse/http/__init__.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,3 +13,24 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from twisted.internet.defer import CancelledError
+from twisted.python import failure
+
+from synapse.api.errors import SynapseError
+
+
+class RequestTimedOutError(SynapseError):
+    """Exception representing timeout of an outbound request"""
+    def __init__(self):
+        super(RequestTimedOutError, self).__init__(504, "Timed out")
+
+
+def cancelled_to_request_timed_out_error(value):
+    """Turns CancelledErrors into RequestTimedOutErrors.
+
+    For use with deferred.addTimeout()
+    """
+    if isinstance(value, failure.Failure):
+        value.trap(CancelledError)
+        raise RequestTimedOutError()
+    return value
diff --git a/synapse/http/client.py b/synapse/http/client.py
index f3e4973c2e..35c8d51e71 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,9 +19,9 @@ from OpenSSL.SSL import VERIFY_NONE
 from synapse.api.errors import (
     CodeMessageException, MatrixCodeMessageException, SynapseError, Codes,
 )
+from synapse.http import cancelled_to_request_timed_out_error
 from synapse.util.caches import CACHE_SIZE_FACTOR
 from synapse.util.logcontext import make_deferred_yieldable
-from synapse.util import logcontext
 import synapse.metrics
 from synapse.http.endpoint import SpiderEndpoint
 
@@ -95,21 +96,16 @@ class SimpleHttpClient(object):
         # counters to it
         outgoing_requests_counter.inc(method)
 
-        def send_request():
+        logger.info("Sending request %s %s", method, uri)
+
+        try:
             request_deferred = self.agent.request(
                 method, uri, *args, **kwargs
             )
-
-            return self.clock.time_bound_deferred(
-                request_deferred,
-                time_out=60,
+            request_deferred.addTimeout(
+                60, reactor, cancelled_to_request_timed_out_error,
             )
-
-        logger.info("Sending request %s %s", method, uri)
-
-        try:
-            with logcontext.PreserveLoggingContext():
-                response = yield send_request()
+            response = yield make_deferred_yieldable(request_deferred)
 
             incoming_responses_counter.inc(method, response.code)
             logger.info(
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 60a29081e8..fe4b1636a6 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,17 +13,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
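# A sketch of the new timeout pattern adopted by this commit, assuming the
# imports below; `agent`, `method` and `uri` are illustrative caller-supplied
# values, not names from the patches.
from twisted.internet import defer, reactor
from synapse.http import cancelled_to_request_timed_out_error
from synapse.util.logcontext import make_deferred_yieldable

@defer.inlineCallbacks
def request_with_timeout(agent, method, uri):
    request_deferred = agent.request(method, uri)
    # After 60s Twisted cancels the deferred; the converter re-raises the
    # resulting CancelledError as a 504 RequestTimedOutError.
    request_deferred.addTimeout(60, reactor, cancelled_to_request_timed_out_error)
    response = yield make_deferred_yieldable(request_deferred)
    defer.returnValue(response)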
-import synapse.util.retryutils
 from twisted.internet import defer, reactor, protocol
 from twisted.internet.error import DNSLookupError
 from twisted.web.client import readBody, HTTPConnectionPool, Agent
 from twisted.web.http_headers import Headers
 from twisted.web._newclient import ResponseDone
 
+from synapse.http import cancelled_to_request_timed_out_error
 from synapse.http.endpoint import matrix_federation_endpoint
+import synapse.metrics
 from synapse.util.async import sleep
 from synapse.util import logcontext
-import synapse.metrics
+from synapse.util.logcontext import make_deferred_yieldable
+import synapse.util.retryutils
 
 from canonicaljson import encode_canonical_json
 
@@ -184,21 +187,19 @@ class MatrixFederationHttpClient(object):
                     producer = body_callback(method, http_url_bytes, headers_dict)
 
                 try:
-                    def send_request():
-                        request_deferred = self.agent.request(
-                            method,
-                            url_bytes,
-                            Headers(headers_dict),
-                            producer
-                        )
-
-                        return self.clock.time_bound_deferred(
-                            request_deferred,
-                            time_out=timeout / 1000. if timeout else 60,
-                        )
-
-                    with logcontext.PreserveLoggingContext():
-                        response = yield send_request()
+                    request_deferred = self.agent.request(
+                        method,
+                        url_bytes,
+                        Headers(headers_dict),
+                        producer
+                    )
+                    request_deferred.addTimeout(
+                        timeout / 1000. if timeout else 60,
+                        reactor, cancelled_to_request_timed_out_error,
+                    )
+                    response = yield make_deferred_yieldable(
+                        request_deferred,
+                    )
 
                     log_result = "%d %s" % (response.code, response.phrase,)
                     break
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 0e40a4aad6..1e4f78b993 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -13,12 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+from twisted.internet import defer, reactor
+from twisted.internet.defer import TimeoutError
+
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError
 from synapse.handlers.presence import format_user_presence_state
-from synapse.util import DeferredTimedOutError
 from synapse.util.logutils import log_function
 from synapse.util.async import ObservableDeferred
 from synapse.util.logcontext import PreserveLoggingContext, preserve_fn
@@ -331,11 +332,11 @@ class Notifier(object):
                     # Now we wait for the _NotifierUserStream to be told there
                     # is a new token.
                     listener = user_stream.new_listener(prev_token)
+                    listener.deferred.addTimeout(
+                        (end_time - now) / 1000., reactor,
+                    )
                     with PreserveLoggingContext():
-                        yield self.clock.time_bound_deferred(
-                            listener.deferred,
-                            time_out=(end_time - now) / 1000.
-                        )
+                        yield listener.deferred
 
                     current_token = user_stream.current_token
 
@@ -346,7 +347,7 @@ class Notifier(object):
                     # Update the prev_token to the current_token since nothing
                     # has happened between the old prev_token and the current_token
                     prev_token = current_token
-                except DeferredTimedOutError:
+                except TimeoutError:
                     break
                 except defer.CancelledError:
                     break
@@ -551,13 +552,11 @@ class Notifier(object):
             if end_time <= now:
                 break
 
+            listener.deferred.addTimeout((end_time - now) / 1000., reactor)
             try:
                 with PreserveLoggingContext():
-                    yield self.clock.time_bound_deferred(
-                        listener.deferred,
-                        time_out=(end_time - now) / 1000.
-                    )
-            except DeferredTimedOutError:
+                    yield listener.deferred
+            except TimeoutError:
                 break
             except defer.CancelledError:
                 break
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 756d8ffa32..814a7bf71b 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.api.errors import SynapseError
 from synapse.util.logcontext import PreserveLoggingContext
 
 from twisted.internet import defer, reactor, task
@@ -24,11 +23,6 @@ import logging
 logger = logging.getLogger(__name__)
 
 
-class DeferredTimedOutError(SynapseError):
-    def __init__(self):
-        super(DeferredTimedOutError, self).__init__(504, "Timed out")
-
-
 def unwrapFirstError(failure):
     # defer.gatherResults and DeferredLists wrap failures.
     failure.trap(defer.FirstError)
@@ -85,53 +79,3 @@ class Clock(object):
         except Exception:
             if not ignore_errs:
                 raise
-
-    def time_bound_deferred(self, given_deferred, time_out):
-        if given_deferred.called:
-            return given_deferred
-
-        ret_deferred = defer.Deferred()
-
-        def timed_out_fn():
-            e = DeferredTimedOutError()
-
-            try:
-                ret_deferred.errback(e)
-            except Exception:
-                pass
-
-            try:
-                given_deferred.cancel()
-            except Exception:
-                pass
-
-        timer = None
-
-        def cancel(res):
-            try:
-                self.cancel_call_later(timer)
-            except Exception:
-                pass
-            return res
-
-        ret_deferred.addBoth(cancel)
-
-        def success(res):
-            try:
-                ret_deferred.callback(res)
-            except Exception:
-                pass
-
-            return res
-
-        def err(res):
-            try:
-                ret_deferred.errback(res)
-            except Exception:
-                pass
-
-        given_deferred.addCallbacks(callback=success, errback=err)
-
-        timer = self.call_later(time_out, timed_out_fn)
-
-        return ret_deferred
diff --git a/tests/util/test_clock.py b/tests/util/test_clock.py
deleted file mode 100644
index 9672603579..0000000000
--- a/tests/util/test_clock.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2017 Vector Creations Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from synapse import util
-from twisted.internet import defer
-from tests import unittest
-
-
-class ClockTestCase(unittest.TestCase):
-    @defer.inlineCallbacks
-    def test_time_bound_deferred(self):
-        # just a deferred which never resolves
-        slow_deferred = defer.Deferred()
-
-        clock = util.Clock()
-        time_bound = clock.time_bound_deferred(slow_deferred, 0.001)
-
-        try:
-            yield time_bound
-            self.fail("Expected timedout error, but got nothing")
-        except util.DeferredTimedOutError:
-            pass
-- 
cgit 1.5.1

From 9d2c1b8429996cb9766ac636034485e2a1d685cc Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 27 Apr 2018 12:52:30 +0100
Subject: Backport deferred.addTimeout

Twisted 16.0 doesn't have addTimeout, so let's backport it.
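A rough caller's-eye sketch of the backport (the five-second wait and the
listener object are illustrative, not from this patch):

from twisted.internet import defer
from synapse.util.async import add_timeout_to_deferred, DeferredTimeoutError

@defer.inlineCallbacks
def wait_for_listener(listener):
    # Cancel the deferred after 5s; the default converter turns the
    # resulting CancelledError into a DeferredTimeoutError.
    add_timeout_to_deferred(listener.deferred, 5.0)
    try:
        result = yield listener.deferred
    except DeferredTimeoutError:
        result = None  # timed out waiting; treat as "no new events"
    defer.returnValue(result)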
---
 synapse/http/__init__.py               |  2 +-
 synapse/http/client.py                 |  6 ++-
 synapse/http/matrixfederationclient.py |  7 ++--
 synapse/notifier.py                    | 22 +++++++----
 synapse/util/async.py                  | 67 ++++++++++++++++++++++++++++++++++
 5 files changed, 90 insertions(+), 14 deletions(-)

(limited to 'synapse/http')

diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py
index 20e568bc43..0d47ccdb59 100644
--- a/synapse/http/__init__.py
+++ b/synapse/http/__init__.py
@@ -28,7 +28,7 @@ class RequestTimedOutError(SynapseError):
 def cancelled_to_request_timed_out_error(value):
     """Turns CancelledErrors into RequestTimedOutErrors.
 
-    For use with deferred.addTimeout()
+    For use with async.add_timeout_to_deferred
     """
     if isinstance(value, failure.Failure):
         value.trap(CancelledError)
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 35c8d51e71..62309c3365 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -20,6 +20,7 @@ from synapse.api.errors import (
     CodeMessageException, MatrixCodeMessageException, SynapseError, Codes,
 )
 from synapse.http import cancelled_to_request_timed_out_error
+from synapse.util.async import add_timeout_to_deferred
 from synapse.util.caches import CACHE_SIZE_FACTOR
 from synapse.util.logcontext import make_deferred_yieldable
 import synapse.metrics
@@ -102,8 +103,9 @@ class SimpleHttpClient(object):
             request_deferred = self.agent.request(
                 method, uri, *args, **kwargs
             )
-            request_deferred.addTimeout(
-                60, reactor, cancelled_to_request_timed_out_error,
+            add_timeout_to_deferred(
+                request_deferred,
+                60, cancelled_to_request_timed_out_error,
             )
             response = yield make_deferred_yieldable(request_deferred)
 
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index fe4b1636a6..30036fe81c 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -22,7 +22,7 @@ from twisted.web._newclient import ResponseDone
 from synapse.http import cancelled_to_request_timed_out_error
 from synapse.http.endpoint import matrix_federation_endpoint
 import synapse.metrics
-from synapse.util.async import sleep
+from synapse.util.async import sleep, add_timeout_to_deferred
 from synapse.util import logcontext
 from synapse.util.logcontext import make_deferred_yieldable
 import synapse.util.retryutils
@@ -193,9 +193,10 @@ class MatrixFederationHttpClient(object):
                         Headers(headers_dict),
                         producer
                     )
-                    request_deferred.addTimeout(
+                    add_timeout_to_deferred(
+                        request_deferred,
                         timeout / 1000. if timeout else 60,
-                        reactor, cancelled_to_request_timed_out_error,
+                        cancelled_to_request_timed_out_error,
                     )
                     response = yield make_deferred_yieldable(
                         request_deferred,
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 1e4f78b993..a1c06e8fca 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -13,15 +13,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer, reactor
-from twisted.internet.defer import TimeoutError
+from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError
 from synapse.handlers.presence import format_user_presence_state
 
 from synapse.util.logutils import log_function
-from synapse.util.async import ObservableDeferred
+from synapse.util.async import (
+    ObservableDeferred, add_timeout_to_deferred,
+    DeferredTimeoutError,
+)
 from synapse.util.logcontext import PreserveLoggingContext, preserve_fn
 from synapse.util.metrics import Measure
 from synapse.types import StreamToken
@@ -332,8 +334,9 @@ class Notifier(object):
                     # Now we wait for the _NotifierUserStream to be told there
                     # is a new token.
                     listener = user_stream.new_listener(prev_token)
-                    listener.deferred.addTimeout(
-                        (end_time - now) / 1000., reactor,
+                    add_timeout_to_deferred(
+                        listener.deferred,
+                        (end_time - now) / 1000.,
                     )
                     with PreserveLoggingContext():
                         yield listener.deferred
@@ -347,7 +350,7 @@ class Notifier(object):
                     # Update the prev_token to the current_token since nothing
                     # has happened between the old prev_token and the current_token
                     prev_token = current_token
-                except TimeoutError:
+                except DeferredTimeoutError:
                     break
                 except defer.CancelledError:
                     break
@@ -552,11 +555,14 @@ class Notifier(object):
             if end_time <= now:
                 break
 
-            listener.deferred.addTimeout((end_time - now) / 1000., reactor)
+            add_timeout_to_deferred(
+                listener.deferred.addTimeout,
+                (end_time - now) / 1000.,
+            )
             try:
                 with PreserveLoggingContext():
                     yield listener.deferred
-            except TimeoutError:
+            except DeferredTimeoutError:
                 break
             except defer.CancelledError:
                 break
diff --git a/synapse/util/async.py b/synapse/util/async.py
index 0729bb2863..1df5c5600c 100644
--- a/synapse/util/async.py
+++ b/synapse/util/async.py
@@ -15,6 +15,8 @@
 
 from twisted.internet import defer, reactor
+from twisted.internet.defer import CancelledError
+from twisted.python import failure
 
 from .logcontext import (
     PreserveLoggingContext, make_deferred_yieldable, preserve_fn
@@ -392,3 +394,68 @@ class ReadWriteLock(object):
                 self.key_to_current_writer.pop(key)
 
         defer.returnValue(_ctx_manager())
+
+
+class DeferredTimeoutError(Exception):
+    """
+    This error is raised by default when a L{Deferred} times out.
+    """
+
+
+def add_timeout_to_deferred(deferred, timeout, on_timeout_cancel=None):
+    """
+    Add a timeout to a deferred by scheduling it to be cancelled after
+    timeout seconds.
+
+    This is essentially a backport of deferred.addTimeout, which was introduced
+    in twisted 16.5.
+
+    If the deferred gets timed out, it errbacks with a DeferredTimeoutError,
+    unless a cancelable function was passed to its initialization or unless
+    a different on_timeout_cancel callable is provided.
+
+    Args:
+        deferred (defer.Deferred): deferred to be timed out
+        timeout (Number): seconds to time out after
+
+        on_timeout_cancel (callable): A callable which is called immediately
+            after the deferred times out, and not if this deferred is
+            otherwise cancelled before the timeout.
+
+            It takes an arbitrary value, which is the value of the deferred at
+            that exact point in time (probably a CancelledError Failure), and
+            the timeout.
+
+            The default callable (if none is provided) will translate a
+            CancelledError Failure into a DeferredTimeoutError.
+ """ + timed_out = [False] + + def time_it_out(): + timed_out[0] = True + deferred.cancel() + + delayed_call = reactor.callLater(timeout, time_it_out) + + def convert_cancelled(value): + if timed_out[0]: + to_call = on_timeout_cancel or _cancelled_to_timed_out_error + return to_call(value, timeout) + return value + + deferred.addBoth(convert_cancelled) + + def cancel_timeout(result): + # stop the pending call to cancel the deferred if it's been fired + if delayed_call.active(): + delayed_call.cancel() + return result + + deferred.addBoth(cancel_timeout) + + +def _cancelled_to_timed_out_error(value, timeout): + if isinstance(value, failure.Failure): + value.trap(CancelledError) + raise DeferredTimeoutError(timeout, "Deferred") + return value -- cgit 1.5.1 From 4f2f5171b752169e265b5d93c9b4e788171b5326 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 11:39:37 +0200 Subject: replace stringIO imports --- synapse/http/client.py | 4 ++-- synapse/util/logformatter.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/client.py b/synapse/http/client.py index 62309c3365..70a19d9b74 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -40,7 +40,7 @@ from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers from twisted.web._newclient import ResponseDone -from StringIO import StringIO +from six import StringIO import simplejson as json import logging @@ -507,7 +507,7 @@ class SpiderHttpClient(SimpleHttpClient): reactor, SpiderEndpointFactory(hs) ) - ), [('gzip', GzipDecoder)] + ), [(b'gzip', GzipDecoder)] ) # We could look like Chrome: # self.user_agent = ("Mozilla/5.0 (%s) (KHTML, like Gecko) diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py index cdbc4bffd7..59ab3c6968 100644 --- a/synapse/util/logformatter.py +++ b/synapse/util/logformatter.py @@ -14,7 +14,7 @@ # limitations under the License. 
-import StringIO +from six import StringIO import logging import traceback -- cgit 1.5.1 From e9143b659352e87fd9e26c8e5a771c78011bc945 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sat, 28 Apr 2018 23:56:59 +0200 Subject: more bytes strings Signed-off-by: Adrian Tschira --- synapse/http/endpoint.py | 2 +- synapse/http/server.py | 2 +- synapse/rest/media/v1/upload_resource.py | 6 +++--- synapse/util/httpresourcetree.py | 7 +++++-- 4 files changed, 10 insertions(+), 7 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py index 00572c2897..db455e5909 100644 --- a/synapse/http/endpoint.py +++ b/synapse/http/endpoint.py @@ -286,7 +286,7 @@ def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=t if (len(answers) == 1 and answers[0].type == dns.SRV and answers[0].payload - and answers[0].payload.target == dns.Name('.')): + and answers[0].payload.target == dns.Name(b'.')): raise ConnectError("Service %s unavailable" % service_name) for answer in answers: diff --git a/synapse/http/server.py b/synapse/http/server.py index 8d632290de..55b9ad5251 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -546,6 +546,6 @@ def _request_user_agent_is_curl(request): b"User-Agent", default=[] ) for user_agent in user_agents: - if "curl" in user_agent: + if b"curl" in user_agent: return True return False diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index f6f498cdc5..a31e75cb46 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -81,15 +81,15 @@ class UploadResource(Resource): headers = request.requestHeaders if headers.hasHeader("Content-Type"): - media_type = headers.getRawHeaders("Content-Type")[0] + media_type = headers.getRawHeaders(b"Content-Type")[0] else: raise SynapseError( msg="Upload request missing 'Content-Type'", code=400, ) - # if headers.hasHeader("Content-Disposition"): - # disposition = headers.getRawHeaders("Content-Disposition")[0] + # if headers.hasHeader(b"Content-Disposition"): + # disposition = headers.getRawHeaders(b"Content-Disposition")[0] # TODO(markjh): parse content-dispostion content_uri = yield self.media_repo.create_content( diff --git a/synapse/util/httpresourcetree.py b/synapse/util/httpresourcetree.py index d747849553..e9f0f292ee 100644 --- a/synapse/util/httpresourcetree.py +++ b/synapse/util/httpresourcetree.py @@ -40,9 +40,12 @@ def create_resource_tree(desired_tree, root_resource): # extra resources to existing nodes. See self._resource_id for the key. 
resource_mappings = {} for full_path, res in desired_tree.items(): + # twisted requires all resources to be bytes + full_path = full_path.encode("utf-8") + logger.info("Attaching %s to path %s", res, full_path) last_resource = root_resource - for path_seg in full_path.split('/')[1:-1]: + for path_seg in full_path.split(b'/')[1:-1]: if path_seg not in last_resource.listNames(): # resource doesn't exist, so make a "dummy resource" child_resource = NoResource() @@ -57,7 +60,7 @@ def create_resource_tree(desired_tree, root_resource): # =========================== # now attach the actual desired resource - last_path_seg = full_path.split('/')[-1] + last_path_seg = full_path.split(b'/')[-1] # if there is already a resource here, thieve its children and # replace it -- cgit 1.5.1 From 1dfd650348251508ac044b3db114092b1e857a74 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 May 2018 22:42:36 +0100 Subject: add missing param to cancelled_to_request_timed_out_error This gets two arguments, not one. --- synapse/http/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/http') diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 0d47ccdb59..054372e179 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -25,7 +25,7 @@ class RequestTimedOutError(SynapseError): super(RequestTimedOutError, self).__init__(504, "Timed out") -def cancelled_to_request_timed_out_error(value): +def cancelled_to_request_timed_out_error(value, timeout): """Turns CancelledErrors into RequestTimedOutErrors. For use with async.add_timeout_to_deferred -- cgit 1.5.1 From 2e7a94c36ba75a94114e4a9fb1aa6d4e3444e8b4 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Thu, 3 May 2018 12:31:47 +0100 Subject: Don't abortConnection() if the transport connection has already closed. --- synapse/http/endpoint.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py index d8923c9abb..564ae4c10d 100644 --- a/synapse/http/endpoint.py +++ b/synapse/http/endpoint.py @@ -113,10 +113,15 @@ class _WrappedConnection(object): if time.time() - self.last_request >= 2.5 * 60: self.abort() # Abort the underlying TLS connection. The abort() method calls - # loseConnection() on the underlying TLS connection which tries to + # loseConnection() on the TLS connection which tries to # shutdown the connection cleanly. We call abortConnection() - # since that will promptly close the underlying TCP connection. - self.transport.abortConnection() + # since that will promptly close the TLS connection. + # + # In Twisted >18.4, the TLS connection will be None if it has closed, + # which will make abortConnection() throw. Check that the TLS connection + # is not None before trying to close it. + if self.transport.getHandle() is not None: + self.transport.abortConnection() def request(self, request): self.last_request = time.time() -- cgit 1.5.1 From 18e144fe088bda9e28697062cda62dca1e03724b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 19:55:03 +0100 Subject: Move RequestMetrics to its own file This is useful in its own right, because server.py is full of stuff; but more importantly, I want to do some refactoring that would otherwise create a circular reference.
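For reference, server.py drives the moved class roughly like this (a sketch using only calls defined in this patch; `clock` is the homeserver Clock and the servlet name is illustrative):

    request_metrics = RequestMetrics()
    request_metrics.start(clock, name="JsonResource")
    try:
        ...  # dispatch to the servlet
    finally:
        request_metrics.stop(clock, request)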
--- synapse/http/request_metrics.py | 147 ++++++++++++++++++++++++++++++++++++++++ synapse/http/server.py | 128 ++-------------------------------- 2 files changed, 151 insertions(+), 124 deletions(-) create mode 100644 synapse/http/request_metrics.py (limited to 'synapse/http') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py new file mode 100644 index 0000000000..4a843a36a7 --- /dev/null +++ b/synapse/http/request_metrics.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +import synapse.metrics +from synapse.util.logcontext import LoggingContext + +logger = logging.getLogger(__name__) + +metrics = synapse.metrics.get_metrics_for("synapse.http.server") + +# total number of responses served, split by method/servlet/tag +response_count = metrics.register_counter( + "response_count", + labels=["method", "servlet", "tag"], + alternative_names=( + # the following are all deprecated aliases for the same metric + metrics.name_prefix + x for x in ( + "_requests", + "_response_time:count", + "_response_ru_utime:count", + "_response_ru_stime:count", + "_response_db_txn_count:count", + "_response_db_txn_duration:count", + ) + ) +) + +requests_counter = metrics.register_counter( + "requests_received", + labels=["method", "servlet", ], +) + +outgoing_responses_counter = metrics.register_counter( + "responses", + labels=["method", "code"], +) + +response_timer = metrics.register_counter( + "response_time_seconds", + labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_time:total", + ), +) + +response_ru_utime = metrics.register_counter( + "response_ru_utime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_utime:total", + ), +) + +response_ru_stime = metrics.register_counter( + "response_ru_stime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_stime:total", + ), +) + +response_db_txn_count = metrics.register_counter( + "response_db_txn_count", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_count:total", + ), +) + +# seconds spent waiting for db txns, excluding scheduling time, when processing +# this request +response_db_txn_duration = metrics.register_counter( + "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_duration:total", + ), +) + +# seconds spent waiting for a db connection, when processing this request +response_db_sched_duration = metrics.register_counter( + "response_db_sched_duration_seconds", labels=["method", "servlet", "tag"] +) + +# size in bytes of the response written +response_size = metrics.register_counter( + "response_size", labels=["method", "servlet", "tag"] +) + + +class RequestMetrics(object): + def start(self, clock, name): + 
self.start = clock.time_msec() + self.start_context = LoggingContext.current_context() + self.name = name + + def stop(self, clock, request): + context = LoggingContext.current_context() + + tag = "" + if context: + tag = context.tag + + if context != self.start_context: + logger.warn( + "Context have unexpectedly changed %r, %r", + context, self.start_context + ) + return + + response_count.inc(request.method, self.name, tag) + + response_timer.inc_by( + clock.time_msec() - self.start, request.method, + self.name, tag + ) + + ru_utime, ru_stime = context.get_resource_usage() + + response_ru_utime.inc_by( + ru_utime, request.method, self.name, tag + ) + response_ru_stime.inc_by( + ru_stime, request.method, self.name, tag + ) + response_db_txn_count.inc_by( + context.db_txn_count, request.method, self.name, tag + ) + response_db_txn_duration.inc_by( + context.db_txn_duration_ms / 1000., request.method, self.name, tag + ) + response_db_sched_duration.inc_by( + context.db_sched_duration_ms / 1000., request.method, self.name, tag + ) + + response_size.inc_by(request.sentLength, request.method, self.name, tag) diff --git a/synapse/http/server.py b/synapse/http/server.py index 55b9ad5251..37b26b908e 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -18,6 +18,10 @@ from synapse.api.errors import ( cs_exception, SynapseError, CodeMessageException, UnrecognizedRequestError, Codes ) +from synapse.http.request_metrics import ( + RequestMetrics, requests_counter, + outgoing_responses_counter, +) from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.caches import intern_dict from synapse.util.metrics import Measure @@ -41,82 +45,6 @@ import simplejson logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) - -# total number of responses served, split by method/servlet/tag -response_count = metrics.register_counter( - "response_count", - labels=["method", "servlet", "tag"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_requests", - "_response_time:count", - "_response_ru_utime:count", - "_response_ru_stime:count", - "_response_db_txn_count:count", - "_response_db_txn_duration:count", - ) - ) -) - -requests_counter = metrics.register_counter( - "requests_received", - labels=["method", "servlet", ], -) - -outgoing_responses_counter = metrics.register_counter( - "responses", - labels=["method", "code"], -) - -response_timer = metrics.register_counter( - "response_time_seconds", - labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_time:total", - ), -) - -response_ru_utime = metrics.register_counter( - "response_ru_utime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_utime:total", - ), -) - -response_ru_stime = metrics.register_counter( - "response_ru_stime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_stime:total", - ), -) - -response_db_txn_count = metrics.register_counter( - "response_db_txn_count", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_count:total", - ), -) - -# seconds spent waiting for db txns, excluding scheduling time, when processing -# this request -response_db_txn_duration = metrics.register_counter( - "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - 
metrics.name_prefix + "_response_db_txn_duration:total", - ), -) - -# seconds spent waiting for a db connection, when processing this request -response_db_sched_duration = metrics.register_counter( - "response_db_sched_duration_seconds", labels=["method", "servlet", "tag"] -) - -# size in bytes of the response written -response_size = metrics.register_counter( - "response_size", labels=["method", "servlet", "tag"] -) _next_request_id = 0 @@ -386,54 +314,6 @@ def _unrecognised_request_handler(request): raise UnrecognizedRequestError() -class RequestMetrics(object): - def start(self, clock, name): - self.start = clock.time_msec() - self.start_context = LoggingContext.current_context() - self.name = name - - def stop(self, clock, request): - context = LoggingContext.current_context() - - tag = "" - if context: - tag = context.tag - - if context != self.start_context: - logger.warn( - "Context have unexpectedly changed %r, %r", - context, self.start_context - ) - return - - response_count.inc(request.method, self.name, tag) - - response_timer.inc_by( - clock.time_msec() - self.start, request.method, - self.name, tag - ) - - ru_utime, ru_stime = context.get_resource_usage() - - response_ru_utime.inc_by( - ru_utime, request.method, self.name, tag - ) - response_ru_stime.inc_by( - ru_stime, request.method, self.name, tag - ) - response_db_txn_count.inc_by( - context.db_txn_count, request.method, self.name, tag - ) - response_db_txn_duration.inc_by( - context.db_txn_duration_ms / 1000., request.method, self.name, tag - ) - response_db_sched_duration.inc_by( - context.db_sched_duration_ms / 1000., request.method, self.name, tag - ) - - response_size.inc_by(request.sentLength, request.method, self.name, tag) - - class RootRedirect(resource.Resource): """Redirects the root '/' path to another path.""" -- cgit 1.5.1 From 8460e48d0639a7f575fd3e57dcd5cf263a6d6a19 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:00:11 +0100 Subject: Move request_id management into SynapseRequest --- synapse/http/server.py | 31 ++++++++++++++++--------------- synapse/http/site.py | 9 +++++++++ 2 files changed, 25 insertions(+), 15 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/server.py b/synapse/http/server.py index 37b26b908e..369f7bbdd1 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -46,38 +46,40 @@ import simplejson logger = logging.getLogger(__name__) -_next_request_id = 0 - - def request_handler(include_metrics=False): """Decorator for ``wrap_request_handler``""" return lambda request_handler: wrap_request_handler(request_handler, include_metrics) def wrap_request_handler(request_handler, include_metrics=False): - """Wraps a method that acts as a request handler with the necessary logging - and exception handling. + """Wraps a request handler method with the necessary logging and exception + handling. - The method must have a signature of "handle_foo(self, request)". The - argument "self" must have "version_string" and "clock" attributes. The - argument "request" must be a twisted HTTP request. + The handler method must have a signature of "handle_foo(self, request)", + where "self" must have "version_string" and "clock" attributes (and + "request" must be a SynapseRequest). - The method must return a deferred. If the deferred succeeds we assume that + The handler must return a deferred. If the deferred succeeds we assume that a response has been sent. 
If the deferred fails with a SynapseError we use it to send a JSON response with the appropriate HTTP reponse code. If the deferred fails with any other type of error we send a 500 reponse. - We insert a unique request-id into the logging context for this request and - log the response and duration for this request. + As well as calling `request.processing` (which will log the response and + duration for this request), the wrapped request handler will insert the + request id into the logging context. """ @defer.inlineCallbacks def wrapped_request_handler(self, request): - global _next_request_id - request_id = "%s-%s" % (request.method, _next_request_id) - _next_request_id += 1 + """ + Args: + self: + request (synapse.http.site.SynapseRequest): + """ + request_id = request.get_request_id() with LoggingContext(request_id) as request_context: + request_context.request = request_id with Measure(self.clock, "wrapped_request_handler"): request_metrics = RequestMetrics() # we start the request metrics timer here with an initial stab @@ -87,7 +89,6 @@ def wrap_request_handler(request_handler, include_metrics=False): servlet_name = self.__class__.__name__ request_metrics.start(self.clock, name=servlet_name) - request_context.request = request_id with request.processing(): try: with PreserveLoggingContext(request_context): diff --git a/synapse/http/site.py b/synapse/http/site.py index c8b46e1af2..6af276e69a 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -22,6 +22,8 @@ import time ACCESS_TOKEN_RE = re.compile(br'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') +_next_request_seq = 0 + class SynapseRequest(Request): def __init__(self, site, *args, **kw): @@ -30,6 +32,10 @@ class SynapseRequest(Request): self.authenticated_entity = None self.start_time = 0 + global _next_request_seq + self.request_seq = _next_request_seq + _next_request_seq += 1 + def __repr__(self): # We overwrite this so that we don't log ``access_token`` return '<%s at 0x%x method=%s uri=%s clientproto=%s site=%s>' % ( @@ -41,6 +47,9 @@ class SynapseRequest(Request): self.site.site_tag, ) + def get_request_id(self): + return "%s-%i" % (self.method, self.request_seq) + def get_redacted_uri(self): return ACCESS_TOKEN_RE.sub( br'\1\3', -- cgit 1.5.1 From 09b29f9c4a941ae5721b1f5e296156dfe92e3395 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:03:11 +0100 Subject: Make RequestMetrics take a raw time rather than a clock ... which is going to make it easier to move around. 
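Call sites change accordingly; after this patch they look roughly like this (a sketch of the server.py hunk below):

    request_metrics.start(self.clock.time_msec(), name=servlet_name)
    ...
    request_metrics.stop(self.clock.time_msec(), request)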
--- synapse/http/request_metrics.py | 8 ++++---- synapse/http/server.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 4a843a36a7..4e8a5f5306 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -100,12 +100,12 @@ response_size = metrics.register_counter( class RequestMetrics(object): - def start(self, clock, name): - self.start = clock.time_msec() + def start(self, time_msec, name): + self.start = time_msec self.start_context = LoggingContext.current_context() self.name = name - def stop(self, clock, request): + def stop(self, time_msec, request): context = LoggingContext.current_context() tag = "" @@ -122,7 +122,7 @@ class RequestMetrics(object): response_count.inc(request.method, self.name, tag) response_timer.inc_by( - clock.time_msec() - self.start, request.method, + time_msec - self.start, request.method, self.name, tag ) diff --git a/synapse/http/server.py b/synapse/http/server.py index 369f7bbdd1..b16c9c17f6 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -87,7 +87,7 @@ def wrap_request_handler(request_handler, include_metrics=False): # JsonResource (or a subclass), and JsonResource._async_render # will update it once it picks a servlet. servlet_name = self.__class__.__name__ - request_metrics.start(self.clock, name=servlet_name) + request_metrics.start(self.clock.time_msec(), name=servlet_name) with request.processing(): try: @@ -138,7 +138,7 @@ def wrap_request_handler(request_handler, include_metrics=False): finally: try: request_metrics.stop( - self.clock, request + self.clock.time_msec(), request ) except Exception as e: logger.warn("Failed to stop metrics: %r", e) -- cgit 1.5.1 From c6f730282c3a25468df0e624293aefd60cef7840 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:05:14 +0100 Subject: Move RequestMetrics handling into SynapseRequest.processing() It fits quite nicely here, and opens the path to getting rid of the "include_metrics" mess. --- synapse/http/server.py | 19 +++++--------- synapse/http/site.py | 69 ++++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 64 insertions(+), 24 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/server.py b/synapse/http/server.py index b16c9c17f6..8e5d1d58f5 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -19,7 +19,7 @@ from synapse.api.errors import ( cs_exception, SynapseError, CodeMessageException, UnrecognizedRequestError, Codes ) from synapse.http.request_metrics import ( - RequestMetrics, requests_counter, + requests_counter, outgoing_responses_counter, ) from synapse.util.logcontext import LoggingContext, PreserveLoggingContext @@ -81,19 +81,18 @@ def wrap_request_handler(request_handler, include_metrics=False): with LoggingContext(request_id) as request_context: request_context.request = request_id with Measure(self.clock, "wrapped_request_handler"): - request_metrics = RequestMetrics() # we start the request metrics timer here with an initial stab # at the servlet name. For most requests that name will be # JsonResource (or a subclass), and JsonResource._async_render # will update it once it picks a servlet. 
servlet_name = self.__class__.__name__ - request_metrics.start(self.clock.time_msec(), name=servlet_name) - - with request.processing(): + with request.processing(servlet_name): try: with PreserveLoggingContext(request_context): if include_metrics: - yield request_handler(self, request, request_metrics) + yield request_handler( + self, request, request.request_metrics, + ) else: requests_counter.inc(request.method, servlet_name) yield request_handler(self, request) @@ -135,13 +134,7 @@ def wrap_request_handler(request_handler, include_metrics=False): pretty_print=_request_user_agent_is_curl(request), version_string=self.version_string, ) - finally: - try: - request_metrics.stop( - self.clock.time_msec(), request - ) - except Exception as e: - logger.warn("Failed to stop metrics: %r", e) + return wrapped_request_handler diff --git a/synapse/http/site.py b/synapse/http/site.py index 6af276e69a..bfd9832aa0 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -12,20 +12,38 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util.logcontext import LoggingContext -from twisted.web.server import Site, Request - import contextlib import logging import re import time +from twisted.web.server import Site, Request + +from synapse.http.request_metrics import RequestMetrics +from synapse.util.logcontext import LoggingContext + +logger = logging.getLogger(__name__) + ACCESS_TOKEN_RE = re.compile(br'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') _next_request_seq = 0 class SynapseRequest(Request): + """Class which encapsulates an HTTP request to synapse. + + All of the requests processed in synapse are of this type. + + It extends twisted's twisted.web.server.Request, and adds: + * Unique request ID + * Redaction of access_token query-params in __repr__ + * Logging at start and end + * Metrics to record CPU, wallclock and DB time by endpoint. + + It provides a method `processing` which should be called by the Resource + which is handling the request, and returns a context manager. 
+ + """ def __init__(self, site, *args, **kw): Request.__init__(self, *args, **kw) self.site = site @@ -59,7 +77,11 @@ class SynapseRequest(Request): def get_user_agent(self): return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] - def started_processing(self): + def _started_processing(self, servlet_name): + self.start_time = int(time.time() * 1000) + self.request_metrics = RequestMetrics() + self.request_metrics.start(self.start_time, name=servlet_name) + self.site.access_logger.info( "%s - %s - Received request: %s %s", self.getClientIP(), @@ -67,10 +89,8 @@ class SynapseRequest(Request): self.method, self.get_redacted_uri() ) - self.start_time = int(time.time() * 1000) - - def finished_processing(self): + def _finished_processing(self): try: context = LoggingContext.current_context() ru_utime, ru_stime = context.get_resource_usage() @@ -81,6 +101,8 @@ class SynapseRequest(Request): ru_utime, ru_stime = (0, 0) db_txn_count, db_txn_duration_ms = (0, 0) + end_time = int(time.time() * 1000) + self.site.access_logger.info( "%s - %s - {%s}" " Processed request: %dms (%dms, %dms) (%dms/%dms/%d)" @@ -88,7 +110,7 @@ class SynapseRequest(Request): self.getClientIP(), self.site.site_tag, self.authenticated_entity, - int(time.time() * 1000) - self.start_time, + end_time - self.start_time, int(ru_utime * 1000), int(ru_stime * 1000), db_sched_duration_ms, @@ -102,11 +124,36 @@ class SynapseRequest(Request): self.get_user_agent(), ) + try: + self.request_metrics.stop(end_time, self) + except Exception as e: + logger.warn("Failed to stop metrics: %r", e) + @contextlib.contextmanager - def processing(self): - self.started_processing() + def processing(self, servlet_name): + """Record the fact that we are processing this request. + + Returns a context manager; the correct way to use this is: + + @defer.inlineCallbacks + def handle_request(request): + with request.processing("FooServlet"): + yield really_handle_the_request() + + This will log the request's arrival. Once the context manager is + closed, the completion of the request will be logged, and the various + metrics will be updated. + + Args: + servlet_name (str): the name of the servlet which will be + processing this request. This is used in the metrics. + + It is possible to update this afterwards by updating + self.request_metrics.servlet_name. 
+ """ + self._started_processing(servlet_name) yield - self.finished_processing() + self._finished_processing() class XForwardedForRequest(SynapseRequest): -- cgit 1.5.1 From b8700dd7d0482813beb9c780b411de5108ae078a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 18:03:04 +0100 Subject: Bump requests_counter in wrapped_request_handler less magic --- synapse/http/server.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/server.py b/synapse/http/server.py index 8e5d1d58f5..1d05b53873 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -90,12 +90,20 @@ def wrap_request_handler(request_handler, include_metrics=False): try: with PreserveLoggingContext(request_context): if include_metrics: - yield request_handler( + d = request_handler( self, request, request.request_metrics, ) else: - requests_counter.inc(request.method, servlet_name) - yield request_handler(self, request) + d = request_handler(self, request) + + # record the arrival of the request *after* + # dispatching to the handler, so that the handler + # can update the servlet name in the request + # metrics + requests_counter.inc(request.method, + request.request_metrics.name) + yield d + except CodeMessageException as e: code = e.code if isinstance(e, SynapseError): @@ -220,7 +228,6 @@ class JsonResource(HttpServer, resource.Resource): servlet_classname = "%r" % callback request_metrics.name = servlet_classname - requests_counter.inc(request.method, servlet_classname) # Now trigger the callback. If it returns a response, we send it # here. If it throws an exception, that is handled by the wrapper -- cgit 1.5.1 From 49e5a613f1ac53498b31150f78332fc64932e61b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:44:22 +0100 Subject: Move outgoing_responses_counter handling to RequestMetrics it's much neater there. --- synapse/http/request_metrics.py | 2 ++ synapse/http/server.py | 4 ---- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 4e8a5f5306..8c850bf23f 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -119,6 +119,8 @@ class RequestMetrics(object): ) return + outgoing_responses_counter.inc(request.method, str(request.code)) + response_count.inc(request.method, self.name, tag) response_timer.inc_by( diff --git a/synapse/http/server.py b/synapse/http/server.py index 1d05b53873..200b2c4837 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -20,7 +20,6 @@ from synapse.api.errors import ( ) from synapse.http.request_metrics import ( requests_counter, - outgoing_responses_counter, ) from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.caches import intern_dict @@ -112,7 +111,6 @@ def wrap_request_handler(request_handler, include_metrics=False): ) else: logger.exception(e) - outgoing_responses_counter.inc(request.method, str(code)) respond_with_json( request, code, cs_exception(e), send_cors=True, pretty_print=_request_user_agent_is_curl(request), @@ -274,8 +272,6 @@ class JsonResource(HttpServer, resource.Resource): def _send_response(self, request, code, response_json_object, response_code_message=None): - outgoing_responses_counter.inc(request.method, str(code)) - # TODO: Only enable CORS for the requests that need it. 
respond_with_json( request, code, response_json_object, -- cgit 1.5.1 From 9589a1925e944360c68f7eb8a65f8ba94ec9bb84 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 May 2018 23:49:29 +0100 Subject: Remove include_metrics param The metrics are now available via the request, so this is redundant and can go away at last. --- synapse/http/server.py | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/server.py b/synapse/http/server.py index 200b2c4837..9598969de8 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -45,12 +45,12 @@ import simplejson logger = logging.getLogger(__name__) -def request_handler(include_metrics=False): +def request_handler(): """Decorator for ``wrap_request_handler``""" - return lambda request_handler: wrap_request_handler(request_handler, include_metrics) + return wrap_request_handler -def wrap_request_handler(request_handler, include_metrics=False): +def wrap_request_handler(request_handler): """Wraps a request handler method with the necessary logging and exception handling. @@ -88,12 +88,7 @@ def wrap_request_handler(request_handler, include_metrics=False): with request.processing(servlet_name): try: with PreserveLoggingContext(request_context): - if include_metrics: - d = request_handler( - self, request, request.request_metrics, - ) - else: - d = request_handler(self, request) + d = request_handler(self, request) # record the arrival of the request *after* # dispatching to the handler, so that the handler @@ -206,13 +201,9 @@ class JsonResource(HttpServer, resource.Resource): self._async_render(request) return server.NOT_DONE_YET - # Disable metric reporting because _async_render does its own metrics. - # It does its own metric reporting because _async_render dispatches to - # a callback and it's the class name of that callback we want to report - # against rather than the JsonResource itself. - @request_handler(include_metrics=True) + @request_handler() @defer.inlineCallbacks - def _async_render(self, request, request_metrics): + def _async_render(self, request): """ This gets called from render() every time someone sends us a request. This checks if anyone has registered a callback for that method and path. @@ -224,8 +215,7 @@ class JsonResource(HttpServer, resource.Resource): servlet_classname = servlet_instance.__class__.__name__ else: servlet_classname = "%r" % callback - - request_metrics.name = servlet_classname + request.request_metrics.name = servlet_classname # Now trigger the callback. If it returns a response, we send it # here. If it throws an exception, that is handled by the wrapper -- cgit 1.5.1 From 09f570b9357d40764a0716f7e287fa4dde44610a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 10 May 2018 11:59:51 +0100 Subject: Factor wrap_request_handler_with_logging out of wrap_request_handler ... so that it can be used on non-JSON endpoints --- synapse/http/server.py | 120 +++++++++++++++++++++++++++---------------------- 1 file changed, 66 insertions(+), 54 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/server.py b/synapse/http/server.py index 9598969de8..fd58e65c4b 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -50,9 +50,10 @@ def request_handler(): return wrap_request_handler -def wrap_request_handler(request_handler): - """Wraps a request handler method with the necessary logging and exception - handling. 
+def wrap_request_handler(h): + """Wraps a request handler method with exception handling. + + Also adds logging as per wrap_request_handler_with_logging. The handler method must have a signature of "handle_foo(self, request)", where "self" must have "version_string" and "clock" attributes (and @@ -62,12 +63,63 @@ def wrap_request_handler(request_handler): a response has been sent. If the deferred fails with a SynapseError we use it to send a JSON response with the appropriate HTTP reponse code. If the deferred fails with any other type of error we send a 500 reponse. + """ + + @defer.inlineCallbacks + def wrapped_request_handler(self, request): + try: + yield h(self, request) + except CodeMessageException as e: + code = e.code + if isinstance(e, SynapseError): + logger.info( + "%s SynapseError: %s - %s", request, code, e.msg + ) + else: + logger.exception(e) + respond_with_json( + request, code, cs_exception(e), send_cors=True, + pretty_print=_request_user_agent_is_curl(request), + version_string=self.version_string, + ) + + except Exception: + # failure.Failure() fishes the original Failure out + # of our stack, and thus gives us a sensible stack + # trace. + f = failure.Failure() + logger.error( + "Failed handle request via %r: %r: %s", + h, + request, + f.getTraceback().rstrip(), + ) + respond_with_json( + request, + 500, + { + "error": "Internal server error", + "errcode": Codes.UNKNOWN, + }, + send_cors=True, + pretty_print=_request_user_agent_is_curl(request), + version_string=self.version_string, + ) + + return wrap_request_handler_with_logging(wrapped_request_handler) + + +def wrap_request_handler_with_logging(h): + """Wraps a request handler to provide logging and metrics + + The handler method must have a signature of "handle_foo(self, request)", + where "self" must have a "clock" attribute (and "request" must be a + SynapseRequest). As well as calling `request.processing` (which will log the response and duration for this request), the wrapped request handler will insert the request id into the logging context. """ - @defer.inlineCallbacks def wrapped_request_handler(self, request): """ @@ -86,56 +138,16 @@ def wrap_request_handler(request_handler): # will update it once it picks a servlet. servlet_name = self.__class__.__name__ with request.processing(servlet_name): - try: - with PreserveLoggingContext(request_context): - d = request_handler(self, request) - - # record the arrival of the request *after* - # dispatching to the handler, so that the handler - # can update the servlet name in the request - # metrics - requests_counter.inc(request.method, - request.request_metrics.name) - yield d - - except CodeMessageException as e: - code = e.code - if isinstance(e, SynapseError): - logger.info( - "%s SynapseError: %s - %s", request, code, e.msg - ) - else: - logger.exception(e) - respond_with_json( - request, code, cs_exception(e), send_cors=True, - pretty_print=_request_user_agent_is_curl(request), - version_string=self.version_string, - ) - except Exception: - # failure.Failure() fishes the original Failure out - # of our stack, and thus gives us a sensible stack - # trace. 
- f = failure.Failure() - logger.error( - "Failed handle request %s.%s on %r: %r: %s", - request_handler.__module__, - request_handler.__name__, - self, - request, - f.getTraceback().rstrip(), - ) - respond_with_json( - request, - 500, - { - "error": "Internal server error", - "errcode": Codes.UNKNOWN, - }, - send_cors=True, - pretty_print=_request_user_agent_is_curl(request), - version_string=self.version_string, - ) - + with PreserveLoggingContext(request_context): + d = h(self, request) + + # record the arrival of the request *after* + # dispatching to the handler, so that the handler + # can update the servlet name in the request + # metrics + requests_counter.inc(request.method, + request.request_metrics.name) + yield d return wrapped_request_handler -- cgit 1.5.1 From 645cb4bf06deee1c4c10ecc3d7df2c914168f19a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 10 May 2018 12:10:27 +0100 Subject: Remove redundant request_handler decorator This is needless complexity; we might as well use the wrapper directly. Also rename wrap_request_handler->wrap_json_request_handler. --- synapse/http/additional_resource.py | 4 ++-- synapse/http/server.py | 9 ++------- synapse/rest/key/v2/remote_key_resource.py | 8 +++++--- synapse/rest/media/v1/download_resource.py | 16 +++++++++------- synapse/rest/media/v1/preview_url_resource.py | 5 +++-- synapse/rest/media/v1/thumbnail_resource.py | 23 +++++++++++++---------- synapse/rest/media/v1/upload_resource.py | 15 ++++++++------- 7 files changed, 42 insertions(+), 38 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py index 343e932cb1..d9e7f5dfb7 100644 --- a/synapse/http/additional_resource.py +++ b/synapse/http/additional_resource.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.http.server import wrap_request_handler +from synapse.http.server import wrap_json_request_handler from twisted.web.resource import Resource from twisted.web.server import NOT_DONE_YET @@ -50,6 +50,6 @@ class AdditionalResource(Resource): self._async_render(request) return NOT_DONE_YET - @wrap_request_handler + @wrap_json_request_handler def _async_render(self, request): return self._handler(request) diff --git a/synapse/http/server.py b/synapse/http/server.py index fd58e65c4b..f29e36f490 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -45,12 +45,7 @@ import simplejson logger = logging.getLogger(__name__) -def request_handler(): - """Decorator for ``wrap_request_handler``""" - return wrap_request_handler - - -def wrap_request_handler(h): +def wrap_json_request_handler(h): """Wraps a request handler method with exception handling. Also adds logging as per wrap_request_handler_with_logging. @@ -213,7 +208,7 @@ class JsonResource(HttpServer, resource.Resource): self._async_render(request) return server.NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def _async_render(self, request): """ This gets called from render() every time someone sends us a request. diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 17e6079cba..17b3077926 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.http.server import request_handler, respond_with_json_bytes +from synapse.http.server import ( + respond_with_json_bytes, wrap_json_request_handler, +) from synapse.http.servlet import parse_integer, parse_json_object_from_request from synapse.api.errors import SynapseError, Codes from synapse.crypto.keyring import KeyLookupError @@ -99,7 +101,7 @@ class RemoteKey(Resource): self.async_render_GET(request) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def async_render_GET(self, request): if len(request.postpath) == 1: @@ -124,7 +126,7 @@ class RemoteKey(Resource): self.async_render_POST(request) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def async_render_POST(self, request): content = parse_json_object_from_request(request) diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index fe7e17596f..3fc3f64d62 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -12,16 +12,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import synapse.http.servlet +import logging -from ._base import parse_media_id, respond_404 +from twisted.internet import defer from twisted.web.resource import Resource -from synapse.http.server import request_handler, set_cors_headers - from twisted.web.server import NOT_DONE_YET -from twisted.internet import defer -import logging +from synapse.http.server import ( + set_cors_headers, + wrap_json_request_handler, +) +import synapse.http.servlet +from ._base import parse_media_id, respond_404 logger = logging.getLogger(__name__) @@ -43,7 +45,7 @@ class DownloadResource(Resource): self._async_render_GET(request) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def _async_render_GET(self, request): set_cors_headers(request) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 9290d7946f..6b089689b4 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -40,8 +40,9 @@ from synapse.util.stringutils import random_string from synapse.util.caches.expiringcache import ExpiringCache from synapse.http.client import SpiderHttpClient from synapse.http.server import ( - request_handler, respond_with_json_bytes, + respond_with_json_bytes, respond_with_json, + wrap_json_request_handler, ) from synapse.util.async import ObservableDeferred from synapse.util.stringutils import is_ascii @@ -90,7 +91,7 @@ class PreviewUrlResource(Resource): self._async_render_GET(request) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def _async_render_GET(self, request): diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 58ada49711..6c12d79f56 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -14,18 +14,21 @@ # limitations under the License. 
-from ._base import ( - parse_media_id, respond_404, respond_with_file, FileInfo, - respond_with_responder, -) -from twisted.web.resource import Resource -from synapse.http.servlet import parse_string, parse_integer -from synapse.http.server import request_handler, set_cors_headers +import logging -from twisted.web.server import NOT_DONE_YET from twisted.internet import defer +from twisted.web.resource import Resource +from twisted.web.server import NOT_DONE_YET -import logging +from synapse.http.server import ( + set_cors_headers, + wrap_json_request_handler, +) +from synapse.http.servlet import parse_integer, parse_string +from ._base import ( + FileInfo, parse_media_id, respond_404, respond_with_file, + respond_with_responder, +) logger = logging.getLogger(__name__) @@ -48,7 +51,7 @@ class ThumbnailResource(Resource): self._async_render_GET(request) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def _async_render_GET(self, request): set_cors_headers(request) diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index a31e75cb46..7d01c57fd1 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -13,16 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.http.server import respond_with_json, request_handler - -from synapse.api.errors import SynapseError +import logging -from twisted.web.server import NOT_DONE_YET from twisted.internet import defer - from twisted.web.resource import Resource +from twisted.web.server import NOT_DONE_YET -import logging +from synapse.api.errors import SynapseError +from synapse.http.server import ( + respond_with_json, + wrap_json_request_handler, +) logger = logging.getLogger(__name__) @@ -51,7 +52,7 @@ class UploadResource(Resource): respond_with_json(request, 200, {}, send_cors=True) return NOT_DONE_YET - @request_handler() + @wrap_json_request_handler @defer.inlineCallbacks def _async_render_POST(self, request): requester = yield self.auth.get_user_by_req(request) -- cgit 1.5.1 From 318711e1399da009910c3a9e5fa297c28a2d0a97 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 10 May 2018 18:46:59 +0100 Subject: Set Server header in SynapseRequest (instead of everywhere that writes a response. Or rather, the subset of places which write responses where we haven't forgotten it). This also means that we don't have to have the mysterious version_string attribute in anything with a request handler. Unfortunately it does mean that we have to pass the version string wherever we instantiate a SynapseSite, which has been c&ped 150 times, but that is code that ought to be cleaned up anyway really. 
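Concretely, each listener now constructs its SynapseSite along these lines (a sketch; logger_name stands in for whatever access-log logger name each app passes):

    SynapseSite(
        logger_name,
        site_tag,
        listener_config,
        root_resource,
        self.version_string,
    )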
--- synapse/app/appservice.py | 1 + synapse/app/client_reader.py | 1 + synapse/app/event_creator.py | 1 + synapse/app/federation_reader.py | 1 + synapse/app/federation_sender.py | 1 + synapse/app/frontend_proxy.py | 1 + synapse/app/homeserver.py | 2 ++ synapse/app/media_repository.py | 1 + synapse/app/pusher.py | 1 + synapse/app/synchrotron.py | 1 + synapse/app/user_dir.py | 1 + synapse/http/additional_resource.py | 3 +-- synapse/http/server.py | 14 ++++---------- synapse/http/site.py | 11 ++++++++++- synapse/rest/client/v1/pusher.py | 1 - synapse/rest/client/v2_alpha/auth.py | 2 -- synapse/rest/key/v1/server_key_resource.py | 2 -- synapse/rest/key/v2/local_key_resource.py | 2 -- synapse/rest/key/v2/remote_key_resource.py | 2 -- synapse/rest/media/v1/download_resource.py | 3 +-- synapse/rest/media/v1/preview_url_resource.py | 1 - synapse/rest/media/v1/thumbnail_resource.py | 1 - synapse/rest/media/v1/upload_resource.py | 1 - 23 files changed, 28 insertions(+), 27 deletions(-) (limited to 'synapse/http') diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 58f2c9d68c..b1efacc9f8 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -74,6 +74,7 @@ class AppserviceServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 267d34c881..38b98382c6 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -98,6 +98,7 @@ class ClientReaderServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index b915d12d53..bd7f3d5679 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -114,6 +114,7 @@ class EventCreatorServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index c1dc66dd17..6e10b27b9e 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -87,6 +87,7 @@ class FederationReaderServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index a08af83a4c..6f24e32d6d 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -101,6 +101,7 @@ class FederationSenderServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index b349e3e3ce..0f700ee786 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -152,6 +152,7 @@ class FrontendProxyServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index a0e465d644..75f40fd5a4 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -140,6 +140,7 @@ class SynapseHomeServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ), self.tls_server_context_factory, ) @@ -153,6 +154,7 @@ class SynapseHomeServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) logger.info("Synapse now listening on port %d", port) diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index fc8282bbc1..9c93195f0a 100644 --- a/synapse/app/media_repository.py +++ 
b/synapse/app/media_repository.py @@ -94,6 +94,7 @@ class MediaRepositoryServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 26930d1b3b..3912eae48c 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -104,6 +104,7 @@ class PusherServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 7152b1deb4..c6294a7a0c 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -281,6 +281,7 @@ class SynchrotronServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 5ba7e9b416..53eb3474da 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -126,6 +126,7 @@ class UserDirectoryServer(HomeServer): site_tag, listener_config, root_resource, + self.version_string, ) ) diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py index d9e7f5dfb7..a797396ade 100644 --- a/synapse/http/additional_resource.py +++ b/synapse/http/additional_resource.py @@ -42,8 +42,7 @@ class AdditionalResource(Resource): Resource.__init__(self) self._handler = handler - # these are required by the request_handler wrapper - self.version_string = hs.version_string + # required by the request_handler wrapper self.clock = hs.get_clock() def render(self, request): diff --git a/synapse/http/server.py b/synapse/http/server.py index f29e36f490..b6e2ae14a2 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -51,8 +51,8 @@ def wrap_json_request_handler(h): Also adds logging as per wrap_request_handler_with_logging. The handler method must have a signature of "handle_foo(self, request)", - where "self" must have "version_string" and "clock" attributes (and - "request" must be a SynapseRequest). + where "self" must have a "clock" attribute (and "request" must be a + SynapseRequest). The handler must return a deferred. If the deferred succeeds we assume that a response has been sent. 
If the deferred fails with a SynapseError we use @@ -75,7 +75,6 @@ def wrap_json_request_handler(h): respond_with_json( request, code, cs_exception(e), send_cors=True, pretty_print=_request_user_agent_is_curl(request), - version_string=self.version_string, ) except Exception: @@ -98,7 +97,6 @@ def wrap_json_request_handler(h): }, send_cors=True, pretty_print=_request_user_agent_is_curl(request), - version_string=self.version_string, ) return wrap_request_handler_with_logging(wrapped_request_handler) @@ -192,7 +190,6 @@ class JsonResource(HttpServer, resource.Resource): self.canonical_json = canonical_json self.clock = hs.get_clock() self.path_regexs = {} - self.version_string = hs.version_string self.hs = hs def register_paths(self, method, path_patterns, callback): @@ -275,7 +272,6 @@ class JsonResource(HttpServer, resource.Resource): send_cors=True, response_code_message=response_code_message, pretty_print=_request_user_agent_is_curl(request), - version_string=self.version_string, canonical_json=self.canonical_json, ) @@ -326,7 +322,7 @@ class RootRedirect(resource.Resource): def respond_with_json(request, code, json_object, send_cors=False, response_code_message=None, pretty_print=False, - version_string="", canonical_json=True): + canonical_json=True): # could alternatively use request.notifyFinish() and flip a flag when # the Deferred fires, but since the flag is RIGHT THERE it seems like # a waste. @@ -348,12 +344,11 @@ def respond_with_json(request, code, json_object, send_cors=False, request, code, json_bytes, send_cors=send_cors, response_code_message=response_code_message, - version_string=version_string ) def respond_with_json_bytes(request, code, json_bytes, send_cors=False, - version_string="", response_code_message=None): + response_code_message=None): """Sends encoded JSON in response to the given request. Args: @@ -367,7 +362,6 @@ def respond_with_json_bytes(request, code, json_bytes, send_cors=False, request.setResponseCode(code, message=response_code_message) request.setHeader(b"Content-Type", b"application/json") - request.setHeader(b"Server", version_string) request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),)) request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate") diff --git a/synapse/http/site.py b/synapse/http/site.py index bfd9832aa0..202a990508 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -77,6 +77,11 @@ class SynapseRequest(Request): def get_user_agent(self): return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] + def render(self, resrc): + # override the Server header which is set by twisted + self.setHeader("Server", self.site.server_version_string) + return Request.render(self, resrc) + def _started_processing(self, servlet_name): self.start_time = int(time.time() * 1000) self.request_metrics = RequestMetrics() @@ -151,6 +156,8 @@ class SynapseRequest(Request): It is possible to update this afterwards by updating self.request_metrics.servlet_name. """ + # TODO: we should probably just move this into render() and finish(), + # to save having to call a separate method. 
self._started_processing(servlet_name) yield self._finished_processing() @@ -191,7 +198,8 @@ class SynapseSite(Site): Subclass of a twisted http Site that does access logging with python's standard logging """ - def __init__(self, logger_name, site_tag, config, resource, *args, **kwargs): + def __init__(self, logger_name, site_tag, config, resource, + server_version_string, *args, **kwargs): Site.__init__(self, resource, *args, **kwargs) self.site_tag = site_tag @@ -199,6 +207,7 @@ class SynapseSite(Site): proxied = config.get("x_forwarded", False) self.requestFactory = SynapseRequestFactory(self, proxied) self.access_logger = logging.getLogger(logger_name) + self.server_version_string = server_version_string def log(self, request): pass diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 0206e664c1..40e523cc5f 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -176,7 +176,6 @@ class PushersRemoveRestServlet(RestServlet): request.setResponseCode(200) request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader(b"Server", self.hs.version_string) request.setHeader(b"Content-Length", b"%d" % ( len(PushersRemoveRestServlet.SUCCESS_HTML), )) diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 8e5577148f..d6f3a19648 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -129,7 +129,6 @@ class AuthRestServlet(RestServlet): html_bytes = html.encode("utf8") request.setResponseCode(200) request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader(b"Server", self.hs.version_string) request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) request.write(html_bytes) @@ -175,7 +174,6 @@ class AuthRestServlet(RestServlet): html_bytes = html.encode("utf8") request.setResponseCode(200) request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader(b"Server", self.hs.version_string) request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) request.write(html_bytes) diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py index bd4fea5774..1498d188c1 100644 --- a/synapse/rest/key/v1/server_key_resource.py +++ b/synapse/rest/key/v1/server_key_resource.py @@ -49,7 +49,6 @@ class LocalKey(Resource): """ def __init__(self, hs): - self.version_string = hs.version_string self.response_body = encode_canonical_json( self.response_json_object(hs.config) ) @@ -84,7 +83,6 @@ class LocalKey(Resource): def render_GET(self, request): return respond_with_json_bytes( request, 200, self.response_body, - version_string=self.version_string ) def getChild(self, name, request): diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py index be68d9a096..04775b3c45 100644 --- a/synapse/rest/key/v2/local_key_resource.py +++ b/synapse/rest/key/v2/local_key_resource.py @@ -63,7 +63,6 @@ class LocalKey(Resource): isLeaf = True def __init__(self, hs): - self.version_string = hs.version_string self.config = hs.config self.clock = hs.clock self.update_response_body(self.clock.time_msec()) @@ -115,5 +114,4 @@ class LocalKey(Resource): self.update_response_body(time_now) return respond_with_json_bytes( request, 200, self.response_body, - version_string=self.version_string ) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 17b3077926..21b4c1175e 100644 --- 
a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -93,7 +93,6 @@ class RemoteKey(Resource): def __init__(self, hs): self.keyring = hs.get_keyring() self.store = hs.get_datastore() - self.version_string = hs.version_string self.clock = hs.get_clock() self.federation_domain_whitelist = hs.config.federation_domain_whitelist @@ -242,5 +241,4 @@ class RemoteKey(Resource): respond_with_json_bytes( request, 200, result_io.getvalue(), - version_string=self.version_string ) diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index 3fc3f64d62..8cf8820c31 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -37,9 +37,8 @@ class DownloadResource(Resource): self.media_repo = media_repo self.server_name = hs.hostname - # Both of these are expected by @request_handler() + # this is expected by @wrap_json_request_handler self.clock = hs.get_clock() - self.version_string = hs.version_string def render_GET(self, request): self._async_render_GET(request) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 6b089689b4..2839207abc 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -58,7 +58,6 @@ class PreviewUrlResource(Resource): self.auth = hs.get_auth() self.clock = hs.get_clock() - self.version_string = hs.version_string self.filepaths = media_repo.filepaths self.max_spider_size = hs.config.max_spider_size self.server_name = hs.hostname diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 6c12d79f56..aae6e464e8 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -44,7 +44,6 @@ class ThumbnailResource(Resource): self.media_storage = media_storage self.dynamic_thumbnails = hs.config.dynamic_thumbnails self.server_name = hs.hostname - self.version_string = hs.version_string self.clock = hs.get_clock() def render_GET(self, request): diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 7d01c57fd1..7567476fce 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -41,7 +41,6 @@ class UploadResource(Resource): self.server_name = hs.hostname self.auth = hs.get_auth() self.max_upload_size = hs.config.max_upload_size - self.version_string = hs.version_string self.clock = hs.get_clock() def render_POST(self, request): -- cgit 1.5.1 From 47815edcfae73c5b938f8354853a09c0b80ef27e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 11 May 2018 00:17:11 +0100 Subject: ConsentResource to gather policy consent from users Hopefully there are enough comments and docs in this that it makes sense on its own. 
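For illustration only (not part of this patch): the consent URI handed to a user can be built with the same HMAC scheme that ConsentResource._check_hash verifies. A minimal sketch, assuming a homeserver reachable at https://example.com:8008; the helper name build_consent_uri is hypothetical, while the /_matrix/consent path, the u/h/v parameters, and the hmac_sha256(form_secret, u) construction are taken from the patch below.

```
# Hypothetical helper, not part of the patch: builds a consent URI whose
# 'h' parameter matches what ConsentResource._check_hash expects.
import hmac
from hashlib import sha256
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2, which synapse targets here

def build_consent_uri(base_url, form_secret, user_id, version="1.0"):
    # h = hmac_sha256(form_secret, u), hex-encoded, exactly as checked
    # server-side in _check_hash.
    userhmac = hmac.new(
        key=form_secret.encode("utf-8"),
        msg=user_id.encode("utf-8"),
        digestmod=sha256,
    ).hexdigest()
    return "%s/_matrix/consent?%s" % (
        base_url,
        urlencode({"u": user_id, "h": userhmac, "v": version}),
    )

print(build_consent_uri(
    "https://example.com:8008", "<form_secret>", "@alice:example.com",
))
```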
--- docs/privacy_policy_templates/README.md | 23 +++ docs/privacy_policy_templates/en/1.0.html | 17 ++ docs/privacy_policy_templates/en/success.html | 11 ++ synapse/app/homeserver.py | 9 + synapse/config/__init__.py | 6 + synapse/config/consent_config.py | 42 +++++ synapse/config/homeserver.py | 8 +- synapse/config/key.py | 10 + synapse/http/server.py | 76 +++++++- synapse/rest/consent/__init__.py | 0 synapse/rest/consent/consent_resource.py | 210 +++++++++++++++++++++ synapse/server.py | 3 + synapse/storage/registration.py | 18 ++ .../storage/schema/delta/48/add_user_consent.sql | 18 ++ 14 files changed, 446 insertions(+), 5 deletions(-) create mode 100644 docs/privacy_policy_templates/README.md create mode 100644 docs/privacy_policy_templates/en/1.0.html create mode 100644 docs/privacy_policy_templates/en/success.html create mode 100644 synapse/config/consent_config.py create mode 100644 synapse/rest/consent/__init__.py create mode 100644 synapse/rest/consent/consent_resource.py create mode 100644 synapse/storage/schema/delta/48/add_user_consent.sql (limited to 'synapse/http') diff --git a/docs/privacy_policy_templates/README.md b/docs/privacy_policy_templates/README.md new file mode 100644 index 0000000000..8e91c516b3 --- /dev/null +++ b/docs/privacy_policy_templates/README.md @@ -0,0 +1,23 @@ +If enabling the 'consent' resource in synapse, you will need some templates +for the HTML to be served to the user. This directory contains very simple +examples of the sort of thing that can be done. + +You'll need to add this sort of thing to your homeserver.yaml: + +``` +form_secret: + +user_consent: + template_dir: docs/privacy_policy_templates + default_version: 1.0 +``` + +You should then be able to enable the `consent` resource under a `listener` +entry. For example: + +``` +listeners: + - port: 8008 + resources: + - names: [client, consent] +``` diff --git a/docs/privacy_policy_templates/en/1.0.html b/docs/privacy_policy_templates/en/1.0.html new file mode 100644 index 0000000000..ab8666f0c3 --- /dev/null +++ b/docs/privacy_policy_templates/en/1.0.html @@ -0,0 +1,17 @@ + + + + Matrix.org Privacy policy + + +
+<p>
+All your base are belong to us.
+</p>
+<form method="post" action="consent">
+<input type="hidden" name="v" value="{{version}}"/>
+<input type="hidden" name="u" value="{{user}}"/>
+<input type="hidden" name="h" value="{{userhmac}}"/>
+<input type="submit" value="Sure thing!"/>
+</form>
+</body>
+</html>
diff --git a/docs/privacy_policy_templates/en/success.html b/docs/privacy_policy_templates/en/success.html
new file mode 100644
index 0000000000..d55e90c94f
--- /dev/null
+++ b/docs/privacy_policy_templates/en/success.html
@@ -0,0 +1,11 @@
+<!doctype html>
+<html lang="en">
+<head>
+<title>Matrix.org Privacy policy</title>
+</head>
+<body>
+<p>
+Sweet.
+</p>
+ + diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index a0e465d644..730271628e 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -41,6 +41,7 @@ from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \ from synapse.replication.http import ReplicationRestResource, REPLICATION_PREFIX from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.rest import ClientRestResource +from synapse.rest.consent.consent_resource import ConsentResource from synapse.rest.key.v1.server_key_resource import LocalKey from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.media.v0.content_repository import ContentRepoResource @@ -182,6 +183,14 @@ class SynapseHomeServer(HomeServer): "/_matrix/client/versions": client_resource, }) + if name == "consent": + consent_resource = ConsentResource(self) + if compress: + consent_resource = gz_wrap(consent_resource) + resources.update({ + "/_matrix/consent": consent_resource, + }) + if name == "federation": resources.update({ FEDERATION_PREFIX: TransportLayerServer(self), diff --git a/synapse/config/__init__.py b/synapse/config/__init__.py index bfebb0f644..f2a5a41e92 100644 --- a/synapse/config/__init__.py +++ b/synapse/config/__init__.py @@ -12,3 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from ._base import ConfigError + +# export ConfigError if somebody does import * +# this is largely a fudge to stop PEP8 moaning about the import +__all__ = ["ConfigError"] diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py new file mode 100644 index 0000000000..675fce0911 --- /dev/null +++ b/synapse/config/consent_config.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import Config + +DEFAULT_CONFIG = """\ +# User Consent configuration +# +# uncomment and configure if enabling the 'consent' resource under 'listeners'. +# +# 'template_dir' gives the location of the templates for the HTML forms. +# This directory should contain one subdirectory per language (eg, 'en', 'fr'), +# and each language directory should contain the policy document (named as +# '.html') and a success page (success.html). +# +# 'default_version' gives the version of the policy document to serve up if +# there is no 'v' parameter. 
+# +# user_consent: +# template_dir: res/templates/privacy +# default_version: 1.0 +""" + + +class ConsentConfig(Config): + def read_config(self, config): + self.consent_config = config.get("user_consent") + + def default_config(self, **kwargs): + return DEFAULT_CONFIG diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index bf19cfee29..fb6bd3b421 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from .tls import TlsConfig from .server import ServerConfig from .logger import LoggingConfig @@ -37,6 +37,7 @@ from .push import PushConfig from .spam_checker import SpamCheckerConfig from .groups import GroupsConfig from .user_directory import UserDirectoryConfig +from .consent_config import ConsentConfig class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, @@ -45,12 +46,13 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, AppServiceConfig, KeyConfig, SAML2Config, CasConfig, JWTConfig, PasswordConfig, EmailConfig, WorkerConfig, PasswordAuthProviderConfig, PushConfig, - SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,): + SpamCheckerConfig, GroupsConfig, UserDirectoryConfig, + ConsentConfig): pass if __name__ == '__main__': import sys sys.stdout.write( - HomeServerConfig().generate_config(sys.argv[1], sys.argv[2])[0] + HomeServerConfig().generate_config(sys.argv[1], sys.argv[2], True)[0] ) diff --git a/synapse/config/key.py b/synapse/config/key.py index 4b8fc063d0..d1382ad9ac 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -59,14 +59,20 @@ class KeyConfig(Config): self.expire_access_token = config.get("expire_access_token", False) + # a secret which is used to calculate HMACs for form values, to stop + # falsification of values + self.form_secret = config.get("form_secret", None) + def default_config(self, config_dir_path, server_name, is_generating_file=False, **kwargs): base_key_name = os.path.join(config_dir_path, server_name) if is_generating_file: macaroon_secret_key = random_string_with_symbols(50) + form_secret = '"%s"' % random_string_with_symbols(50) else: macaroon_secret_key = None + form_secret = 'null' return """\ macaroon_secret_key: "%(macaroon_secret_key)s" @@ -74,6 +80,10 @@ class KeyConfig(Config): # Used to enable access token expiration. expire_access_token: False + # a secret which is used to calculate HMACs for form values, to stop + # falsification of values + form_secret: %(form_secret)s + ## Signing Keys ## # Path to the signing key to sign messages with diff --git a/synapse/http/server.py b/synapse/http/server.py index f29e36f490..a38209770d 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -13,7 +13,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +import cgi +from six.moves import http_client from synapse.api.errors import ( cs_exception, SynapseError, CodeMessageException, UnrecognizedRequestError, Codes @@ -44,6 +45,18 @@ import simplejson logger = logging.getLogger(__name__) +HTML_ERROR_TEMPLATE = """ + + + + Error {code} + + +
+    <p>{msg}</p>
+ + +""" + def wrap_json_request_handler(h): """Wraps a request handler method with exception handling. @@ -104,6 +117,65 @@ def wrap_json_request_handler(h): return wrap_request_handler_with_logging(wrapped_request_handler) +def wrap_html_request_handler(h): + """Wraps a request handler method with exception handling. + + Also adds logging as per wrap_request_handler_with_logging. + + The handler method must have a signature of "handle_foo(self, request)", + where "self" must have a "clock" attribute (and "request" must be a + SynapseRequest). + """ + def wrapped_request_handler(self, request): + d = defer.maybeDeferred(h, self, request) + d.addErrback(_return_html_error, request) + return d + + return wrap_request_handler_with_logging(wrapped_request_handler) + + +def _return_html_error(f, request): + """Sends an HTML error page corresponding to the given failure + + Args: + f (twisted.python.failure.Failure): + request (twisted.web.iweb.IRequest): + """ + if f.check(CodeMessageException): + cme = f.value + code = cme.code + msg = cme.msg + + if isinstance(cme, SynapseError): + logger.info( + "%s SynapseError: %s - %s", request, code, msg + ) + else: + logger.error( + "Failed handle request %r: %s", + request, + f.getTraceback().rstrip(), + ) + else: + code = http_client.INTERNAL_SERVER_ERROR + msg = "Internal server error" + + logger.error( + "Failed handle request %r: %s", + request, + f.getTraceback().rstrip(), + ) + + body = HTML_ERROR_TEMPLATE.format( + code=code, msg=cgi.escape(msg), + ).encode("utf-8") + request.setResponseCode(code) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%i" % (len(body),)) + request.write(body) + finish_request(request) + + def wrap_request_handler_with_logging(h): """Wraps a request handler to provide logging and metrics @@ -134,7 +206,7 @@ def wrap_request_handler_with_logging(h): servlet_name = self.__class__.__name__ with request.processing(servlet_name): with PreserveLoggingContext(request_context): - d = h(self, request) + d = defer.maybeDeferred(h, self, request) # record the arrival of the request *after* # dispatching to the handler, so that the handler diff --git a/synapse/rest/consent/__init__.py b/synapse/rest/consent/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py new file mode 100644 index 0000000000..d791302278 --- /dev/null +++ b/synapse/rest/consent/consent_resource.py @@ -0,0 +1,210 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from hashlib import sha256 +import hmac +import logging +from os import path +from six.moves import http_client + +import jinja2 +from jinja2 import TemplateNotFound +from twisted.internet import defer +from twisted.web.resource import Resource +from twisted.web.server import NOT_DONE_YET + +from synapse.api.errors import NotFoundError, SynapseError, StoreError +from synapse.config import ConfigError +from synapse.http.server import ( + finish_request, + wrap_html_request_handler, +) +from synapse.http.servlet import parse_string +from synapse.types import UserID + + +# language to use for the templates. TODO: figure this out from Accept-Language +TEMPLATE_LANGUAGE = "en" + +logger = logging.getLogger(__name__) + +# use hmac.compare_digest if we have it (python 2.7.7), else just use equality +if hasattr(hmac, "compare_digest"): + compare_digest = hmac.compare_digest +else: + def compare_digest(a, b): + return a == b + + +class ConsentResource(Resource): + """A twisted Resource to display a privacy policy and gather consent to it + + When accessed via GET, returns the privacy policy via a template. + + When accessed via POST, records the user's consent in the database and + displays a success page. + + The config should include a template_dir setting which contains templates + for the HTML. The directory should contain one subdirectory per language + (eg, 'en', 'fr'), and each language directory should contain the policy + document (named as '.html') and a success page (success.html). + + Both forms take a set of parameters from the browser. For the POST form, + these are normally sent as form parameters (but may be query-params); for + GET requests they must be query params. These are: + + u: the complete mxid, or the localpart of the user giving their + consent. Required for both GET (where it is used as an input to the + template) and for POST (where it is used to find the row in the db + to update). + + h: hmac_sha256(secret, u), where 'secret' is the privacy_secret in the + config file. If it doesn't match, the request is 403ed. + + v: the version of the privacy policy being agreed to. + + For GET: optional, and defaults to whatever was set in the config + file. Used to choose the version of the policy to pick from the + templates directory. + + For POST: required; gives the value to be recorded in the database + against the user. + """ + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): homeserver + """ + Resource.__init__(self) + + self.hs = hs + self.store = hs.get_datastore() + + # this is required by the request_handler wrapper + self.clock = hs.get_clock() + + consent_config = hs.config.consent_config + if consent_config is None: + raise ConfigError( + "Consent resource is enabled but user_consent section is " + "missing in config file.", + ) + + # daemonize changes the cwd to /, so make the path absolute now. + consent_template_directory = path.abspath( + consent_config["template_dir"], + ) + if not path.isdir(consent_template_directory): + raise ConfigError( + "Could not find template directory '%s'" % ( + consent_template_directory, + ), + ) + + loader = jinja2.FileSystemLoader(consent_template_directory) + self._jinja_env = jinja2.Environment(loader=loader) + + self._default_consent_verison = consent_config["default_version"] + + if hs.config.form_secret is None: + raise ConfigError( + "Consent resource is enabled but form_secret is not set in " + "config file. 
It should be set to an arbitrary secret string.", + ) + + self._hmac_secret = hs.config.form_secret.encode("utf-8") + + def render_GET(self, request): + self._async_render_GET(request) + return NOT_DONE_YET + + @wrap_html_request_handler + def _async_render_GET(self, request): + """ + Args: + request (twisted.web.http.Request): + """ + + version = parse_string(request, "v", + default=self._default_consent_verison) + username = parse_string(request, "u", required=True) + userhmac = parse_string(request, "h", required=True) + + self._check_hash(username, userhmac) + + try: + self._render_template( + request, "%s.html" % (version,), + user=username, userhmac=userhmac, version=version, + ) + except TemplateNotFound: + raise NotFoundError("Unknown policy version") + + def render_POST(self, request): + self._async_render_POST(request) + return NOT_DONE_YET + + @wrap_html_request_handler + @defer.inlineCallbacks + def _async_render_POST(self, request): + """ + Args: + request (twisted.web.http.Request): + """ + version = parse_string(request, "v", required=True) + username = parse_string(request, "u", required=True) + userhmac = parse_string(request, "h", required=True) + + self._check_hash(username, userhmac) + + if username.startswith('@'): + qualified_user_id = username + else: + qualified_user_id = UserID(username, self.hs.hostname).to_string() + + try: + yield self.store.user_set_consent_version(qualified_user_id, version) + except StoreError as e: + if e.code != 404: + raise + raise NotFoundError("Unknown user") + + try: + self._render_template(request, "success.html") + except TemplateNotFound: + raise NotFoundError("success.html not found") + + def _render_template(self, request, template_name, **template_args): + # get_template checks for ".." so we don't need to worry too much + # about path traversal here. + template_html = self._jinja_env.get_template( + path.join(TEMPLATE_LANGUAGE, template_name) + ) + html_bytes = template_html.render(**template_args).encode("utf8") + + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%i" % len(html_bytes)) + request.write(html_bytes) + finish_request(request) + + def _check_hash(self, userid, userhmac): + want_mac = hmac.new( + key=self._hmac_secret, + msg=userid, + digestmod=sha256, + ).hexdigest() + + if not compare_digest(want_mac, userhmac): + raise SynapseError(http_client.FORBIDDEN, "HMAC incorrect") diff --git a/synapse/server.py b/synapse/server.py index ebdea6b0c4..21cde5b6fc 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -97,6 +97,9 @@ class HomeServer(object): which must be implemented by the subclass. This code may call any of the required "get" methods on the instance to obtain the sub-dependencies that one requires. 
+ + Attributes: + config (synapse.config.homeserver.HomeserverConfig): """ DEPENDENCIES = [ diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index a50717db2d..6ffc397861 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -286,6 +286,24 @@ class RegistrationStore(RegistrationWorkerStore, "user_set_password_hash", user_set_password_hash_txn ) + def user_set_consent_version(self, user_id, consent_version): + """Updates the user table to record privacy policy consent + + Args: + user_id (str): full mxid of the user to update + consent_version (str): version of the policy the user has consented + to + + Raises: + StoreError(404) if user not found + """ + return self._simple_update_one( + table='users', + keyvalues={'name': user_id, }, + updatevalues={'consent_version': consent_version, }, + desc="user_set_consent_version" + ) + def user_delete_access_tokens(self, user_id, except_token_id=None, device_id=None): """ diff --git a/synapse/storage/schema/delta/48/add_user_consent.sql b/synapse/storage/schema/delta/48/add_user_consent.sql new file mode 100644 index 0000000000..5237491506 --- /dev/null +++ b/synapse/storage/schema/delta/48/add_user_consent.sql @@ -0,0 +1,18 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* record the version of the privacy policy the user has consented to + */ +ALTER TABLE users ADD COLUMN consent_version TEXT; -- cgit 1.5.1 From b932b4ea256fcbfa112f006ccb2e1c10ef195c17 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Mon, 30 Apr 2018 14:36:39 +0200 Subject: use repr, not str Signed-off-by: Adrian Tschira --- synapse/http/site.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/http') diff --git a/synapse/http/site.py b/synapse/http/site.py index 202a990508..75e32924e9 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -56,7 +56,7 @@ class SynapseRequest(Request): def __repr__(self): # We overwrite this so that we don't log ``access_token`` - return '<%s at 0x%x method=%s uri=%s clientproto=%s site=%s>' % ( + return '<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>' % ( self.__class__.__name__, id(self), self.method, -- cgit 1.5.1 From d9fe2b2d9dbb539da32f969b8bd752159fd5eb6f Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Sun, 15 Apr 2018 21:43:35 +0200 Subject: Replace some more comparisons with six plus a bonus b"" string I missed last time Signed-off-by: Adrian Tschira --- synapse/events/utils.py | 4 +++- synapse/events/validator.py | 6 ++++-- synapse/groups/groups_server.py | 4 +++- synapse/http/matrixfederationclient.py | 8 +++++--- synapse/push/push_rule_evaluator.py | 4 +++- synapse/rest/client/v1/presence.py | 8 +++++--- 6 files changed, 23 insertions(+), 11 deletions(-) (limited to 'synapse/http') diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 824f4a42e3..29ae086786 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -20,6 +20,8 @@ from frozendict import frozendict import re +from six import string_types + # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\' # (? Date: Mon, 21 May 2018 16:03:39 +0100 Subject: Add in flight request metrics This tracks CPU and DB usage while requests are in flight, rather than when we write the response. --- synapse/http/request_metrics.py | 152 +++++++++++++++++++++++++++++++++++++++- synapse/http/site.py | 4 +- 2 files changed, 154 insertions(+), 2 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 8c850bf23f..f1e9b41fde 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -98,14 +98,76 @@ response_size = metrics.register_counter( "response_size", labels=["method", "servlet", "tag"] ) +# In flight metrics are incremented while the requests are in flight, rather +# than when the response was written. 
+ +in_flight_requests_ru_utime = metrics.register_counter( + "in_flight_requests_ru_utime_seconds", labels=["method", "servlet"], +) + +in_flight_requests_ru_stime = metrics.register_counter( + "in_flight_requests_ru_stime_seconds", labels=["method", "servlet"], +) + +in_flight_requests_db_txn_count = metrics.register_counter( + "in_flight_requests_db_txn_count", labels=["method", "servlet"], +) + +# seconds spent waiting for db txns, excluding scheduling time, when processing +# this request +in_flight_requests_db_txn_duration = metrics.register_counter( + "in_flight_requests_db_txn_duration_seconds", labels=["method", "servlet"], +) + +# seconds spent waiting for a db connection, when processing this request +in_flight_requests_db_sched_duration = metrics.register_counter( + "in_flight_requests_db_sched_duration_seconds", labels=["method", "servlet"] +) + +_in_flight_requests_count = metrics.register_gauge( + "in_flight_requests_count", labels=["method", "servlet"] +) + + +# The set of all in flight requests. +_in_flight_requests = set() + + +def _collect_in_flight(): + """Called just before metrics are collected, so we use it to update all + the in flight request metrics + """ + + # Map from (method, name) -> int, the number of in flight requests of that + # type + counts = {} + + for rm in _in_flight_requests: + rm.update_metrics() + key = (rm.method, rm.name,) + counts[key] = counts.get(key, 0) + 1 + + for (method, name), count in counts.iteritems(): + _in_flight_requests_count.set(count, method, name) + + +metrics.register_collector(_collect_in_flight) + class RequestMetrics(object): - def start(self, time_msec, name): + def start(self, time_msec, name, method): self.start = time_msec self.start_context = LoggingContext.current_context() self.name = name + self.method = method + + self._request_stats = _RequestStats.from_context(self.start_context) + + _in_flight_requests.add(self) def stop(self, time_msec, request): + _in_flight_requests.discard(self) + context = LoggingContext.current_context() tag = "" @@ -147,3 +209,91 @@ class RequestMetrics(object): ) response_size.inc_by(request.sentLength, request.method, self.name, tag) + + # We always call this at the end to ensure that we update the metrics + # regardless of whether a call to /metrics while the request was in + # flight. + self.update_metrics() + + def update_metrics(self): + """Updates the in flight metrics with values from this request. + """ + + diff = self._request_stats.update() + + in_flight_requests_ru_utime.inc_by( + diff.ru_utime, self.method, self.name, + ) + + in_flight_requests_ru_stime.inc_by( + diff.ru_stime, self.method, self.name, + ) + + in_flight_requests_db_txn_count.inc_by( + diff.db_txn_count, self.method, self.name, + ) + + in_flight_requests_db_txn_duration.inc_by( + diff.db_txn_duration_ms / 1000., self.method, self.name, + ) + + in_flight_requests_db_sched_duration.inc_by( + diff.db_sched_duration_ms / 1000., self.method, self.name, + ) + + +class _RequestStats(object): + """Keeps tracks of various metrics for an in flight request. 
+ """ + + __slots__ = [ + "context", "ru_utime", "ru_stime", + "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", + ] + + def __init__(self, context, ru_utime, ru_stime, db_txn_count, + db_txn_duration_ms, db_sched_duration_ms): + self.context = context + self.ru_utime = ru_utime + self.ru_stime = ru_stime + self.db_txn_count = db_txn_count + self.db_txn_duration_ms = db_txn_duration_ms + self.db_sched_duration_ms = db_sched_duration_ms + + @staticmethod + def from_context(context): + ru_utime, ru_stime = context.get_resource_usage() + + return _RequestStats( + context, + ru_utime, ru_stime, + context.db_txn_count, + context.db_txn_duration_ms, + context.db_sched_duration_ms, + ) + + def update(self): + """Updates the current values and returns the difference between the + old and new values. + + Returns: + _RequestStats: The difference between the old and new values + """ + new = _RequestStats.from_context(self.context) + + diff = _RequestStats( + self.context, + new.ru_utime - self.ru_utime, + new.ru_stime - self.ru_stime, + new.db_txn_count - self.db_txn_count, + new.db_txn_duration_ms - self.db_txn_duration_ms, + new.db_sched_duration_ms - self.db_sched_duration_ms, + ) + + self.ru_utime = new.ru_utime + self.ru_stime = new.ru_stime + self.db_txn_count = new.db_txn_count + self.db_txn_duration_ms = new.db_txn_duration_ms + self.db_sched_duration_ms = new.db_sched_duration_ms + + return diff diff --git a/synapse/http/site.py b/synapse/http/site.py index 202a990508..b608504225 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -85,7 +85,9 @@ class SynapseRequest(Request): def _started_processing(self, servlet_name): self.start_time = int(time.time() * 1000) self.request_metrics = RequestMetrics() - self.request_metrics.start(self.start_time, name=servlet_name) + self.request_metrics.start( + self.start_time, name=servlet_name, method=self.method, + ) self.site.access_logger.info( "%s - %s - Received request: %s %s", -- cgit 1.5.1 From df9f72d9e5fe264b86005208e0f096156eb03e4b Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 21 May 2018 19:47:37 -0500 Subject: replacing portions --- synapse/api/auth.py | 2 +- synapse/federation/federation_client.py | 17 ++- synapse/federation/federation_server.py | 16 ++- synapse/federation/send_queue.py | 8 +- synapse/federation/transaction_queue.py | 47 +++----- synapse/handlers/appservice.py | 19 +-- synapse/handlers/presence.py | 58 ++++----- synapse/http/client.py | 20 +--- synapse/http/matrixfederationclient.py | 14 +-- synapse/metrics/__init__.py | 182 ++++++++++------------------- synapse/notifier.py | 23 ++-- synapse/push/httppusher.py | 13 +-- synapse/push/push_rule_evaluator.py | 2 +- synapse/python_dependencies.py | 1 + synapse/replication/tcp/protocol.py | 88 ++++++-------- synapse/util/caches/__init__.py | 57 ++++++--- synapse/util/caches/descriptors.py | 2 +- synapse/util/caches/dictionary_cache.py | 2 +- synapse/util/caches/expiringcache.py | 4 +- synapse/util/caches/response_cache.py | 11 +- synapse/util/caches/stream_change_cache.py | 2 +- synapse/util/metrics.py | 91 ++++----------- tests/__init__.py | 3 + 23 files changed, 268 insertions(+), 414 deletions(-) (limited to 'synapse/http') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index f17fda6315..b052cf532b 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -57,7 +57,7 @@ class Auth(object): self.TOKEN_NOT_FOUND_HTTP_STATUS = 401 self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000) - register_cache("token_cache", self.token_cache) + 
register_cache("cache", "token_cache", self.token_cache) @defer.inlineCallbacks def check_from_context(self, event, context, do_sig_check=True): diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 6163f7c466..2761ffae07 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -32,20 +32,17 @@ from synapse.federation.federation_base import ( FederationBase, event_from_pdu_json, ) -import synapse.metrics from synapse.util import logcontext, unwrapFirstError from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.logutils import log_function from synapse.util.retryutils import NotRetryingDestination -logger = logging.getLogger(__name__) - +from prometheus_client import Counter -# synapse.federation.federation_client is a silly name -metrics = synapse.metrics.get_metrics_for("synapse.federation.client") +logger = logging.getLogger(__name__) -sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"]) +sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["type"]) PDU_RETRY_TIME_MS = 1 * 60 * 1000 @@ -108,7 +105,7 @@ class FederationClient(FederationBase): a Deferred which will eventually yield a JSON object from the response """ - sent_queries_counter.inc(query_type) + sent_queries_counter.labels(query_type).inc() return self.transport_layer.make_query( destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail, @@ -127,7 +124,7 @@ class FederationClient(FederationBase): a Deferred which will eventually yield a JSON object from the response """ - sent_queries_counter.inc("client_device_keys") + sent_queries_counter.labels("client_device_keys").inc() return self.transport_layer.query_client_keys( destination, content, timeout ) @@ -137,7 +134,7 @@ class FederationClient(FederationBase): """Query the device keys for a list of user ids hosted on a remote server. 
""" - sent_queries_counter.inc("user_devices") + sent_queries_counter.labels("user_devices").inc() return self.transport_layer.query_user_devices( destination, user_id, timeout ) @@ -154,7 +151,7 @@ class FederationClient(FederationBase): a Deferred which will eventually yield a JSON object from the response """ - sent_queries_counter.inc("client_one_time_keys") + sent_queries_counter.labels("client_one_time_keys").inc() return self.transport_layer.claim_client_keys( destination, content, timeout ) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 247ddc89d5..8211273006 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -27,12 +27,13 @@ from synapse.federation.federation_base import ( from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction -import synapse.metrics from synapse.types import get_domain_from_id from synapse.util import async from synapse.util.caches.response_cache import ResponseCache from synapse.util.logutils import log_function +from prometheus_client import Counter + from six import iteritems # when processing incoming transactions, we try to handle multiple rooms in @@ -41,14 +42,11 @@ TRANSACTION_CONCURRENCY_LIMIT = 10 logger = logging.getLogger(__name__) -# synapse.federation.federation_server is a silly name -metrics = synapse.metrics.get_metrics_for("synapse.federation.server") - -received_pdus_counter = metrics.register_counter("received_pdus") +received_pdus_counter = Counter("synapse_federation_server_received_pdus", "") -received_edus_counter = metrics.register_counter("received_edus") +received_edus_counter = Counter("synapse_federation_server_received_edus", "") -received_queries_counter = metrics.register_counter("received_queries", labels=["type"]) +received_queries_counter = Counter("synapse_federation_server_received_queries", "", ["type"]) class FederationServer(FederationBase): @@ -131,7 +129,7 @@ class FederationServer(FederationBase): logger.debug("[%s] Transaction is new", transaction.transaction_id) - received_pdus_counter.inc_by(len(transaction.pdus)) + received_pdus_counter.inc(len(transaction.pdus)) pdus_by_room = {} @@ -292,7 +290,7 @@ class FederationServer(FederationBase): @defer.inlineCallbacks def on_query_request(self, query_type, args): - received_queries_counter.inc(query_type) + received_queries_counter.labels(query_type).inc() resp = yield self.registry.on_query(query_type, args) defer.returnValue((200, resp)) diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 0f0c687b37..e6e1888f3a 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -33,7 +33,7 @@ from .units import Edu from synapse.storage.presence import UserPresenceState from synapse.util.metrics import Measure -import synapse.metrics +from synapse.metrics import LaterGauge from blist import sorteddict from collections import namedtuple @@ -45,9 +45,6 @@ from six import itervalues, iteritems logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) - - class FederationRemoteSendQueue(object): """A drop in replacement for TransactionQueue""" @@ -77,8 +74,7 @@ class FederationRemoteSendQueue(object): # lambda binds to the queue rather than to the name of the queue which # changes. ARGH. 
def register(name, queue): - metrics.register_callback( - queue_name + "_size", + LaterGauge("synapse_federation_send_queue_%s_size" % (queue_name,), "", lambda: len(queue), ) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index ded2b1871a..778924a13c 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -26,23 +26,18 @@ from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter from synapse.util.metrics import measure_func from synapse.handlers.presence import format_user_presence_state, get_interested_remotes import synapse.metrics +from synapse.metrics import LaterGauge +from synapse.metrics import ( + sent_edus_counter, sent_transactions_counter, events_processed_counter) + +from prometheus_client import Counter import logging logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) - -client_metrics = synapse.metrics.get_metrics_for("synapse.federation.client") -sent_pdus_destination_dist = client_metrics.register_distribution( - "sent_pdu_destinations" -) -sent_edus_counter = client_metrics.register_counter("sent_edus") - -sent_transactions_counter = client_metrics.register_counter("sent_transactions") - -events_processed_counter = client_metrics.register_counter("events_processed") +sent_pdus_destination_dist = Counter("synapse_federation_client_sent_pdu_destinations", "") class TransactionQueue(object): @@ -69,8 +64,7 @@ class TransactionQueue(object): # done self.pending_transactions = {} - metrics.register_callback( - "pending_destinations", + LaterGauge("pending_destinations", "", [], lambda: len(self.pending_transactions), ) @@ -94,12 +88,12 @@ class TransactionQueue(object): # Map of destination -> (edu_type, key) -> Edu self.pending_edus_keyed_by_dest = edus_keyed = {} - metrics.register_callback( - "pending_pdus", + LaterGauge( + "pending_pdus", "", [], lambda: sum(map(len, pdus.values())), ) - metrics.register_callback( - "pending_edus", + LaterGauge( + "pending_edus", "", [], lambda: ( sum(map(len, edus.values())) + sum(map(len, presence.values())) @@ -241,18 +235,15 @@ class TransactionQueue(object): now = self.clock.time_msec() ts = yield self.store.get_received_ts(events[-1].event_id) - synapse.metrics.event_processing_lag.set( - now - ts, "federation_sender", - ) - synapse.metrics.event_processing_last_ts.set( - ts, "federation_sender", - ) + synapse.metrics.event_processing_lag.labels( + "federation_sender").set(now - ts) + synapse.metrics.event_processing_last_ts.labels( + "federation_sender").set(ts) - events_processed_counter.inc_by(len(events)) + events_processed_counter.inc(len(events)) - synapse.metrics.event_processing_positions.set( - next_token, "federation_sender", - ) + synapse.metrics.event_processing_positions.labels( + "federation_sender").set(next_token) finally: self._is_processing = False @@ -275,7 +266,7 @@ class TransactionQueue(object): if not destinations: return - sent_pdus_destination_dist.inc_by(len(destinations)) + sent_pdus_destination_dist.inc(len(destinations)) for destination in destinations: self.pending_pdus_by_dest.setdefault(destination, []).append( diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index b596f098fd..a7345331af 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -21,14 +21,13 @@ from synapse.util.metrics import Measure from synapse.util.logcontext import ( make_deferred_yieldable, run_in_background, ) +from 
prometheus_client import Counter import logging logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) - -events_processed_counter = metrics.register_counter("events_processed") +events_processed_counter = Counter("synapse_handlers_appservice_events_processed", "") def log_failure(failure): @@ -128,18 +127,12 @@ class ApplicationServicesHandler(object): now = self.clock.time_msec() ts = yield self.store.get_received_ts(events[-1].event_id) - synapse.metrics.event_processing_positions.set( - upper_bound, "appservice_sender", - ) + synapse.metrics.event_processing_positions.labels("appservice_sender").set(upper_bound) - events_processed_counter.inc_by(len(events)) + events_processed_counter.inc(len(events)) - synapse.metrics.event_processing_lag.set( - now - ts, "appservice_sender", - ) - synapse.metrics.event_processing_last_ts.set( - ts, "appservice_sender", - ) + synapse.metrics.event_processing_lag.labels("appservice_sender").set(now - ts) + synapse.metrics.event_processing_last_ts.labels("appservice_sender").set(ts) finally: self.is_processing = False diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 585f3e4da2..06d937ef3a 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -36,27 +36,27 @@ from synapse.util.logutils import log_function from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer from synapse.types import UserID, get_domain_from_id -import synapse.metrics +from synapse.metrics import LaterGauge import logging +from prometheus_client import Counter + logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) -notified_presence_counter = metrics.register_counter("notified_presence") -federation_presence_out_counter = metrics.register_counter("federation_presence_out") -presence_updates_counter = metrics.register_counter("presence_updates") -timers_fired_counter = metrics.register_counter("timers_fired") -federation_presence_counter = metrics.register_counter("federation_presence") -bump_active_time_counter = metrics.register_counter("bump_active_time") +notified_presence_counter = Counter("synapse_handler_presence_notified_presence", "") +federation_presence_out_counter = Counter("synapse_handler_presence_federation_presence_out", "") +presence_updates_counter = Counter("synapse_handler_presence_presence_updates", "") +timers_fired_counter = Counter("synapse_handler_presence_timers_fired", "") +federation_presence_counter = Counter("synapse_handler_presence_federation_presence", "") +bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time", "") -get_updates_counter = metrics.register_counter("get_updates", labels=["type"]) +get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"]) -notify_reason_counter = metrics.register_counter("notify_reason", labels=["reason"]) -state_transition_counter = metrics.register_counter( - "state_transition", labels=["from", "to"] +notify_reason_counter = Counter("synapse_handler_presence_notify_reason", "", ["reason"]) +state_transition_counter = Counter("synapse_handler_presence_state_transition", "", ["from", "to"] ) @@ -137,9 +137,9 @@ class PresenceHandler(object): for state in active_presence } - metrics.register_callback( - "user_to_current_state_size", lambda: len(self.user_to_current_state) - ) + LaterGauge( + "user_to_current_state_size", "", [], lambda: len(self.user_to_current_state) + ).register() now = self.clock.time_msec() for 
state in active_presence: @@ -208,7 +208,7 @@ class PresenceHandler(object): 60 * 1000, ) - metrics.register_callback("wheel_timer_size", lambda: len(self.wheel_timer)) + LaterGauge("wheel_timer_size", "", [], lambda: len(self.wheel_timer)).register() @defer.inlineCallbacks def _on_shutdown(self): @@ -311,10 +311,10 @@ class PresenceHandler(object): # TODO: We should probably ensure there are no races hereafter - presence_updates_counter.inc_by(len(new_states)) + presence_updates_counter.inc(len(new_states)) if to_notify: - notified_presence_counter.inc_by(len(to_notify)) + notified_presence_counter.inc(len(to_notify)) yield self._persist_and_notify(to_notify.values()) self.unpersisted_users_changes |= set(s.user_id for s in new_states) @@ -325,7 +325,7 @@ class PresenceHandler(object): if user_id not in to_notify } if to_federation_ping: - federation_presence_out_counter.inc_by(len(to_federation_ping)) + federation_presence_out_counter.inc(len(to_federation_ping)) self._push_to_remotes(to_federation_ping.values()) @@ -363,7 +363,7 @@ class PresenceHandler(object): for user_id in users_to_check ] - timers_fired_counter.inc_by(len(states)) + timers_fired_counter.inc(len(states)) changes = handle_timeouts( states, @@ -707,7 +707,7 @@ class PresenceHandler(object): updates.append(prev_state.copy_and_replace(**new_fields)) if updates: - federation_presence_counter.inc_by(len(updates)) + federation_presence_counter.inc(len(updates)) yield self._update_states(updates) @defer.inlineCallbacks @@ -982,28 +982,28 @@ def should_notify(old_state, new_state): return False if old_state.status_msg != new_state.status_msg: - notify_reason_counter.inc("status_msg_change") + notify_reason_counter.labels("status_msg_change").inc() return True if old_state.state != new_state.state: - notify_reason_counter.inc("state_change") - state_transition_counter.inc(old_state.state, new_state.state) + notify_reason_counter.labels("state_change").inc() + state_transition_counter.labels(old_state.state, new_state.state).inc() return True if old_state.state == PresenceState.ONLINE: if new_state.currently_active != old_state.currently_active: - notify_reason_counter.inc("current_active_change") + notify_reason_counter.labels("current_active_change").inc() return True if new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY: # Only notify about last active bumps if we're not currently acive if not new_state.currently_active: - notify_reason_counter.inc("last_active_change_online") + notify_reason_counter.labels("last_active_change_online").inc() return True elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY: # Always notify for a transition where last active gets bumped. - notify_reason_counter.inc("last_active_change_not_online") + notify_reason_counter.labels("last_active_change_not_online").inc() return True return False @@ -1077,14 +1077,14 @@ class PresenceEventSource(object): if changed is not None and len(changed) < 500: # For small deltas, its quicker to get all changes and then # work out if we share a room or they're in our presence list - get_updates_counter.inc("stream") + get_updates_counter.labels("stream").inc() for other_user_id in changed: if other_user_id in users_interested_in: user_ids_changed.add(other_user_id) else: # Too many possible updates. Find all users we can see and check # if any of them have changed. 
- get_updates_counter.inc("full") + get_updates_counter.labels("full").inc() if from_key: user_ids_changed = stream_change_cache.get_entities_changed( diff --git a/synapse/http/client.py b/synapse/http/client.py index 70a19d9b74..61a1d2e2b3 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -23,7 +23,6 @@ from synapse.http import cancelled_to_request_timed_out_error from synapse.util.async import add_timeout_to_deferred from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.logcontext import make_deferred_yieldable -import synapse.metrics from synapse.http.endpoint import SpiderEndpoint from canonicaljson import encode_canonical_json @@ -42,6 +41,7 @@ from twisted.web._newclient import ResponseDone from six import StringIO +from prometheus_client import Counter import simplejson as json import logging import urllib @@ -49,16 +49,8 @@ import urllib logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) - -outgoing_requests_counter = metrics.register_counter( - "requests", - labels=["method"], -) -incoming_responses_counter = metrics.register_counter( - "responses", - labels=["method", "code"], -) +outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"]) +incoming_responses_counter = Counter("synapse_http_client_responses", "", ["method", "code"]) class SimpleHttpClient(object): @@ -95,7 +87,7 @@ class SimpleHttpClient(object): def request(self, method, uri, *args, **kwargs): # A small wrapper around self.agent.request() so we can easily attach # counters to it - outgoing_requests_counter.inc(method) + outgoing_requests_counter.labels(method).inc() logger.info("Sending request %s %s", method, uri) @@ -109,14 +101,14 @@ class SimpleHttpClient(object): ) response = yield make_deferred_yieldable(request_deferred) - incoming_responses_counter.inc(method, response.code) + incoming_responses_counter.labels(method, response.code).inc() logger.info( "Received response to %s %s: %s", method, uri, response.code ) defer.returnValue(response) except Exception as e: - incoming_responses_counter.inc(method, "ERR") + incoming_responses_counter.labels(method, "ERR").inc() logger.info( "Error sending request to %s %s: %s %s", method, uri, type(e).__name__, e.message diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 4b2b85464d..259d3884e2 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -43,19 +43,13 @@ import sys import urllib from six.moves.urllib import parse as urlparse +from prometheus_client import Counter + logger = logging.getLogger(__name__) outbound_logger = logging.getLogger("synapse.http.outbound") -metrics = synapse.metrics.get_metrics_for(__name__) - -outgoing_requests_counter = metrics.register_counter( - "requests", - labels=["method"], -) -incoming_responses_counter = metrics.register_counter( - "responses", - labels=["method", "code"], -) +outgoing_requests_counter = Counter("synapse_http_matrixfederationclient_requests", "", ["method"]) +incoming_responses_counter = Counter("synapse_http_matrixfederationclient_responses", "", ["method", "code"]) MAX_LONG_RETRIES = 10 diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index e3b831db67..973ba6506f 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -18,14 +18,13 @@ import functools import time import gc import platform +import attr -from twisted.internet import reactor +from prometheus_client import Gauge, 
Histogram, Counter +from prometheus_client.core import ( + GaugeMetricFamily, CounterMetricFamily, REGISTRY) -from .metric import ( - CounterMetric, CallbackMetric, DistributionMetric, CacheMetric, - MemoryUsageMetric, GaugeMetric, -) -from .process_collector import register_process_collector +from twisted.internet import reactor logger = logging.getLogger(__name__) @@ -34,149 +33,94 @@ logger = logging.getLogger(__name__) running_on_pypy = platform.python_implementation() == 'PyPy' all_metrics = [] all_collectors = [] +all_gauges = {} +@attr.s(hash=True) +class LaterGauge(object): -class Metrics(object): - """ A single Metrics object gives a (mutable) slice view of the all_metrics - dict, allowing callers to easily register new metrics that are namespaced - nicely.""" - - def __init__(self, name): - self.name_prefix = name - - def make_subspace(self, name): - return Metrics("%s_%s" % (self.name_prefix, name)) - - def register_collector(self, func): - all_collectors.append(func) - - def _register(self, metric_class, name, *args, **kwargs): - full_name = "%s_%s" % (self.name_prefix, name) - - metric = metric_class(full_name, *args, **kwargs) - - all_metrics.append(metric) - return metric - - def register_counter(self, *args, **kwargs): - """ - Returns: - CounterMetric - """ - return self._register(CounterMetric, *args, **kwargs) - - def register_gauge(self, *args, **kwargs): - """ - Returns: - GaugeMetric - """ - return self._register(GaugeMetric, *args, **kwargs) + name = attr.ib() + desc = attr.ib() + labels = attr.ib(hash=False) + caller = attr.ib() - def register_callback(self, *args, **kwargs): - """ - Returns: - CallbackMetric - """ - return self._register(CallbackMetric, *args, **kwargs) + def collect(self): - def register_distribution(self, *args, **kwargs): - """ - Returns: - DistributionMetric - """ - return self._register(DistributionMetric, *args, **kwargs) - - def register_cache(self, *args, **kwargs): - """ - Returns: - CacheMetric - """ - return self._register(CacheMetric, *args, **kwargs) + g = GaugeMetricFamily(self.name, self.desc, self.labels) + try: + calls = self.caller() + except Exception as e: + print(e) + logger.err() + yield g -def register_memory_metrics(hs): - try: - import psutil - process = psutil.Process() - process.memory_info().rss - except (ImportError, AttributeError): - logger.warn( - "psutil is not installed or incorrect version." - " Disabling memory metrics." - ) - return - metric = MemoryUsageMetric(hs, psutil) - all_metrics.append(metric) + if isinstance(calls, dict): + for k, v in calls.items(): + g.add_metric(k, v) + else: + g.add_metric([], calls) + yield g -def get_metrics_for(pkg_name): - """ Returns a Metrics instance for conveniently creating metrics - namespaced with the given name prefix. """ + def register(self): + if self.name in all_gauges.keys(): + REGISTRY.unregister(all_gauges.pop(self.name)) - # Convert a "package.name" to "package_name" because Prometheus doesn't - # let us use . 
in metric names - return Metrics(pkg_name.replace(".", "_")) + REGISTRY.register(self) + all_gauges[self.name] = self -def render_all(): - strs = [] +# +# Python GC metrics +# - for collector in all_collectors: - collector() +gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"]) +gc_time = Histogram("python_gc_time", "Time taken to GC (ms)", ["gen"], buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000]) - for metric in all_metrics: - try: - strs += metric.render() - except Exception: - strs += ["# FAILED to render"] - logger.exception("Failed to render metric") +class GCCounts(object): + def collect(self): + gc_counts = gc.get_count() - strs.append("") # to generate a final CRLF + cm = GaugeMetricFamily("python_gc_counts", "GC cycle counts", labels=["gen"]) + for n, m in enumerate(gc.get_count()): + cm.add_metric([str(n)], m) - return "\n".join(strs) + yield cm +REGISTRY.register(GCCounts()) -register_process_collector(get_metrics_for("process")) +# +# Twisted reactor metrics +# +tick_time = Histogram("python_twisted_reactor_tick_time", "Tick time of the Twisted reactor (ms)", buckets=[1, 2, 5, 10, 50, 100, 250, 500, 1000, 2000]) +pending_calls_metric = Histogram("python_twisted_reactor_pending_calls", "Pending calls", buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000]) -python_metrics = get_metrics_for("python") +# +# Federation Metrics +# -gc_time = python_metrics.register_distribution("gc_time", labels=["gen"]) -gc_unreachable = python_metrics.register_counter("gc_unreachable_total", labels=["gen"]) -python_metrics.register_callback( - "gc_counts", lambda: {(i,): v for i, v in enumerate(gc.get_count())}, labels=["gen"] -) +sent_edus_counter = Counter("synapse_federation_client_sent_edus", "") -reactor_metrics = get_metrics_for("python.twisted.reactor") -tick_time = reactor_metrics.register_distribution("tick_time") -pending_calls_metric = reactor_metrics.register_distribution("pending_calls") +sent_transactions_counter = Counter("synapse_federation_client_sent_transactions", "") -synapse_metrics = get_metrics_for("synapse") +events_processed_counter = Counter("synapse_federation_client_events_processed", "") # Used to track where various components have processed in the event stream, # e.g. federation sending, appservice sending, etc. -event_processing_positions = synapse_metrics.register_gauge( - "event_processing_positions", labels=["name"], -) +event_processing_positions = Gauge("synapse_event_processing_positions", "", ["name"]) # Used to track the current max events stream position -event_persisted_position = synapse_metrics.register_gauge( - "event_persisted_position", -) +event_persisted_position = Gauge("synapse_event_persisted_position", "") # Used to track the received_ts of the last event processed by various # components -event_processing_last_ts = synapse_metrics.register_gauge( - "event_processing_last_ts", labels=["name"], -) +event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"]) # Used to track the lag processing events. This is the time difference # between the last processed event's received_ts and the time it was # finished being processed. 
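
For reference, the converted gauges above are driven through the standard
prometheus_client API. A minimal, self-contained sketch (the component names
and values here are invented for illustration, not Synapse's):

    from prometheus_client import Gauge

    demo_positions = Gauge("demo_event_processing_positions", "", ["name"])

    # Each component records how far through the event stream it has got;
    # set() overwrites the current value, unlike a Counter's inc().
    demo_positions.labels("federation_sender").set(1234)
    demo_positions.labels("appservice_sender").set(1200)
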
-event_processing_lag = synapse_metrics.register_gauge( - "event_processing_lag", labels=["name"], -) - +event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"]) def runUntilCurrentTimer(func): @@ -206,8 +150,8 @@ def runUntilCurrentTimer(func): # since about 25% of time is actually spent running things triggered by # I/O events, but that is harder to capture without rewriting half the # reactor. - tick_time.inc_by(end - start) - pending_calls_metric.inc_by(num_pending) + tick_time.observe(end - start) + pending_calls_metric.observe(num_pending) if running_on_pypy: return ret @@ -224,8 +168,8 @@ def runUntilCurrentTimer(func): unreachable = gc.collect(i) end = time.time() * 1000 - gc_time.inc_by(end - start, i) - gc_unreachable.inc_by(unreachable, i) + gc_time.labels(i).observe(end - start) + gc_unreachable.labels(i).set(unreachable) return ret diff --git a/synapse/notifier.py b/synapse/notifier.py index 8355c7d621..123e6f1840 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -28,22 +28,19 @@ from synapse.util.logcontext import PreserveLoggingContext, run_in_background from synapse.util.metrics import Measure from synapse.types import StreamToken from synapse.visibility import filter_events_for_client -import synapse.metrics +from synapse.metrics import LaterGauge from collections import namedtuple +from prometheus_client import Counter import logging logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) +notified_events_counter = Counter("synapse_notifier_notified_events", "") -notified_events_counter = metrics.register_counter("notified_events") - -users_woken_by_stream_counter = metrics.register_counter( - "users_woken_by_stream", labels=["stream"] -) +users_woken_by_stream_counter = Counter("synapse_notifier_users_woken_by_stream", "", ["stream"]) # TODO(paul): Should be shared somewhere @@ -108,7 +105,7 @@ class _NotifierUserStream(object): self.last_notified_ms = time_now_ms noify_deferred = self.notify_deferred - users_woken_by_stream_counter.inc(stream_key) + users_woken_by_stream_counter.labels(stream_key).inc() with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) @@ -197,14 +194,14 @@ class Notifier(object): all_user_streams.add(x) return sum(stream.count_listeners() for stream in all_user_streams) - metrics.register_callback("listeners", count_listeners) + LaterGauge("listeners", "", [], count_listeners) - metrics.register_callback( - "rooms", + LaterGauge( + "rooms", "", [], lambda: count(bool, self.room_to_user_streams.values()), ) - metrics.register_callback( - "users", + LaterGauge( + "users", "", [], lambda: len(self.user_to_user_stream), ) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index b077e1a446..e22088ad6f 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -20,22 +20,17 @@ from twisted.internet.error import AlreadyCalled, AlreadyCancelled from . import push_rule_evaluator from . 
import push_tools -import synapse from synapse.push import PusherConfigException from synapse.util.logcontext import LoggingContext from synapse.util.metrics import Measure -logger = logging.getLogger(__name__) +from prometheus_client import Counter -metrics = synapse.metrics.get_metrics_for(__name__) +logger = logging.getLogger(__name__) -http_push_processed_counter = metrics.register_counter( - "http_pushes_processed", -) +http_push_processed_counter = Counter("http_pushes_processed", "") -http_push_failed_counter = metrics.register_counter( - "http_pushes_failed", -) +http_push_failed_counter = Counter("http_pushes_failed", "") class HttpPusher(object): diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 3601f2d365..c3e6c5c258 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -150,7 +150,7 @@ class PushRuleEvaluatorForEvent(object): # Caches (glob, word_boundary) -> regex for push. See _glob_matches regex_cache = LruCache(50000 * CACHE_SIZE_FACTOR) -register_cache("regex_push_cache", regex_cache) +register_cache("cache", "regex_push_cache", regex_cache) def _glob_matches(glob, value, word_boundary=False): diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 216db4d164..478c497722 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -56,6 +56,7 @@ REQUIREMENTS = { "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], "six": ["six"], + "prometheus_client": ["prometheus_client"], } CONDITIONAL_REQUIREMENTS = { "web_client": { diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index d7d38464b2..5848f57c5e 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -60,19 +60,19 @@ from .commands import ( ) from .streams import STREAMS_MAP +from synapse.metrics import LaterGauge from synapse.util.stringutils import random_string -from synapse.metrics.metric import CounterMetric + +from prometheus_client import Counter + +from collections import defaultdict import logging -import synapse.metrics import struct import fcntl - -metrics = synapse.metrics.get_metrics_for(__name__) - -connection_close_counter = metrics.register_counter( - "close_reason", labels=["reason_type"], +connection_close_counter = Counter( + "synapse_replication_tcp_protocol_close_reason", "", ["reason_type"], ) @@ -136,12 +136,8 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): # The LoopingCall for sending pings. 
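
The hunk just below swaps the per-connection CounterMetric objects for plain
defaultdict(int) counters, which are then exported through LaterGauge
callbacks at the end of this file. A rough, runnable sketch of that split,
with invented names:

    from collections import defaultdict

    inbound_commands = defaultdict(int)  # command name -> count

    def on_command(cmd_name):
        # hot path: just bump a dict entry, no metrics machinery involved
        inbound_commands[cmd_name] += 1

    def scrape_counts():
        # the shape a LaterGauge-style callback returns: label tuple -> value
        return {(name,): count for name, count in inbound_commands.items()}

    on_command("PING")
    on_command("RDATA")
    print(scrape_counts())  # {('PING',): 1, ('RDATA',): 1}
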
self._send_ping_loop = None - self.inbound_commands_counter = CounterMetric( - "inbound_commands", labels=["command"], - ) - self.outbound_commands_counter = CounterMetric( - "outbound_commands", labels=["command"], - ) + self.inbound_commands_counter = defaultdict(int) + self.outbound_commands_counter = defaultdict(int) def connectionMade(self): logger.info("[%s] Connection established", self.id()) @@ -201,7 +197,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): self.last_received_command = self.clock.time_msec() - self.inbound_commands_counter.inc(cmd_name) + self.inbound_commands_counter[cmd_name] = self.inbound_commands_counter[cmd_name] + 1 cmd_cls = COMMAND_MAP[cmd_name] try: @@ -251,8 +247,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): self._queue_command(cmd) return - self.outbound_commands_counter.inc(cmd.NAME) - + self.outbound_commands_counter[cmd.NAME] = self.outbound_commands_counter[cmd.NAME] + 1 string = "%s %s" % (cmd.NAME, cmd.to_line(),) if "\n" in string: raise Exception("Unexpected newline in command: %r", string) @@ -317,9 +312,9 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): def connectionLost(self, reason): logger.info("[%s] Replication connection closed: %r", self.id(), reason) if isinstance(reason, Failure): - connection_close_counter.inc(reason.type.__name__) + connection_close_counter.labels(reason.type.__name__).inc() else: - connection_close_counter.inc(reason.__class__.__name__) + connection_close_counter.labels(reason.__class__.__name__).inc() try: # Remove us from list of connections to be monitored @@ -566,14 +561,12 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): # The following simply registers metrics for the replication connections -metrics.register_callback( - "pending_commands", +pending_commands = LaterGauge( + "pending_commands", "", ["name", "conn_id"], lambda: { (p.name, p.conn_id): len(p.pending_commands) for p in connected_connections - }, - labels=["name", "conn_id"], -) + }) def transport_buffer_size(protocol): @@ -583,14 +576,12 @@ def transport_buffer_size(protocol): return 0 -metrics.register_callback( - "transport_send_buffer", +transport_send_buffer = LaterGauge( + "synapse_replication_tcp_transport_send_buffer", "", ["name", "conn_id"], lambda: { (p.name, p.conn_id): transport_buffer_size(p) for p in connected_connections - }, - labels=["name", "conn_id"], -) + }) def transport_kernel_read_buffer_size(protocol, read=True): @@ -608,48 +599,37 @@ def transport_kernel_read_buffer_size(protocol, read=True): return 0 -metrics.register_callback( - "transport_kernel_send_buffer", +tcp_transport_kernel_send_buffer = LaterGauge( + "synapse_replication_tcp_transport_kernel_send_buffer", "", ["name", "conn_id"], lambda: { (p.name, p.conn_id): transport_kernel_read_buffer_size(p, False) for p in connected_connections - }, - labels=["name", "conn_id"], -) + }) -metrics.register_callback( - "transport_kernel_read_buffer", +tcp_transport_kernel_read_buffer = LaterGauge( + "synapse_replication_tcp_transport_kernel_read_buffer", "", ["name", "conn_id"], lambda: { (p.name, p.conn_id): transport_kernel_read_buffer_size(p, True) for p in connected_connections - }, - labels=["name", "conn_id"], -) + }) -metrics.register_callback( - "inbound_commands", +tcp_inbound_commands = LaterGauge( + "synapse_replication_tcp_inbound_commands", "", ["command", "name", "conn_id"], lambda: { (k[0], p.name, p.conn_id): count for p in connected_connections - for k, count in 
p.inbound_commands_counter.counts.iteritems() - }, - labels=["command", "name", "conn_id"], -) + for k, count in p.inbound_commands_counter.items() + }) -metrics.register_callback( - "outbound_commands", +tcp_outbound_commands = LaterGauge( + "synapse_replication_tcp_outbound_commands", "", ["command", "name", "conn_id"], lambda: { (k[0], p.name, p.conn_id): count for p in connected_connections - for k, count in p.outbound_commands_counter.counts.iteritems() - }, - labels=["command", "name", "conn_id"], -) + for k, count in p.outbound_commands_counter.items() + }) # number of updates received for each RDATA stream -inbound_rdata_count = metrics.register_counter( - "inbound_rdata_count", - labels=["stream_name"], -) +inbound_rdata_count = Counter("synapse_replication_tcp_inbound_rdata_count", "", ["stream_name"]) diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 4adae96681..438dcddf55 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -13,29 +13,52 @@ # See the License for the specific language governing permissions and # limitations under the License. -import synapse.metrics +from prometheus_client.core import GaugeMetricFamily, REGISTRY + import os CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5)) -metrics = synapse.metrics.get_metrics_for("synapse.util.caches") - caches_by_name = {} -# cache_counter = metrics.register_cache( -# "cache", -# lambda: {(name,): len(caches_by_name[name]) for name in caches_by_name.keys()}, -# labels=["name"], -# ) - - -def register_cache(name, cache): - caches_by_name[name] = cache - return metrics.register_cache( - "cache", - lambda: len(cache), - name, - ) +collectors_by_name = {} + +def register_cache(name, cache_name, cache): + + # Check if the metric is already registered. Unregister it, if so. 
+ metric_name = "synapse_util_caches_%s:%s" % (name, cache_name,) + if metric_name in collectors_by_name.keys(): + REGISTRY.unregister(collectors_by_name[metric_name]) + + class CacheMetric(object): + + hits = 0 + misses = 0 + evicted_size = 0 + + def inc_hits(self): + self.hits += 1 + + def inc_misses(self): + self.misses += 1 + + def inc_evictions(self, size=1): + self.evicted_size += size + + def collect(self): + cache_size = len(cache) + + gm = GaugeMetricFamily(metric_name, "", labels=["size", "hits", "misses", "total"]) + gm.add_metric(["size"], cache_size) + gm.add_metric(["hits"], self.hits) + gm.add_metric(["misses"], self.misses) + gm.add_metric(["total"], self.hits + self.misses) + yield gm + metric = CacheMetric() + REGISTRY.register(metric) + caches_by_name[cache_name] = cache + collectors_by_name[metric_name] = metric + return metric KNOWN_KEYS = { key: key for key in diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 68285a7594..a4188eb099 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -80,7 +80,7 @@ class Cache(object): self.name = name self.keylen = keylen self.thread = None - self.metrics = register_cache(name, self.cache) + self.metrics = register_cache("descriptor", name, self.cache) def _on_evicted(self, evicted_count): self.metrics.inc_evictions(evicted_count) diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index 1709e8b429..bdc21e348f 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -55,7 +55,7 @@ class DictionaryCache(object): __slots__ = [] self.sentinel = Sentinel() - self.metrics = register_cache(name, self.cache) + self.metrics = register_cache("dictionary", name, self.cache) def check_thread(self): expected_thread = self.thread diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 0aa103eecb..ff04c91955 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -52,12 +52,12 @@ class ExpiringCache(object): self._cache = OrderedDict() - self.metrics = register_cache(cache_name, self) - self.iterable = iterable self._size_estimate = 0 + self.metrics = register_cache("expiring", cache_name, self) + def start(self): if not self._expiry_ms: # Don't bother starting the loop if things never expire diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 7f79333e96..a8491b42d5 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer from synapse.util.async import ObservableDeferred -from synapse.util.caches import metrics as cache_metrics +from synapse.util.caches import register_cache from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) @@ -38,15 +38,16 @@ class ResponseCache(object): self.timeout_sec = timeout_ms / 1000. self._name = name - self._metrics = cache_metrics.register_cache( - "response_cache", - size_callback=lambda: self.size(), - cache_name=name, + self._metrics = register_cache( + "response_cache", name, self ) def size(self): return len(self.pending_result_cache) + def __len__(self): + return self.size() + def get(self, key): """Look up the given key. 
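
The register_cache() above folds several quantities into a single gauge
family, distinguishing them by label value rather than by metric name. A
condensed, runnable sketch of the same pattern, with a plain dict standing in
for a real cache and an invented metric name:

    from prometheus_client import generate_latest
    from prometheus_client.core import GaugeMetricFamily, REGISTRY

    cache = {}

    class DemoCacheMetric(object):
        hits = 0
        misses = 0

        def collect(self):
            # collect() runs at scrape time, so len(cache) is always current
            gm = GaugeMetricFamily("demo_cache", "", labels=["measure"])
            gm.add_metric(["size"], len(cache))
            gm.add_metric(["hits"], self.hits)
            gm.add_metric(["total"], self.hits + self.misses)
            yield gm

    metric = DemoCacheMetric()
    REGISTRY.register(metric)

    metric.misses += 1        # a lookup that missed...
    cache["key"] = "value"    # ...and then filled the cache
    metric.hits += 1
    print(generate_latest(REGISTRY).decode("utf-8"))
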
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 941d873ab8..a7fe0397fa 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -38,7 +38,7 @@ class StreamChangeCache(object): self._cache = sorteddict() self._earliest_known_stream_pos = current_stream_pos self.name = name - self.metrics = register_cache(self.name, self._cache) + self.metrics = register_cache("cache", self.name, self._cache) for entity, stream_pos in prefilled_cache.items(): self.entity_has_changed(entity, stream_pos) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index e4b5687a4b..a964286d85 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -15,8 +15,8 @@ from twisted.internet import defer +from prometheus_client import Counter from synapse.util.logcontext import LoggingContext -import synapse.metrics from functools import wraps import logging @@ -24,66 +24,21 @@ import logging logger = logging.getLogger(__name__) +block_counter = Counter("synapse_util_metrics_block_count", "", ["block_name"]) -metrics = synapse.metrics.get_metrics_for(__name__) - -# total number of times we have hit this block -block_counter = metrics.register_counter( - "block_count", - labels=["block_name"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_block_timer:count", - "_block_ru_utime:count", - "_block_ru_stime:count", - "_block_db_txn_count:count", - "_block_db_txn_duration:count", - ) - ) -) - -block_timer = metrics.register_counter( - "block_time_seconds", - labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_timer:total", - ), -) - -block_ru_utime = metrics.register_counter( - "block_ru_utime_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_ru_utime:total", - ), -) - -block_ru_stime = metrics.register_counter( - "block_ru_stime_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_ru_stime:total", - ), -) - -block_db_txn_count = metrics.register_counter( - "block_db_txn_count", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_db_txn_count:total", - ), -) +block_timer = Counter("synapse_util_metrics_block_time_seconds", "", ["block_name"]) + +block_ru_utime = Counter("synapse_util_metrics_block_ru_utime_seconds", "", ["block_name"]) + +block_ru_stime = Counter("synapse_util_metrics_block_ru_stime_seconds", "", ["block_name"]) + +block_db_txn_count = Counter("synapse_util_metrics_block_db_txn_count", "", ["block_name"]) # seconds spent waiting for db txns, excluding scheduling time, in this block -block_db_txn_duration = metrics.register_counter( - "block_db_txn_duration_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_db_txn_duration:total", - ), -) +block_db_txn_duration = Counter("synapse_util_metrics_block_db_txn_duration_seconds", "", ["block_name"]) # seconds spent waiting for a db connection, in this block -block_db_sched_duration = metrics.register_counter( - "block_db_sched_duration_seconds", labels=["block_name"], -) +block_db_sched_duration = Counter("synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"]) def measure_func(name): @@ -132,8 +87,8 @@ class Measure(object): duration = self.clock.time_msec() - self.start - block_counter.inc(self.name) - block_timer.inc_by(duration, self.name) + block_counter.labels(self.name).inc() + 
block_timer.labels(self.name).inc(duration) context = LoggingContext.current_context() @@ -150,19 +105,13 @@ class Measure(object): ru_utime, ru_stime = context.get_resource_usage() - block_ru_utime.inc_by(ru_utime - self.ru_utime, self.name) - block_ru_stime.inc_by(ru_stime - self.ru_stime, self.name) - block_db_txn_count.inc_by( - context.db_txn_count - self.db_txn_count, self.name - ) - block_db_txn_duration.inc_by( - (context.db_txn_duration_ms - self.db_txn_duration_ms) / 1000., - self.name - ) - block_db_sched_duration.inc_by( - (context.db_sched_duration_ms - self.db_sched_duration_ms) / 1000., - self.name - ) + block_ru_utime.labels(self.name).inc(ru_utime - self.ru_utime) + block_ru_stime.labels(self.name).inc(ru_stime - self.ru_stime) + block_db_txn_count.labels(self.name).inc(context.db_txn_count - self.db_txn_count) + block_db_txn_duration.labels(self.name).inc( + (context.db_txn_duration_ms - self.db_txn_duration_ms) / 1000.) + block_db_sched_duration.labels(self.name).inc( + (context.db_sched_duration_ms - self.db_sched_duration_ms) / 1000.) if self.created_context: self.start_context.__exit__(exc_type, exc_val, exc_tb) diff --git a/tests/__init__.py b/tests/__init__.py index bfebb0f644..aab20e8e02 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -12,3 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from twisted.trial import util +util.DEFAULT_TIMEOUT_DURATION = 10 -- cgit 1.5.1 From fcc525b0b705703fead3d703c0df62a264ff8ce8 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 21 May 2018 19:48:57 -0500 Subject: rest of the changes --- synapse/http/request_metrics.py | 106 ++++------------- synapse/http/server.py | 4 +- synapse/push/bulk_push_rule_evaluator.py | 28 ++--- synapse/replication/tcp/resource.py | 30 +++-- synapse/storage/_base.py | 17 ++- synapse/storage/events.py | 28 ++--- tests/metrics/__init__.py | 0 tests/metrics/test_metric.py | 192 ------------------------------- 8 files changed, 68 insertions(+), 337 deletions(-) delete mode 100644 tests/metrics/__init__.py delete mode 100644 tests/metrics/test_metric.py (limited to 'synapse/http') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 8c850bf23f..34a730d5bc 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -16,86 +16,38 @@ import logging -import synapse.metrics +from prometheus_client.core import Counter, Histogram + from synapse.util.logcontext import LoggingContext logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for("synapse.http.server") # total number of responses served, split by method/servlet/tag -response_count = metrics.register_counter( - "response_count", - labels=["method", "servlet", "tag"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_requests", - "_response_time:count", - "_response_ru_utime:count", - "_response_ru_stime:count", - "_response_db_txn_count:count", - "_response_db_txn_duration:count", - ) - ) -) +response_count = Counter("synapse_http_server_response_count", "", ["method", "servlet", "tag"]) -requests_counter = metrics.register_counter( - "requests_received", - labels=["method", "servlet", ], -) +requests_counter = Counter("synapse_http_server_requests_received", "", ["method", "servlet"]) -outgoing_responses_counter = metrics.register_counter( - 
"responses", - labels=["method", "code"], -) +outgoing_responses_counter = Counter("synapse_http_server_responses", "", ["method", "code"]) -response_timer = metrics.register_counter( - "response_time_seconds", - labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_time:total", - ), -) +response_timer = Histogram("synapse_http_server_response_time_seconds", "", ["method", "servlet", "tag"]) -response_ru_utime = metrics.register_counter( - "response_ru_utime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_utime:total", - ), -) +response_ru_utime = Counter("synapse_http_server_response_ru_utime_seconds", "", ["method", "servlet", "tag"]) -response_ru_stime = metrics.register_counter( - "response_ru_stime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_stime:total", - ), -) +response_ru_stime = Counter("synapse_http_server_response_ru_stime_seconds", "", ["method", "servlet", "tag"]) -response_db_txn_count = metrics.register_counter( - "response_db_txn_count", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_count:total", - ), -) +response_db_txn_count = Counter("synapse_http_server_response_db_txn_count", "", ["method", "servlet", "tag"]) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request -response_db_txn_duration = metrics.register_counter( - "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_duration:total", - ), -) +response_db_txn_duration = Counter("synapse_http_server_response_db_txn_duration_seconds", "", ["method", "servlet", "tag"]) # seconds spent waiting for a db connection, when processing this request -response_db_sched_duration = metrics.register_counter( - "response_db_sched_duration_seconds", labels=["method", "servlet", "tag"] +response_db_sched_duration = Counter("synapse_http_request_response_db_sched_duration_seconds", "", ["method", "servlet", "tag"] ) # size in bytes of the response written -response_size = metrics.register_counter( - "response_size", labels=["method", "servlet", "tag"] +response_size = Counter("synapse_http_request_response_size", "", ["method", "servlet", "tag"] ) @@ -119,31 +71,19 @@ class RequestMetrics(object): ) return - outgoing_responses_counter.inc(request.method, str(request.code)) + outgoing_responses_counter.labels(request.method, str(request.code)).inc() - response_count.inc(request.method, self.name, tag) + response_count.labels(request.method, self.name, tag).inc() - response_timer.inc_by( - time_msec - self.start, request.method, - self.name, tag - ) + response_timer.labels(request.method, self.name, tag).observe(time_msec - self.start) ru_utime, ru_stime = context.get_resource_usage() - response_ru_utime.inc_by( - ru_utime, request.method, self.name, tag - ) - response_ru_stime.inc_by( - ru_stime, request.method, self.name, tag - ) - response_db_txn_count.inc_by( - context.db_txn_count, request.method, self.name, tag - ) - response_db_txn_duration.inc_by( - context.db_txn_duration_ms / 1000., request.method, self.name, tag - ) - response_db_sched_duration.inc_by( - context.db_sched_duration_ms / 1000., request.method, self.name, tag - ) - - response_size.inc_by(request.sentLength, request.method, self.name, tag) + response_ru_utime.labels(request.method, self.name, tag).inc(ru_utime) + 
response_ru_stime.labels(request.method, self.name, tag).inc(ru_stime) + response_db_txn_count.labels(request.method, self.name, tag).inc(context.db_txn_count) + response_db_txn_duration.labels(request.method, self.name, tag).inc(context.db_txn_duration_ms / 1000.) + response_db_sched_duration.labels(request.method, self.name, tag).inc( + context.db_sched_duration_ms / 1000.) + + response_size.labels(request.method, self.name, tag).inc(request.sentLength) diff --git a/synapse/http/server.py b/synapse/http/server.py index b6e2ae14a2..f72d986288 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -138,8 +138,8 @@ def wrap_request_handler_with_logging(h): # dispatching to the handler, so that the handler # can update the servlet name in the request # metrics - requests_counter.inc(request.method, - request.request_metrics.name) + requests_counter.labels(request.method, + request.request_metrics.name).inc() yield d return wrapped_request_handler diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 7c680659b6..6fcca5e260 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -22,35 +22,29 @@ from .push_rule_evaluator import PushRuleEvaluatorForEvent from synapse.event_auth import get_user_power_level from synapse.api.constants import EventTypes, Membership -from synapse.metrics import get_metrics_for -from synapse.util.caches import metrics as cache_metrics +from synapse.util.caches import register_cache from synapse.util.caches.descriptors import cached from synapse.util.async import Linearizer from synapse.state import POWER_KEY from collections import namedtuple - +from prometheus_client import Counter logger = logging.getLogger(__name__) rules_by_room = {} -push_metrics = get_metrics_for(__name__) -push_rules_invalidation_counter = push_metrics.register_counter( - "push_rules_invalidation_counter" -) -push_rules_state_size_counter = push_metrics.register_counter( - "push_rules_state_size_counter" -) +push_rules_invalidation_counter = Counter("synapse_push_bulk_push_role_evaluator_push_rules_invalidation_counter", "") +push_rules_state_size_counter = Counter("synapse_push_bulk_push_role_evaluator_push_rules_state_size_counter", "") # Measures whether we use the fast path of using state deltas, or if we have to # recalculate from scratch -push_rules_delta_state_cache_metric = cache_metrics.register_cache( +push_rules_delta_state_cache_metric = register_cache( "cache", - size_callback=lambda: 0, # Meaningless size, as this isn't a cache that stores values - cache_name="push_rules_delta_state_cache_metric", + "push_rules_delta_state_cache_metric", + cache=[], # Meaningless size, as this isn't a cache that stores values ) @@ -64,10 +58,10 @@ class BulkPushRuleEvaluator(object): self.store = hs.get_datastore() self.auth = hs.get_auth() - self.room_push_rule_cache_metrics = cache_metrics.register_cache( + self.room_push_rule_cache_metrics = register_cache( "cache", - size_callback=lambda: 0, # There's not good value for this - cache_name="room_push_rule_cache", + "room_push_rule_cache", + cache=[], # Meaningless size, as this isn't a cache that stores values ) @defer.inlineCallbacks @@ -309,7 +303,7 @@ class RulesForRoom(object): current_state_ids = context.current_state_ids push_rules_delta_state_cache_metric.inc_misses() - push_rules_state_size_counter.inc_by(len(current_state_ids)) + push_rules_state_size_counter.inc(len(current_state_ids)) logger.debug( "Looking for member changes 
in %r %r", state_group, current_state_ids diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index a41af4fd6c..0e6b1957c6 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -22,20 +22,19 @@ from .streams import STREAMS_MAP, FederationStream from .protocol import ServerReplicationStreamProtocol from synapse.util.metrics import Measure, measure_func +from synapse.metrics import LaterGauge import logging -import synapse.metrics +from prometheus_client import Counter -metrics = synapse.metrics.get_metrics_for(__name__) -stream_updates_counter = metrics.register_counter( - "stream_updates", labels=["stream_name"] +stream_updates_counter = Counter("synapse_replication_tcp_resource_stream_updates", "", ["stream_name"] ) -user_sync_counter = metrics.register_counter("user_sync") -federation_ack_counter = metrics.register_counter("federation_ack") -remove_pusher_counter = metrics.register_counter("remove_pusher") -invalidate_cache_counter = metrics.register_counter("invalidate_cache") -user_ip_cache_counter = metrics.register_counter("user_ip_cache") +user_sync_counter = Counter("synapse_replication_tcp_resource_user_sync", "") +federation_ack_counter = Counter("synapse_replication_tcp_resource_federation_ack", "") +remove_pusher_counter = Counter("synapse_replication_tcp_resource_remove_pusher", "") +invalidate_cache_counter = Counter("synapse_replication_tcp_resource_invalidate_cache", "") +user_ip_cache_counter = Counter("synapse_replication_tcp_resource_user_ip_cache", "") logger = logging.getLogger(__name__) @@ -73,7 +72,8 @@ class ReplicationStreamer(object): # Current connections. self.connections = [] - metrics.register_callback("total_connections", lambda: len(self.connections)) + l = LaterGauge("synapse_replication_tcp_resource_total_connections", "", [], lambda: len(self.connections)) + l.register() # List of streams that clients can subscribe to. # We only support federation stream if federation sending hase been @@ -85,17 +85,15 @@ class ReplicationStreamer(object): self.streams_by_name = {stream.NAME: stream for stream in self.streams} - metrics.register_callback( - "connections_per_stream", + LaterGauge( + "synapse_replication_tcp_resource_connections_per_stream", "", ["stream_name"], lambda: { (stream_name,): len([ conn for conn in self.connections if stream_name in conn.replication_streams ]) for stream_name in self.streams_by_name - }, - labels=["stream_name"], - ) + }).register() self.federation_sender = None if not hs.config.send_federation: @@ -175,7 +173,7 @@ class ReplicationStreamer(object): logger.info( "Streaming: %s -> %s", stream.NAME, updates[-1][0] ) - stream_updates_counter.inc_by(len(updates), stream.NAME) + stream_updates_counter.labels(stream.NAME).inc(len(updates)) # Some streams return multiple rows with the same stream IDs, # we need to make sure they get sent out in batches. 
We do diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 2262776ab2..d1b625dc30 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -18,8 +18,8 @@ from synapse.api.errors import StoreError from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.caches.descriptors import Cache from synapse.storage.engines import PostgresEngine -import synapse.metrics +from prometheus_client import Histogram from twisted.internet import defer @@ -34,13 +34,10 @@ sql_logger = logging.getLogger("synapse.storage.SQL") transaction_logger = logging.getLogger("synapse.storage.txn") perf_logger = logging.getLogger("synapse.storage.TIME") +sql_scheduling_timer = Histogram("synapse_storage_schedule_time", "") -metrics = synapse.metrics.get_metrics_for("synapse.storage") - -sql_scheduling_timer = metrics.register_distribution("schedule_time") - -sql_query_timer = metrics.register_distribution("query_time", labels=["verb"]) -sql_txn_timer = metrics.register_distribution("transaction_time", labels=["desc"]) +sql_query_timer = Histogram("synapse_storage_query_time", "", ["verb"]) +sql_txn_timer = Histogram("synapse_storage_transaction_time", "", ["desc"]) class LoggingTransaction(object): @@ -117,7 +114,7 @@ class LoggingTransaction(object): finally: msecs = (time.time() * 1000) - start sql_logger.debug("[SQL time] {%s} %f", self.name, msecs) - sql_query_timer.inc_by(msecs, sql.split()[0]) + sql_query_timer.labels(sql.split()[0]).observe(msecs) class PerformanceCounters(object): @@ -287,7 +284,7 @@ class SQLBaseStore(object): self._current_txn_total_time += duration self._txn_perf_counters.update(desc, start, end) - sql_txn_timer.inc_by(duration, desc) + sql_txn_timer.labels(desc).observe(duration) @defer.inlineCallbacks def runInteraction(self, desc, func, *args, **kwargs): @@ -349,7 +346,7 @@ class SQLBaseStore(object): def inner_func(conn, *args, **kwargs): with LoggingContext("runWithConnection") as context: sched_duration_ms = time.time() * 1000 - start_time - sql_scheduling_timer.inc_by(sched_duration_ms) + sql_scheduling_timer.observe(sched_duration_ms) current_context.add_database_scheduled(sched_duration_ms) if self.database_engine.is_connection_closed(conn): diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 05cde96afc..96b48cfdbb 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -40,30 +40,24 @@ import synapse.metrics from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 -logger = logging.getLogger(__name__) +from prometheus_client import Counter +logger = logging.getLogger(__name__) -metrics = synapse.metrics.get_metrics_for(__name__) -persist_event_counter = metrics.register_counter("persisted_events") -event_counter = metrics.register_counter( - "persisted_events_sep", labels=["type", "origin_type", "origin_entity"] -) +persist_event_counter = Counter("synapse_storage_events_persisted_events", "") +event_counter = Counter("synapse_storage_events_persisted_events_sep", "", ["type", "origin_type", "origin_entity"]) # The number of times we are recalculating the current state -state_delta_counter = metrics.register_counter( - "state_delta", -) +state_delta_counter = Counter("synapse_storage_events_state_delta", "") + # The number of times we are recalculating state when there is only a # single forward extremity -state_delta_single_event_counter = metrics.register_counter( - "state_delta_single_event", -) 
+state_delta_single_event_counter = Counter("synapse_storage_events_state_delta_single_event", "")
+
 # The number of times we are recalculating state when we could have reasonably
 # calculated the delta when we calculated the state for an event we were
 # persisting.
-state_delta_reuse_delta_counter = metrics.register_counter(
-    "state_delta_reuse_delta",
-)
+state_delta_reuse_delta_counter = Counter("synapse_storage_events_state_delta_reuse_delta", "")
 
 
 def encode_json(json_object):
@@ -445,7 +439,7 @@ class EventsStore(EventsWorkerStore):
                 state_delta_for_room=state_delta_for_room,
                 new_forward_extremeties=new_forward_extremeties,
             )
-            persist_event_counter.inc_by(len(chunk))
+            persist_event_counter.inc(len(chunk))
             synapse.metrics.event_persisted_position.set(
                 chunk[-1][0].internal_metadata.stream_ordering,
             )
@@ -460,7 +454,7 @@ class EventsStore(EventsWorkerStore):
                     origin_type = "remote"
                     origin_entity = get_domain_from_id(event.sender)
 
-                event_counter.inc(event.type, origin_type, origin_entity)
+                event_counter.labels(event.type, origin_type, origin_entity).inc()
 
             for room_id, new_state in current_state_for_room.iteritems():
                 self.get_current_state_ids.prefill(
diff --git a/tests/metrics/__init__.py b/tests/metrics/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/tests/metrics/test_metric.py b/tests/metrics/test_metric.py
deleted file mode 100644
index 069c0be762..0000000000
--- a/tests/metrics/test_metric.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. 
- -from tests import unittest - -from synapse.metrics.metric import ( - CounterMetric, CallbackMetric, DistributionMetric, CacheMetric, - _escape_label_value, -) - - -class CounterMetricTestCase(unittest.TestCase): - - def test_scalar(self): - counter = CounterMetric("scalar") - - self.assertEquals(counter.render(), [ - 'scalar 0', - ]) - - counter.inc() - - self.assertEquals(counter.render(), [ - 'scalar 1', - ]) - - counter.inc_by(2) - - self.assertEquals(counter.render(), [ - 'scalar 3' - ]) - - def test_vector(self): - counter = CounterMetric("vector", labels=["method"]) - - # Empty counter doesn't yet know what values it has - self.assertEquals(counter.render(), []) - - counter.inc("GET") - - self.assertEquals(counter.render(), [ - 'vector{method="GET"} 1', - ]) - - counter.inc("GET") - counter.inc("PUT") - - self.assertEquals(counter.render(), [ - 'vector{method="GET"} 2', - 'vector{method="PUT"} 1', - ]) - - -class CallbackMetricTestCase(unittest.TestCase): - - def test_scalar(self): - d = dict() - - metric = CallbackMetric("size", lambda: len(d)) - - self.assertEquals(metric.render(), [ - 'size 0', - ]) - - d["key"] = "value" - - self.assertEquals(metric.render(), [ - 'size 1', - ]) - - def test_vector(self): - vals = dict() - - metric = CallbackMetric("values", lambda: vals, labels=["type"]) - - self.assertEquals(metric.render(), []) - - # Keys have to be tuples, even if they're 1-element - vals[("foo",)] = 1 - vals[("bar",)] = 2 - - self.assertEquals(metric.render(), [ - 'values{type="bar"} 2', - 'values{type="foo"} 1', - ]) - - -class DistributionMetricTestCase(unittest.TestCase): - - def test_scalar(self): - metric = DistributionMetric("thing") - - self.assertEquals(metric.render(), [ - 'thing:count 0', - 'thing:total 0', - ]) - - metric.inc_by(500) - - self.assertEquals(metric.render(), [ - 'thing:count 1', - 'thing:total 500', - ]) - - def test_vector(self): - metric = DistributionMetric("queries", labels=["verb"]) - - self.assertEquals(metric.render(), []) - - metric.inc_by(300, "SELECT") - metric.inc_by(200, "SELECT") - metric.inc_by(800, "INSERT") - - self.assertEquals(metric.render(), [ - 'queries:count{verb="INSERT"} 1', - 'queries:count{verb="SELECT"} 2', - 'queries:total{verb="INSERT"} 800', - 'queries:total{verb="SELECT"} 500', - ]) - - -class CacheMetricTestCase(unittest.TestCase): - - def test_cache(self): - d = dict() - - metric = CacheMetric("cache", lambda: len(d), "cache_name") - - self.assertEquals(metric.render(), [ - 'cache:hits{name="cache_name"} 0', - 'cache:total{name="cache_name"} 0', - 'cache:size{name="cache_name"} 0', - 'cache:evicted_size{name="cache_name"} 0', - ]) - - metric.inc_misses() - d["key"] = "value" - - self.assertEquals(metric.render(), [ - 'cache:hits{name="cache_name"} 0', - 'cache:total{name="cache_name"} 1', - 'cache:size{name="cache_name"} 1', - 'cache:evicted_size{name="cache_name"} 0', - ]) - - metric.inc_hits() - - self.assertEquals(metric.render(), [ - 'cache:hits{name="cache_name"} 1', - 'cache:total{name="cache_name"} 2', - 'cache:size{name="cache_name"} 1', - 'cache:evicted_size{name="cache_name"} 0', - ]) - - metric.inc_evictions(2) - - self.assertEquals(metric.render(), [ - 'cache:hits{name="cache_name"} 1', - 'cache:total{name="cache_name"} 2', - 'cache:size{name="cache_name"} 1', - 'cache:evicted_size{name="cache_name"} 2', - ]) - - -class LabelValueEscapeTestCase(unittest.TestCase): - def test_simple(self): - string = "safjhsdlifhyskljfksdfh" - self.assertEqual(string, _escape_label_value(string)) - - def 
test_escape(self): - self.assertEqual( - "abc\\\"def\\nghi\\\\", - _escape_label_value("abc\"def\nghi\\"), - ) - - def test_sequence_of_escapes(self): - self.assertEqual( - "abc\\\"def\\nghi\\\\\\n", - _escape_label_value("abc\"def\nghi\\\n"), - ) -- cgit 1.5.1 From fb2806b1868baaa24d0d6e21d10f2c1f6177717e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 22 May 2018 09:31:53 +0100 Subject: Move in_flight_requests_count to be a callback metric --- synapse/http/request_metrics.py | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index f1e9b41fde..8cb23f4d64 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -124,12 +124,8 @@ in_flight_requests_db_sched_duration = metrics.register_counter( "in_flight_requests_db_sched_duration_seconds", labels=["method", "servlet"] ) -_in_flight_requests_count = metrics.register_gauge( - "in_flight_requests_count", labels=["method", "servlet"] -) - -# The set of all in flight requests. +# The set of all in flight requests, set[RequestMetrics] _in_flight_requests = set() @@ -138,20 +134,35 @@ def _collect_in_flight(): the in flight request metrics """ + for rm in _in_flight_requests: + rm.update_metrics() + + +metrics.register_collector(_collect_in_flight) + + +def _get_in_flight_counts(): + """Returns a count of all in flight requests by (method, server_name) + + Returns: + dict[tuple[str, str], int] + """ + # Map from (method, name) -> int, the number of in flight requests of that # type counts = {} - for rm in _in_flight_requests: - rm.update_metrics() key = (rm.method, rm.name,) counts[key] = counts.get(key, 0) + 1 - for (method, name), count in counts.iteritems(): - _in_flight_requests_count.set(count, method, name) + return counts -metrics.register_collector(_collect_in_flight) +metrics.register_callback( + "in_flight_requests_count", + _get_in_flight_counts, + labels=["method", "servlet"] +) class RequestMetrics(object): -- cgit 1.5.1 From c435b0b44172efe485ae1401deb8adca2e774d3c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 22 May 2018 09:34:26 +0100 Subject: Don't store context --- synapse/http/request_metrics.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 8cb23f4d64..5917c83958 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -230,7 +230,7 @@ class RequestMetrics(object): """Updates the in flight metrics with values from this request. 
""" - diff = self._request_stats.update() + diff = self._request_stats.update(self.start_context) in_flight_requests_ru_utime.inc_by( diff.ru_utime, self.method, self.name, @@ -258,13 +258,12 @@ class _RequestStats(object): """ __slots__ = [ - "context", "ru_utime", "ru_stime", + "ru_utime", "ru_stime", "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", ] - def __init__(self, context, ru_utime, ru_stime, db_txn_count, + def __init__(self, ru_utime, ru_stime, db_txn_count, db_txn_duration_ms, db_sched_duration_ms): - self.context = context self.ru_utime = ru_utime self.ru_stime = ru_stime self.db_txn_count = db_txn_count @@ -276,24 +275,22 @@ class _RequestStats(object): ru_utime, ru_stime = context.get_resource_usage() return _RequestStats( - context, ru_utime, ru_stime, context.db_txn_count, context.db_txn_duration_ms, context.db_sched_duration_ms, ) - def update(self): + def update(self, context): """Updates the current values and returns the difference between the old and new values. Returns: _RequestStats: The difference between the old and new values """ - new = _RequestStats.from_context(self.context) + new = _RequestStats.from_context(context) diff = _RequestStats( - self.context, new.ru_utime - self.ru_utime, new.ru_stime - self.ru_stime, new.db_txn_count - self.db_txn_count, -- cgit 1.5.1 From 8f5a688d420c8f6b51826c561da9094b76fbea1e Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 22 May 2018 10:56:03 -0500 Subject: cleanups, self-registration --- synapse/handlers/presence.py | 7 ++++--- synapse/http/request_metrics.py | 15 +++------------ synapse/replication/tcp/resource.py | 9 +++++---- 3 files changed, 12 insertions(+), 19 deletions(-) (limited to 'synapse/http') diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index aca822c46a..4ee87d5714 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -142,8 +142,9 @@ class PresenceHandler(object): } LaterGauge( - "user_to_current_state_size", "", [], lambda: len(self.user_to_current_state) - ).register() + "synapse_handlers_presence_user_to_current_state_size", "", [], + lambda: len(self.user_to_current_state) + ) now = self.clock.time_msec() for state in active_presence: @@ -212,7 +213,7 @@ class PresenceHandler(object): 60 * 1000, ) - LaterGauge("wheel_timer_size", "", [], lambda: len(self.wheel_timer)).register() + LaterGauge("synapse_handlers_presence_wheel_timer_size", "", [], lambda: len(self.wheel_timer)) @defer.inlineCallbacks def _on_shutdown(self): diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 0984870e7e..e7f1bfc4ae 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -71,24 +71,14 @@ in_flight_requests_db_sched_duration = Counter("synapse_http_request_in_flight_r _in_flight_requests = set() -def _collect_in_flight(): - """Called just before metrics are collected, so we use it to update all - the in flight request metrics - """ - - for rm in _in_flight_requests: - rm.update_metrics() - - -metrics.register_collector(_collect_in_flight) - - def _get_in_flight_counts(): """Returns a count of all in flight requests by (method, server_name) Returns: dict[tuple[str, str], int] """ + for rm in _in_flight_requests: + rm.update_metrics() # Map from (method, name) -> int, the number of in flight requests of that # type @@ -99,6 +89,7 @@ def _get_in_flight_counts(): return counts + LaterGauge( "synapse_http_request_metrics_in_flight_requests_count", "", ["method", "servlet"], diff --git 
a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index ed251c79ac..1ca30bc31a 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -73,8 +73,8 @@ class ReplicationStreamer(object): # Current connections. self.connections = [] - l = LaterGauge("synapse_replication_tcp_resource_total_connections", "", [], lambda: len(self.connections)) - l.register() + LaterGauge("synapse_replication_tcp_resource_total_connections", "", [], + lambda: len(self.connections)) # List of streams that clients can subscribe to. # We only support federation stream if federation sending hase been @@ -87,14 +87,15 @@ class ReplicationStreamer(object): self.streams_by_name = {stream.NAME: stream for stream in self.streams} LaterGauge( - "synapse_replication_tcp_resource_connections_per_stream", "", ["stream_name"], + "synapse_replication_tcp_resource_connections_per_stream", "", + ["stream_name"], lambda: { (stream_name,): len([ conn for conn in self.connections if stream_name in conn.replication_streams ]) for stream_name in self.streams_by_name - }).register() + }) self.federation_sender = None if not hs.config.send_federation: -- cgit 1.5.1 From 071206304d088aac8bd0e2fff600141dae1d09b6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 22 May 2018 16:54:22 -0500 Subject: cleanup pep8 errors --- synapse/federation/federation_server.py | 5 +- synapse/federation/transaction_queue.py | 22 ++++-- synapse/http/request_metrics.py | 114 ++++++++++++++++++++++++-------- synapse/notifier.py | 3 +- synapse/util/caches/__init__.py | 7 +- 5 files changed, 114 insertions(+), 37 deletions(-) (limited to 'synapse/http') diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8211273006..2d420a58a2 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -46,10 +46,13 @@ received_pdus_counter = Counter("synapse_federation_server_received_pdus", "") received_edus_counter = Counter("synapse_federation_server_received_edus", "") -received_queries_counter = Counter("synapse_federation_server_received_queries", "", ["type"]) +received_queries_counter = Counter( + "synapse_federation_server_received_queries", "", ["type"] +) class FederationServer(FederationBase): + def __init__(self, hs): super(FederationServer, self).__init__(hs) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 2049351fdd..53442688c8 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -28,7 +28,10 @@ from synapse.handlers.presence import format_user_presence_state, get_interested import synapse.metrics from synapse.metrics import LaterGauge from synapse.metrics import ( - sent_edus_counter, sent_transactions_counter, events_processed_counter) + sent_edus_counter, + sent_transactions_counter, + events_processed_counter, +) from prometheus_client import Counter @@ -37,7 +40,9 @@ import logging logger = logging.getLogger(__name__) -sent_pdus_destination_dist = Counter("synapse_federation_client_sent_pdu_destinations", "") +sent_pdus_destination_dist = Counter( + "synapse_federation_client_sent_pdu_destinations", "" +) class TransactionQueue(object): @@ -64,7 +69,10 @@ class TransactionQueue(object): # done self.pending_transactions = {} - LaterGauge("synapse_federation_client_pending_destinations", "", [], + LaterGauge( + "synapse_federation_client_pending_destinations", + "", + [], lambda: 
len(self.pending_transactions), ) @@ -89,11 +97,15 @@ class TransactionQueue(object): self.pending_edus_keyed_by_dest = edus_keyed = {} LaterGauge( - "synapse_federation_client_pending_pdus", "", [], + "synapse_federation_client_pending_pdus", + "", + [], lambda: sum(map(len, pdus.values())), ) LaterGauge( - "synapse_federation_client_pending_edus", "", [], + "synapse_federation_client_pending_edus", + "", + [], lambda: ( sum(map(len, edus.values())) + sum(map(len, presence.values())) diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index e7f1bfc4ae..7f11b5c5a4 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -25,47 +25,87 @@ logger = logging.getLogger(__name__) # total number of responses served, split by method/servlet/tag -response_count = Counter("synapse_http_server_response_count", "", ["method", "servlet", "tag"]) +response_count = Counter( + "synapse_http_server_response_count", "", ["method", "servlet", "tag"] +) -requests_counter = Counter("synapse_http_server_requests_received", "", ["method", "servlet"]) +requests_counter = Counter( + "synapse_http_server_requests_received", "", ["method", "servlet"] +) -outgoing_responses_counter = Counter("synapse_http_server_responses", "", ["method", "code"]) +outgoing_responses_counter = Counter( + "synapse_http_server_responses", "", ["method", "code"] +) -response_timer = Histogram("synapse_http_server_response_time_seconds", "", ["method", "servlet", "tag"]) +response_timer = Histogram( + "synapse_http_server_response_time_seconds", "", ["method", "servlet", "tag"] +) -response_ru_utime = Counter("synapse_http_server_response_ru_utime_seconds", "", ["method", "servlet", "tag"]) +response_ru_utime = Counter( + "synapse_http_server_response_ru_utime_seconds", "", ["method", "servlet", "tag"] +) -response_ru_stime = Counter("synapse_http_server_response_ru_stime_seconds", "", ["method", "servlet", "tag"]) +response_ru_stime = Counter( + "synapse_http_server_response_ru_stime_seconds", "", ["method", "servlet", "tag"] +) -response_db_txn_count = Counter("synapse_http_server_response_db_txn_count", "", ["method", "servlet", "tag"]) +response_db_txn_count = Counter( + "synapse_http_server_response_db_txn_count", "", ["method", "servlet", "tag"] +) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request -response_db_txn_duration = Counter("synapse_http_server_response_db_txn_duration_seconds", "", ["method", "servlet", "tag"]) +response_db_txn_duration = Counter( + "synapse_http_server_response_db_txn_duration_seconds", + "", + ["method", "servlet", "tag"], +) # seconds spent waiting for a db connection, when processing this request -response_db_sched_duration = Counter("synapse_http_request_response_db_sched_duration_seconds", "", ["method", "servlet", "tag"] +response_db_sched_duration = Counter( + "synapse_http_request_response_db_sched_duration_seconds", + "", + ["method", "servlet", "tag"], ) # size in bytes of the response written -response_size = Counter("synapse_http_request_response_size", "", ["method", "servlet", "tag"] +response_size = Counter( + "synapse_http_request_response_size", "", ["method", "servlet", "tag"] ) # In flight metrics are incremented while the requests are in flight, rather # than when the response was written. 
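
A sketch of the in-flight pattern that comment describes: live requests sit
in a set, and a callback derives a labelled count from it at scrape time (all
names below are invented for illustration):

    _in_flight = set()

    class DemoRequest(object):
        def __init__(self, method, servlet):
            self.method = method
            self.servlet = servlet

    def start(request):
        _in_flight.add(request)

    def finish(request):
        _in_flight.discard(request)

    def in_flight_counts():
        # the dict a LaterGauge-style callback would return at scrape time
        counts = {}
        for r in _in_flight:
            key = (r.method, r.servlet)
            counts[key] = counts.get(key, 0) + 1
        return counts

    start(DemoRequest("GET", "sync"))
    start(DemoRequest("GET", "sync"))
    print(in_flight_counts())  # {('GET', 'sync'): 2}
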
-in_flight_requests_ru_utime = Counter("synapse_http_request_in_flight_requests_ru_utime_seconds", "", ["method", "servlet"]) +in_flight_requests_ru_utime = Counter( + "synapse_http_request_in_flight_requests_ru_utime_seconds", + "", + ["method", "servlet"], +) -in_flight_requests_ru_stime = Counter("synapse_http_request_in_flight_requests_ru_stime_seconds", "", ["method", "servlet"]) +in_flight_requests_ru_stime = Counter( + "synapse_http_request_in_flight_requests_ru_stime_seconds", + "", + ["method", "servlet"], +) -in_flight_requests_db_txn_count = Counter("synapse_http_request_in_flight_requests_db_txn_count", "", ["method", "servlet"]) +in_flight_requests_db_txn_count = Counter( + "synapse_http_request_in_flight_requests_db_txn_count", "", ["method", "servlet"] +) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request -in_flight_requests_db_txn_duration = Counter("synapse_http_request_in_flight_requests_db_txn_duration_seconds", "", ["method", "servlet"]) +in_flight_requests_db_txn_duration = Counter( + "synapse_http_request_in_flight_requests_db_txn_duration_seconds", + "", + ["method", "servlet"], +) # seconds spent waiting for a db connection, when processing this request -in_flight_requests_db_sched_duration = Counter("synapse_http_request_in_flight_requests_db_sched_duration_seconds", "", ["method", "servlet"]) +in_flight_requests_db_sched_duration = Counter( + "synapse_http_request_in_flight_requests_db_sched_duration_seconds", + "", + ["method", "servlet"], +) # The set of all in flight requests, set[RequestMetrics] _in_flight_requests = set() @@ -91,9 +131,10 @@ def _get_in_flight_counts(): LaterGauge( - "synapse_http_request_metrics_in_flight_requests_count", "", + "synapse_http_request_metrics_in_flight_requests_count", + "", ["method", "servlet"], - _get_in_flight_counts + _get_in_flight_counts, ) @@ -128,16 +169,23 @@ class RequestMetrics(object): response_count.labels(request.method, self.name, tag).inc() - response_timer.labels(request.method, self.name, tag).observe(time_msec - self.start) + response_timer.labels(request.method, self.name, tag).observe( + time_msec - self.start + ) ru_utime, ru_stime = context.get_resource_usage() response_ru_utime.labels(request.method, self.name, tag).inc(ru_utime) response_ru_stime.labels(request.method, self.name, tag).inc(ru_stime) - response_db_txn_count.labels(request.method, self.name, tag).inc(context.db_txn_count) - response_db_txn_duration.labels(request.method, self.name, tag).inc(context.db_txn_duration_ms / 1000.) + response_db_txn_count.labels(request.method, self.name, tag).inc( + context.db_txn_count + ) + response_db_txn_duration.labels(request.method, self.name, tag).inc( + context.db_txn_duration_ms / 1000. + ) response_db_sched_duration.labels(request.method, self.name, tag).inc( - context.db_sched_duration_ms / 1000.) + context.db_sched_duration_ms / 1000. + ) response_size.labels(request.method, self.name, tag).inc(request.sentLength) @@ -154,11 +202,17 @@ class RequestMetrics(object): in_flight_requests_ru_utime.labels(self.method, self.name).inc(diff.ru_utime) in_flight_requests_ru_stime.labels(self.method, self.name).inc(diff.ru_stime) - in_flight_requests_db_txn_count.labels(self.method, self.name).inc(diff.db_txn_count) + in_flight_requests_db_txn_count.labels(self.method, self.name).inc( + diff.db_txn_count + ) - in_flight_requests_db_txn_duration.labels(self.method, self.name).inc(diff.db_txn_duration_ms / 1000.) 
+ in_flight_requests_db_txn_duration.labels(self.method, self.name).inc( + diff.db_txn_duration_ms / 1000. + ) - in_flight_requests_db_sched_duration.labels(self.method, self.name).inc(diff.db_sched_duration_ms / 1000.) + in_flight_requests_db_sched_duration.labels(self.method, self.name).inc( + diff.db_sched_duration_ms / 1000. + ) class _RequestStats(object): @@ -166,12 +220,16 @@ class _RequestStats(object): """ __slots__ = [ - "ru_utime", "ru_stime", - "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", + "ru_utime", + "ru_stime", + "db_txn_count", + "db_txn_duration_ms", + "db_sched_duration_ms", ] - def __init__(self, ru_utime, ru_stime, db_txn_count, - db_txn_duration_ms, db_sched_duration_ms): + def __init__( + self, ru_utime, ru_stime, db_txn_count, db_txn_duration_ms, db_sched_duration_ms + ): self.ru_utime = ru_utime self.ru_stime = ru_stime self.db_txn_count = db_txn_count diff --git a/synapse/notifier.py b/synapse/notifier.py index 40cc553918..6dce20a284 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -40,7 +40,8 @@ logger = logging.getLogger(__name__) notified_events_counter = Counter("synapse_notifier_notified_events", "") -users_woken_by_stream_counter = Counter("synapse_notifier_users_woken_by_stream", "", ["stream"]) +users_woken_by_stream_counter = Counter( + "synapse_notifier_users_woken_by_stream", "", ["stream"]) # TODO(paul): Should be shared somewhere diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 1c511a7072..e0c22df249 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -29,13 +29,16 @@ cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"]) response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"]) response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"]) -response_cache_evicted = Gauge("synapse_util_caches_response_cache:evicted_size", "", ["name"]) +response_cache_evicted = Gauge( + "synapse_util_caches_response_cache:evicted_size", "", ["name"] +) response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["name"]) + def register_cache(cache_type, cache_name, cache): # Check if the metric is already registered. Unregister it, if so. - metric_name = "cache_%s_%s" % (cache_type, cache_name,) + metric_name = "cache_%s_%s" % (cache_type, cache_name) if metric_name in collectors_by_name.keys(): REGISTRY.unregister(collectors_by_name[metric_name]) -- cgit 1.5.1 From 53cc2cde1f609ec34a4ce6a7c678302c65ddfe53 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 22 May 2018 17:32:57 -0500 Subject: cleanup --- synapse/federation/send_queue.py | 5 ++--- synapse/handlers/appservice.py | 9 ++++++--- synapse/handlers/presence.py | 12 ++++++++---- synapse/http/client.py | 3 ++- synapse/http/matrixfederationclient.py | 6 ++++-- synapse/metrics/__init__.py | 28 ++++++++++++++++++++++------ synapse/push/bulk_push_rule_evaluator.py | 6 ++++-- synapse/storage/events.py | 9 ++++++--- synapse/util/metrics.py | 15 ++++++++++----- 9 files changed, 64 insertions(+), 29 deletions(-) (limited to 'synapse/http') diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index e6e1888f3a..c7ed465617 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -74,9 +74,8 @@ class FederationRemoteSendQueue(object): # lambda binds to the queue rather than to the name of the queue which # changes. ARGH. 
def register(name, queue): - LaterGauge("synapse_federation_send_queue_%s_size" % (queue_name,), "", - lambda: len(queue), - ) + LaterGauge("synapse_federation_send_queue_%s_size" % (queue_name,), + "", lambda: len(queue)) for queue_name in [ "presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed", diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index a7345331af..d9f35a5dba 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -127,12 +127,15 @@ class ApplicationServicesHandler(object): now = self.clock.time_msec() ts = yield self.store.get_received_ts(events[-1].event_id) - synapse.metrics.event_processing_positions.labels("appservice_sender").set(upper_bound) + synapse.metrics.event_processing_positions.labels( + "appservice_sender").set(upper_bound) events_processed_counter.inc(len(events)) - synapse.metrics.event_processing_lag.labels("appservice_sender").set(now - ts) - synapse.metrics.event_processing_last_ts.labels("appservice_sender").set(ts) + synapse.metrics.event_processing_lag.labels( + "appservice_sender").set(now - ts) + synapse.metrics.event_processing_last_ts.labels( + "appservice_sender").set(ts) finally: self.is_processing = False diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 4ee87d5714..12939aa507 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -47,7 +47,8 @@ logger = logging.getLogger(__name__) notified_presence_counter = Counter("synapse_handler_presence_notified_presence", "") -federation_presence_out_counter = Counter("synapse_handler_presence_federation_presence_out", "") +federation_presence_out_counter = Counter( + "synapse_handler_presence_federation_presence_out", "") presence_updates_counter = Counter("synapse_handler_presence_presence_updates", "") timers_fired_counter = Counter("synapse_handler_presence_timers_fired", "") federation_presence_counter = Counter("synapse_handler_presence_federation_presence", "") @@ -55,8 +56,10 @@ bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time", get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"]) -notify_reason_counter = Counter("synapse_handler_presence_notify_reason", "", ["reason"]) -state_transition_counter = Counter("synapse_handler_presence_state_transition", "", ["from", "to"] +notify_reason_counter = Counter( + "synapse_handler_presence_notify_reason", "", ["reason"]) +state_transition_counter = Counter( + "synapse_handler_presence_state_transition", "", ["from", "to"] ) @@ -213,7 +216,8 @@ class PresenceHandler(object): 60 * 1000, ) - LaterGauge("synapse_handlers_presence_wheel_timer_size", "", [], lambda: len(self.wheel_timer)) + LaterGauge("synapse_handlers_presence_wheel_timer_size", "", [], + lambda: len(self.wheel_timer)) @defer.inlineCallbacks def _on_shutdown(self): diff --git a/synapse/http/client.py b/synapse/http/client.py index 61a1d2e2b3..4d4eee3d64 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -50,7 +50,8 @@ import urllib logger = logging.getLogger(__name__) outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"]) -incoming_responses_counter = Counter("synapse_http_client_responses", "", ["method", "code"]) +incoming_responses_counter = Counter("synapse_http_client_responses", "", + ["method", "code"]) class SimpleHttpClient(object): diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 259d3884e2..77eaa06a1a 100644 
--- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -48,8 +48,10 @@ from prometheus_client import Counter logger = logging.getLogger(__name__) outbound_logger = logging.getLogger("synapse.http.outbound") -outgoing_requests_counter = Counter("synapse_http_matrixfederationclient_requests", "", ["method"]) -incoming_responses_counter = Counter("synapse_http_matrixfederationclient_responses", "", ["method", "code"]) +outgoing_requests_counter = Counter("synapse_http_matrixfederationclient_requests", + "", ["method"]) +incoming_responses_counter = Counter("synapse_http_matrixfederationclient_responses", + "", ["method", "code"]) MAX_LONG_RETRIES = 10 diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 38408efb54..bed37b5f56 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -21,15 +21,14 @@ import platform import attr from prometheus_client import Gauge, Histogram, Counter -from prometheus_client.core import ( - GaugeMetricFamily, CounterMetricFamily, REGISTRY) +from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY from twisted.internet import reactor logger = logging.getLogger(__name__) -running_on_pypy = platform.python_implementation() == 'PyPy' +running_on_pypy = platform.python_implementation() == "PyPy" all_metrics = [] all_collectors = [] all_gauges = {} @@ -87,9 +86,16 @@ class LaterGauge(object): # gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"]) -gc_time = Histogram("python_gc_time", "Time taken to GC (ms)", ["gen"], buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000]) +gc_time = Histogram( + "python_gc_time", + "Time taken to GC (ms)", + ["gen"], + buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000], +) + class GCCounts(object): + def collect(self): gc_counts = gc.get_count() @@ -99,14 +105,23 @@ class GCCounts(object): yield cm + REGISTRY.register(GCCounts()) # # Twisted reactor metrics # -tick_time = Histogram("python_twisted_reactor_tick_time", "Tick time of the Twisted reactor (ms)", buckets=[1, 2, 5, 10, 50, 100, 250, 500, 1000, 2000]) -pending_calls_metric = Histogram("python_twisted_reactor_pending_calls", "Pending calls", buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000]) +tick_time = Histogram( + "python_twisted_reactor_tick_time", + "Tick time of the Twisted reactor (ms)", + buckets=[1, 2, 5, 10, 50, 100, 250, 500, 1000, 2000], +) +pending_calls_metric = Histogram( + "python_twisted_reactor_pending_calls", + "Pending calls", + buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000], +) # # Federation Metrics @@ -134,6 +149,7 @@ event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name" # finished being processed. 
event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])

+
 def runUntilCurrentTimer(func):

     @functools.wraps(func)

diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 6fcca5e260..b0053e7f3f 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -36,8 +36,10 @@ logger = logging.getLogger(__name__)

 rules_by_room = {}

-push_rules_invalidation_counter = Counter("synapse_push_bulk_push_role_evaluator_push_rules_invalidation_counter", "")
-push_rules_state_size_counter = Counter("synapse_push_bulk_push_role_evaluator_push_rules_state_size_counter", "")
+push_rules_invalidation_counter = Counter(
+    "synapse_push_bulk_push_role_evaluator_push_rules_invalidation_counter", "")
+push_rules_state_size_counter = Counter(
+    "synapse_push_bulk_push_role_evaluator_push_rules_state_size_counter", "")

 # Measures whether we use the fast path of using state deltas, or if we have to
 # recalculate from scratch

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 00d66886ad..b96104ccae 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -45,19 +45,22 @@ from prometheus_client import Counter

 logger = logging.getLogger(__name__)

 persist_event_counter = Counter("synapse_storage_events_persisted_events", "")
-event_counter = Counter("synapse_storage_events_persisted_events_sep", "", ["type", "origin_type", "origin_entity"])
+event_counter = Counter("synapse_storage_events_persisted_events_sep", "",
+                        ["type", "origin_type", "origin_entity"])

 # The number of times we are recalculating the current state
 state_delta_counter = Counter("synapse_storage_events_state_delta", "")

 # The number of times we are recalculating state when there is only a
 # single forward extremity
-state_delta_single_event_counter = Counter("synapse_storage_events_state_delta_single_event", "")
+state_delta_single_event_counter = Counter(
+    "synapse_storage_events_state_delta_single_event", "")

 # The number of times we are recalculating state when we could have reasonably
 # calculated the delta when we calculated the state for an event we were
 # persisting.
-state_delta_reuse_delta_counter = Counter("synapse_storage_events_state_delta_reuse_delta", "") +state_delta_reuse_delta_counter = Counter( + "synapse_storage_events_state_delta_reuse_delta", "") def encode_json(json_object): diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index a964286d85..424fdcb036 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -28,17 +28,22 @@ block_counter = Counter("synapse_util_metrics_block_count", "", ["block_name"]) block_timer = Counter("synapse_util_metrics_block_time_seconds", "", ["block_name"]) -block_ru_utime = Counter("synapse_util_metrics_block_ru_utime_seconds", "", ["block_name"]) +block_ru_utime = Counter( + "synapse_util_metrics_block_ru_utime_seconds", "", ["block_name"]) -block_ru_stime = Counter("synapse_util_metrics_block_ru_stime_seconds", "", ["block_name"]) +block_ru_stime = Counter( + "synapse_util_metrics_block_ru_stime_seconds", "", ["block_name"]) -block_db_txn_count = Counter("synapse_util_metrics_block_db_txn_count", "", ["block_name"]) +block_db_txn_count = Counter( + "synapse_util_metrics_block_db_txn_count", "", ["block_name"]) # seconds spent waiting for db txns, excluding scheduling time, in this block -block_db_txn_duration = Counter("synapse_util_metrics_block_db_txn_duration_seconds", "", ["block_name"]) +block_db_txn_duration = Counter( + "synapse_util_metrics_block_db_txn_duration_seconds", "", ["block_name"]) # seconds spent waiting for a db connection, in this block -block_db_sched_duration = Counter("synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"]) +block_db_sched_duration = Counter( + "synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"]) def measure_func(name): -- cgit 1.5.1 From e987079037eb89a3c5c04fe64d843a6f9e6c1718 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 23 May 2018 13:03:51 -0500 Subject: fixes --- synapse/federation/transaction_queue.py | 8 ++++---- synapse/http/request_metrics.py | 14 +++++++------- synapse/push/bulk_push_rule_evaluator.py | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'synapse/http') diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 53442688c8..69312ec233 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -41,7 +41,7 @@ import logging logger = logging.getLogger(__name__) sent_pdus_destination_dist = Counter( - "synapse_federation_client_sent_pdu_destinations", "" + "synapse_federation_transaction_queue_sent_pdu_destinations", "" ) @@ -70,7 +70,7 @@ class TransactionQueue(object): self.pending_transactions = {} LaterGauge( - "synapse_federation_client_pending_destinations", + "synapse_federation_transaction_queue_pending_destinations", "", [], lambda: len(self.pending_transactions), @@ -97,13 +97,13 @@ class TransactionQueue(object): self.pending_edus_keyed_by_dest = edus_keyed = {} LaterGauge( - "synapse_federation_client_pending_pdus", + "synapse_federation_transaction_queue_pending_pdus", "", [], lambda: sum(map(len, pdus.values())), ) LaterGauge( - "synapse_federation_client_pending_edus", + "synapse_federation_transaction_queue_pending_edus", "", [], lambda: ( diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 7f11b5c5a4..e7df494333 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -63,46 +63,46 @@ response_db_txn_duration = Counter( # seconds spent waiting for a db connection, when processing this 
request
 response_db_sched_duration = Counter(
-    "synapse_http_request_response_db_sched_duration_seconds",
+    "synapse_http_server_response_db_sched_duration_seconds",
     "",
     ["method", "servlet", "tag"],
 )

 # size in bytes of the response written
 response_size = Counter(
-    "synapse_http_request_response_size", "", ["method", "servlet", "tag"]
+    "synapse_http_server_response_size", "", ["method", "servlet", "tag"]
 )

 # In flight metrics are incremented while the requests are in flight, rather
 # than when the response was written.

 in_flight_requests_ru_utime = Counter(
-    "synapse_http_request_in_flight_requests_ru_utime_seconds",
+    "synapse_http_server_in_flight_requests_ru_utime_seconds",
     "",
     ["method", "servlet"],
 )

 in_flight_requests_ru_stime = Counter(
-    "synapse_http_request_in_flight_requests_ru_stime_seconds",
+    "synapse_http_server_in_flight_requests_ru_stime_seconds",
     "",
     ["method", "servlet"],
 )

 in_flight_requests_db_txn_count = Counter(
-    "synapse_http_request_in_flight_requests_db_txn_count", "", ["method", "servlet"]
+    "synapse_http_server_in_flight_requests_db_txn_count", "", ["method", "servlet"]
 )

 # seconds spent waiting for db txns, excluding scheduling time, when processing
 # this request
 in_flight_requests_db_txn_duration = Counter(
-    "synapse_http_request_in_flight_requests_db_txn_duration_seconds",
+    "synapse_http_server_in_flight_requests_db_txn_duration_seconds",
     "",
     ["method", "servlet"],
 )

 # seconds spent waiting for a db connection, when processing this request
 in_flight_requests_db_sched_duration = Counter(
-    "synapse_http_request_in_flight_requests_db_sched_duration_seconds",
+    "synapse_http_server_in_flight_requests_db_sched_duration_seconds",
     "",
     ["method", "servlet"],
 )

diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index b0053e7f3f..da1cde417f 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -37,9 +37,9 @@ rules_by_room = {}

 push_rules_invalidation_counter = Counter(
-    "synapse_push_bulk_push_role_evaluator_push_rules_invalidation_counter", "")
+    "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", "")
 push_rules_state_size_counter = Counter(
-    "synapse_push_bulk_push_role_evaluator_push_rules_state_size_counter", "")
+    "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter", "")

 # Measures whether we use the fast path of using state deltas, or if we have to
 # recalculate from scratch
-- cgit 1.5.1

From a2eb5db4a066148da5057b8ee844c7e4f5d888bc Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Mon, 28 May 2018 19:10:27 +1000
Subject: update metrics to be in seconds

---
 synapse/http/request_metrics.py | 6 +++---
 synapse/http/site.py | 20 ++++++++++----------
 synapse/metrics/__init__.py | 19 ++++++++++---------
 synapse/storage/_base.py | 30 +++++++++++++++---------------
 4 files changed, 38 insertions(+), 37 deletions(-)

(limited to 'synapse/http')

diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py
index e7df494333..af3067b4bb 100644
--- a/synapse/http/request_metrics.py
+++ b/synapse/http/request_metrics.py
@@ -38,15 +38,15 @@ outgoing_responses_counter = Counter(
 )

 response_timer = Histogram(
-    "synapse_http_server_response_time_seconds", "", ["method", "servlet", "tag"]
+    "synapse_http_server_response_time_seconds", "sec", ["method", "servlet", "tag"]
 )

 response_ru_utime = Counter(
-    "synapse_http_server_response_ru_utime_seconds", "", ["method", "servlet", "tag"]
+    "synapse_http_server_response_ru_utime_seconds", "sec", ["method", "servlet", "tag"]
 )

 response_ru_stime = Counter(
-    "synapse_http_server_response_ru_stime_seconds", "", ["method", "servlet", "tag"]
+    "synapse_http_server_response_ru_stime_seconds", "sec", ["method", "servlet", "tag"]
 )

 response_db_txn_count = Counter(
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 23c1b76922..60299657b9 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -83,7 +83,7 @@ class SynapseRequest(Request):
         return Request.render(self, resrc)

     def _started_processing(self, servlet_name):
-        self.start_time = int(time.time() * 1000)
+        self.start_time = time.time()
         self.request_metrics = RequestMetrics()
         self.request_metrics.start(
             self.start_time, name=servlet_name, method=self.method,
@@ -102,26 +102,26 @@ class SynapseRequest(Request):
             context = LoggingContext.current_context()
             ru_utime, ru_stime = context.get_resource_usage()
             db_txn_count = context.db_txn_count
-            db_txn_duration_ms = context.db_txn_duration_ms
-            db_sched_duration_ms = context.db_sched_duration_ms
+            db_txn_duration_sec = context.db_txn_duration_sec
+            db_sched_duration_sec = context.db_sched_duration_sec
         except Exception:
             ru_utime, ru_stime = (0, 0)
-            db_txn_count, db_txn_duration_ms = (0, 0)
+            db_txn_count, db_txn_duration_sec = (0, 0)

-        end_time = int(time.time() * 1000)
+        end_time = time.time()

         self.site.access_logger.info(
             "%s - %s - {%s}"
-            " Processed request: %dms (%dms, %dms) (%dms/%dms/%d)"
+            " Processed request: %.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
             " %sB %s \"%s %s %s\" \"%s\"",
             self.getClientIP(),
             self.site.site_tag,
             self.authenticated_entity,
             end_time - self.start_time,
             ru_utime,
             ru_stime,
             db_sched_duration_sec,
             db_txn_duration_sec,
             int(db_txn_count),
             self.sentLength,
             self.code,
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index e33ed6c9be..0c557a43f6 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -122,10 +122,10 @@ REGISTRY.register(CPUMetrics())

 gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"])
 gc_time = Histogram(
     "python_gc_time",
-    "Time taken to GC (ms)",
+    "Time taken to GC (sec)",
     ["gen"],
-    buckets=[2.5, 5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 7500, 15000,
-             30000, 45000, 60000],
+    buckets=[0.0025, 0.005, 0.01, 0.025, 0.05, 0.10, 0.25, 0.50, 1.00, 2.50,
+             5.00, 7.50, 15.00, 30.00, 45.00, 60.00],
 )

@@ -147,8 +147,9 @@ REGISTRY.register(GCCounts())

 tick_time = Histogram(
     "python_twisted_reactor_tick_time",
-    "Tick time of the Twisted reactor (ms)",
-    buckets=[1, 2, 5, 10, 50, 100, 250, 500, 1000, 2000],
+    "Tick time of the Twisted reactor (sec)",
+    buckets=[0.001, 0.002, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2,
+             0.5, 1, 2, 5],
 )
 pending_calls_metric = Histogram(
     "python_twisted_reactor_pending_calls",
@@ -202,9 +203,9 @@ def runUntilCurrentTimer(func):
                 num_pending += 1

             num_pending += len(reactor.threadCallQueue)
-            start = time.time() * 1000
+            start = time.time()
             ret = func(*args, **kwargs)
-            end = time.time() * 1000
+            end = time.time()

             # record the amount of wallclock time spent running pending calls.
# This is a proxy for the actual amount of time between reactor polls, @@ -225,9 +226,9 @@ def runUntilCurrentTimer(func): if threshold[i] < counts[i]: logger.info("Collecting gc %d", i) - start = time.time() * 1000 + start = time.time() unreachable = gc.collect(i) - end = time.time() * 1000 + end = time.time() gc_time.labels(i).observe(end - start) gc_unreachable.labels(i).set(unreachable) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index d963af5c89..22d6257a9f 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -42,10 +42,10 @@ sql_logger = logging.getLogger("synapse.storage.SQL") transaction_logger = logging.getLogger("synapse.storage.txn") perf_logger = logging.getLogger("synapse.storage.TIME") -sql_scheduling_timer = Histogram("synapse_storage_schedule_time", "") +sql_scheduling_timer = Histogram("synapse_storage_schedule_time", "sec") -sql_query_timer = Histogram("synapse_storage_query_time", "", ["verb"]) -sql_txn_timer = Histogram("synapse_storage_transaction_time", "", ["desc"]) +sql_query_timer = Histogram("synapse_storage_query_time", "sec", ["verb"]) +sql_txn_timer = Histogram("synapse_storage_transaction_time", "sec", ["desc"]) class LoggingTransaction(object): @@ -110,7 +110,7 @@ class LoggingTransaction(object): # Don't let logging failures stop SQL from working pass - start = time.time() * 1000 + start = time.time() try: return func( @@ -120,9 +120,9 @@ class LoggingTransaction(object): logger.debug("[SQL FAIL] {%s} %s", self.name, e) raise finally: - msecs = (time.time() * 1000) - start - sql_logger.debug("[SQL time] {%s} %f", self.name, msecs) - sql_query_timer.labels(sql.split()[0]).observe(msecs) + secs = time.time() - start + sql_logger.debug("[SQL time] {%s} %f sec", self.name, secs) + sql_query_timer.labels(sql.split()[0]).observe(secs) class PerformanceCounters(object): @@ -132,7 +132,7 @@ class PerformanceCounters(object): def update(self, key, start_time, end_time=None): if end_time is None: - end_time = time.time() * 1000 + end_time = time.time() duration = end_time - start_time count, cum_time = self.current_counters.get(key, (0, 0)) count += 1 @@ -222,7 +222,7 @@ class SQLBaseStore(object): def _new_transaction(self, conn, desc, after_callbacks, exception_callbacks, logging_context, func, *args, **kwargs): - start = time.time() * 1000 + start = time.time() txn_id = self._TXN_ID # We don't really need these to be unique, so lets stop it from @@ -282,13 +282,13 @@ class SQLBaseStore(object): logger.debug("[TXN FAIL] {%s} %s", name, e) raise finally: - end = time.time() * 1000 + end = time.time() duration = end - start if logging_context is not None: logging_context.add_database_transaction(duration) - transaction_logger.debug("[TXN END] {%s} %f", name, duration) + transaction_logger.debug("[TXN END] {%s} %f sec", name, duration) self._current_txn_total_time += duration self._txn_perf_counters.update(desc, start, end) @@ -349,13 +349,13 @@ class SQLBaseStore(object): """ current_context = LoggingContext.current_context() - start_time = time.time() * 1000 + start_time = time.time() def inner_func(conn, *args, **kwargs): with LoggingContext("runWithConnection") as context: - sched_duration_ms = time.time() * 1000 - start_time - sql_scheduling_timer.observe(sched_duration_ms) - current_context.add_database_scheduled(sched_duration_ms) + sched_duration_sec = time.time() - start_time + sql_scheduling_timer.observe(sched_duration_sec) + current_context.add_database_scheduled(sched_duration_sec) if 
self.database_engine.is_connection_closed(conn): logger.debug("Reconnecting closed database connection") -- cgit 1.5.1 From 3ef5cd74a6bff9b33144cb834782e0402e6eb152 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 28 May 2018 19:39:27 +1000 Subject: update to more consistently use seconds in any metrics or logging --- synapse/http/request_metrics.py | 38 +++++++++++++++++++------------------- synapse/util/logcontext.py | 24 ++++++++++++------------ synapse/util/logutils.py | 6 +++--- synapse/util/metrics.py | 8 ++++---- 4 files changed, 38 insertions(+), 38 deletions(-) (limited to 'synapse/http') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index af3067b4bb..dc06f6c443 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -139,8 +139,8 @@ LaterGauge( class RequestMetrics(object): - def start(self, time_msec, name, method): - self.start = time_msec + def start(self, time_sec, name, method): + self.start = time_sec self.start_context = LoggingContext.current_context() self.name = name self.method = method @@ -149,7 +149,7 @@ class RequestMetrics(object): _in_flight_requests.add(self) - def stop(self, time_msec, request): + def stop(self, time_sec, request): _in_flight_requests.discard(self) context = LoggingContext.current_context() @@ -170,7 +170,7 @@ class RequestMetrics(object): response_count.labels(request.method, self.name, tag).inc() response_timer.labels(request.method, self.name, tag).observe( - time_msec - self.start + time_sec - self.start ) ru_utime, ru_stime = context.get_resource_usage() @@ -181,10 +181,10 @@ class RequestMetrics(object): context.db_txn_count ) response_db_txn_duration.labels(request.method, self.name, tag).inc( - context.db_txn_duration_ms / 1000. + context.db_txn_duration_sec ) response_db_sched_duration.labels(request.method, self.name, tag).inc( - context.db_sched_duration_ms / 1000. + context.db_sched_duration_sec ) response_size.labels(request.method, self.name, tag).inc(request.sentLength) @@ -207,11 +207,11 @@ class RequestMetrics(object): ) in_flight_requests_db_txn_duration.labels(self.method, self.name).inc( - diff.db_txn_duration_ms / 1000. + diff.db_txn_duration_sec ) in_flight_requests_db_sched_duration.labels(self.method, self.name).inc( - diff.db_sched_duration_ms / 1000. 
+ diff.db_sched_duration_sec ) @@ -223,18 +223,18 @@ class _RequestStats(object): "ru_utime", "ru_stime", "db_txn_count", - "db_txn_duration_ms", - "db_sched_duration_ms", + "db_txn_duration_sec", + "db_sched_duration_sec", ] def __init__( - self, ru_utime, ru_stime, db_txn_count, db_txn_duration_ms, db_sched_duration_ms + self, ru_utime, ru_stime, db_txn_count, db_txn_duration_sec, db_sched_duration_sec ): self.ru_utime = ru_utime self.ru_stime = ru_stime self.db_txn_count = db_txn_count - self.db_txn_duration_ms = db_txn_duration_ms - self.db_sched_duration_ms = db_sched_duration_ms + self.db_txn_duration_sec = db_txn_duration_sec + self.db_sched_duration_sec = db_sched_duration_sec @staticmethod def from_context(context): @@ -243,8 +243,8 @@ class _RequestStats(object): return _RequestStats( ru_utime, ru_stime, context.db_txn_count, - context.db_txn_duration_ms, - context.db_sched_duration_ms, + context.db_txn_duration_sec, + context.db_sched_duration_sec, ) def update(self, context): @@ -260,14 +260,14 @@ class _RequestStats(object): new.ru_utime - self.ru_utime, new.ru_stime - self.ru_stime, new.db_txn_count - self.db_txn_count, - new.db_txn_duration_ms - self.db_txn_duration_ms, - new.db_sched_duration_ms - self.db_sched_duration_ms, + new.db_txn_duration_sec - self.db_txn_duration_sec, + new.db_sched_duration_sec - self.db_sched_duration_sec, ) self.ru_utime = new.ru_utime self.ru_stime = new.ru_stime self.db_txn_count = new.db_txn_count - self.db_txn_duration_ms = new.db_txn_duration_ms - self.db_sched_duration_ms = new.db_sched_duration_ms + self.db_txn_duration_sec = new.db_txn_duration_sec + self.db_sched_duration_sec = new.db_sched_duration_sec return diff diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 914f616312..a58c723403 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -59,7 +59,7 @@ class LoggingContext(object): __slots__ = [ "previous_context", "name", "ru_stime", "ru_utime", - "db_txn_count", "db_txn_duration_ms", "db_sched_duration_ms", + "db_txn_count", "db_txn_duration_sec", "db_sched_duration_sec", "usage_start", "main_thread", "alive", "request", "tag", @@ -84,10 +84,10 @@ class LoggingContext(object): def stop(self): pass - def add_database_transaction(self, duration_ms): + def add_database_transaction(self, duration_sec): pass - def add_database_scheduled(self, sched_ms): + def add_database_scheduled(self, sched_sec): pass def __nonzero__(self): @@ -103,11 +103,11 @@ class LoggingContext(object): self.ru_utime = 0. self.db_txn_count = 0 - # ms spent waiting for db txns, excluding scheduling time - self.db_txn_duration_ms = 0 + # sec spent waiting for db txns, excluding scheduling time + self.db_txn_duration_sec = 0 - # ms spent waiting for db txns to be scheduled - self.db_sched_duration_ms = 0 + # sec spent waiting for db txns to be scheduled + self.db_sched_duration_sec = 0 # If alive has the thread resource usage when the logcontext last # became active. 
@@ -230,18 +230,18 @@ class LoggingContext(object): return ru_utime, ru_stime - def add_database_transaction(self, duration_ms): + def add_database_transaction(self, duration_sec): self.db_txn_count += 1 - self.db_txn_duration_ms += duration_ms + self.db_txn_duration_sec += duration_sec - def add_database_scheduled(self, sched_ms): + def add_database_scheduled(self, sched_sec): """Record a use of the database pool Args: - sched_ms (int): number of milliseconds it took us to get a + sched_sec (float): number of seconds it took us to get a connection """ - self.db_sched_duration_ms += sched_ms + self.db_sched_duration_sec += sched_sec class LoggingContextFilter(logging.Filter): diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py index 3a83828d25..03249c5dc8 100644 --- a/synapse/util/logutils.py +++ b/synapse/util/logutils.py @@ -96,7 +96,7 @@ def time_function(f): id = _TIME_FUNC_ID _TIME_FUNC_ID += 1 - start = time.clock() * 1000 + start = time.clock() try: _log_debug_as_f( @@ -107,10 +107,10 @@ def time_function(f): r = f(*args, **kwargs) finally: - end = time.clock() * 1000 + end = time.clock() _log_debug_as_f( f, - "[FUNC END] {%s-%d} %f", + "[FUNC END] {%s-%d} %.3f sec", (func_name, id, end - start,), ) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 424fdcb036..23fc0ca20d 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -74,7 +74,7 @@ class Measure(object): self.created_context = False def __enter__(self): - self.start = self.clock.time_msec() + self.start = self.clock.time() self.start_context = LoggingContext.current_context() if not self.start_context: self.start_context = LoggingContext("Measure") @@ -90,7 +90,7 @@ class Measure(object): if isinstance(exc_type, Exception) or not self.start_context: return - duration = self.clock.time_msec() - self.start + duration = self.clock.time() - self.start block_counter.labels(self.name).inc() block_timer.labels(self.name).inc(duration) @@ -114,9 +114,9 @@ class Measure(object): block_ru_stime.labels(self.name).inc(ru_stime - self.ru_stime) block_db_txn_count.labels(self.name).inc(context.db_txn_count - self.db_txn_count) block_db_txn_duration.labels(self.name).inc( - (context.db_txn_duration_ms - self.db_txn_duration_ms) / 1000.) + context.db_txn_duration_sec - self.db_txn_duration_sec) block_db_sched_duration.labels(self.name).inc( - (context.db_sched_duration_ms - self.db_sched_duration_ms) / 1000.) + context.db_sched_duration_sec - self.db_sched_duration_sec) if self.created_context: self.start_context.__exit__(exc_type, exc_val, exc_tb) -- cgit 1.5.1
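
Taken together, these commits converge on one pattern: prometheus_client metrics are declared once at module import time, durations are recorded in plain seconds, and gauges over live server state are exported by a collector that reads that state lazily at scrape time (the idea behind Synapse's LaterGauge). The sketch below illustrates that pattern using only the public prometheus_client API; it is not Synapse code, and every name in it (example_*, PendingCollector, handle_request, the pending list) is hypothetical.

# Illustrative sketch only: hypothetical names, not Synapse code.
import time

from prometheus_client import Counter, Histogram
from prometheus_client.core import REGISTRY, GaugeMetricFamily

# Counters and histograms are declared once at module load, as above.
example_requests_counter = Counter("example_requests", "", ["method"])
example_response_timer = Histogram(
    "example_response_time_seconds",
    "sec",
    ["method"],
    # Buckets expressed in seconds, matching the ms -> sec migration.
    buckets=[0.001, 0.002, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 5],
)

pending = []  # live state to expose as a gauge


class PendingCollector(object):
    # Reads the state lazily at scrape time, the same idea as LaterGauge.
    def collect(self):
        g = GaugeMetricFamily("example_pending_items", "")
        g.add_metric([], len(pending))
        yield g


REGISTRY.register(PendingCollector())


def handle_request(method):
    example_requests_counter.labels(method).inc()
    start = time.time()  # wall-clock seconds, as in the patched code
    # ... real work would happen here ...
    example_response_timer.labels(method).observe(time.time() - start)

Computing the gauge value inside collect() means the live structure is only inspected when Prometheus actually scrapes, rather than having to update a Gauge on every mutation of the underlying state.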