diff --git a/synapse/http/client.py b/synapse/http/client.py
index 3797545824..3cef747a4d 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -49,7 +49,6 @@ from synapse.http.proxyagent import ProxyAgent
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.util.async_helpers import timeout_deferred
-from synapse.util.caches import CACHE_SIZE_FACTOR
logger = logging.getLogger(__name__)
@@ -241,7 +240,10 @@ class SimpleHttpClient(object):
# tends to do so in batches, so we need to allow the pool to keep
# lots of idle connections around.
pool = HTTPConnectionPool(self.reactor)
- pool.maxPersistentPerHost = max((100 * CACHE_SIZE_FACTOR, 5))
+ # XXX: The justification for using the cache factor here is that larger instances
+ # will need both more cache and more connections.
+ # Still, this should probably be a separate dial.
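+ # With the default global_factor of 0.5 this allows up to 50 idle
+ # connections per host (and never fewer than 5).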
+ pool.maxPersistentPerHost = max((100 * hs.config.caches.global_factor, 5))
pool.cachedConnectionTimeout = 2 * 60
self.agent = ProxyAgent(
@@ -359,6 +361,7 @@ class SimpleHttpClient(object):
actual_headers = {
b"Content-Type": [b"application/x-www-form-urlencoded"],
b"User-Agent": [self.user_agent],
+ b"Accept": [b"application/json"],
}
if headers:
actual_headers.update(headers)
@@ -399,6 +402,7 @@ class SimpleHttpClient(object):
actual_headers = {
b"Content-Type": [b"application/json"],
b"User-Agent": [self.user_agent],
+ b"Accept": [b"application/json"],
}
if headers:
actual_headers.update(headers)
@@ -434,6 +438,10 @@ class SimpleHttpClient(object):
ValueError: if the response was not JSON
"""
+ actual_headers = {b"Accept": [b"application/json"]}
+ if headers:
+ actual_headers.update(headers)
+
- body = yield self.get_raw(uri, args, headers=headers)
+ body = yield self.get_raw(uri, args, headers=actual_headers)
return json.loads(body)
@@ -467,6 +475,7 @@ class SimpleHttpClient(object):
actual_headers = {
b"Content-Type": [b"application/json"],
b"User-Agent": [self.user_agent],
+ b"Accept": [b"application/json"],
}
if headers:
actual_headers.update(headers)
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 6f1bb04d8b..225a47e3c3 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -408,7 +408,7 @@ class MatrixFederationHttpClient(object):
_sec_timeout,
)
- outgoing_requests_counter.labels(method_bytes).inc()
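+ # Label the metric with the native-string method rather than its
+ # ascii-encoded form.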
+ outgoing_requests_counter.labels(request.method).inc()
try:
with Measure(self.clock, "outbound_request"):
@@ -434,21 +434,29 @@ class MatrixFederationHttpClient(object):
logger.info("Failed to send request: %s", e)
raise_from(RequestSendFailed(e, can_retry=True), e)
- logger.info(
- "{%s} [%s] Got response headers: %d %s",
- request.txn_id,
- request.destination,
- response.code,
- response.phrase.decode("ascii", errors="replace"),
- )
-
- incoming_responses_counter.labels(method_bytes, response.code).inc()
+ incoming_responses_counter.labels(
+ request.method, response.code
+ ).inc()
set_tag(tags.HTTP_STATUS_CODE, response.code)
if 200 <= response.code < 300:
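+ # A successful response only needs to be logged at DEBUG; non-2xx
+ # responses are logged at INFO in the branch below.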
+ logger.debug(
+ "{%s} [%s] Got response headers: %d %s",
+ request.txn_id,
+ request.destination,
+ response.code,
+ response.phrase.decode("ascii", errors="replace"),
+ )
pass
else:
+ logger.info(
+ "{%s} [%s] Got response headers: %d %s",
+ request.txn_id,
+ request.destination,
+ response.code,
+ response.phrase.decode("ascii", errors="replace"),
+ )
# :'(
# Update transactions table?
d = treq.content(response)
diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py
index 58f9cc61c8..b58ae3d9db 100644
--- a/synapse/http/request_metrics.py
+++ b/synapse/http/request_metrics.py
@@ -19,7 +19,7 @@ import threading
from prometheus_client.core import Counter, Histogram
-from synapse.logging.context import LoggingContext
+from synapse.logging.context import current_context
from synapse.metrics import LaterGauge
logger = logging.getLogger(__name__)
@@ -148,7 +148,7 @@ LaterGauge(
class RequestMetrics(object):
def start(self, time_sec, name, method):
self.start = time_sec
- self.start_context = LoggingContext.current_context()
+ self.start_context = current_context()
self.name = name
self.method = method
@@ -163,7 +163,7 @@ class RequestMetrics(object):
with _in_flight_requests_lock:
_in_flight_requests.discard(self)
- context = LoggingContext.current_context()
+ context = current_context()
tag = ""
if context:
diff --git a/synapse/http/site.py b/synapse/http/site.py
index e092193c9c..514f2f1402 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -15,6 +15,7 @@ import contextlib
import logging
import time
+from twisted.python.failure import Failure
from twisted.web.server import Request, Site
from synapse.http import redact_uri
@@ -190,9 +191,21 @@ class SynapseRequest(Request):
Overrides twisted.web.server.Request.connectionLost to record the finish time and
do logging.
"""
+ # There is a bug in Twisted where reason is not wrapped in a Failure object.
+ # Detect this and wrap it manually as a workaround.
+ # More information: https://github.com/matrix-org/synapse/issues/7441
+ if not isinstance(reason, Failure):
+ reason = Failure(reason)
+
self.finish_time = time.time()
Request.connectionLost(self, reason)
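+ # If the connection dropped before we read the request headers then render()
+ # was never called, so there is no logcontext for the usual request logging;
+ # just note the dropped connection and bail out.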
+ if self.logcontext is None:
+ logger.info(
+ "Connection from %s lost before request headers were read", self.client
+ )
+ return
+
# we only get here if the connection to the client drops before we send
# the response.
#
@@ -236,13 +249,6 @@ class SynapseRequest(Request):
def _finished_processing(self):
"""Log the completion of this request and update the metrics
"""
-
- if self.logcontext is None:
- # this can happen if the connection closed before we read the
- # headers (so render was never called). In that case we'll already
- # have logged a warning, so just bail out.
- return
-
usage = self.logcontext.get_resource_usage()
if self._processing_finished_time is None: