author    Erik Johnston <erik@matrix.org>  2021-07-29 11:08:49 +0100
committer Erik Johnston <erik@matrix.org>  2021-07-29 11:08:49 +0100
commit    c36c2777900284cf94e93e60e34c3b856bb31551 (patch)
tree      5079c397821dab6f70dd0200a4c435c1b1d91db7 /synapse/logging
parent    Merge tag 'v1.38.1' (diff)
parent    Fixup changelog (diff)
download  synapse-c36c2777900284cf94e93e60e34c3b856bb31551.tar.xz
Merge tag 'v1.39.0rc3'
Synapse 1.39.0rc3 (2021-07-28)
==============================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.38 which caused an exception at startup when SAML authentication was enabled. ([\#10477](https://github.com/matrix-org/synapse/issues/10477))
- Fix a long-standing bug where Synapse would not inform clients that a device had exhausted its one-time-key pool, potentially causing problems decrypting events. ([\#10485](https://github.com/matrix-org/synapse/issues/10485))
- Fix a bug introduced in v1.39.0rc1 which caused old R30 stats to be reported as R30v2 stats. ([\#10486](https://github.com/matrix-org/synapse/issues/10486))

Internal Changes
----------------

- Fix an error which prevented the GitHub Actions workflow that builds the Docker images from running. ([\#10461](https://github.com/matrix-org/synapse/issues/10461))
- Fix the release script to correctly version the Debian changelog when doing release candidates. ([\#10465](https://github.com/matrix-org/synapse/issues/10465))
Diffstat (limited to 'synapse/logging')
-rw-r--r--  synapse/logging/_remote.py      14
-rw-r--r--  synapse/logging/_structured.py   2
-rw-r--r--  synapse/logging/context.py      16
-rw-r--r--  synapse/logging/opentracing.py  12
4 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py
index c515690b38..8202d0494d 100644
--- a/synapse/logging/_remote.py
+++ b/synapse/logging/_remote.py
@@ -110,9 +110,9 @@ class RemoteHandler(logging.Handler):
         self.port = port
         self.maximum_buffer = maximum_buffer
 
-        self._buffer = deque()  # type: Deque[logging.LogRecord]
-        self._connection_waiter = None  # type: Optional[Deferred]
-        self._producer = None  # type: Optional[LogProducer]
+        self._buffer: Deque[logging.LogRecord] = deque()
+        self._connection_waiter: Optional[Deferred] = None
+        self._producer: Optional[LogProducer] = None
 
         # Connect without DNS lookups if it's a direct IP.
         if _reactor is None:
@@ -123,9 +123,9 @@ class RemoteHandler(logging.Handler):
         try:
             ip = ip_address(self.host)
             if isinstance(ip, IPv4Address):
-                endpoint = TCP4ClientEndpoint(
+                endpoint: IStreamClientEndpoint = TCP4ClientEndpoint(
                     _reactor, self.host, self.port
-                )  # type: IStreamClientEndpoint
+                )
             elif isinstance(ip, IPv6Address):
                 endpoint = TCP6ClientEndpoint(_reactor, self.host, self.port)
             else:
@@ -165,7 +165,7 @@ class RemoteHandler(logging.Handler):
         def writer(result: Protocol) -> None:
             # Force recognising transport as a Connection and not the more
             # generic ITransport.
-            transport = result.transport  # type: Connection  # type: ignore
+            transport: Connection = result.transport  # type: ignore
 
             # We have a connection. If we already have a producer, and its
             # transport is the same, just trigger a resumeProducing.
@@ -188,7 +188,7 @@ class RemoteHandler(logging.Handler):
                 self._producer.resumeProducing()
                 self._connection_waiter = None
 
-        deferred = self._service.whenConnected(failAfterFailures=1)  # type: Deferred
+        deferred: Deferred = self._service.whenConnected(failAfterFailures=1)
         deferred.addCallbacks(writer, fail)
         self._connection_waiter = deferred
 
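The change above is a mechanical migration from comment-style type hints to PEP 526 variable annotations, possible once Python 2 / pre-3.6 support is dropped. A minimal sketch of the pattern, using hypothetical names rather than Synapse code:

    from collections import deque
    from typing import Deque, Optional

    # Before: comment-style hints, required on Python < 3.6
    #   buffer = deque()  # type: Deque[str]
    #   waiter = None     # type: Optional[str]

    # After: inline annotations
    buffer: Deque[str] = deque()
    waiter: Optional[str] = None

    # When a name is bound in several branches (like `endpoint` above),
    # annotating the first assignment is enough; later branches just assign.
    def pick_endpoint(use_ipv4: bool) -> str:
        if use_ipv4:
            endpoint: str = "tcp4"
        else:
            endpoint = "tcp6"
        return endpoint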
diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py
index c7a971a9d6..b9933a1528 100644
--- a/synapse/logging/_structured.py
+++ b/synapse/logging/_structured.py
@@ -63,7 +63,7 @@ def parse_drain_configs(
         DrainType.CONSOLE_JSON,
         DrainType.FILE_JSON,
     ):
-        formatter = "json"  # type: Optional[str]
+        formatter: Optional[str] = "json"
     elif logging_type in (
         DrainType.CONSOLE_JSON_TERSE,
         DrainType.NETWORK_JSON_TERSE,
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 7fc11a9ac2..18ac507802 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -113,13 +113,13 @@ class ContextResourceUsage:
             self.reset()
         else:
             # FIXME: mypy can't infer the types set via reset() above, so specify explicitly for now
-            self.ru_utime = copy_from.ru_utime  # type: float
-            self.ru_stime = copy_from.ru_stime  # type: float
-            self.db_txn_count = copy_from.db_txn_count  # type: int
+            self.ru_utime: float = copy_from.ru_utime
+            self.ru_stime: float = copy_from.ru_stime
+            self.db_txn_count: int = copy_from.db_txn_count
 
-            self.db_txn_duration_sec = copy_from.db_txn_duration_sec  # type: float
-            self.db_sched_duration_sec = copy_from.db_sched_duration_sec  # type: float
-            self.evt_db_fetch_count = copy_from.evt_db_fetch_count  # type: int
+            self.db_txn_duration_sec: float = copy_from.db_txn_duration_sec
+            self.db_sched_duration_sec: float = copy_from.db_sched_duration_sec
+            self.evt_db_fetch_count: int = copy_from.evt_db_fetch_count
 
     def copy(self) -> "ContextResourceUsage":
         return ContextResourceUsage(copy_from=self)
@@ -289,12 +289,12 @@ class LoggingContext:
 
         # The thread resource usage when the logcontext became active. None
        # if the context is not currently active.
-        self.usage_start = None  # type: Optional[resource._RUsage]
+        self.usage_start: Optional[resource._RUsage] = None
 
         self.main_thread = get_thread_id()
         self.request = None
         self.tag = ""
-        self.scope = None  # type: Optional[_LogContextScope]
+        self.scope: Optional["_LogContextScope"] = None
 
         # keep track of whether we have hit the __exit__ block for this context
         # (suggesting that the the thing that created the context thinks it should
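The FIXME in this hunk is worth a note: when attributes receive their initial values inside a helper such as reset(), mypy does not reliably infer their types at the conditional assignments in __init__, hence the explicit annotations. An illustrative sketch with hypothetical names, not the real class:

    from typing import Optional

    class Usage:
        def __init__(self, copy_from: Optional["Usage"] = None) -> None:
            if copy_from is None:
                self.reset()
            else:
                # The explicit ": float" mirrors the fix above: it tells mypy
                # the attribute's type here, rather than relying on it being
                # inferred from the assignments inside reset().
                self.ru_utime: float = copy_from.ru_utime

        def reset(self) -> None:
            self.ru_utime = 0.0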
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 140ed711e3..ecd51f1b4a 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -251,7 +251,7 @@ try:
             except Exception:
                 logger.exception("Failed to report span")
 
-    RustReporter = _WrappedRustReporter  # type: Optional[Type[_WrappedRustReporter]]
+    RustReporter: Optional[Type[_WrappedRustReporter]] = _WrappedRustReporter
 except ImportError:
     RustReporter = None
 
@@ -286,7 +286,7 @@ class SynapseBaggage:
 
 # Block everything by default
 # A regex which matches the server_names to expose traces for.
 # None means 'block everything'.
-_homeserver_whitelist = None  # type: Optional[Pattern[str]]
+_homeserver_whitelist: Optional[Pattern[str]] = None
 
 # Util methods
@@ -374,7 +374,7 @@ def init_tracer(hs: "HomeServer"):
 
     config = JaegerConfig(
         config=hs.config.jaeger_config,
-        service_name="{} {}".format(hs.config.server_name, hs.get_instance_name()),
+        service_name=f"{hs.config.server_name} {hs.get_instance_name()}",
         scope_manager=LogContextScopeManager(hs.config),
         metrics_factory=PrometheusMetricsFactory(),
     )
@@ -662,7 +662,7 @@ def inject_header_dict(
     span = opentracing.tracer.active_span
 
-    carrier = {}  # type: Dict[str, str]
+    carrier: Dict[str, str] = {}
 
     opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier)
 
     for key, value in carrier.items():
@@ -704,7 +704,7 @@ def get_active_span_text_map(destination=None):
     if destination and not whitelisted_homeserver(destination):
         return {}
 
-    carrier = {}  # type: Dict[str, str]
+    carrier: Dict[str, str] = {}
     opentracing.tracer.inject(
         opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier
     )
@@ -718,7 +718,7 @@ def active_span_context_as_string():
     Returns:
         The active span context encoded as a string.
     """
-    carrier = {}  # type: Dict[str, str]
+    carrier: Dict[str, str] = {}
     if opentracing:
         opentracing.tracer.inject(
             opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier
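The recurring `carrier: Dict[str, str] = {}` pattern in these hunks is OpenTracing's propagation idiom: an empty, explicitly typed dict is handed to inject(), which fills it with the headers needed to continue the trace on another host. A minimal sketch using the opentracing package's default no-op tracer (a configured Jaeger tracer would actually populate the dict):

    from typing import Dict

    import opentracing

    with opentracing.tracer.start_span("example") as span:
        carrier: Dict[str, str] = {}
        opentracing.tracer.inject(
            span.context, opentracing.Format.HTTP_HEADERS, carrier
        )
        # With the no-op tracer the carrier stays empty; with a real tracer
        # it now holds header key/value pairs to send over the wire.
        print(carrier)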