author    Patrick Cloke <clokep@users.noreply.github.com>  2020-10-29 07:27:37 -0400
committer GitHub <noreply@github.com>  2020-10-29 07:27:37 -0400
commit    00b24aa545091395f9a92d531836f6bf7b4460e0
tree      10d7333f2d1d9aaa0a6888c9ce3afb7d6feebf58 /synapse/logging/_terse_json.py
parent    Don't require hiredis to run unit tests (#8680)
Support generating structured logs in addition to standard logs. (#8607)
This modifies the configuration of structured logging to be usable from
the standard Python logging configuration.

This also separates the formatting of logs from the transport, allowing
JSON logs to be written to files or standard logs to be sent to sockets.
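
For illustration only (not part of the commit): with this change, a standard Python
dictConfig can point a handler at the JSON formatters defined below. The import path,
handler name, and log file path in this sketch are assumptions, not taken from the commit.

    import logging.config

    # Minimal sketch, assuming the formatter added in this commit is importable
    # as synapse.logging._terse_json.TerseJsonFormatter; the handler name and
    # file path are hypothetical.
    logging.config.dictConfig({
        "version": 1,
        "formatters": {
            "json": {"class": "synapse.logging._terse_json.TerseJsonFormatter"},
        },
        "handlers": {
            "json_file": {
                "class": "logging.FileHandler",
                "filename": "homeserver.json.log",  # hypothetical path
                "formatter": "json",
            },
        },
        "root": {"level": "INFO", "handlers": ["json_file"]},
    })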
Diffstat (limited to 'synapse/logging/_terse_json.py')
-rw-r--r--  synapse/logging/_terse_json.py  |  192
1 file changed, 58 insertions, 134 deletions
diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py
index 9b46956ca9..2fbf5549a1 100644
--- a/synapse/logging/_terse_json.py
+++ b/synapse/logging/_terse_json.py
@@ -16,141 +16,65 @@
 """
 Log formatters that output terse JSON.
 """
-
 import json
-from typing import IO
-
-from twisted.logger import FileLogObserver
-
-from synapse.logging._remote import TCPLogObserver
+import logging
 
 _encoder = json.JSONEncoder(ensure_ascii=False, separators=(",", ":"))
 
-
-def flatten_event(event: dict, metadata: dict, include_time: bool = False):
-    """
-    Flatten a Twisted logging event to a dictionary capable of being sent
-    as a log event to a logging aggregation system.
-
-    The format is vastly simplified and is not designed to be a "human readable
-    string" in the sense that traditional logs are. Instead, the structure is
-    optimised for searchability and filtering, with human-understandable log
-    keys.
-
-    Args:
-        event (dict): The Twisted logging event we are flattening.
-        metadata (dict): Additional data to include with each log message. This
-            can be information like the server name. Since the target log
-            consumer does not know who we are other than by host IP, this
-            allows us to forward through static information.
-        include_time (bool): Should we include the `time` key? If False, the
-            event time is stripped from the event.
-    """
-    new_event = {}
-
-    # If it's a failure, make the new event's log_failure be the traceback text.
-    if "log_failure" in event:
-        new_event["log_failure"] = event["log_failure"].getTraceback()
-
-    # If it's a warning, copy over a string representation of the warning.
-    if "warning" in event:
-        new_event["warning"] = str(event["warning"])
-
-    # Stdlib logging events have "log_text" as their human-readable portion,
-    # Twisted ones have "log_format". For now, include the log_format, so that
-    # context only given in the log format (e.g. what is being logged) is
-    # available.
-    if "log_text" in event:
-        new_event["log"] = event["log_text"]
-    else:
-        new_event["log"] = event["log_format"]
-
-    # We want to include the timestamp when forwarding over the network, but
-    # exclude it when we are writing to stdout. This is because the log ingester
-    # (e.g. logstash, fluentd) can add its own timestamp.
-    if include_time:
-        new_event["time"] = round(event["log_time"], 2)
-
-    # Convert the log level to a textual representation.
-    new_event["level"] = event["log_level"].name.upper()
-
-    # Ignore these keys, and do not transfer them over to the new log object.
-    # They are either useless (isError), transferred manually above (log_time,
-    # log_level, etc), or contain Python objects which are not useful for output
-    # (log_logger, log_source).
-    keys_to_delete = [
-        "isError",
-        "log_failure",
-        "log_format",
-        "log_level",
-        "log_logger",
-        "log_source",
-        "log_system",
-        "log_time",
-        "log_text",
-        "observer",
-        "warning",
-    ]
-
-    # If it's from the Twisted legacy logger (twisted.python.log), it adds some
-    # more keys we want to purge.
-    if event.get("log_namespace") == "log_legacy":
-        keys_to_delete.extend(["message", "system", "time"])
-
-    # Rather than modify the dictionary in place, construct a new one with only
-    # the content we want. The original event should be considered 'frozen'.
-    for key in event.keys():
-
-        if key in keys_to_delete:
-            continue
-
-        if isinstance(event[key], (str, int, bool, float)) or event[key] is None:
-            # If it's a plain type, include it as is.
-            new_event[key] = event[key]
-        else:
-            # If it's not one of those basic types, write out a string
-            # representation. This should probably be a warning in development,
-            # so that we are sure we are only outputting useful data.
-            new_event[key] = str(event[key])
-
-    # Add the metadata information to the event (e.g. the server_name).
-    new_event.update(metadata)
-
-    return new_event
-
-
-def TerseJSONToConsoleLogObserver(outFile: IO[str], metadata: dict) -> FileLogObserver:
-    """
-    A log observer that formats events to a flattened JSON representation.
-
-    Args:
-        outFile: The file object to write to.
-        metadata: Metadata to be added to each log object.
-    """
-
-    def formatEvent(_event: dict) -> str:
-        flattened = flatten_event(_event, metadata)
-        return _encoder.encode(flattened) + "\n"
-
-    return FileLogObserver(outFile, formatEvent)
-
-
-def TerseJSONToTCPLogObserver(
-    hs, host: str, port: int, metadata: dict, maximum_buffer: int
-) -> FileLogObserver:
-    """
-    A log observer that formats events to a flattened JSON representation.
-
-    Args:
-        hs (HomeServer): The homeserver that is being logged for.
-        host: The host of the logging target.
-        port: The logging target's port.
-        metadata: Metadata to be added to each log object.
-        maximum_buffer: The maximum buffer size.
-    """
-
-    def formatEvent(_event: dict) -> str:
-        flattened = flatten_event(_event, metadata, include_time=True)
-        return _encoder.encode(flattened) + "\n"
-
-    return TCPLogObserver(hs, host, port, formatEvent, maximum_buffer)
+# The properties of a standard LogRecord.
+_LOG_RECORD_ATTRIBUTES = {
+    "args",
+    "asctime",
+    "created",
+    "exc_info",
+    # exc_text isn't a public attribute, but is used to cache the result of formatException.
+    "exc_text",
+    "filename",
+    "funcName",
+    "levelname",
+    "levelno",
+    "lineno",
+    "message",
+    "module",
+    "msecs",
+    "msg",
+    "name",
+    "pathname",
+    "process",
+    "processName",
+    "relativeCreated",
+    "stack_info",
+    "thread",
+    "threadName",
+}
+
+
+class JsonFormatter(logging.Formatter):
+    def format(self, record: logging.LogRecord) -> str:
+        event = {
+            "log": record.getMessage(),
+            "namespace": record.name,
+            "level": record.levelname,
+        }
+
+        return self._format(record, event)
+
+    def _format(self, record: logging.LogRecord, event: dict) -> str:
+        # Add any extra attributes to the event.
+        for key, value in record.__dict__.items():
+            if key not in _LOG_RECORD_ATTRIBUTES:
+                event[key] = value
+
+        return _encoder.encode(event)
+
+
+class TerseJsonFormatter(JsonFormatter):
+    def format(self, record: logging.LogRecord) -> str:
+        event = {
+            "log": record.getMessage(),
+            "namespace": record.name,
+            "level": record.levelname,
+            "time": round(record.created, 2),
+        }
+
+        return self._format(record, event)
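
As a quick usage sketch (not part of the diff): attaching TerseJsonFormatter to a stdlib
handler and logging with an `extra` field produces one JSON object per line. The logger
name and the server_name value below are made up for illustration.

    import logging
    import sys

    # Usage sketch; assumes the formatter is importable at this module path.
    from synapse.logging._terse_json import TerseJsonFormatter

    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(TerseJsonFormatter())

    logger = logging.getLogger("synapse.demo")  # hypothetical logger name
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    # Keys passed via `extra` are not in _LOG_RECORD_ATTRIBUTES, so the
    # formatter copies them into the JSON event alongside log/namespace/level.
    logger.info("Hello", extra={"server_name": "example.com"})
    # Roughly:
    # {"log":"Hello","namespace":"synapse.demo","level":"INFO","time":1603970857.27,"server_name":"example.com"}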