diff --git a/docker/Dockerfile b/docker/Dockerfile
index 6107dced43..b58e518ec1 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -25,9 +25,9 @@ ARG PYTHON_VERSION=3.11
###
### Stage 0: generate requirements.txt
###
-# We hardcode the use of Debian bullseye here because this could change upstream
-# and other Dockerfiles used for testing are expecting bullseye.
-FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
+# We hardcode the use of Debian bookworm here because this could change upstream
+# and other Dockerfiles used for testing are expecting bookworm.
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
###
### Stage 1: builder
###
-FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder
# install the OS build deps
RUN \
@@ -158,7 +158,7 @@ RUN --mount=type=cache,target=/synapse/target,sharing=locked \
### Stage 2: runtime
###
-FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
@@ -173,10 +173,10 @@ RUN \
gosu \
libjpeg62-turbo \
libpq5 \
- libwebp6 \
+ libwebp7 \
xmlsec1 \
libjemalloc2 \
- libicu67 \
+ libicu72 \
libssl-dev \
openssl \
&& rm -rf /var/lib/apt/lists/*
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index 2013732422..b7679924c2 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -24,16 +24,16 @@ ARG distro=""
# https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
# it's not obviously easier to use that than to build our own.)
-FROM ${distro} as builder
+FROM docker.io/library/${distro} as builder
RUN apt-get update -qq -o Acquire::Languages=none
RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
- -yqq --no-install-recommends \
- build-essential \
- ca-certificates \
- devscripts \
- equivs \
- wget
+ -yqq --no-install-recommends \
+ build-essential \
+ ca-certificates \
+ devscripts \
+ equivs \
+ wget
# fetch and unpack the package
# We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with
@@ -55,40 +55,36 @@ RUN cd /dh-virtualenv && DEB_BUILD_OPTIONS=nodoc dpkg-buildpackage -us -uc -b
###
### Stage 1
###
-FROM ${distro}
+FROM docker.io/library/${distro}
# Get the distro we want to pull from as a dynamic build variable
# (We need to define it in each build stage)
ARG distro=""
ENV distro ${distro}
-# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
-# http://bugs.python.org/issue19846
-ENV LANG C.UTF-8
-
# Install the build dependencies
#
# NB: keep this list in sync with the list of build-deps in debian/control
# TODO: it would be nice to do that automatically.
RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
- -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
- build-essential \
- curl \
- debhelper \
- devscripts \
- libsystemd-dev \
- lsb-release \
- pkg-config \
- python3-dev \
- python3-pip \
- python3-setuptools \
- python3-venv \
- sqlite3 \
- libpq-dev \
- libicu-dev \
- pkg-config \
- xmlsec1
+ -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
+ build-essential \
+ curl \
+ debhelper \
+ devscripts \
+ libsystemd-dev \
+ lsb-release \
+ pkg-config \
+ python3-dev \
+ python3-pip \
+ python3-setuptools \
+ python3-venv \
+ sqlite3 \
+ libpq-dev \
+ libicu-dev \
+ pkg-config \
+ xmlsec1
# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index faf7f2cef8..2ceb6ab67c 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -7,7 +7,7 @@ ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
# target image. For repeated rebuilds, this is much faster than apt installing
# each time.
-FROM debian:bullseye-slim AS deps_base
+FROM docker.io/library/debian:bookworm-slim AS deps_base
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -21,7 +21,7 @@ FROM debian:bullseye-slim AS deps_base
# which makes it much easier to copy (but we need to make sure we use an image
# based on the same debian version as the synapse image, to make sure we get
# the expected version of libc.
-FROM redis:6-bullseye AS redis_base
+FROM docker.io/library/redis:7-bookworm AS redis_base
# now build the final image, based on the regular Synapse docker image
FROM $FROM
diff --git a/docker/README.md b/docker/README.md
index eda3221c23..08372e95c6 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -73,7 +73,8 @@ The following environment variables are supported in `generate` mode:
will log sensitive information such as access tokens.
This should not be needed unless you are a developer attempting to debug something
particularly tricky.
-
+* `SYNAPSE_LOG_TESTING`: if set, Synapse will log additional information useful
+  for testing.
+
## Postgres
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index be1aa1c55e..b511e2ab23 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -7,6 +7,7 @@
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
ARG SYNAPSE_VERSION=latest
+# This is an intermediate image, to be built locally (not pulled from a registry).
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
FROM $FROM
@@ -19,8 +20,8 @@ FROM $FROM
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
- COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
- COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
+ COPY --from=docker.io/library/postgres:13-bookworm /usr/lib/postgresql /usr/lib/postgresql
+ COPY --from=docker.io/library/postgres:13-bookworm /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index 63acf86a46..2b11b487f6 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -92,8 +92,6 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##
experimental_features:
- # Enable history backfilling support
- msc2716_enabled: true
# client-side support for partial state in /send_join responses
faster_joins: true
# Enable support for polls
diff --git a/docker/conf-workers/nginx.conf.j2 b/docker/conf-workers/nginx.conf.j2
index 967fc65e79..d1e02af723 100644
--- a/docker/conf-workers/nginx.conf.j2
+++ b/docker/conf-workers/nginx.conf.j2
@@ -35,7 +35,11 @@ server {
# Send all other traffic to the main process
location ~* ^(\\/_matrix|\\/_synapse) {
+{% if using_unix_sockets %}
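+        # The "http://unix:<path>" form tells nginx to proxy to a Unix domain socket rather than a TCP port.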
+ proxy_pass http://unix:/run/main_public.sock;
+{% else %}
proxy_pass http://localhost:8080;
+{% endif %}
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2
index 92d25386dc..1dfc60ad11 100644
--- a/docker/conf-workers/shared.yaml.j2
+++ b/docker/conf-workers/shared.yaml.j2
@@ -6,6 +6,9 @@
{% if enable_redis %}
redis:
enabled: true
+ {% if using_unix_sockets %}
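+    {# This path must match the "--unixsocket" argument passed to redis-server in supervisord.conf.j2 #}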
+ path: /tmp/redis.sock
+ {% endif %}
{% endif %}
{% if appservice_registrations is not none %}
diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2
index 9f1e03cfc0..da93358051 100644
--- a/docker/conf-workers/supervisord.conf.j2
+++ b/docker/conf-workers/supervisord.conf.j2
@@ -19,7 +19,11 @@ username=www-data
autorestart=true
[program:redis]
+{% if using_unix_sockets %}
+command=/usr/local/bin/prefix-log /usr/local/bin/redis-server --unixsocket /tmp/redis.sock
+{% else %}
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
+{% endif %}
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
diff --git a/docker/conf-workers/worker.yaml.j2 b/docker/conf-workers/worker.yaml.j2
index 44c6e413cf..29ec74b4ea 100644
--- a/docker/conf-workers/worker.yaml.j2
+++ b/docker/conf-workers/worker.yaml.j2
@@ -8,7 +8,11 @@ worker_name: "{{ name }}"
worker_listeners:
- type: http
+{% if using_unix_sockets %}
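+{# With Unix sockets the worker's allocated port number is not bound; it is only reused here to make the socket path unique. #}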
+ path: "/run/worker.{{ port }}"
+{% else %}
port: {{ port }}
+{% endif %}
{% if listener_resources %}
resources:
- names:
diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml
index f10f78a48c..c46b955d63 100644
--- a/docker/conf/homeserver.yaml
+++ b/docker/conf/homeserver.yaml
@@ -36,12 +36,17 @@ listeners:
# Allow configuring in case we want to reverse proxy 8008
# using another process in the same container
+{% if SYNAPSE_USE_UNIX_SOCKET %}
+ # Unix sockets don't care about TLS or IP addresses or ports
+ - path: '/run/main_public.sock'
+ type: http
+{% else %}
- port: {{ SYNAPSE_HTTP_PORT or 8008 }}
tls: false
bind_addresses: ['::']
type: http
x_forwarded: false
-
+{% endif %}
resources:
- names: [client]
compress: true
@@ -57,8 +62,11 @@ database:
user: "{{ POSTGRES_USER or "synapse" }}"
password: "{{ POSTGRES_PASSWORD }}"
database: "{{ POSTGRES_DB or "synapse" }}"
+{% if not SYNAPSE_USE_UNIX_SOCKET %}
+{# Synapse will use a default unix socket for Postgres when host/port is not specified (behavior from `psycopg2`). #}
host: "{{ POSTGRES_HOST or "db" }}"
port: "{{ POSTGRES_PORT or "5432" }}"
+{% endif %}
cp_min: 5
cp_max: 10
{% else %}
diff --git a/docker/conf/log.config b/docker/conf/log.config
index 90b5179838..5772321202 100644
--- a/docker/conf/log.config
+++ b/docker/conf/log.config
@@ -49,17 +49,35 @@ handlers:
class: logging.StreamHandler
formatter: precise
-{% if not SYNAPSE_LOG_SENSITIVE %}
-{#
- If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
- so that DEBUG entries (containing sensitive information) are not emitted.
-#}
loggers:
+ # This is just here so we can leave `loggers` in the config regardless of whether
+ # we configure other loggers below (avoid empty yaml dict error).
+ _placeholder:
+ level: "INFO"
+
+ {% if not SYNAPSE_LOG_SENSITIVE %}
+ {#
+ If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
+ so that DEBUG entries (containing sensitive information) are not emitted.
+ #}
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: INFO
-{% endif %}
+ {% endif %}
+
+ {% if SYNAPSE_LOG_TESTING %}
+ {#
+ If Synapse is under test, log a few more useful things for a developer
+ attempting to debug something particularly tricky.
+
+ With `synapse.visibility.filtered_event_debug`, it logs when events are (maybe
+ unexpectedly) filtered out of responses in tests. It's just nice to be able to
+ look at the CI log and figure out why an event isn't being returned.
+ #}
+ synapse.visibility.filtered_event_debug:
+ level: DEBUG
+ {% endif %}
root:
level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 79b5b87397..62952e6b26 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -40,6 +40,8 @@
# log level. INFO is the default.
# * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
# regardless of the SYNAPSE_LOG_LEVEL setting.
+# * SYNAPSE_LOG_TESTING: if set, Synapse will log additional information useful
+# for testing.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
@@ -72,6 +74,9 @@ MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
MAIN_PROCESS_INSTANCE_NAME = "main"
MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
MAIN_PROCESS_REPLICATION_PORT = 9093
+# These are only used with the Unix socket option.
+MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock"
+MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock"
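+# The "public" socket is what nginx and the healthchecks talk to; the "private"
+# socket carries the replication listener traffic between processes.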
# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
# during processing with the name of the worker.
@@ -178,6 +183,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(r0|v3|unstable)/password_policy$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$",
"^/_matrix/client/(r0|v3|unstable)/capabilities$",
+ "^/_matrix/client/(r0|v3|unstable)/notifications$",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -242,7 +248,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/knock/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
- "^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -406,11 +411,15 @@ def add_worker_roles_to_shared_config(
)
# Map of stream writer instance names to host/ports combos
- instance_map[worker_name] = {
- "host": "localhost",
- "port": worker_port,
- }
-
+ if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
+ instance_map[worker_name] = {
+ "path": f"/run/worker.{worker_port}",
+ }
+ else:
+ instance_map[worker_name] = {
+ "host": "localhost",
+ "port": worker_port,
+ }
# Update the list of stream writers. It's convenient that the name of the worker
# type is the same as the stream to write. Iterate over the whole list in case there
# is more than one.
@@ -422,10 +431,15 @@ def add_worker_roles_to_shared_config(
# Map of stream writer instance names to host/ports combos
# For now, all stream writers need http replication ports
- instance_map[worker_name] = {
- "host": "localhost",
- "port": worker_port,
- }
+ if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
+ instance_map[worker_name] = {
+ "path": f"/run/worker.{worker_port}",
+ }
+ else:
+ instance_map[worker_name] = {
+ "host": "localhost",
+ "port": worker_port,
+ }
def merge_worker_template_configs(
@@ -717,17 +731,29 @@ def generate_worker_files(
# Note that yaml cares about indentation, so care should be taken to insert lines
# into files at the correct indentation below.
+    # Convenience flag for whether we're using Unix sockets instead of host:port.
+ using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)
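+    # (environ.get returns the raw string here, so any non-empty value enables this.)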
# First read the original config file and extract the listeners block. Then we'll
# add another listener for replication. Later we'll write out the result to the
# shared config file.
- listeners = [
- {
- "port": MAIN_PROCESS_REPLICATION_PORT,
- "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
- "type": "http",
- "resources": [{"names": ["replication"]}],
- }
- ]
+ listeners: List[Any]
+ if using_unix_sockets:
+ listeners = [
+ {
+ "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
+ "type": "http",
+ "resources": [{"names": ["replication"]}],
+ }
+ ]
+ else:
+ listeners = [
+ {
+ "port": MAIN_PROCESS_REPLICATION_PORT,
+ "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
+ "type": "http",
+ "resources": [{"names": ["replication"]}],
+ }
+ ]
with open(config_path) as file_stream:
original_config = yaml.safe_load(file_stream)
original_listeners = original_config.get("listeners")
@@ -768,7 +794,17 @@ def generate_worker_files(
# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
- healthcheck_urls = ["http://localhost:8080/health"]
+    # This list ends up being part of the command line to curl (curl added support
+    # for Unix sockets in version 7.40).
+ if using_unix_sockets:
+ healthcheck_urls = [
+ f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} "
+ # The scheme and hostname from the following URL are ignored.
+ # The only thing that matters is the path `/health`
+ "http://localhost/health"
+ ]
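+        # So curl ends up being invoked with something along the lines of:
+        #   curl ... --unix-socket /run/main_public.sock http://localhost/health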
+ else:
+ healthcheck_urls = ["http://localhost:8080/health"]
# Get the set of all worker types that we have configured
all_worker_types_in_use = set(chain(*requested_worker_types.values()))
@@ -805,8 +841,12 @@ def generate_worker_files(
# given worker_type needs to stay assigned and not be replaced.
worker_config["shared_extra_conf"].update(shared_config)
shared_config = worker_config["shared_extra_conf"]
-
- healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
+ if using_unix_sockets:
+ healthcheck_urls.append(
+ f"--unix-socket /run/worker.{worker_port} http://localhost/health"
+ )
+ else:
+ healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
# Update the shared config with sharding-related options if necessary
add_worker_roles_to_shared_config(
@@ -822,9 +862,10 @@ def generate_worker_files(
# Then a worker config file
convert(
"/conf/worker.yaml.j2",
- "/conf/workers/{name}.yaml".format(name=worker_name),
+ f"/conf/workers/{worker_name}.yaml",
**worker_config,
worker_log_config_filepath=log_config_filepath,
+ using_unix_sockets=using_unix_sockets,
)
# Save this worker's port number to the correct nginx upstreams
@@ -845,8 +886,13 @@ def generate_worker_files(
nginx_upstream_config = ""
for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
body = ""
- for port in upstream_worker_ports:
- body += f" server localhost:{port};\n"
+ if using_unix_sockets:
+ for port in upstream_worker_ports:
+ body += f" server unix:/run/worker.{port};\n"
+
+ else:
+ for port in upstream_worker_ports:
+ body += f" server localhost:{port};\n"
# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
@@ -876,10 +922,15 @@ def generate_worker_files(
# If there are workers, add the main process to the instance_map too.
if workers_in_use:
instance_map = shared_config.setdefault("instance_map", {})
- instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
- "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
- "port": MAIN_PROCESS_REPLICATION_PORT,
- }
+ if using_unix_sockets:
+ instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
+ "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
+ }
+ else:
+ instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
+ "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
+ "port": MAIN_PROCESS_REPLICATION_PORT,
+ }
# Shared homeserver config
convert(
@@ -889,6 +940,7 @@ def generate_worker_files(
appservice_registrations=appservice_registrations,
enable_redis=workers_in_use,
workers_in_use=workers_in_use,
+ using_unix_sockets=using_unix_sockets,
)
# Nginx config
@@ -899,6 +951,7 @@ def generate_worker_files(
upstream_directives=nginx_upstream_config,
tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
+ using_unix_sockets=using_unix_sockets,
)
# Supervisord config
@@ -908,6 +961,7 @@ def generate_worker_files(
"/etc/supervisor/supervisord.conf",
main_config_path=config_path,
enable_redis=workers_in_use,
+ using_unix_sockets=using_unix_sockets,
)
convert(
@@ -947,6 +1001,7 @@ def generate_worker_log_config(
extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get(
"SYNAPSE_LOG_SENSITIVE"
)
+ extra_log_template_args["SYNAPSE_LOG_TESTING"] = environ.get("SYNAPSE_LOG_TESTING")
# Render and write the file
log_config_filepath = f"/conf/workers/{worker_name}.log.config"
diff --git a/docker/editable.Dockerfile b/docker/editable.Dockerfile
index 0e8cf2e712..f18cf6a5d9 100644
--- a/docker/editable.Dockerfile
+++ b/docker/editable.Dockerfile
@@ -8,9 +8,9 @@ ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
-# We hardcode the use of Debian bullseye here because this could change upstream
-# and other Dockerfiles used for testing are expecting bullseye.
-FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
+# We hardcode the use of Debian bookworm here because this could change upstream
+# and other Dockerfiles used for testing are expecting bookworm.
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm
# Install Rust and other dependencies (stolen from normal Dockerfile)
# install the OS build deps
@@ -33,7 +33,7 @@ RUN \
gosu \
libjpeg62-turbo \
libpq5 \
- libwebp6 \
+ libwebp7 \
xmlsec1 \
libjemalloc2 \
&& rm -rf /var/lib/apt/lists/*
diff --git a/docker/start.py b/docker/start.py
index ebcc599f04..12c444da9a 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -82,7 +82,7 @@ def generate_config_from_template(
with open(filename) as handle:
value = handle.read()
else:
- log("Generating a random secret for {}".format(secret))
+ log(f"Generating a random secret for {secret}")
value = codecs.encode(os.urandom(32), "hex").decode()
with open(filename, "w") as handle:
handle.write(value)
@@ -239,7 +239,7 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
log("Could not find %s, will not use" % (jemallocpath,))
# if there are no config files passed to synapse, try adding the default file
- if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
+ if not any(p.startswith(("--config-path", "-c")) for p in args):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
|