diff --git a/docker/Dockerfile b/docker/Dockerfile
index 1da196b12e..15c458fa28 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -20,45 +20,16 @@
# `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in
# in `poetry export` in the past.
-ARG PYTHON_VERSION=3.11
+ARG DEBIAN_VERSION=bookworm
+ARG PYTHON_VERSION=3.12
+ARG POETRY_VERSION=2.1.1
###
### Stage 0: generate requirements.txt
###
-# We hardcode the use of Debian bookworm here because this could change upstream
-# and other Dockerfiles used for testing are expecting bookworm.
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS requirements
-
-# RUN --mount is specific to buildkit and is documented at
-# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
-# Here we use it to set up a cache for apt (and below for pip), to improve
-# rebuild speeds on slow connections.
-RUN \
- --mount=type=cache,target=/var/cache/apt,sharing=locked \
- --mount=type=cache,target=/var/lib/apt,sharing=locked \
- apt-get update -qq && apt-get install -yqq \
- build-essential curl git libffi-dev libssl-dev pkg-config \
- && rm -rf /var/lib/apt/lists/*
-
-# Install rust and ensure its in the PATH.
-# (Rust may be needed to compile `cryptography`---which is one of poetry's
-# dependencies---on platforms that don't have a `cryptography` wheel.
-ENV RUSTUP_HOME=/rust
-ENV CARGO_HOME=/cargo
-ENV PATH=/cargo/bin:/rust/bin:$PATH
-RUN mkdir /rust /cargo
-
-RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
-
-# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
-# set to true, so we expose it as a build-arg.
-ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
-ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
-
-# We install poetry in its own build stage to avoid its dependencies conflicting with
-# synapse's dependencies.
-RUN --mount=type=cache,target=/root/.cache/pip \
- pip install --user "poetry==1.3.2"
+### This stage is platform-agnostic, so we can use the build platform in case of cross-compilation.
+###
+FROM --platform=$BUILDPLATFORM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS requirements
WORKDIR /synapse
@@ -75,41 +46,30 @@ ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION
# Instead, we'll just install what a regular `pip install` would from PyPI.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
+# This silences a warning as uv isn't able to do hardlinks between its cache
+# (mounted as --mount=type=cache) and the target directory.
+ENV UV_LINK_MODE=copy
+
# Export the dependencies, but only if we're actually going to use the Poetry lockfile.
# Otherwise, just create an empty requirements file so that the Dockerfile can
# proceed.
-RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
- /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
+ARG POETRY_VERSION
+RUN --mount=type=cache,target=/root/.cache/uv \
+ if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
+ uvx --with poetry-plugin-export==1.9.0 \
+ poetry@${POETRY_VERSION} export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
else \
- touch /synapse/requirements.txt; \
+ touch /synapse/requirements.txt; \
fi
###
### Stage 1: builder
###
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS builder
-
-# install the OS build deps
-RUN \
- --mount=type=cache,target=/var/cache/apt,sharing=locked \
- --mount=type=cache,target=/var/lib/apt,sharing=locked \
- apt-get update -qq && apt-get install -yqq \
- build-essential \
- libffi-dev \
- libjpeg-dev \
- libpq-dev \
- libssl-dev \
- libwebp-dev \
- libxml++2.6-dev \
- libxslt1-dev \
- openssl \
- zlib1g-dev \
- git \
- curl \
- libicu-dev \
- pkg-config \
- && rm -rf /var/lib/apt/lists/*
+FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS builder
+# This silences a warning as uv isn't able to do hardlinks between its cache
+# (mounted as --mount=type=cache) and the target directory.
+ENV UV_LINK_MODE=copy
# Install rust and ensure its in the PATH
ENV RUSTUP_HOME=/rust
@@ -119,7 +79,6 @@ RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
-
# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
# set to true, so we expose it as a build-arg.
ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
@@ -131,8 +90,8 @@ ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
#
# This is aiming at installing the `[tool.poetry.depdendencies]` from pyproject.toml.
COPY --from=requirements /synapse/requirements.txt /synapse/
-RUN --mount=type=cache,target=/root/.cache/pip \
- pip install --prefix="/install" --no-deps --no-warn-script-location -r /synapse/requirements.txt
+RUN --mount=type=cache,target=/root/.cache/uv \
+ uv pip install --prefix="/install" --no-deps -r /synapse/requirements.txt
# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
@@ -146,41 +105,85 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Install the synapse package itself.
# If we have populated requirements.txt, we don't install any dependencies
# as we should already have those from the previous `pip install` step.
-RUN --mount=type=cache,target=/synapse/target,sharing=locked \
+RUN \
+ --mount=type=cache,target=/root/.cache/uv \
+ --mount=type=cache,target=/synapse/target,sharing=locked \
--mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
- pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
+ uv pip install --prefix="/install" --no-deps /synapse[all]; \
else \
- pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
+ uv pip install --prefix="/install" /synapse[all]; \
fi
###
-### Stage 2: runtime
+### Stage 2: runtime dependencies download for ARM64 and AMD64
###
+FROM --platform=$BUILDPLATFORM docker.io/library/debian:${DEBIAN_VERSION} AS runtime-deps
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm
+# Tell apt to keep downloaded package files, as we're using cache mounts.
+RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
-LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
-LABEL org.opencontainers.image.documentation='https://github.com/element-hq/synapse/blob/master/docker/README.md'
-LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git'
-LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later'
+# Add both target architectures
+RUN dpkg --add-architecture arm64
+RUN dpkg --add-architecture amd64
+# Fetch the runtime dependencies debs for both architectures
+# We do that by building a recursive list of packages we need to download with `apt-cache depends`
+# and then downloading them with `apt-get download`.
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
- apt-get update -qq && apt-get install -yqq \
- curl \
- gosu \
- libjpeg62-turbo \
- libpq5 \
- libwebp7 \
- xmlsec1 \
- libjemalloc2 \
- libicu72 \
- libssl-dev \
- openssl \
- && rm -rf /var/lib/apt/lists/*
+ apt-get update -qq && \
+ apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances --no-pre-depends \
+ curl \
+ gosu \
+ libjpeg62-turbo \
+ libpq5 \
+ libwebp7 \
+ xmlsec1 \
+ libjemalloc2 \
+ libicu \
+ | grep '^\w' > /tmp/pkg-list && \
+ for arch in arm64 amd64; do \
+ mkdir -p /tmp/debs-${arch} && \
+ cd /tmp/debs-${arch} && \
+ apt-get -o APT::Architecture="${arch}" download $(cat /tmp/pkg-list); \
+ done
+
+# Extract the debs for each architecture
+RUN \
+ for arch in arm64 amd64; do \
+ mkdir -p /install-${arch}/var/lib/dpkg/status.d/ && \
+ for deb in /tmp/debs-${arch}/*.deb; do \
+ package_name=$(dpkg-deb -I ${deb} | awk '/^ Package: .*$/ {print $2}'); \
+ echo "Extracting: ${package_name}"; \
+ dpkg --ctrl-tarfile $deb | tar -Ox ./control > /install-${arch}/var/lib/dpkg/status.d/${package_name}; \
+ dpkg --extract $deb /install-${arch}; \
+ done; \
+ done
+
+
+###
+### Stage 3: runtime
+###
+
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-${DEBIAN_VERSION}
+
+ARG TARGETARCH
+
+LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
+LABEL org.opencontainers.image.documentation='https://github.com/element-hq/synapse/blob/master/docker/README.md'
+LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git'
+LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later'
+# On the runtime image, /lib is a symlink to /usr/lib, so we need to copy the
+# libraries to the right place, else the `COPY` won't work.
+# On amd64, we'll also have a /lib64 folder with ld-linux-x86-64.so.2, which is
+# already present in the runtime image.
+COPY --from=runtime-deps /install-${TARGETARCH}/lib /usr/lib
+COPY --from=runtime-deps /install-${TARGETARCH}/etc /etc
+COPY --from=runtime-deps /install-${TARGETARCH}/usr /usr
+COPY --from=runtime-deps /install-${TARGETARCH}/var /var
COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 2ceb6ab67c..6d0fc1440b 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -2,18 +2,38 @@
ARG SYNAPSE_VERSION=latest
ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
+ARG DEBIAN_VERSION=bookworm
+ARG PYTHON_VERSION=3.12
-# first of all, we create a base image with an nginx which we can copy into the
+# first of all, we create a base image with dependencies which we can copy into the
# target image. For repeated rebuilds, this is much faster than apt installing
# each time.
-FROM docker.io/library/debian:bookworm-slim AS deps_base
+FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base
+
+ # Tell apt to keep downloaded package files, as we're using cache mounts.
+ RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
+
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
- redis-server nginx-light
+ nginx-light
+
+ RUN \
+ # remove default page
+ rm /etc/nginx/sites-enabled/default && \
+ # have nginx log to stderr/out
+ ln -sf /dev/stdout /var/log/nginx/access.log && \
+ ln -sf /dev/stderr /var/log/nginx/error.log
+
+ # --link-mode=copy silences a warning as uv isn't able to do hardlinks between its cache
+ # (mounted as --mount=type=cache) and the target directory.
+ RUN --mount=type=cache,target=/root/.cache/uv \
+ uv pip install --link-mode=copy --prefix="/uv/usr/local" supervisor~=4.2
+
+ RUN mkdir -p /uv/etc/supervisor/conf.d
# Similarly, a base to copy the redis server from.
#
@@ -21,31 +41,21 @@ FROM docker.io/library/debian:bookworm-slim AS deps_base
# which makes it much easier to copy (but we need to make sure we use an image
# based on the same debian version as the synapse image, to make sure we get
# the expected version of libc.
-FROM docker.io/library/redis:7-bookworm AS redis_base
+FROM docker.io/library/redis:7-${DEBIAN_VERSION} AS redis_base
# now build the final image, based on the the regular Synapse docker image
FROM $FROM
- # Install supervisord with pip instead of apt, to avoid installing a second
- # copy of python.
- RUN --mount=type=cache,target=/root/.cache/pip \
- pip install supervisor~=4.2
- RUN mkdir -p /etc/supervisor/conf.d
-
- # Copy over redis and nginx
+ # Copy over dependencies
COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
-
+ COPY --from=deps_base /uv /
COPY --from=deps_base /usr/sbin/nginx /usr/sbin
COPY --from=deps_base /usr/share/nginx /usr/share/nginx
COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
COPY --from=deps_base /etc/nginx /etc/nginx
- RUN rm /etc/nginx/sites-enabled/default
- RUN mkdir /var/log/nginx /var/lib/nginx
- RUN chown www-data /var/lib/nginx
-
- # have nginx log to stderr/out
- RUN ln -sf /dev/stdout /var/log/nginx/access.log
- RUN ln -sf /dev/stderr /var/log/nginx/error.log
+ COPY --from=deps_base /var/log/nginx /var/log/nginx
+ # chown to allow non-root user to write to http-*-temp-path dirs
+ COPY --from=deps_base --chown=www-data:root /var/lib/nginx /var/lib/nginx
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
@@ -64,4 +74,4 @@ FROM $FROM
# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
- CMD /bin/sh /healthcheck.sh
+ CMD ["/healthcheck.sh"]
diff --git a/docker/README.md b/docker/README.md
index 8dba6fdb05..3438e9c441 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -114,6 +114,9 @@ The following environment variables are supported in `run` mode:
is set via `docker run --user`, defaults to `991`, `991`. Note that this user
must have permission to read the config files, and write to the data directories.
* `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`.
+* `SYNAPSE_HTTP_PROXY`: Passed through to the Synapse process as the `http_proxy` environment variable.
+* `SYNAPSE_HTTPS_PROXY`: Passed through to the Synapse process as the `https_proxy` environment variable.
+* `SYNAPSE_NO_PROXY`: Passed through to the Synapse process as the `no_proxy` environment variable.
For more complex setups (e.g. for workers) you can also pass your args directly to synapse using `run` mode. For example like this:
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index ce82c400eb..6ed084fe5d 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -9,6 +9,9 @@
ARG SYNAPSE_VERSION=latest
# This is an intermediate image, to be built locally (not pulled from a registry).
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
+ARG DEBIAN_VERSION=bookworm
+
+FROM docker.io/library/postgres:13-${DEBIAN_VERSION} AS postgres_base
FROM $FROM
# First of all, we copy postgres server from the official postgres image,
@@ -20,9 +23,9 @@ FROM $FROM
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
-COPY --from=docker.io/library/postgres:13-bookworm /usr/lib/postgresql /usr/lib/postgresql
-COPY --from=docker.io/library/postgres:13-bookworm /usr/share/postgresql /usr/share/postgresql
-RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
+COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
+COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
+COPY --from=postgres_base --chown=postgres /var/run/postgresql /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
@@ -55,4 +58,4 @@ ENTRYPOINT ["/start_for_complement.sh"]
# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
- CMD /bin/sh /healthcheck.sh
+ CMD ["/healthcheck.sh"]
diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh
index cc798a3210..a5e06396e2 100755
--- a/docker/complement/conf/start_for_complement.sh
+++ b/docker/complement/conf/start_for_complement.sh
@@ -5,12 +5,12 @@
set -e
echo "Complement Synapse launcher"
-echo " Args: $@"
+echo " Args: $*"
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR"
function log {
- d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
- echo "$d $@"
+ d=$(printf '%(%Y-%m-%d %H:%M:%S)T,%.3s\n' ${EPOCHREALTIME/./ })
+ echo "$d $*"
}
# Set the server name of the homeserver
@@ -103,12 +103,11 @@ fi
# Note that both the key and certificate are in PEM format (not DER).
# First generate a configuration file to set up a Subject Alternative Name.
-cat > /conf/server.tls.conf <<EOF
+echo "\
.include /etc/ssl/openssl.cnf
[SAN]
-subjectAltName=DNS:${SERVER_NAME}
-EOF
+subjectAltName=DNS:${SERVER_NAME}" > /conf/server.tls.conf
# Generate an RSA key
openssl genrsa -out /conf/server.tls.key 2048
@@ -123,12 +122,12 @@ openssl x509 -req -in /conf/server.tls.csr \
-out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN
# Assert that we have a Subject Alternative Name in the certificate.
-# (grep will exit with 1 here if there isn't a SAN in the certificate.)
-openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS:
+# (the test will exit with 1 here if there isn't a SAN in the certificate.)
+[[ $(openssl x509 -in /conf/server.tls.crt -noout -text) == *DNS:* ]]
export SYNAPSE_TLS_CERT=/conf/server.tls.crt
export SYNAPSE_TLS_KEY=/conf/server.tls.key
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
-exec /configure_workers_and_start.py
+exec /configure_workers_and_start.py "$@"
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index 6588b3ce14..48b44ddf90 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -7,6 +7,7 @@
#}
## Server ##
+public_baseurl: http://127.0.0.1:8008/
report_stats: False
trusted_key_servers: []
enable_registration: true
@@ -84,6 +85,18 @@ rc_invites:
per_user:
per_second: 1000
burst_count: 1000
+ per_issuer:
+ per_second: 1000
+ burst_count: 1000
+
+rc_presence:
+ per_user:
+ per_second: 9999
+ burst_count: 9999
+
+rc_delayed_event_mgmt:
+ per_second: 9999
+ burst_count: 9999
federation_rr_transactions_per_room_per_second: 9999
@@ -104,6 +117,18 @@ experimental_features:
msc3967_enabled: true
# Expose a room summary for public rooms
msc3266_enabled: true
+ # Send to-device messages to application services
+ msc2409_to_device_messages_enabled: true
+ # Allow application services to masquerade devices
+ msc3202_device_masquerading: true
+ # Sending device list changes, one-time key counts and fallback key usage to application services
+ msc3202_transaction_extensions: true
+ # Proxy OTK claim requests to exclusive ASes
+ msc3983_appservice_otk_claims: true
+ # Proxy key queries to exclusive ASes
+ msc3984_appservice_key_query: true
+ # Invite filtering
+ msc4155_enabled: true
server_notices:
system_mxid_localpart: _server
@@ -111,10 +136,18 @@ server_notices:
system_mxid_avatar_url: ""
room_name: "Server Alert"
+# Enable delayed events (msc4140)
+max_event_delay_duration: 24h
+
# Disable sync cache so that initial `/sync` requests are up-to-date.
caches:
sync_response_cache_duration: 0
+# Complement assumes that it can publish to the room list by default.
+room_list_publication_rules:
+ - action: allow
+
+
{% include "shared-orig.yaml.j2" %}
diff --git a/docker/conf-workers/nginx.conf.j2 b/docker/conf-workers/nginx.conf.j2
index d1e02af723..95d2f760d2 100644
--- a/docker/conf-workers/nginx.conf.j2
+++ b/docker/conf-workers/nginx.conf.j2
@@ -38,10 +38,13 @@ server {
{% if using_unix_sockets %}
proxy_pass http://unix:/run/main_public.sock;
{% else %}
+ # note: do not add a path (even a single /) after the port in `proxy_pass`,
+ # otherwise nginx will canonicalise the URI and cause signature verification
+ # errors.
proxy_pass http://localhost:8080;
{% endif %}
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
- proxy_set_header Host $host;
+ proxy_set_header Host $host:$server_port;
}
}
diff --git a/docker/conf-workers/synapse.supervisord.conf.j2 b/docker/conf-workers/synapse.supervisord.conf.j2
index 481eb4fc92..4fb11b259e 100644
--- a/docker/conf-workers/synapse.supervisord.conf.j2
+++ b/docker/conf-workers/synapse.supervisord.conf.j2
@@ -1,5 +1,6 @@
{% if use_forking_launcher %}
[program:synapse_fork]
+environment=http_proxy="%(ENV_SYNAPSE_HTTP_PROXY)s",https_proxy="%(ENV_SYNAPSE_HTTPS_PROXY)s",no_proxy="%(ENV_SYNAPSE_NO_PROXY)s"
command=/usr/local/bin/python -m synapse.app.complement_fork_starter
{{ main_config_path }}
synapse.app.homeserver
@@ -20,6 +21,7 @@ exitcodes=0
{% else %}
[program:synapse_main]
+environment=http_proxy="%(ENV_SYNAPSE_HTTP_PROXY)s",https_proxy="%(ENV_SYNAPSE_HTTPS_PROXY)s",no_proxy="%(ENV_SYNAPSE_NO_PROXY)s"
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
@@ -36,6 +38,7 @@ exitcodes=0
{% for worker in workers %}
[program:synapse_{{ worker.name }}]
+environment=http_proxy="%(ENV_SYNAPSE_HTTP_PROXY)s",https_proxy="%(ENV_SYNAPSE_HTTPS_PROXY)s",no_proxy="%(ENV_SYNAPSE_NO_PROXY)s"
command=/usr/local/bin/prefix-log /usr/local/bin/python -m {{ worker.app }}
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 15d8d7b558..102a88fad1 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/local/bin/python
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
@@ -202,6 +202,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": ["federation"],
"endpoint_patterns": [
+ "^/_matrix/federation/v1/version$",
"^/_matrix/federation/(v1|v2)/event/",
"^/_matrix/federation/(v1|v2)/state/",
"^/_matrix/federation/(v1|v2)/state_ids/",
@@ -351,6 +352,11 @@ def error(txt: str) -> NoReturn:
def flush_buffers() -> None:
+ """
+ Python's `print()` buffers output by default, typically waiting until ~8KB
+    accumulates. This function can be used to flush the buffers so we can see the output
+ of any print statements so far.
+ """
sys.stdout.flush()
sys.stderr.flush()
@@ -376,9 +382,11 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
#
# We use append mode in case the files have already been written to by something else
# (for instance, as part of the instructions in a dockerfile).
+ exists = os.path.isfile(dst)
with open(dst, "a") as outfile:
# In case the existing file doesn't end with a newline
- outfile.write("\n")
+ if exists:
+ outfile.write("\n")
outfile.write(rendered)
@@ -604,7 +612,7 @@ def generate_base_homeserver_config() -> None:
# start.py already does this for us, so just call that.
# note that this script is copied in in the official, monolith dockerfile
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
- subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
+ subprocess.run([sys.executable, "/start.py", "migrate_config"], check=True)
def parse_worker_types(
@@ -998,6 +1006,7 @@ def generate_worker_files(
"/healthcheck.sh",
healthcheck_urls=healthcheck_urls,
)
+ os.chmod("/healthcheck.sh", 0o755)
# Ensure the logging directory exists
log_dir = data_dir + "/logs"
@@ -1099,6 +1108,13 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
else:
log("Could not find %s, will not use" % (jemallocpath,))
+ # Empty strings are falsy in Python so this default is fine. We just can't have these
+ # be undefined because supervisord will complain about our
+ # `%(ENV_SYNAPSE_HTTP_PROXY)s` usage.
+ environ.setdefault("SYNAPSE_HTTP_PROXY", "")
+ environ.setdefault("SYNAPSE_HTTPS_PROXY", "")
+ environ.setdefault("SYNAPSE_NO_PROXY", "")
+
# Start supervisord, which will start Synapse, all of the configured worker
# processes, redis, nginx etc. according to the config we created above.
log("Starting supervisord")
diff --git a/docker/prefix-log b/docker/prefix-log
index 32dddbbfd4..2a38de5686 100755
--- a/docker/prefix-log
+++ b/docker/prefix-log
@@ -10,6 +10,9 @@
# '-W interactive' is a `mawk` extension which disables buffering on stdout and sets line-buffered reads on
# stdin. The effect is that the output is flushed after each line, rather than being batched, which helps reduce
# confusion due to to interleaving of the different processes.
-exec 1> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&1)
-exec 2> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&2)
+prefixer() {
+ mawk -W interactive '{printf("%s | %s\n", ENVIRON["SUPERVISOR_PROCESS_NAME"], $0); fflush() }'
+}
+exec 1> >(prefixer)
+exec 2> >(prefixer >&2)
exec "$@"
diff --git a/docker/start.py b/docker/start.py
index 818a5355ca..0be9976a0c 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -22,6 +22,11 @@ def error(txt: str) -> NoReturn:
def flush_buffers() -> None:
+ """
+ Python's `print()` buffers output by default, typically waiting until ~8KB
+    accumulates. This function can be used to flush the buffers so we can see the output
+ of any print statements so far.
+ """
sys.stdout.flush()
sys.stderr.flush()
|