diff --git a/docker/Dockerfile b/docker/Dockerfile
index 6f87702cc8..4523c60645 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -59,7 +59,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
WORKDIR /synapse
# Copy just what we need to run `poetry export`...
-COPY pyproject.toml poetry.lock README.rst /synapse/
+COPY pyproject.toml poetry.lock /synapse/
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt
@@ -98,9 +98,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
# ... and what we need to `pip install`.
-# TODO: once pyproject.toml declares poetry-core as its build system, we'll need to copy
-# pyproject.toml here, ditching setup.py and MANIFEST.in.
-COPY setup.py MANIFEST.in README.rst /synapse/
+COPY pyproject.toml README.rst /synapse/
# Install the synapse package itself.
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 6fb1cdbfb0..9ccb2b22a7 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -2,10 +2,19 @@
FROM matrixdotorg/synapse
# Install deps
-RUN apt-get update
-RUN apt-get install -y supervisor redis nginx
+RUN \
+ --mount=type=cache,target=/var/cache/apt,sharing=locked \
+ --mount=type=cache,target=/var/lib/apt,sharing=locked \
+ apt-get update && \
+ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+ redis-server nginx-light
-# Remove the default nginx sites
+# Install supervisord with pip instead of apt, to avoid installing a second
+# copy of python.
+RUN --mount=type=cache,target=/root/.cache/pip \
+ pip install supervisor~=4.2
+
+# Disable the default nginx sites
RUN rm /etc/nginx/sites-enabled/default
# Copy Synapse worker, nginx and supervisord configuration template files
@@ -19,5 +28,7 @@ EXPOSE 8080/tcp
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]
+# Replace the healthcheck with one which checks *all* the workers. The script
+# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD /bin/sh /healthcheck.sh
diff --git a/docker/complement/SynapseWorkers.Dockerfile b/docker/complement/SynapseWorkers.Dockerfile
index 982219a91e..65df2d114d 100644
--- a/docker/complement/SynapseWorkers.Dockerfile
+++ b/docker/complement/SynapseWorkers.Dockerfile
@@ -13,8 +13,8 @@ RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/cadd
tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz && mv caddy /root
# Install postgresql
-RUN apt-get update
-RUN apt-get install -y postgresql
+RUN apt-get update && \
+ DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
# Configure a user and create a database for Synapse
RUN pg_ctlcluster 13 main start && su postgres -c "echo \
@@ -34,40 +34,14 @@ WORKDIR /data
# Copy the caddy config
COPY conf-workers/caddy.complement.json /root/caddy.json
+# Copy the entrypoint
+COPY conf-workers/start-complement-synapse-workers.sh /
+
# Expose caddy's listener ports
EXPOSE 8008 8448
-ENTRYPOINT \
- # Replace the server name in the caddy config
- sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json && \
- # Start postgres
- pg_ctlcluster 13 main start 2>&1 && \
- # Start caddy
- /root/caddy start --config /root/caddy.json 2>&1 && \
- # Set the server name of the homeserver
- SYNAPSE_SERVER_NAME=${SERVER_NAME} \
- # No need to report stats here
- SYNAPSE_REPORT_STATS=no \
- # Set postgres authentication details which will be placed in the homeserver config file
- POSTGRES_PASSWORD=somesecret POSTGRES_USER=postgres POSTGRES_HOST=localhost \
- # Specify the workers to test with
- SYNAPSE_WORKER_TYPES="\
- event_persister, \
- event_persister, \
- background_worker, \
- frontend_proxy, \
- event_creator, \
- user_dir, \
- media_repository, \
- federation_inbound, \
- federation_reader, \
- federation_sender, \
- synchrotron, \
- appservice, \
- pusher" \
- # Run the script that writes the necessary config files and starts supervisord, which in turn
- # starts everything else
- /configure_workers_and_start.py
+ENTRYPOINT ["/start-complement-synapse-workers.sh"]
+# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD /bin/sh /healthcheck.sh
diff --git a/docker/complement/conf-workers/start-complement-synapse-workers.sh b/docker/complement/conf-workers/start-complement-synapse-workers.sh
new file mode 100755
index 0000000000..2c1e05bd62
--- /dev/null
+++ b/docker/complement/conf-workers/start-complement-synapse-workers.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
+
+set -e
+
+function log {
+ d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
+ echo "$d" "$@"
+}
+
+# Replace the server name in the caddy config
+sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json
+
+log "starting postgres"
+pg_ctlcluster 13 main start
+
+log "starting caddy"
+/root/caddy start --config /root/caddy.json
+
+# Set the server name of the homeserver
+export SYNAPSE_SERVER_NAME=${SERVER_NAME}
+
+# No need to report stats here
+export SYNAPSE_REPORT_STATS=no
+
+# Set postgres authentication details which will be placed in the homeserver config file
+export POSTGRES_PASSWORD=somesecret
+export POSTGRES_USER=postgres
+export POSTGRES_HOST=localhost
+
+# Specify the workers to test with
+export SYNAPSE_WORKER_TYPES="\
+ event_persister, \
+ event_persister, \
+ background_worker, \
+ frontend_proxy, \
+ event_creator, \
+ user_dir, \
+ media_repository, \
+ federation_inbound, \
+ federation_reader, \
+ federation_sender, \
+ synchrotron, \
+ appservice, \
+ pusher"
+
+# Run the script that writes the necessary config files and starts supervisord, which in turn
+# starts everything else
+exec /configure_workers_and_start.py
diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2
index 0de2c6143b..408ef72787 100644
--- a/docker/conf-workers/supervisord.conf.j2
+++ b/docker/conf-workers/supervisord.conf.j2
@@ -5,6 +5,9 @@
nodaemon=true
user=root
+[include]
+files = /etc/supervisor/conf.d/*.conf
+
[program:nginx]
command=/usr/sbin/nginx -g "daemon off;"
priority=500
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 3e91024e8c..23cac18e8d 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -29,7 +29,7 @@
import os
import subprocess
import sys
-from typing import Any, Dict, Set
+from typing import Any, Dict, Mapping, Set
import jinja2
import yaml
@@ -341,7 +341,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# base shared worker jinja2 template.
#
# This config file will be passed to all workers, included Synapse's main process.
- shared_config = {"listeners": listeners}
+ shared_config: Dict[str, Any] = {"listeners": listeners}
# The supervisord config. The contents of which will be inserted into the
# base supervisord jinja2 template.
@@ -446,21 +446,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# Write out the worker's logging config file
- # Check whether we should write worker logs to disk, in addition to the console
- extra_log_template_args = {}
- if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
- extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
- dir=data_dir, name=worker_name
- )
-
- # Render and write the file
- log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
- convert(
- "/conf/log.config",
- log_config_filepath,
- worker_name=worker_name,
- **extra_log_template_args,
- )
+ log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
# Then a worker config file
convert(
@@ -496,6 +482,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# Finally, we'll write out the config files.
+ # log config for the master process
+ master_log_config = generate_worker_log_config(environ, "master", data_dir)
+ shared_config["log_config"] = master_log_config
+
# Shared homeserver config
convert(
"/conf/shared.yaml.j2",
@@ -512,9 +502,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
)
# Supervisord config
+ os.makedirs("/etc/supervisor", exist_ok=True)
convert(
"/conf/supervisord.conf.j2",
- "/etc/supervisor/conf.d/supervisord.conf",
+ "/etc/supervisor/supervisord.conf",
main_config_path=config_path,
worker_config=supervisord_config,
)
@@ -532,12 +523,28 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
os.mkdir(log_dir)
-def start_supervisord():
- """Starts up supervisord which then starts and monitors all other necessary processes
+def generate_worker_log_config(
+ environ: Mapping[str, str], worker_name: str, data_dir: str
+) -> str:
+ """Generate a log.config file for the given worker.
- Raises: CalledProcessError if calling start.py return a non-zero exit code.
+ Returns: the path to the generated file
"""
- subprocess.run(["/usr/bin/supervisord"], stdin=subprocess.PIPE)
+ # Check whether we should write worker logs to disk, in addition to the console
+ extra_log_template_args = {}
+ if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
+ extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
+ dir=data_dir, name=worker_name
+ )
+ # Render and write the file
+ log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
+ convert(
+ "/conf/log.config",
+ log_config_filepath,
+ worker_name=worker_name,
+ **extra_log_template_args,
+ )
+ return log_config_filepath
def main(args, environ):
@@ -567,7 +574,13 @@ def main(args, environ):
# Start supervisord, which will start Synapse, all of the configured worker
# processes, redis, nginx etc. according to the config we created above.
- start_supervisord()
+ log("Starting supervisord")
+ os.execl(
+ "/usr/local/bin/supervisord",
+ "supervisord",
+ "-c",
+ "/etc/supervisor/supervisord.conf",
+ )
if __name__ == "__main__":
|