Diffstat (limited to 'docker')
-rw-r--r--   docker/Dockerfile                                                    |  49
-rw-r--r--   docker/Dockerfile-workers                                            |  99
-rw-r--r--   docker/README-testing.md                                             |  99
-rw-r--r--   docker/README.md                                                     |   9
-rw-r--r--   docker/complement/Dockerfile                                         |  78
-rw-r--r--   docker/complement/README.md                                          |  33
-rw-r--r--   docker/complement/SynapseWorkers.Dockerfile                          |  40
-rwxr-xr-x   docker/complement/conf-workers/start-complement-synapse-workers.sh  |  61
-rw-r--r--   docker/complement/conf/homeserver.yaml                               | 129
-rw-r--r--   docker/complement/conf/log_config.yaml                               |  24
-rw-r--r--   docker/complement/conf/postgres.supervisord.conf (renamed from docker/complement/conf-workers/postgres.supervisord.conf) |   5
-rwxr-xr-x   docker/complement/conf/start.sh                                      |  30
-rwxr-xr-x   docker/complement/conf/start_for_complement.sh                       | 109
-rw-r--r--   docker/complement/conf/workers-shared-extra.yaml.j2 (renamed from docker/complement/conf-workers/workers-shared.yaml) |  32
-rw-r--r--   docker/conf-workers/shared.yaml.j2                                   |   2
-rw-r--r--   docker/conf-workers/supervisord.conf.j2                              |  17
-rw-r--r--   docker/conf-workers/synapse.supervisord.conf.j2                      |  52
-rw-r--r--   docker/conf/log.config                                               |  10
-rwxr-xr-x   docker/configure_workers_and_start.py                                |  91
-rwxr-xr-x   docker/start.py                                                      |   6
20 files changed, 509 insertions, 466 deletions
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 7af0e51f97..b87d263cff 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -40,29 +40,38 @@ FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
 RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
- apt-get update && apt-get install -y git \
+    apt-get update -qq && apt-get install -yqq \
+      build-essential cargo git libffi-dev libssl-dev \
     && rm -rf /var/lib/apt/lists/*
 
 # We install poetry in its own build stage to avoid its dependencies conflicting with
 # synapse's dependencies.
-# We use a specific commit from poetry's master branch instead of our usual 1.1.12,
-# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
-#    https://github.com/python-poetry/poetry/pull/5156 and
-#    https://github.com/python-poetry/poetry/issues/5141 ;
-# without it, we generate a requirements.txt with incorrect environment markers,
-# which causes necessary packages to be omitted when we `pip install`.
-#
-# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
-# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
 RUN --mount=type=cache,target=/root/.cache/pip \
-  pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"
+  pip install --user "poetry==1.2.0"
 
 WORKDIR /synapse
 
 # Copy just what we need to run `poetry export`...
 COPY pyproject.toml poetry.lock /synapse/
 
-RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt
+
+# If specified, we won't verify the hashes of dependencies.
+# This is only needed if the hashes of dependencies cannot be checked for some
+# reason, such as when a git repository is used directly as a dependency.
+ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION
+
+# If specified, we won't use the Poetry lockfile.
+# Instead, we'll just install what a regular `pip install` would from PyPI.
+ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
+
+# Export the dependencies, but only if we're actually going to use the Poetry lockfile.
+# Otherwise, just create an empty requirements file so that the Dockerfile can
+# proceed.
+RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
+    /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
+  else \
+    touch /synapse/requirements.txt; \
+  fi
 
 ###
 ### Stage 1: builder
@@ -73,7 +82,7 @@ FROM docker.io/python:${PYTHON_VERSION}-slim as builder
 RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
- apt-get update && apt-get install -y \
+ apt-get update -qq && apt-get install -yqq \
     build-essential \
     libffi-dev \
     libjpeg-dev \
@@ -85,6 +94,7 @@ RUN \
     openssl \
     rustc \
     zlib1g-dev \
+    git \
     && rm -rf /var/lib/apt/lists/*
 
 # To speed up rebuilds, install all of the dependencies before we copy over
@@ -101,8 +111,17 @@ COPY synapse /synapse/synapse/
 # ... and what we need to `pip install`.
 COPY pyproject.toml README.rst /synapse/
 
+# Repeat of earlier build argument declaration, as this is a new build stage.
+ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
+
 # Install the synapse package itself.
-RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
+# If we have populated requirements.txt, we don't install any dependencies
+# as we should already have those from the previous `pip install` step.
+RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
+    pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
+  else \
+    pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
+  fi
 
 ###
 ### Stage 2: runtime
@@ -118,7 +137,7 @@ LABEL org.opencontainers.image.licenses='Apache-2.0'
 RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
-  apt-get update && apt-get install -y \
+  apt-get update -qq && apt-get install -yqq \
     curl \
     gosu \
     libjpeg62-turbo \
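The `TEST_ONLY_*` build arguments added above are consumed at build time only. As a rough sketch (not part of the committed files), they could be passed like this when building from a repository checkout; the image tag is illustrative:

```sh
# Skip the Poetry lockfile so dependencies are resolved straight from PyPI.
# BuildKit is required because the Dockerfile uses `RUN --mount`.
DOCKER_BUILDKIT=1 docker build \
  --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 \
  -t matrixdotorg/synapse:testing \
  -f docker/Dockerfile .
```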
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 24b03585f9..003a1cc3bf 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -1,37 +1,62 @@
-# Inherit from the official Synapse docker image
-FROM matrixdotorg/synapse
-
-# Install deps
-RUN \
-   --mount=type=cache,target=/var/cache/apt,sharing=locked \
-   --mount=type=cache,target=/var/lib/apt,sharing=locked \
-  apt-get update && \
-  DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
-     redis-server nginx-light
-
-# Install supervisord with pip instead of apt, to avoid installing a second
-# copy of python.
-RUN --mount=type=cache,target=/root/.cache/pip \
-    pip install supervisor~=4.2
-
-# Disable the default nginx sites
-RUN rm /etc/nginx/sites-enabled/default
-
-# Copy Synapse worker, nginx and supervisord configuration template files
-COPY ./docker/conf-workers/* /conf/
-
-# Copy a script to prefix log lines with the supervisor program name
-COPY ./docker/prefix-log /usr/local/bin/
-
-# Expose nginx listener port
-EXPOSE 8080/tcp
-
-# A script to read environment variables and create the necessary
-# files to run the desired worker configuration. Will start supervisord.
-COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
-ENTRYPOINT ["/configure_workers_and_start.py"]
-
-# Replace the healthcheck with one which checks *all* the workers. The script
-# is generated by configure_workers_and_start.py.
-HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
-    CMD /bin/sh /healthcheck.sh
+# syntax=docker/dockerfile:1
+
+ARG SYNAPSE_VERSION=latest
+
+# first of all, we create a base image with an nginx which we can copy into the
+# target image. For repeated rebuilds, this is much faster than apt installing
+# each time.
+
+FROM debian:bullseye-slim AS deps_base
+    RUN \
+       --mount=type=cache,target=/var/cache/apt,sharing=locked \
+       --mount=type=cache,target=/var/lib/apt,sharing=locked \
+      apt-get update -qq && \
+      DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
+          redis-server nginx-light
+
+# Similarly, a base to copy the redis server from.
+#
+# The redis docker image has fewer dynamic libraries than the debian package,
+# which makes it much easier to copy (but we need to make sure we use an image
+# based on the same debian version as the synapse image, to make sure we get
+# the expected version of libc).
+FROM redis:6-bullseye AS redis_base
+
+# now build the final image, based on the regular Synapse docker image
+FROM matrixdotorg/synapse:$SYNAPSE_VERSION
+
+    # Install supervisord with pip instead of apt, to avoid installing a second
+    # copy of python.
+    RUN --mount=type=cache,target=/root/.cache/pip \
+        pip install supervisor~=4.2
+    RUN mkdir -p /etc/supervisor/conf.d
+
+    # Copy over redis and nginx
+    COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
+
+    COPY --from=deps_base /usr/sbin/nginx /usr/sbin
+    COPY --from=deps_base /usr/share/nginx /usr/share/nginx
+    COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
+    COPY --from=deps_base /etc/nginx /etc/nginx
+    RUN rm /etc/nginx/sites-enabled/default
+    RUN mkdir /var/log/nginx /var/lib/nginx
+    RUN chown www-data /var/log/nginx /var/lib/nginx
+
+    # Copy Synapse worker, nginx and supervisord configuration template files
+    COPY ./docker/conf-workers/* /conf/
+
+    # Copy a script to prefix log lines with the supervisor program name
+    COPY ./docker/prefix-log /usr/local/bin/
+
+    # Expose nginx listener port
+    EXPOSE 8080/tcp
+
+    # A script to read environment variables and create the necessary
+    # files to run the desired worker configuration. Will start supervisord.
+    COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
+    ENTRYPOINT ["/configure_workers_and_start.py"]
+
+    # Replace the healthcheck with one which checks *all* the workers. The script
+    # is generated by configure_workers_and_start.py.
+    HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
+        CMD /bin/sh /healthcheck.sh
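Because the workers Dockerfile now declares a `SYNAPSE_VERSION` build argument, the workers image can be pinned to a particular base image tag. A minimal sketch, assuming the base image was built as above:

```sh
# Layer the workers image on top of a specific matrixdotorg/synapse tag.
DOCKER_BUILDKIT=1 docker build \
  --build-arg SYNAPSE_VERSION=latest \
  -t matrixdotorg/synapse-workers \
  -f docker/Dockerfile-workers .
```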
diff --git a/docker/README-testing.md b/docker/README-testing.md
index c38cae7530..21b99963d8 100644
--- a/docker/README-testing.md
+++ b/docker/README-testing.md
@@ -8,79 +8,54 @@ docker images that can be run inside Complement for testing purposes.
 
 Note that running Synapse's unit tests from within the docker image is not supported.
 
-## Testing with SQLite and single-process Synapse
+## Using the Complement launch script
 
-> Note that `scripts-dev/complement.sh` is a script that will automatically build
-> and run an SQLite-based, single-process of Synapse against Complement.
+`scripts-dev/complement.sh` is a script that will automatically build
+and run Synapse against Complement.
+Consult the [contributing guide][guideComplementSh] for instructions on how to use it.
 
-The instructions below will set up Complement testing for a single-process,
-SQLite-based Synapse deployment.
 
-Start by building the base Synapse docker image. If you wish to run tests with the latest
-release of Synapse, instead of your current checkout, you can skip this step. From the
-root of the repository:
-
-```sh
-docker build -t matrixdotorg/synapse -f docker/Dockerfile .
-```
-
-This will build an image with the tag `matrixdotorg/synapse`.
-
-Next, build the Synapse image for Complement.
+[guideComplementSh]: https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-integration-tests-complement
 
-```sh
-docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement
-```
+## Building and running the images manually
 
-This will build an image with the tag `complement-synapse`, which can be handed to
-Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
-[Complement's documentation](https://github.com/matrix-org/complement/#running) for
-how to run the tests, as well as the various available command line flags.
-
-## Testing with PostgreSQL and single or multi-process Synapse
+Under some circumstances, you may wish to build the images manually.
+The instructions below describe how to do that.
 
-The above docker image only supports running Synapse with SQLite and in a
-single-process topology. The following instructions are used to build a Synapse image for
-Complement that supports either single or multi-process topology with a PostgreSQL
-database backend.
+Note that these images can only be built using [BuildKit](https://docs.docker.com/develop/develop-images/build_enhancements/),
+therefore BuildKit needs to be enabled when calling `docker build`. This can be done by
+setting `DOCKER_BUILDKIT=1` in your environment.
 
-As with the single-process image, build the base Synapse docker image. If you wish to run
-tests with the latest release of Synapse, instead of your current checkout, you can skip
-this step. From the root of the repository:
+Start by building the base Synapse docker image. If you wish to run tests with the latest
+release of Synapse, instead of your current checkout, you can skip this step. From the
+root of the repository:
 
 ```sh
 docker build -t matrixdotorg/synapse -f docker/Dockerfile .
 ```
 
-This will build an image with the tag `matrixdotorg/synapse`.
-
-Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
-Again, from the root of the repository:
+Next, build the workerised Synapse docker image, which is a layer over the base
+image.
 
 ```sh
 docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
 ```
 
-This will build an image with the tag` matrixdotorg/synapse-workers`.
-
-It's worth noting at this point that this image is fully functional, and
-can be used for testing against locally. See instructions for using the container
-under
-[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
-below.
-
-Finally, build the Synapse image for Complement, which is based on
-`matrixdotorg/synapse-workers`.
+Finally, build the multi-purpose image for Complement, which is a layer over the workers image.
 
 ```sh
-docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement
+docker build -t complement-synapse -f docker/complement/Dockerfile docker/complement
 ```
 
-This will build an image with the tag `complement-synapse-workers`, which can be handed to
+This will build an image with the tag `complement-synapse`, which can be handed to
 Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
 [Complement's documentation](https://github.com/matrix-org/complement/#running) for
 how to run the tests, as well as the various available command line flags.
 
+See [the Complement image README](./complement/README.md) for information about the
+expected environment variables.
+
+
 ## Running the Dockerfile-worker image standalone
 
 For manual testing of a multi-process Synapse instance in Docker,
@@ -113,6 +88,9 @@ docker run -d --name synapse \
 ...substituting `POSTGRES*` variables for those that match a postgres host you have
 available (usually a running postgres docker container).
 
+
+### Workers
+
 The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
 use when running the container. All possible worker names are defined by the keys of the
 `WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the
@@ -125,8 +103,11 @@ type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES`
 (e.g `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`).
 
 Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers
-(leaving only the main process). The container is configured to use redis-based worker
-mode.
+(leaving only the main process).
+The container will only be configured to use Redis-based worker mode if there are
+workers enabled.
+
+### Logging
 
 Logs for workers and the main process are logged to stdout and can be viewed with
 standard `docker logs` tooling. Worker logs contain their worker name
@@ -136,3 +117,21 @@ Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be writ
 `<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00:
 00, according to the container's clock. Logging for the main process must still be
 configured by modifying the homeserver's log config in your Synapse data volume.
+
+
+### Application Services
+
+Setting the `SYNAPSE_AS_REGISTRATION_DIR` environment variable to the path of
+a directory (within the container) will cause the configuration script to scan
+that directory for `.yaml`/`.yml` registration files.
+Synapse will be configured to load these configuration files.
+
+
+### TLS Termination
+
+Nginx is present in the image to route requests to the appropriate workers,
+but it does not serve TLS by default.
+
+You can configure `SYNAPSE_TLS_CERT` and `SYNAPSE_TLS_KEY` to point to a
+TLS certificate and key (respectively), both in PEM (textual) format.
+In this case, Nginx will additionally serve using HTTPS on port 8448.
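Putting the standalone-image options described above together, a run might look roughly like the following sketch. The host paths, server name and worker list are placeholders, and the `POSTGRES_*` values must point at a database you actually have available:

```sh
docker run -d --name synapse-workers \
  -v /path/to/synapse/data:/data \
  -e SYNAPSE_SERVER_NAME=my.matrix.host \
  -e SYNAPSE_REPORT_STATS=no \
  -e POSTGRES_HOST=postgres.example.com \
  -e POSTGRES_USER=postgres \
  -e POSTGRES_PASSWORD=somesecret \
  -e SYNAPSE_WORKER_TYPES=synchrotron,federation_sender,media_repository \
  -e SYNAPSE_TLS_CERT=/data/tls.crt \
  -e SYNAPSE_TLS_KEY=/data/tls.key \
  -p 8080:8080 -p 8448:8448 \
  matrixdotorg/synapse-workers
```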
diff --git a/docker/README.md b/docker/README.md
index 67c3bc65f0..017f046c58 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -67,6 +67,13 @@ The following environment variables are supported in `generate` mode:
 * `UID`, `GID`: the user id and group id to use for creating the data
   directories. If unset, and no user is set via `docker run --user`, defaults
   to `991`, `991`.
+* `SYNAPSE_LOG_LEVEL`: the log level to use (one of `DEBUG`, `INFO`, `WARNING` or `ERROR`).
+  Defaults to `INFO`.
+* `SYNAPSE_LOG_SENSITIVE`: if set and the log level is set to `DEBUG`, Synapse
+  will log sensitive information such as access tokens.
+  This should not be needed unless you are a developer attempting to debug something
+  particularly tricky.
+
 
 ## Postgres
 
@@ -184,7 +191,7 @@ If you need to build the image from a Synapse checkout, use the following `docke
  build` command from the repo's root:
 
 ```
-docker build -t matrixdotorg/synapse -f docker/Dockerfile .
+DOCKER_BUILDKIT=1 docker build -t matrixdotorg/synapse -f docker/Dockerfile .
 ```
 
 You can choose to build a different docker image by changing the value of the `-f` flag to
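As a sketch of how the new logging variables slot into the image's `generate` mode described in this README (the volume path and server name are placeholders):

```sh
docker run -it --rm \
  -v /path/to/synapse/data:/data \
  -e SYNAPSE_SERVER_NAME=my.matrix.host \
  -e SYNAPSE_REPORT_STATS=no \
  -e SYNAPSE_LOG_LEVEL=DEBUG \
  -e SYNAPSE_LOG_SENSITIVE=1 \
  matrixdotorg/synapse:latest generate
```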
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index 4823ce7364..3cfff19f9a 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -1,22 +1,62 @@
-# A dockerfile which builds an image suitable for testing Synapse under
-# complement.
+# syntax=docker/dockerfile:1
+# This dockerfile builds on top of 'docker/Dockerfile-workers' in matrix-org/synapse
+# by including a built-in postgres instance, as well as setting up the homeserver so
+# that it is ready for testing via Complement.
+#
+# Instructions for building this image from those it depends on are detailed in this guide:
+# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
 
 ARG SYNAPSE_VERSION=latest
 
-FROM matrixdotorg/synapse:${SYNAPSE_VERSION}
-
-ENV SERVER_NAME=localhost
-
-COPY conf/* /conf/
-
-# generate a signing key
-RUN generate_signing_key -o /conf/server.signing.key
-
-WORKDIR /data
-
-EXPOSE 8008 8448
-
-ENTRYPOINT ["/conf/start.sh"]
-
-HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
-    CMD curl -fSs http://localhost:8008/health || exit 1
+# first of all, we create a base image with a postgres server and database,
+# which we can copy into the target image. For repeated rebuilds, this is
+# much faster than apt installing postgres each time.
+#
+# This trick only works because (a) the Synapse image happens to have all the
+# shared libraries that postgres wants, (b) we use a postgres image based on
+# the same debian version as Synapse's docker image (so the versions of the
+# shared libraries match).
+
+FROM postgres:13-bullseye AS postgres_base
+    # initialise the database cluster in /var/lib/postgresql
+    RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password
+
+    # Configure a password and create a database for Synapse
+    RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single
+    RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single
+
+# now build the final image, based on the Synapse image.
+
+FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
+    # copy the postgres installation over from the image we built above
+    RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
+    COPY --from=postgres_base /var/lib/postgresql /var/lib/postgresql
+    COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
+    COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
+    RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
+    ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
+    ENV PGDATA=/var/lib/postgresql/data
+
+    # Extend the shared homeserver config to disable rate-limiting,
+    # set Complement's static shared secret, enable registration, amongst other
+    # tweaks to get Synapse ready for testing.
+    # To do this, we copy the old template out of the way and then include it
+    # with Jinja2.
+    RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2
+    COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2
+
+    WORKDIR /data
+
+    COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
+
+    # Copy the entrypoint
+    COPY conf/start_for_complement.sh /
+
+    # Expose nginx's listener ports
+    EXPOSE 8008 8448
+
+    ENTRYPOINT ["/start_for_complement.sh"]
+
+    # Update the healthcheck to have a shorter check interval
+    HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
+        CMD /bin/sh /healthcheck.sh
diff --git a/docker/complement/README.md b/docker/complement/README.md
index e075418e4a..62682219e8 100644
--- a/docker/complement/README.md
+++ b/docker/complement/README.md
@@ -1 +1,32 @@
-Stuff for building the docker image used for testing under complement.
+# Unified Complement image for Synapse
+
+This is an image for testing Synapse with [the *Complement* integration test suite][complement].
+It contains some insecure defaults that are only suitable for testing purposes,
+so **please don't use this image for a production server**.
+
+This multi-purpose image is built on top of `Dockerfile-workers` in the parent directory
+and can be switched using environment variables between the following configurations:
+
+- Monolithic Synapse with SQLite (default, or `SYNAPSE_COMPLEMENT_DATABASE=sqlite`)
+- Monolithic Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres`)
+- Workerised Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres` and `SYNAPSE_COMPLEMENT_USE_WORKERS=true`)
+
+The image is self-contained; it contains an integrated Postgres, Redis and Nginx.
+
+
+## How to get Complement to pass the environment variables through
+
+To pass these environment variables, use [Complement's `COMPLEMENT_SHARE_ENV_PREFIX`][complementEnv]
+variable to configure an environment prefix to pass through, then prefix the above options
+with that prefix.
+
+Example:
+```
+COMPLEMENT_SHARE_ENV_PREFIX=PASS_ PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
+```
+
+Consult `scripts-dev/complement.sh` in the repository root for a real example.
+
+
+[complement]: https://github.com/matrix-org/complement
+[complementEnv]: https://github.com/matrix-org/complement/pull/382
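For completeness, a hedged sketch of what an invocation might look like from a Complement checkout, combining `COMPLEMENT_BASE_IMAGE` with the pass-through prefix (the exact `go test` flags are up to you and are not prescribed by this image):

```sh
COMPLEMENT_BASE_IMAGE=complement-synapse \
COMPLEMENT_SHARE_ENV_PREFIX=PASS_ \
PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres \
PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true \
go test -v ./tests/...
```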
diff --git a/docker/complement/SynapseWorkers.Dockerfile b/docker/complement/SynapseWorkers.Dockerfile
deleted file mode 100644
index 99a09cbc2b..0000000000
--- a/docker/complement/SynapseWorkers.Dockerfile
+++ /dev/null
@@ -1,40 +0,0 @@
-# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse
-# by including a built-in postgres instance, as well as setting up the homeserver so
-# that it is ready for testing via Complement.
-#
-# Instructions for building this image from those it depends on is detailed in this guide:
-# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
-FROM matrixdotorg/synapse-workers
-
-# Install postgresql
-RUN apt-get update && \
-  DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
-
-# Configure a user and create a database for Synapse
-RUN pg_ctlcluster 13 main start &&  su postgres -c "echo \
- \"ALTER USER postgres PASSWORD 'somesecret'; \
- CREATE DATABASE synapse \
-  ENCODING 'UTF8' \
-  LC_COLLATE='C' \
-  LC_CTYPE='C' \
-  template=template0;\" | psql" && pg_ctlcluster 13 main stop
-
-# Modify the shared homeserver config with postgres support, certificate setup
-# and the disabling of rate-limiting
-COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml
-
-WORKDIR /data
-
-COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
-
-# Copy the entrypoint
-COPY conf-workers/start-complement-synapse-workers.sh /
-
-# Expose nginx's listener ports
-EXPOSE 8008 8448
-
-ENTRYPOINT ["/start-complement-synapse-workers.sh"]
-
-# Update the healthcheck to have a shorter check interval
-HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
-    CMD /bin/sh /healthcheck.sh
diff --git a/docker/complement/conf-workers/start-complement-synapse-workers.sh b/docker/complement/conf-workers/start-complement-synapse-workers.sh
deleted file mode 100755
index b7e2444000..0000000000
--- a/docker/complement/conf-workers/start-complement-synapse-workers.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/bash
-#
-# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
-
-set -e
-
-function log {
-    d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
-    echo "$d $@"
-}
-
-# Set the server name of the homeserver
-export SYNAPSE_SERVER_NAME=${SERVER_NAME}
-
-# No need to report stats here
-export SYNAPSE_REPORT_STATS=no
-
-# Set postgres authentication details which will be placed in the homeserver config file
-export POSTGRES_PASSWORD=somesecret
-export POSTGRES_USER=postgres
-export POSTGRES_HOST=localhost
-
-# Specify the workers to test with
-export SYNAPSE_WORKER_TYPES="\
-    event_persister, \
-    event_persister, \
-    background_worker, \
-    frontend_proxy, \
-    event_creator, \
-    user_dir, \
-    media_repository, \
-    federation_inbound, \
-    federation_reader, \
-    federation_sender, \
-    synchrotron, \
-    appservice, \
-    pusher"
-
-# Add Complement's appservice registration directory, if there is one
-# (It can be absent when there are no application services in this test!)
-if [ -d /complement/appservice ]; then
-    export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
-fi
-
-# Generate a TLS key, then generate a certificate by having Complement's CA sign it
-# Note that both the key and certificate are in PEM format (not DER).
-openssl genrsa -out /conf/server.tls.key 2048
-
-openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-  -subj "/CN=${SERVER_NAME}"
-
-openssl x509 -req -in /conf/server.tls.csr \
-  -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-  -out /conf/server.tls.crt
-
-export SYNAPSE_TLS_CERT=/conf/server.tls.crt
-export SYNAPSE_TLS_KEY=/conf/server.tls.key
-
-# Run the script that writes the necessary config files and starts supervisord, which in turn
-# starts everything else
-exec /configure_workers_and_start.py
diff --git a/docker/complement/conf/homeserver.yaml b/docker/complement/conf/homeserver.yaml
deleted file mode 100644
index e2be540bbb..0000000000
--- a/docker/complement/conf/homeserver.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-## Server ##
-
-server_name: SERVER_NAME
-log_config: /conf/log_config.yaml
-report_stats: False
-signing_key_path: /conf/server.signing.key
-trusted_key_servers: []
-enable_registration: true
-enable_registration_without_verification: true
-
-## Listeners ##
-
-tls_certificate_path: /conf/server.tls.crt
-tls_private_key_path: /conf/server.tls.key
-bcrypt_rounds: 4
-registration_shared_secret: complement
-
-listeners:
-  - port: 8448
-    bind_addresses: ['::']
-    type: http
-    tls: true
-    resources:
-      - names: [federation]
-
-  - port: 8008
-    bind_addresses: ['::']
-    type: http
-
-    resources:
-      - names: [client]
-
-## Database ##
-
-database:
-  name: "sqlite3"
-  args:
-    # We avoid /data, as it is a volume and is not transferred when the container is committed,
-    # which is a fundamental necessity in complement.
-    database: "/conf/homeserver.db"
-
-## Federation ##
-
-# trust certs signed by the complement CA
-federation_custom_ca_list:
-- /complement/ca/ca.crt
-
-# unblacklist RFC1918 addresses
-ip_range_blacklist: []
-
-# Disable server rate-limiting
-rc_federation:
-  window_size: 1000
-  sleep_limit: 10
-  sleep_delay: 500
-  reject_limit: 99999
-  concurrent: 3
-
-rc_message:
-  per_second: 9999
-  burst_count: 9999
-
-rc_registration:
-  per_second: 9999
-  burst_count: 9999
-
-rc_login:
-  address:
-    per_second: 9999
-    burst_count: 9999
-  account:
-    per_second: 9999
-    burst_count: 9999
-  failed_attempts:
-    per_second: 9999
-    burst_count: 9999
-
-rc_admin_redaction:
-  per_second: 9999
-  burst_count: 9999
-
-rc_joins:
-  local:
-    per_second: 9999
-    burst_count: 9999
-  remote:
-    per_second: 9999
-    burst_count: 9999
-
-rc_3pid_validation:
-  per_second: 1000
-  burst_count: 1000
-
-rc_invites:
-  per_room:
-    per_second: 1000
-    burst_count: 1000
-  per_user:
-    per_second: 1000
-    burst_count: 1000
-
-federation_rr_transactions_per_room_per_second: 9999
-
-## API Configuration ##
-
-# A list of application service config files to use
-#
-app_service_config_files:
-AS_REGISTRATION_FILES  
-
-## Experimental Features ##
-
-experimental_features:
-  # Enable spaces support
-  spaces_enabled: true
-  # Enable history backfilling support
-  msc2716_enabled: true
-  # server-side support for partial state in /send_join responses
-  msc3706_enabled: true
-  # client-side support for partial state in /send_join responses
-  faster_joins: true
-  # Enable jump to date endpoint
-  msc3030_enabled: true
-
-server_notices:
-  system_mxid_localpart: _server
-  system_mxid_display_name: "Server Alert"
-  system_mxid_avatar_url: ""
-  room_name: "Server Alert"
diff --git a/docker/complement/conf/log_config.yaml b/docker/complement/conf/log_config.yaml
deleted file mode 100644
index c33fd6cd00..0000000000
--- a/docker/complement/conf/log_config.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-version: 1
-
-formatters:
-  precise:
-   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
-
-filters:
-  context:
-    (): synapse.logging.context.LoggingContextFilter
-    request: ""
-
-handlers:
-  console:
-    class: logging.StreamHandler
-    formatter: precise
-    filters: [context]
-    # log to stdout, for easier use with 'docker logs'
-    stream: 'ext://sys.stdout'
-
-root:
-    level: INFO
-    handlers: [console]
-
-disable_existing_loggers: false
diff --git a/docker/complement/conf-workers/postgres.supervisord.conf b/docker/complement/conf/postgres.supervisord.conf
index 5608342d1a..b88bfc772e 100644
--- a/docker/complement/conf-workers/postgres.supervisord.conf
+++ b/docker/complement/conf/postgres.supervisord.conf
@@ -1,5 +1,8 @@
 [program:postgres]
-command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground
+command=/usr/local/bin/prefix-log gosu postgres postgres
+
+# Only start if START_POSTGRES=1
+autostart=%(ENV_START_POSTGRES)s
 
 # Lower priority number = starts first
 priority=1
diff --git a/docker/complement/conf/start.sh b/docker/complement/conf/start.sh
deleted file mode 100755
index 5d8d0fe016..0000000000
--- a/docker/complement/conf/start.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-
-set -e
-
-sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml
-
-# Add the application service registration files to the homeserver.yaml config
-for filename in /complement/appservice/*.yaml; do
-  [ -f "$filename" ] || break
-
-  as_id=$(basename "$filename" .yaml)
-
-  # Insert the path to the registration file and the AS_REGISTRATION_FILES marker after 
-  # so we can add the next application service in the next iteration of this for loop
-  sed -i "s/AS_REGISTRATION_FILES/  - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml
-done
-# Remove the AS_REGISTRATION_FILES entry
-sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml
-
-# generate an ssl key and cert for the server, signed by the complement CA
-openssl genrsa -out /conf/server.tls.key 2048
-
-openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-  -subj "/CN=${SERVER_NAME}"
-openssl x509 -req -in /conf/server.tls.csr \
-  -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-  -out /conf/server.tls.crt
-
-exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@"
-
diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh
new file mode 100755
index 0000000000..cc6482f763
--- /dev/null
+++ b/docker/complement/conf/start_for_complement.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+#
+# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
+
+set -e
+
+echo "Complement Synapse launcher"
+echo "  Args: $@"
+echo "  Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS"
+
+function log {
+    d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
+    echo "$d $@"
+}
+
+# Set the server name of the homeserver
+export SYNAPSE_SERVER_NAME=${SERVER_NAME}
+
+# No need to report stats here
+export SYNAPSE_REPORT_STATS=no
+
+
+case "$SYNAPSE_COMPLEMENT_DATABASE" in
+  postgres)
+    # Set postgres authentication details which will be placed in the homeserver config file
+    export POSTGRES_PASSWORD=somesecret
+    export POSTGRES_USER=postgres
+    export POSTGRES_HOST=localhost
+
+    # configure supervisord to start postgres
+    export START_POSTGRES=true
+    ;;
+
+  sqlite|"")
+    # Configure supervisord not to start Postgres, as we don't need it
+    export START_POSTGRES=false
+    ;;
+
+  *)
+    echo "Unknown Synapse database: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE" >&2
+    exit 1
+    ;;
+esac
+
+
+if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
+  # Specify the workers to test with
+  export SYNAPSE_WORKER_TYPES="\
+      event_persister, \
+      event_persister, \
+      background_worker, \
+      frontend_proxy, \
+      event_creator, \
+      user_dir, \
+      media_repository, \
+      federation_inbound, \
+      federation_reader, \
+      federation_sender, \
+      synchrotron, \
+      appservice, \
+      pusher"
+
+  # Improve startup times by using a launcher based on fork()
+  export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
+else
+  # Empty string here means 'main process only'
+  export SYNAPSE_WORKER_TYPES=""
+fi
+
+
+# Add Complement's appservice registration directory, if there is one
+# (It can be absent when there are no application services in this test!)
+if [ -d /complement/appservice ]; then
+    export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
+fi
+
+# Generate a TLS key, then generate a certificate by having Complement's CA sign it
+# Note that both the key and certificate are in PEM format (not DER).
+
+# First generate a configuration file to set up a Subject Alternative Name.
+cat > /conf/server.tls.conf <<EOF
+.include /etc/ssl/openssl.cnf
+
+[SAN]
+subjectAltName=DNS:${SERVER_NAME}
+EOF
+
+# Generate an RSA key
+openssl genrsa -out /conf/server.tls.key 2048
+
+# Generate a certificate signing request
+openssl req -new -config /conf/server.tls.conf -key /conf/server.tls.key -out /conf/server.tls.csr \
+  -subj "/CN=${SERVER_NAME}" -reqexts SAN
+
+# Make the Complement Certificate Authority sign and generate a certificate.
+openssl x509 -req -in /conf/server.tls.csr \
+  -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
+  -out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN
+
+# Assert that we have a Subject Alternative Name in the certificate.
+# (grep will exit with 1 here if there isn't a SAN in the certificate.)
+openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS:
+
+export SYNAPSE_TLS_CERT=/conf/server.tls.crt
+export SYNAPSE_TLS_KEY=/conf/server.tls.key
+
+# Run the script that writes the necessary config files and starts supervisord, which in turn
+# starts everything else
+exec /configure_workers_and_start.py
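The script above already greps the certificate for a SAN. If certificate problems need further debugging, a couple of extra checks (a sketch using standard OpenSSL tooling, run inside the container) can confirm the chain and the embedded names:

```sh
# Confirm the generated certificate verifies against Complement's CA.
openssl verify -CAfile /complement/ca/ca.crt /conf/server.tls.crt

# Show the subject and subjectAltName that were baked into the certificate.
openssl x509 -in /conf/server.tls.crt -noout -subject -ext subjectAltName
```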
diff --git a/docker/complement/conf-workers/workers-shared.yaml b/docker/complement/conf/workers-shared-extra.yaml.j2
index cd7b50c65c..9e554a865e 100644
--- a/docker/complement/conf-workers/workers-shared.yaml
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -1,3 +1,11 @@
+{#
+  This file extends the default 'shared' configuration file (from the 'synapse-workers'
+  docker image) with Complement-specific tweaks.
+
+  The base configuration is moved out of the default path to `shared-orig.yaml.j2`
+  in the Complement Dockerfile and below we include that original file.
+#}
+
 ## Server ##
 report_stats: False
 trusted_key_servers: []
@@ -59,6 +67,10 @@ rc_joins:
     per_second: 9999
     burst_count: 9999
 
+rc_joins_per_room:
+    per_second: 9999
+    burst_count: 9999
+
 rc_3pid_validation:
   per_second: 1000
   burst_count: 1000
@@ -73,13 +85,21 @@ rc_invites:
 
 federation_rr_transactions_per_room_per_second: 9999
 
+allow_device_name_lookup_over_federation: true
+
 ## Experimental Features ##
 
 experimental_features:
-  # Enable history backfilling support
-  msc2716_enabled: true
   # Enable spaces support
   spaces_enabled: true
+  # Enable history backfilling support
+  msc2716_enabled: true
+  # server-side support for partial state in /send_join responses
+  msc3706_enabled: true
+  {% if not workers_in_use %}
+  # client-side support for partial state in /send_join responses
+  faster_joins: true
+  {% endif %}
   # Enable jump to date endpoint
   msc3030_enabled: true
 
@@ -88,3 +108,11 @@ server_notices:
   system_mxid_display_name: "Server Alert"
   system_mxid_avatar_url: ""
   room_name: "Server Alert"
+
+
+# Disable sync cache so that initial `/sync` requests are up-to-date.
+caches:
+  sync_response_cache_duration: 0
+
+
+{% include "shared-orig.yaml.j2" %}
diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2
index 644ed788f3..92d25386dc 100644
--- a/docker/conf-workers/shared.yaml.j2
+++ b/docker/conf-workers/shared.yaml.j2
@@ -3,8 +3,10 @@
 # configure_workers_and_start.py uses and amends to this file depending on the workers
 # that have been selected.
 
+{% if enable_redis %}
 redis:
     enabled: true
+{% endif %}
 
 {% if appservice_registrations is not none %}
 ## Application Services ##
diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2
index ca1f7aef8e..9f1e03cfc0 100644
--- a/docker/conf-workers/supervisord.conf.j2
+++ b/docker/conf-workers/supervisord.conf.j2
@@ -19,7 +19,7 @@ username=www-data
 autorestart=true
 
 [program:redis]
-command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no
+command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
 priority=1
 stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0
@@ -28,17 +28,6 @@ stderr_logfile_maxbytes=0
 username=redis
 autorestart=true
 
-[program:synapse_main]
-command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
-priority=10
-# Log startup failures to supervisord's stdout/err
-# Regular synapse logs will still go in the configured data directory
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-stderr_logfile=/dev/stderr
-stderr_logfile_maxbytes=0
-autorestart=unexpected
-exitcodes=0
+# Redis can be disabled if the image is being used without workers
+autostart={{ enable_redis }}
 
-# Additional process blocks
-{{ worker_config }}
\ No newline at end of file
diff --git a/docker/conf-workers/synapse.supervisord.conf.j2 b/docker/conf-workers/synapse.supervisord.conf.j2
new file mode 100644
index 0000000000..481eb4fc92
--- /dev/null
+++ b/docker/conf-workers/synapse.supervisord.conf.j2
@@ -0,0 +1,52 @@
+{% if use_forking_launcher %}
+[program:synapse_fork]
+command=/usr/local/bin/python -m synapse.app.complement_fork_starter
+  {{ main_config_path }}
+  synapse.app.homeserver
+  --config-path="{{ main_config_path }}"
+  --config-path=/conf/workers/shared.yaml
+  {%- for worker in workers %}
+    -- {{ worker.app }}
+    --config-path="{{ main_config_path }}"
+    --config-path=/conf/workers/shared.yaml
+    --config-path=/conf/workers/{{ worker.name }}.yaml
+  {%- endfor %}
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+autorestart=unexpected
+exitcodes=0
+
+{% else %}
+[program:synapse_main]
+command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver
+  --config-path="{{ main_config_path }}"
+  --config-path=/conf/workers/shared.yaml
+priority=10
+# Log startup failures to supervisord's stdout/err
+# Regular synapse logs will still go in the configured data directory
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+autorestart=unexpected
+exitcodes=0
+
+
+  {% for worker in workers %}
+[program:synapse_{{ worker.name }}]
+command=/usr/local/bin/prefix-log /usr/local/bin/python -m {{ worker.app }}
+  --config-path="{{ main_config_path }}"
+  --config-path=/conf/workers/shared.yaml
+  --config-path=/conf/workers/{{ worker.name }}.yaml
+autorestart=unexpected
+priority=500
+exitcodes=0
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+
+  {% endfor %}
+{% endif %}
diff --git a/docker/conf/log.config b/docker/conf/log.config
index dc8c70befd..90b5179838 100644
--- a/docker/conf/log.config
+++ b/docker/conf/log.config
@@ -2,7 +2,11 @@ version: 1
 
 formatters:
   precise:
+    {% if include_worker_name_in_log_line %}
+    format: '{{ worker_name }} | %(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
+    {% else %}
     format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
+    {% endif %}
 
 handlers:
 {% if LOG_FILE_PATH %}
@@ -45,11 +49,17 @@ handlers:
     class: logging.StreamHandler
     formatter: precise
 
+{% if not SYNAPSE_LOG_SENSITIVE %}
+{#
+  If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
+  so that DEBUG entries (containing sensitive information) are not emitted.
+#}
 loggers:
     synapse.storage.SQL:
         # beware: increasing this to DEBUG will make synapse log sensitive
         # information such as access tokens.
         level: INFO
+{% endif %}
 
 root:
     level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index f7dac90222..51583dc13d 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -26,6 +26,13 @@
 #   * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
 #   * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
 #         Nginx will be configured to serve TLS on port 8448.
+#   * SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER: Whether to use the forking launcher,
+#         only intended for usage in Complement at the moment.
+#         No stability guarantees are provided.
+#   * SYNAPSE_LOG_LEVEL: Set this to DEBUG, INFO, WARNING or ERROR to change the
+#         log level. INFO is the default.
+#   * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
+#         regardless of the SYNAPSE_LOG_LEVEL setting.
 #
 # NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
 # in the project's README), this script may be run multiple times, and functionality should
@@ -35,10 +42,10 @@ import os
 import subprocess
 import sys
 from pathlib import Path
-from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set
+from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set
 
-import jinja2
 import yaml
+from jinja2 import Environment, FileSystemLoader
 
 MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
 
@@ -52,12 +59,12 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "worker_extra_conf": "",
     },
     "user_dir": {
-        "app": "synapse.app.user_dir",
+        "app": "synapse.app.generic_worker",
         "listener_resources": ["client"],
         "endpoint_patterns": [
             "^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
         ],
-        "shared_extra_conf": {"update_user_directory": False},
+        "shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"},
         "worker_extra_conf": "",
     },
     "media_repository": {
@@ -78,7 +85,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "app": "synapse.app.generic_worker",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"notify_appservices_from_worker": "appservice"},
+        "shared_extra_conf": {"notify_appservices_from_worker": "appservice1"},
         "worker_extra_conf": "",
     },
     "federation_sender": {
@@ -176,21 +183,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
 }
 
 # Templates for sections that may be inserted multiple times in config files
-SUPERVISORD_PROCESS_CONFIG_BLOCK = """
-[program:synapse_{name}]
-command=/usr/local/bin/prefix-log /usr/local/bin/python -m {app} \
-    --config-path="{config_path}" \
-    --config-path=/conf/workers/shared.yaml \
-    --config-path=/conf/workers/{name}.yaml
-autorestart=unexpected
-priority=500
-exitcodes=0
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-stderr_logfile=/dev/stderr
-stderr_logfile_maxbytes=0
-"""
-
 NGINX_LOCATION_CONFIG_BLOCK = """
     location ~* {endpoint} {{
         proxy_pass {upstream};
@@ -236,12 +228,13 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
         template_vars: The arguments to replace placeholder variables in the template with.
     """
     # Read the template file
-    with open(src) as infile:
-        template = infile.read()
+    # We disable autoescape to prevent template variables from being escaped,
+    # as we're not using HTML.
+    env = Environment(loader=FileSystemLoader(os.path.dirname(src)), autoescape=False)
+    template = env.get_template(os.path.basename(src))
 
-    # Generate a string from the template. We disable autoescape to prevent template
-    # variables from being escaped.
-    rendered = jinja2.Template(template, autoescape=False).render(**template_vars)
+    # Generate a string from the template.
+    rendered = template.render(**template_vars)
 
     # Write the generated contents to a file
     #
@@ -352,13 +345,10 @@ def generate_worker_files(
     # This config file will be passed to all workers, including Synapse's main process.
     shared_config: Dict[str, Any] = {"listeners": listeners}
 
-    # The supervisord config. The contents of which will be inserted into the
-    # base supervisord jinja2 template.
-    #
-    # Supervisord will be in charge of running everything, from redis to nginx to Synapse
-    # and all of its worker processes. Load the config template, which defines a few
-    # services that are necessary to run.
-    supervisord_config = ""
+    # List of dicts that describe workers.
+    # We pass this to the Supervisor template later to generate the appropriate
+    # program blocks.
+    worker_descriptors: List[Dict[str, Any]] = []
 
     # Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
     # ports of each worker. For example:
@@ -378,8 +368,8 @@ def generate_worker_files(
     nginx_locations = {}
 
     # Read the desired worker configuration from the environment
-    worker_types_env = environ.get("SYNAPSE_WORKER_TYPES")
-    if worker_types_env is None:
+    worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
+    if not worker_types_env:
         # No workers, just the main process
         worker_types = []
     else:
@@ -436,7 +426,7 @@ def generate_worker_files(
             )
 
         # Enable the worker in supervisord
-        supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config)
+        worker_descriptors.append(worker_config)
 
         # Add nginx location blocks for this worker's endpoints (if any are defined)
         for pattern in worker_config["endpoint_patterns"]:
@@ -506,12 +496,16 @@ def generate_worker_files(
             if reg_path.suffix.lower() in (".yaml", ".yml")
         ]
 
+    workers_in_use = len(worker_types) > 0
+
     # Shared homeserver config
     convert(
         "/conf/shared.yaml.j2",
         "/conf/workers/shared.yaml",
         shared_worker_config=yaml.dump(shared_config),
         appservice_registrations=appservice_registrations,
+        enable_redis=workers_in_use,
+        workers_in_use=workers_in_use,
     )
 
     # Nginx config
@@ -530,7 +524,15 @@ def generate_worker_files(
         "/conf/supervisord.conf.j2",
         "/etc/supervisor/supervisord.conf",
         main_config_path=config_path,
-        worker_config=supervisord_config,
+        enable_redis=workers_in_use,
+    )
+
+    convert(
+        "/conf/synapse.supervisord.conf.j2",
+        "/etc/supervisor/conf.d/synapse.conf",
+        workers=worker_descriptors,
+        main_config_path=config_path,
+        use_forking_launcher=environ.get("SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"),
     )
 
     # healthcheck config
@@ -554,18 +556,25 @@ def generate_worker_log_config(
     Returns: the path to the generated file
     """
     # Check whether we should write worker logs to disk, in addition to the console
-    extra_log_template_args = {}
+    extra_log_template_args: Dict[str, Optional[str]] = {}
     if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
-        extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
-            dir=data_dir, name=worker_name
-        )
+        extra_log_template_args["LOG_FILE_PATH"] = f"{data_dir}/logs/{worker_name}.log"
+
+    extra_log_template_args["SYNAPSE_LOG_LEVEL"] = environ.get("SYNAPSE_LOG_LEVEL")
+    extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get(
+        "SYNAPSE_LOG_SENSITIVE"
+    )
+
     # Render and write the file
-    log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
+    log_config_filepath = f"/conf/workers/{worker_name}.log.config"
     convert(
         "/conf/log.config",
         log_config_filepath,
         worker_name=worker_name,
         **extra_log_template_args,
+        include_worker_name_in_log_line=environ.get(
+            "SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"
+        ),
     )
     return log_config_filepath
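When debugging the generated configuration, it can help to look at what this script actually wrote. A sketch, assuming a running container named `synapse-workers` as in the earlier examples (the paths are the ones used by the script above):

```sh
# List the per-worker homeserver and log configs generated at start-up.
docker exec synapse-workers ls /conf/workers/

# Show the supervisord program blocks rendered for the main process and workers.
docker exec synapse-workers cat /etc/supervisor/conf.d/synapse.conf
```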
 
diff --git a/docker/start.py b/docker/start.py
index 4ac8f03477..5a98dce551 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -110,7 +110,11 @@ def generate_config_from_template(
 
     log_config_file = environ["SYNAPSE_LOG_CONFIG"]
     log("Generating log config file " + log_config_file)
-    convert("/conf/log.config", log_config_file, environ)
+    convert(
+        "/conf/log.config",
+        log_config_file,
+        {**environ, "include_worker_name_in_log_line": False},
+    )
 
     # Hopefully we already have a signing key, but generate one if not.
     args = [