Diffstat (limited to 'docker')
-rw-r--r--   docker/Dockerfile                                    |  18
-rw-r--r--   docker/Dockerfile-dhvirtualenv                       |   4
-rw-r--r--   docker/Dockerfile-workers                            |   3
-rw-r--r--   docker/complement/Dockerfile                         |   3
-rwxr-xr-x   docker/complement/conf/start_for_complement.sh       |  10
-rw-r--r--   docker/complement/conf/workers-shared-extra.yaml.j2  |   8
-rwxr-xr-x   docker/configure_workers_and_start.py                | 174
-rw-r--r--   docker/editable.Dockerfile                           |  75
-rwxr-xr-x   docker/start.py                                      |  18
9 files changed, 255 insertions(+), 58 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
index b20951d4cf..7e5123210a 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -43,7 +43,7 @@ RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
   apt-get update -qq && apt-get install -yqq \
-    build-essential cargo git libffi-dev libssl-dev \
+    build-essential git libffi-dev libssl-dev \
     && rm -rf /var/lib/apt/lists/*
 
 # We install poetry in its own build stage to avoid its dependencies conflicting with
@@ -97,6 +97,8 @@ RUN \
     zlib1g-dev \
     git \
     curl \
+    libicu-dev \
+    pkg-config \
     && rm -rf /var/lib/apt/lists/*
 
 
@@ -106,7 +108,13 @@ ENV CARGO_HOME=/cargo
 ENV PATH=/cargo/bin:/rust/bin:$PATH
 RUN mkdir /rust /cargo
-RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+
+# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
+# set to true, so we expose it as a build-arg.
+ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
 
 # To speed up rebuilds, install all of the dependencies before we copy over
 # the whole synapse project, so that this layer in the Docker cache can be
@@ -121,7 +129,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
 COPY synapse /synapse/synapse/
 COPY rust /synapse/rust/
 # ... and what we need to `pip install`.
-COPY pyproject.toml README.rst build_rust.py /synapse/
+COPY pyproject.toml README.rst build_rust.py Cargo.toml Cargo.lock /synapse/
 
 # Repeat of earlier build argument declaration, as this is a new build stage.
 ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
@@ -129,7 +137,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
 # Install the synapse package itself.
 # If we have populated requirements.txt, we don't install any dependencies
 # as we should already have those from the previous `pip install` step.
-RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
+RUN --mount=type=cache,target=/synapse/target,sharing=locked \
+  --mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
+  if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
   pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
   else \
   pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
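
The `CARGO_NET_GIT_FETCH_WITH_CLI` build-arg above matters mostly for arm64 builds, where cargo's built-in git fetcher can consume enough memory to kill the build. A sketch of passing it at build time; the platform flag and image tag are illustrative, not taken from this diff:

```sh
# Hypothetical arm64 build: CARGO_NET_GIT_FETCH_WITH_CLI=true makes cargo
# shell out to the git CLI for fetches, keeping memory usage down.
docker buildx build \
    --platform linux/arm64 \
    --build-arg CARGO_NET_GIT_FETCH_WITH_CLI=true \
    --file docker/Dockerfile \
    --tag synapse:arm64-test \
    .
```
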
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index ca3a259081..f3b5b00ce6 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -84,6 +84,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
     python3-venv \
     sqlite3 \
     libpq-dev \
+    libicu-dev \
+    pkg-config \
     xmlsec1
 
 # Install rust and ensure it's in the PATH
@@ -92,7 +94,7 @@ ENV CARGO_HOME=/cargo
 ENV PATH=/cargo/bin:/rust/bin:$PATH
 RUN mkdir /rust /cargo
 
-RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
 
 COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 0c2d4f3047..faf7f2cef8 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -1,6 +1,7 @@
 # syntax=docker/dockerfile:1
 
 ARG SYNAPSE_VERSION=latest
+ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
 
 # first of all, we create a base image with an nginx which we can copy into the
 # target image. For repeated rebuilds, this is much faster than apt installing
@@ -23,7 +24,7 @@ FROM debian:bullseye-slim AS deps_base
 FROM redis:6-bullseye AS redis_base
 
 # now build the final image, based on the the regular Synapse docker image
-FROM matrixdotorg/synapse:$SYNAPSE_VERSION
+FROM $FROM
 
 # Install supervisord with pip instead of apt, to avoid installing a second
 # copy of python.
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index c0935c99a8..be1aa1c55e 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -7,8 +7,9 @@
 # https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
 
 ARG SYNAPSE_VERSION=latest
+ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
 
-FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
+FROM $FROM
 
 # First of all, we copy postgres server from the official postgres image,
 # since for repeated rebuilds, this is much faster than apt installing
 # postgres each time.
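
Both `Dockerfile-workers` and the Complement image now take their base image from a `FROM` build-arg instead of hardcoding `matrixdotorg/synapse`, which lets a locally built monolith image be layered under the workers image without pushing to a registry. A sketch of that flow; the local tags are hypothetical:

```sh
# Build a local monolith image, then use it as the base of the workers image.
docker build -t synapse-local -f docker/Dockerfile .
docker build -t synapse-workers-local \
    --build-arg FROM=synapse-local \
    -f docker/Dockerfile-workers .
```
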
diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh
index cc6482f763..49d79745b0 100755
--- a/docker/complement/conf/start_for_complement.sh
+++ b/docker/complement/conf/start_for_complement.sh
@@ -45,7 +45,12 @@ esac
 
 if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
   # Specify the workers to test with
-  export SYNAPSE_WORKER_TYPES="\
+  # Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still
+  # utilizing WORKERS=1 for backwards compatibility.
+  # -n True if the length of string is non-zero.
+  # -z True if the length of string is zero.
+  if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
+    export SYNAPSE_WORKER_TYPES="\
       event_persister, \
       event_persister, \
       background_worker, \
@@ -57,9 +62,12 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
       federation_reader, \
       federation_sender, \
       synchrotron, \
+      client_reader, \
       appservice, \
       pusher"
+  fi
 
+  log "Workers requested: $SYNAPSE_WORKER_TYPES"
 
   # Improve startup times by using a launcher based on fork()
   export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
 else
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index 9e554a865e..ca640c343b 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -12,6 +12,8 @@ trusted_key_servers: []
 enable_registration: true
 enable_registration_without_verification: true
 bcrypt_rounds: 4
+url_preview_enabled: true
+url_preview_ip_range_blacklist: []
 
 ## Registration ##
 
@@ -90,8 +92,6 @@ allow_device_name_lookup_over_federation: true
 ## Experimental Features ##
 
 experimental_features:
-  # Enable spaces support
-  spaces_enabled: true
   # Enable history backfilling support
   msc2716_enabled: true
   # server-side support for partial state in /send_join responses
@@ -100,8 +100,8 @@ experimental_features:
   # client-side support for partial state in /send_join responses
   faster_joins: true
   {% endif %}
-  # Enable jump to date endpoint
-  msc3030_enabled: true
+  # Filtering /messages by relation type.
+  msc3874_enabled: true
 
 server_notices:
   system_mxid_localpart: _server
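
With the new `-z` guard, a `SYNAPSE_WORKER_TYPES` value that is already set wins over the built-in list, while a bare `WORKERS=1` run keeps the old default set. A hedged sketch of using the override; it assumes the variables are forwarded into the container by the Complement wrapper script, and the test filter is purely illustrative:

```sh
# Run Complement against a custom worker layout. Worker names must match
# keys of WORKERS_CONFIG in configure_workers_and_start.py; repeating a
# name requests more than one instance of that worker type.
export WORKERS=1
export SYNAPSE_WORKER_TYPES="federation_sender, client_reader, client_reader"
./scripts-dev/complement.sh -run TestSomeSuite  # test filter is illustrative
```
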
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 51583dc13d..58c62f2231 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -20,7 +20,7 @@
 # * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
 # * SYNAPSE_REPORT_STATS: Whether to report stats.
 # * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
-#   below. Leave empty for no workers, or set to '*' for all possible workers.
+#   below. Leave empty for no workers.
 # * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
 #   will be treated as Application Service registration files.
 # * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
@@ -39,6 +39,7 @@
 # continue to work if so.
 
 import os
+import platform
 import subprocess
 import sys
 from pathlib import Path
@@ -49,13 +50,18 @@ from jinja2 import Environment, FileSystemLoader
 
 MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
 
-
+# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
+# Watching /_matrix/client needs a "client" listener
+# Watching /_matrix/federation needs a "federation" listener
+# Watching /_matrix/media and related needs a "media" listener
+# Stream Writers require "client" and "replication" listeners because they
+# have to attach by instance_map to the master process and have client endpoints.
 WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     "pusher": {
-        "app": "synapse.app.pusher",
+        "app": "synapse.app.generic_worker",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"start_pushers": False},
+        "shared_extra_conf": {},
         "worker_extra_conf": "",
     },
     "user_dir": {
@@ -78,7 +84,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_synapse/admin/v1/media/.*$",
             "^/_synapse/admin/v1/quarantine_media/.*$",
         ],
-        "shared_extra_conf": {"enable_media_repo": False},
+        # The first configured media worker will run the media background jobs
+        "shared_extra_conf": {
+            "enable_media_repo": False,
+            "media_instance_running_background_jobs": "media_repository1",
+        },
         "worker_extra_conf": "enable_media_repo: true",
     },
     "appservice": {
@@ -89,10 +99,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "worker_extra_conf": "",
     },
     "federation_sender": {
-        "app": "synapse.app.federation_sender",
+        "app": "synapse.app.generic_worker",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"send_federation": False},
+        "shared_extra_conf": {},
         "worker_extra_conf": "",
     },
     "synchrotron": {
@@ -107,6 +117,35 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "shared_extra_conf": {},
         "worker_extra_conf": "",
     },
+    "client_reader": {
+        "app": "synapse.app.generic_worker",
+        "listener_resources": ["client"],
+        "endpoint_patterns": [
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$",
+            "^/_matrix/client/v1/rooms/.*/hierarchy$",
+            "^/_matrix/client/(v1|unstable)/rooms/.*/relations/",
+            "^/_matrix/client/v1/rooms/.*/threads$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
+            "^/_matrix/client/versions$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
+            "^/_matrix/client/(r0|v3|unstable)/register$",
+            "^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
+            "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
+            "^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
+        ],
+        "shared_extra_conf": {},
+        "worker_extra_conf": "",
+    },
     "federation_reader": {
         "app": "synapse.app.generic_worker",
         "listener_resources": ["federation"],
@@ -125,6 +164,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_matrix/federation/(v1|v2)/invite/",
             "^/_matrix/federation/(v1|v2)/query_auth/",
             "^/_matrix/federation/(v1|v2)/event_auth/",
+            "^/_matrix/federation/v1/timestamp_to_event/",
             "^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
             "^/_matrix/federation/(v1|v2)/user/devices/",
             "^/_matrix/federation/(v1|v2)/get_groups_publicised$",
@@ -171,14 +211,54 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "worker_extra_conf": "",
     },
     "frontend_proxy": {
-        "app": "synapse.app.frontend_proxy",
+        "app": "synapse.app.generic_worker",
         "listener_resources": ["client", "replication"],
         "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
         "shared_extra_conf": {},
-        "worker_extra_conf": (
-            "worker_main_http_uri: http://127.0.0.1:%d"
-            % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
-        ),
+        "worker_extra_conf": "",
+    },
+    "account_data": {
+        "app": "synapse.app.generic_worker",
+        "listener_resources": ["client", "replication"],
+        "endpoint_patterns": [
+            "^/_matrix/client/(r0|v3|unstable)/.*/tags",
+            "^/_matrix/client/(r0|v3|unstable)/.*/account_data",
+        ],
+        "shared_extra_conf": {},
+        "worker_extra_conf": "",
+    },
+    "presence": {
+        "app": "synapse.app.generic_worker",
+        "listener_resources": ["client", "replication"],
+        "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
+        "shared_extra_conf": {},
+        "worker_extra_conf": "",
+    },
+    "receipts": {
+        "app": "synapse.app.generic_worker",
+        "listener_resources": ["client", "replication"],
+        "endpoint_patterns": [
+            "^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
+            "^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
+        ],
+        "shared_extra_conf": {},
+        "worker_extra_conf": "",
+    },
+    "to_device": {
+        "app": "synapse.app.generic_worker",
+        "listener_resources": ["client", "replication"],
+        "endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
+        "shared_extra_conf": {},
+        "worker_extra_conf": "",
+    },
+    "typing": {
+        "app": "synapse.app.generic_worker",
+        "listener_resources": ["client", "replication"],
+        "endpoint_patterns": [
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
+        ],
+        "shared_extra_conf": {},
+        "worker_extra_conf": "",
     },
 }
@@ -201,24 +281,19 @@ upstream {upstream_worker_type} {{
 
 # Utility functions
 def log(txt: str) -> None:
-    """Log something to the stdout.
-
-    Args:
-        txt: The text to log.
-    """
     print(txt)
 
 
 def error(txt: str) -> NoReturn:
-    """Log something and exit with an error code.
-
-    Args:
-        txt: The text to log in error.
-    """
-    log(txt)
+    print(txt, file=sys.stderr)
     sys.exit(2)
 
 
+def flush_buffers() -> None:
+    sys.stdout.flush()
+    sys.stderr.flush()
+
+
 def convert(src: str, dst: str, **template_vars: object) -> None:
     """Generate a file from a template
 
@@ -247,14 +322,14 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
         outfile.write(rendered)
 
 
-def add_sharding_to_shared_config(
+def add_worker_roles_to_shared_config(
     shared_config: dict,
     worker_type: str,
     worker_name: str,
     worker_port: int,
 ) -> None:
     """Given a dictionary representing a config file shared across all workers,
-    append sharded worker information to it for the current worker_type instance.
+    append appropriate worker information to it for the current worker_type instance.
 
     Args:
         shared_config: The config dict that all worker instances share (after being
             converted to YAML)
@@ -285,9 +360,19 @@ def add_sharding_to_shared_config(
             "port": worker_port,
         }
 
-    elif worker_type == "media_repository":
-        # The first configured media worker will run the media background jobs
-        shared_config.setdefault("media_instance_running_background_jobs", worker_name)
+    elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
+        # Update the list of stream writers
+        # It's convenient that the name of the worker type is the same as the stream to write
+        shared_config.setdefault("stream_writers", {}).setdefault(
+            worker_type, []
+        ).append(worker_name)
+
+        # Map of stream writer instance names to host/ports combos
+        # For now, all stream writers need http replication ports
+        instance_map[worker_name] = {
+            "host": "localhost",
+            "port": worker_port,
+        }
 
 
 def generate_base_homeserver_config() -> None:
@@ -299,7 +384,7 @@ def generate_base_homeserver_config() -> None:
     # start.py already does this for us, so just call that.
     # note that this script is copied in in the official, monolith dockerfile
     os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
-    subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
+    subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
 
 
 def generate_worker_files(
@@ -373,8 +458,8 @@ def generate_worker_files(
         # No workers, just the main process
         worker_types = []
     else:
-        # Split type names by comma
-        worker_types = worker_types_env.split(",")
+        # Split type names by comma, ignoring whitespace.
+        worker_types = [x.strip() for x in worker_types_env.split(",")]
 
     # Create the worker configuration directory if it doesn't already exist
     os.makedirs("/conf/workers", exist_ok=True)
@@ -393,14 +478,11 @@ def generate_worker_files(
 
     # For each worker type specified by the user, create config values
     for worker_type in worker_types:
-        worker_type = worker_type.strip()
-
         worker_config = WORKERS_CONFIG.get(worker_type)
         if worker_config:
             worker_config = worker_config.copy()
         else:
-            log(worker_type + " is an unknown worker type! It will be ignored")
-            continue
+            error(worker_type + " is an unknown worker type! Please fix!")
 
         new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
         worker_type_counter[worker_type] = new_worker_count
@@ -419,11 +501,11 @@ def generate_worker_files(
 
         # Check if more than one instance of this worker type has been specified
         worker_type_total_count = worker_types.count(worker_type)
-        if worker_type_total_count > 1:
-            # Update the shared config with sharding-related options if necessary
-            add_sharding_to_shared_config(
-                shared_config, worker_type, worker_name, worker_port
-            )
+
+        # Update the shared config with sharding-related options if necessary
+        add_worker_roles_to_shared_config(
+            shared_config, worker_type, worker_name, worker_port
+        )
 
         # Enable the worker in supervisord
         worker_descriptors.append(worker_config)
@@ -604,14 +686,24 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
         with open(mark_filepath, "w") as f:
             f.write("")
 
+    # Lifted right out of start.py
+    jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
+
+    if os.path.isfile(jemallocpath):
+        environ["LD_PRELOAD"] = jemallocpath
+    else:
+        log("Could not find %s, will not use" % (jemallocpath,))
+
     # Start supervisord, which will start Synapse, all of the configured worker
     # processes, redis, nginx etc. according to the config we created above.
     log("Starting supervisord")
-    os.execl(
+    flush_buffers()
+    os.execle(
         "/usr/local/bin/supervisord",
         "supervisord",
         "-c",
         "/etc/supervisor/supervisord.conf",
+        environ,
     )
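
Since `add_worker_roles_to_shared_config` is now called unconditionally, a single instance of a sharded type gets the same wiring as many, and duplicating a name in `SYNAPSE_WORKER_TYPES` shards that role. The stream-writer types (`account_data`, `presence`, `receipts`, `to_device`, `typing`) are registered under `stream_writers` and `instance_map` in the generated shared config. An illustrative run of the workers image; the volume path and server name are placeholders:

```sh
# Each comma-separated entry (whitespace is now stripped) spawns one worker;
# the repeated event_persister shards event persistence across two instances.
docker run -d --name synapse \
    -v /path/to/data:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=no \
    -e SYNAPSE_WORKER_TYPES="event_persister, event_persister, typing, to_device" \
    matrixdotorg/synapse-workers:latest
```
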
diff --git a/docker/editable.Dockerfile b/docker/editable.Dockerfile
new file mode 100644
index 0000000000..0e8cf2e712
--- /dev/null
+++ b/docker/editable.Dockerfile
@@ -0,0 +1,75 @@
+# syntax=docker/dockerfile:1
+# This dockerfile builds an editable install of Synapse.
+#
+# Used by `complement.sh`. Not suitable for production use.
+
+ARG PYTHON_VERSION=3.9
+
+###
+### Stage 0: generate requirements.txt
+###
+# We hardcode the use of Debian bullseye here because this could change upstream
+# and other Dockerfiles used for testing are expecting bullseye.
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
+
+# Install Rust and other dependencies (stolen from normal Dockerfile)
+# install the OS build deps
+RUN \
+   --mount=type=cache,target=/var/cache/apt,sharing=locked \
+   --mount=type=cache,target=/var/lib/apt,sharing=locked \
+  apt-get update -qq && apt-get install -yqq \
+    build-essential \
+    libffi-dev \
+    libjpeg-dev \
+    libpq-dev \
+    libssl-dev \
+    libwebp-dev \
+    libxml++2.6-dev \
+    libxslt1-dev \
+    openssl \
+    zlib1g-dev \
+    git \
+    curl \
+    gosu \
+    libjpeg62-turbo \
+    libpq5 \
+    libwebp6 \
+    xmlsec1 \
+    libjemalloc2 \
+    && rm -rf /var/lib/apt/lists/*
+ENV RUSTUP_HOME=/rust
+ENV CARGO_HOME=/cargo
+ENV PATH=/cargo/bin:/rust/bin:$PATH
+RUN mkdir /rust /cargo
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+
+# Make a base copy of the editable source tree, so that we have something to
+# install and build now — even though it's going to be covered up by a mount
+# at runtime.
+COPY synapse /editable-src/synapse/
+COPY rust /editable-src/rust/
+# ... and what we need to `pip install`.
+COPY pyproject.toml poetry.lock README.rst build_rust.py Cargo.toml Cargo.lock /editable-src/
+
+RUN pip install poetry
+RUN poetry config virtualenvs.create false
+RUN cd /editable-src && poetry install --extras all
+
+# Make copies of useful things for inspection:
+# - the Rust module (must be copied to the editable source tree before startup)
+# - poetry.lock is useful for checking if dependencies have changed.
+RUN cp /editable-src/synapse/synapse_rust.abi3.so /synapse_rust.abi3.so.bak
+RUN cp /editable-src/poetry.lock /poetry.lock.bak
+
+
+### Extra setup from original Dockerfile
+COPY ./docker/start.py /start.py
+COPY ./docker/conf /conf
+
+EXPOSE 8008/tcp 8009/tcp 8448/tcp
+
+ENTRYPOINT ["/start.py"]
+
+HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
+    CMD curl -fSs http://localhost:8008/health || exit 1
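
The editable image is built once and then expects the live source tree to be bind-mounted over `/editable-src`, so local edits take effect without rebuilding; the `.bak` copies let tooling restore the compiled Rust module and check whether poetry.lock has drifted. A hand-built sketch (complement.sh normally drives this; the tag and mount pairing are assumptions):

```sh
# Build from the repo root, then mount the working tree over the baked-in copy.
docker build -t synapse-editable -f docker/editable.Dockerfile .
docker run --rm -it -v "$(pwd):/editable-src" synapse-editable
```
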
diff --git a/docker/start.py b/docker/start.py
index 5a98dce551..ebcc599f04 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -13,14 +13,19 @@ import jinja2
 
 # Utility functions
 def log(txt: str) -> None:
-    print(txt, file=sys.stderr)
+    print(txt)
 
 
 def error(txt: str) -> NoReturn:
-    log(txt)
+    print(txt, file=sys.stderr)
     sys.exit(2)
 
 
+def flush_buffers() -> None:
+    sys.stdout.flush()
+    sys.stderr.flush()
+
+
 def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
     """Generate a file from a template
 
@@ -131,10 +136,10 @@ def generate_config_from_template(
 
     if ownership is not None:
         log(f"Setting ownership on /data to {ownership}")
-        subprocess.check_output(["chown", "-R", ownership, "/data"])
+        subprocess.run(["chown", "-R", ownership, "/data"], check=True)
         args = ["gosu", ownership] + args
 
-    subprocess.check_output(args)
+    subprocess.run(args, check=True)
 
 
 def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
@@ -158,7 +163,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
     if ownership is not None:
         # make sure that synapse has perms to write to the data dir.
         log(f"Setting ownership on {data_dir} to {ownership}")
-        subprocess.check_output(["chown", ownership, data_dir])
+        subprocess.run(["chown", ownership, data_dir], check=True)
 
     # create a suitable log config from our template
     log_config_file = "%s/%s.log.config" % (config_dir, server_name)
@@ -185,6 +190,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
         "--open-private-ports",
     ]
     # log("running %s" % (args, ))
+    flush_buffers()
     os.execv(sys.executable, args)
 
 
@@ -267,8 +273,10 @@ running with 'migrate_config'. See the README for more details.
         args = [sys.executable] + args
         if ownership is not None:
             args = ["gosu", ownership] + args
+            flush_buffers()
             os.execve("/usr/sbin/gosu", args, environ)
         else:
+            flush_buffers()
             os.execve(sys.executable, args, environ)
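
The `flush_buffers()` calls ensure anything sitting in Python's stdio buffers is written out before `os.exec*` replaces the process image, and switching from `subprocess.check_output` to `subprocess.run(..., check=True)` lets child output stream to the console instead of being captured, while still failing loudly on a non-zero exit. The documented entry points are unchanged; for reference:

```sh
# Standard generate-then-run flow from the Synapse docker docs; the host
# path and server name are the usual placeholders.
docker run -it --rm \
    -v /path/to/data:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    matrixdotorg/synapse:latest generate

docker run -d -v /path/to/data:/data -p 8008:8008 matrixdotorg/synapse:latest
```
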