diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py
index b5b6735a8f..c555f5f914 100644
--- a/synapse/config/__main__.py
+++ b/synapse/config/__main__.py
@@ -1,4 +1,5 @@
# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,25 +12,44 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import sys
 
 from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
 
-if __name__ == "__main__":
-    import sys
-    from synapse.config.homeserver import HomeServerConfig
 
-    action = sys.argv[1]
+def main(args):
+    action = args[1] if len(args) > 1 and args[1] == "read" else None
+    # If we're reading a key in the config file, then `args[1]` will be `read` and `args[2]`
+    # will be the key to read.
+    # We'll want to rework this code if we want to support more actions than just `read`.
+    load_config_args = args[3:] if action else args[1:]
 
+    try:
+        config = HomeServerConfig.load_config("", load_config_args)
+    except ConfigError as e:
+        sys.stderr.write("\n" + str(e) + "\n")
+        sys.exit(1)
+
+    print("Config parses OK!")
+
     if action == "read":
-        key = sys.argv[2]
+        key = args[2]
+        key_parts = key.split(".")
+
+        value = config
         try:
-            config = HomeServerConfig.load_config("", sys.argv[3:])
-        except ConfigError as e:
-            sys.stderr.write("\n" + str(e) + "\n")
+            while len(key_parts):
+                value = getattr(value, key_parts[0])
+                key_parts.pop(0)
+
+            print(f"\n{key}: {value}")
+        except AttributeError:
+            print(
+                f"\nNo '{key}' key could be found in the provided configuration file."
+            )
             sys.exit(1)
 
-        print(getattr(config, key))
-        sys.exit(0)
-    else:
-        sys.stderr.write("Unknown command %r\n" % (action,))
-        sys.exit(1)
+
+if __name__ == "__main__":
+    main(sys.argv)
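With this rewrite, the entry point always validates the configuration, and the optional `read` action resolves dotted keys by walking config attributes. A hedged usage sketch follows; the config file name and the key are illustrative assumptions, not part of the patch:

```python
# Usage sketch (assumes Synapse is installed and a homeserver.yaml exists).
# Equivalent to running: python -m synapse.config read server_name -c homeserver.yaml
from synapse.config.__main__ import main

main(["synapse.config", "read", "server_name", "-c", "homeserver.yaml"])
# Prints "Config parses OK!" followed by "server_name: <value>".
# An unknown key prints an error message, and main() exits with status 1
# (via SystemExit) on a bad key or an invalid config file.
```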
diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
index 06fbd1166b..c1d9069798 100644
--- a/synapse/config/_base.pyi
+++ b/synapse/config/_base.pyi
@@ -26,6 +26,7 @@ from synapse.config import (
redis,
registration,
repository,
+ retention,
room_directory,
saml2,
server,
@@ -91,6 +92,7 @@ class RootConfig:
modules: modules.ModulesConfig
caches: cache.CacheConfig
federation: federation.FederationConfig
+    retention: retention.RetentionConfig
 
     config_classes: List = ...
def __init__(self) -> None: ...
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 7b0381c06a..8b098ad48d 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -24,6 +24,11 @@ class ExperimentalConfig(Config):
def read_config(self, config: JsonDict, **kwargs):
         experimental = config.get("experimental_features") or {}
 
+        # Whether to enable experimental MSC1849 (aka relations) support
+        self.msc1849_enabled = config.get("experimental_msc1849_support_enabled", True)
+        # MSC3440 (thread relation)
+        self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False)
+
         # MSC3026 (busy presence state)
         self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
 
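Note that the two new flags are read from different scopes: `experimental_msc1849_support_enabled` stays a top-level config option (it moved here from `ServerConfig`, see the removal in `server.py` below), while `msc3440_enabled` lives under `experimental_features`. A minimal sketch of that lookup logic, using a hand-built dict in place of a parsed YAML config:

```python
# Minimal sketch of the two lookup scopes used in the hunk above.
config = {
    "experimental_msc1849_support_enabled": False,       # legacy top-level option
    "experimental_features": {"msc3440_enabled": True},  # experimental section
}

experimental = config.get("experimental_features") or {}
msc1849_enabled = config.get("experimental_msc1849_support_enabled", True)
msc3440_enabled = experimental.get("msc3440_enabled", False)

assert (msc1849_enabled, msc3440_enabled) == (False, True)
```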
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 442f1b9ac0..001605c265 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -38,6 +38,7 @@ from .ratelimiting import RatelimitConfig
from .redis import RedisConfig
from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig
+from .retention import RetentionConfig
from .room import RoomConfig
from .room_directory import RoomDirectoryConfig
from .saml2 import SAML2Config
@@ -59,6 +60,7 @@ class HomeServerConfig(RootConfig):
config_classes = [
ModulesConfig,
ServerConfig,
+ RetentionConfig,
TlsConfig,
FederationConfig,
CacheConfig,
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 0a08231e5a..5252e61a99 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -18,6 +18,7 @@ import os
import sys
import threading
from string import Template
+from typing import TYPE_CHECKING
 
 import yaml
from zope.interface import implementer
@@ -38,6 +39,9 @@ from synapse.util.versionstring import get_version_string
 
 from ._base import Config, ConfigError
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 DEFAULT_LOG_CONFIG = Template(
"""\
# Log configuration for Synapse.
@@ -306,7 +310,10 @@ def _reload_logging_config(log_config_path):
 
 
 def setup_logging(
- hs, config, use_worker_options=False, logBeginner: LogBeginner = globalLogBeginner
+ hs: "HomeServer",
+ config,
+ use_worker_options=False,
+ logBeginner: LogBeginner = globalLogBeginner,
) -> None:
"""
Set up the logging subsystem.
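The `TYPE_CHECKING` guard lets `setup_logging` gain a `HomeServer` annotation without importing `synapse.server` at runtime, which would risk a circular import. A minimal illustration of the pattern (the `setup` function name is hypothetical):

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only evaluated by type checkers such as mypy, never at runtime.
    from synapse.server import HomeServer


def setup(hs: "HomeServer") -> None:
    # The quoted annotation is a forward reference, resolved lazily by the
    # type checker, so this module never actually imports synapse.server.
    ...
```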
diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py
index 83994df798..f980102b45 100644
--- a/synapse/config/password_auth_providers.py
+++ b/synapse/config/password_auth_providers.py
@@ -25,6 +25,29 @@ class PasswordAuthProviderConfig(Config):
     section = "authproviders"
 
     def read_config(self, config, **kwargs):
+ """Parses the old password auth providers config. The config format looks like this:
+
+ password_providers:
+ # Example config for an LDAP auth provider
+ - module: "ldap_auth_provider.LdapAuthProvider"
+ config:
+ enabled: true
+ uri: "ldap://ldap.example.com:389"
+ start_tls: true
+ base: "ou=users,dc=example,dc=com"
+ attributes:
+ uid: "cn"
+ mail: "email"
+ name: "givenName"
+ #bind_dn:
+ #bind_password:
+ #filter: "(objectClass=posixAccount)"
+
+ We expect admins to use modules for this feature (which is why it doesn't appear
+ in the sample config file), but we want to keep support for it around for a bit
+ for backwards compatibility.
+ """
+
self.password_providers: List[Tuple[Type, Any]] = []
         providers = []
 
@@ -49,33 +72,3 @@ class PasswordAuthProviderConfig(Config):
             )
 
self.password_providers.append((provider_class, provider_config))
-
- def generate_config_section(self, **kwargs):
- return """\
- # Password providers allow homeserver administrators to integrate
- # their Synapse installation with existing authentication methods
- # ex. LDAP, external tokens, etc.
- #
- # For more information and known implementations, please see
- # https://matrix-org.github.io/synapse/latest/password_auth_providers.html
- #
- # Note: instances wishing to use SAML or CAS authentication should
- # instead use the `saml2_config` or `cas_config` options,
- # respectively.
- #
- password_providers:
- # # Example config for an LDAP auth provider
- # - module: "ldap_auth_provider.LdapAuthProvider"
- # config:
- # enabled: true
- # uri: "ldap://ldap.example.com:389"
- # start_tls: true
- # base: "ou=users,dc=example,dc=com"
- # attributes:
- # uid: "cn"
- # mail: "email"
- # name: "givenName"
- # #bind_dn:
- # #bind_password:
- # #filter: "(objectClass=posixAccount)"
- """
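Each `password_providers` entry resolves to a `(class, config)` tuple, as the surviving `read_config` loop shows; in Synapse this is done by `synapse.util.module_loader.load_module`. A rough stand-alone sketch of that resolution, using a stdlib class as a stand-in since the LDAP module path in the docstring is only an example:

```python
import importlib
from typing import Any, Dict, Tuple, Type


def load_provider(entry: Dict[str, Any]) -> Tuple[Type, Dict[str, Any]]:
    """Resolve a password_providers entry into a (class, config) tuple.

    Illustrative only: Synapse uses load_module(), which also validates
    the entry and reports config paths in its errors.
    """
    module_path, _, class_name = entry["module"].rpartition(".")
    provider_class = getattr(importlib.import_module(module_path), class_name)
    return provider_class, entry.get("config", {})


# Stand-in for {"module": "ldap_auth_provider.LdapAuthProvider", ...}:
cls, cfg = load_provider({"module": "logging.StreamHandler", "config": {}})
assert cls.__name__ == "StreamHandler"
```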
diff --git a/synapse/config/retention.py b/synapse/config/retention.py
new file mode 100644
index 0000000000..aed9bf458f
--- /dev/null
+++ b/synapse/config/retention.py
@@ -0,0 +1,226 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import List, Optional
+
+import attr
+
+from synapse.config._base import Config, ConfigError
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class RetentionPurgeJob:
+    """Object describing the configuration of a single message retention purge job"""
+
+ interval: int
+ shortest_max_lifetime: Optional[int]
+ longest_max_lifetime: Optional[int]
+
+
+class RetentionConfig(Config):
+ section = "retention"
+
+ def read_config(self, config, **kwargs):
+ retention_config = config.get("retention")
+ if retention_config is None:
+ retention_config = {}
+
+ self.retention_enabled = retention_config.get("enabled", False)
+
+ retention_default_policy = retention_config.get("default_policy")
+
+ if retention_default_policy is not None:
+ self.retention_default_min_lifetime = retention_default_policy.get(
+ "min_lifetime"
+ )
+ if self.retention_default_min_lifetime is not None:
+ self.retention_default_min_lifetime = self.parse_duration(
+ self.retention_default_min_lifetime
+ )
+
+ self.retention_default_max_lifetime = retention_default_policy.get(
+ "max_lifetime"
+ )
+ if self.retention_default_max_lifetime is not None:
+ self.retention_default_max_lifetime = self.parse_duration(
+ self.retention_default_max_lifetime
+ )
+
+ if (
+ self.retention_default_min_lifetime is not None
+ and self.retention_default_max_lifetime is not None
+ and (
+ self.retention_default_min_lifetime
+ > self.retention_default_max_lifetime
+ )
+ ):
+ raise ConfigError(
+ "The default retention policy's 'min_lifetime' can not be greater"
+ " than its 'max_lifetime'"
+ )
+ else:
+ self.retention_default_min_lifetime = None
+ self.retention_default_max_lifetime = None
+
+ if self.retention_enabled:
+ logger.info(
+ "Message retention policies support enabled with the following default"
+ " policy: min_lifetime = %s ; max_lifetime = %s",
+ self.retention_default_min_lifetime,
+ self.retention_default_max_lifetime,
+ )
+
+ self.retention_allowed_lifetime_min = retention_config.get(
+ "allowed_lifetime_min"
+ )
+ if self.retention_allowed_lifetime_min is not None:
+ self.retention_allowed_lifetime_min = self.parse_duration(
+ self.retention_allowed_lifetime_min
+ )
+
+ self.retention_allowed_lifetime_max = retention_config.get(
+ "allowed_lifetime_max"
+ )
+ if self.retention_allowed_lifetime_max is not None:
+ self.retention_allowed_lifetime_max = self.parse_duration(
+ self.retention_allowed_lifetime_max
+ )
+
+ if (
+ self.retention_allowed_lifetime_min is not None
+ and self.retention_allowed_lifetime_max is not None
+ and self.retention_allowed_lifetime_min
+ > self.retention_allowed_lifetime_max
+ ):
+ raise ConfigError(
+ "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
+ " greater than 'allowed_lifetime_max'"
+ )
+
+ self.retention_purge_jobs: List[RetentionPurgeJob] = []
+ for purge_job_config in retention_config.get("purge_jobs", []):
+ interval_config = purge_job_config.get("interval")
+
+ if interval_config is None:
+ raise ConfigError(
+ "A retention policy's purge jobs configuration must have the"
+ " 'interval' key set."
+ )
+
+ interval = self.parse_duration(interval_config)
+
+ shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
+
+ if shortest_max_lifetime is not None:
+ shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
+
+ longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
+
+ if longest_max_lifetime is not None:
+ longest_max_lifetime = self.parse_duration(longest_max_lifetime)
+
+ if (
+ shortest_max_lifetime is not None
+ and longest_max_lifetime is not None
+ and shortest_max_lifetime > longest_max_lifetime
+ ):
+ raise ConfigError(
+ "A retention policy's purge jobs configuration's"
+ " 'shortest_max_lifetime' value can not be greater than its"
+ " 'longest_max_lifetime' value."
+ )
+
+ self.retention_purge_jobs.append(
+ RetentionPurgeJob(interval, shortest_max_lifetime, longest_max_lifetime)
+ )
+
+ if not self.retention_purge_jobs:
+ self.retention_purge_jobs = [
+ RetentionPurgeJob(self.parse_duration("1d"), None, None)
+ ]
+
+ def generate_config_section(self, config_dir_path, server_name, **kwargs):
+ return """\
+ # Message retention policy at the server level.
+ #
+ # Room admins and mods can define a retention period for their rooms using the
+ # 'm.room.retention' state event, and server admins can cap this period by setting
+ # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
+ #
+ # If this feature is enabled, Synapse will regularly look for and purge events
+ # which are older than the room's maximum retention period. Synapse will also
+ # filter events received over federation so that events that should have been
+ # purged are ignored and not stored again.
+ #
+ retention:
+ # The message retention policies feature is disabled by default. Uncomment the
+ # following line to enable it.
+ #
+ #enabled: true
+
+ # Default retention policy. If set, Synapse will apply it to rooms that lack the
+ # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
+ # matter much because Synapse doesn't take it into account yet.
+ #
+ #default_policy:
+ # min_lifetime: 1d
+ # max_lifetime: 1y
+
+    # Retention policy limits. If set, and the current state of a room contains a
+    # 'm.room.retention' event with a 'min_lifetime' or a 'max_lifetime' that's out
+    # of these bounds, Synapse will cap the room's policy to these limits when
+    # running purge jobs.
+ #
+ #allowed_lifetime_min: 1d
+ #allowed_lifetime_max: 1y
+
+    # Server admins can define the settings of the background jobs purging the
+    # events whose lifetime has expired under the 'purge_jobs' section.
+ #
+ # If no configuration is provided, a single job will be set up to delete expired
+ # events in every room daily.
+ #
+ # Each job's configuration defines which range of message lifetimes the job
+ # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
+ # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
+ # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
+ # lower than or equal to 3 days. Both the minimum and the maximum value of a
+ # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
+ # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
+    # whose 'max_lifetime' is lower than or equal to three days.
+ #
+ # The rationale for this per-job configuration is that some rooms might have a
+ # retention policy with a low 'max_lifetime', where history needs to be purged
+ # of outdated messages on a more frequent basis than for the rest of the rooms
+    # (e.g. every 12h), without that purge being performed by a job that iterates
+    # over every room it knows of, which could be heavy on the server.
+ #
+ # If any purge job is configured, it is strongly recommended to have at least
+ # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
+ # set, or one job without 'shortest_max_lifetime' and one job without
+ # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
+ # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
+ # room's policy to these values is done after the policies are retrieved from
+ # Synapse's database (which is done using the range specified in a purge job's
+ # configuration).
+ #
+ #purge_jobs:
+ # - longest_max_lifetime: 3d
+ # interval: 12h
+ # - shortest_max_lifetime: 3d
+ # interval: 1d
+ """
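The `shortest_max_lifetime`/`longest_max_lifetime` pair partitions rooms among purge jobs by their effective `max_lifetime`: per the sample-config comments above, a job covers the half-open interval `(shortest, longest]`, with an unset bound meaning unbounded on that side. A minimal sketch of that matching rule (not Synapse's actual scheduler code):

```python
from typing import Optional

MS_PER_DAY = 24 * 60 * 60 * 1000


def job_matches(
    max_lifetime: int, shortest: Optional[int], longest: Optional[int]
) -> bool:
    """Does a purge job with range (shortest, longest] cover this room?"""
    if shortest is not None and max_lifetime <= shortest:
        return False
    if longest is not None and max_lifetime > longest:
        return False
    return True


# With the sample config's two jobs, split at a 3-day boundary:
assert job_matches(2 * MS_PER_DAY, None, 3 * MS_PER_DAY)      # 12h job
assert job_matches(7 * MS_PER_DAY, 3 * MS_PER_DAY, None)      # daily job
assert not job_matches(7 * MS_PER_DAY, None, 3 * MS_PER_DAY)  # out of range
```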
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 818b806357..ed094bdc44 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -225,15 +225,6 @@ class ManholeConfig:
     pub_key: Optional[Key]
 
 
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class RetentionConfig:
- """Object describing the configuration of the manhole"""
-
- interval: int
- shortest_max_lifetime: Optional[int]
- longest_max_lifetime: Optional[int]
-
-
@attr.s(frozen=True)
class LimitRemoteRoomsConfig:
enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
@@ -376,11 +367,6 @@ class ServerConfig(Config):
# (other than those sent by local server admins)
         self.block_non_admin_invites = config.get("block_non_admin_invites", False)
 
- # Whether to enable experimental MSC1849 (aka relations) support
- self.experimental_msc1849_support_enabled = config.get(
- "experimental_msc1849_support_enabled", True
- )
-
# Options to control access by tracking MAU
self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
self.max_mau_value = 0
@@ -466,124 +452,6 @@ class ServerConfig(Config):
# events with profile information that differ from the target's global profile.
         self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
 
- retention_config = config.get("retention")
- if retention_config is None:
- retention_config = {}
-
- self.retention_enabled = retention_config.get("enabled", False)
-
- retention_default_policy = retention_config.get("default_policy")
-
- if retention_default_policy is not None:
- self.retention_default_min_lifetime = retention_default_policy.get(
- "min_lifetime"
- )
- if self.retention_default_min_lifetime is not None:
- self.retention_default_min_lifetime = self.parse_duration(
- self.retention_default_min_lifetime
- )
-
- self.retention_default_max_lifetime = retention_default_policy.get(
- "max_lifetime"
- )
- if self.retention_default_max_lifetime is not None:
- self.retention_default_max_lifetime = self.parse_duration(
- self.retention_default_max_lifetime
- )
-
- if (
- self.retention_default_min_lifetime is not None
- and self.retention_default_max_lifetime is not None
- and (
- self.retention_default_min_lifetime
- > self.retention_default_max_lifetime
- )
- ):
- raise ConfigError(
- "The default retention policy's 'min_lifetime' can not be greater"
- " than its 'max_lifetime'"
- )
- else:
- self.retention_default_min_lifetime = None
- self.retention_default_max_lifetime = None
-
- if self.retention_enabled:
- logger.info(
- "Message retention policies support enabled with the following default"
- " policy: min_lifetime = %s ; max_lifetime = %s",
- self.retention_default_min_lifetime,
- self.retention_default_max_lifetime,
- )
-
- self.retention_allowed_lifetime_min = retention_config.get(
- "allowed_lifetime_min"
- )
- if self.retention_allowed_lifetime_min is not None:
- self.retention_allowed_lifetime_min = self.parse_duration(
- self.retention_allowed_lifetime_min
- )
-
- self.retention_allowed_lifetime_max = retention_config.get(
- "allowed_lifetime_max"
- )
- if self.retention_allowed_lifetime_max is not None:
- self.retention_allowed_lifetime_max = self.parse_duration(
- self.retention_allowed_lifetime_max
- )
-
- if (
- self.retention_allowed_lifetime_min is not None
- and self.retention_allowed_lifetime_max is not None
- and self.retention_allowed_lifetime_min
- > self.retention_allowed_lifetime_max
- ):
- raise ConfigError(
- "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
- " greater than 'allowed_lifetime_max'"
- )
-
- self.retention_purge_jobs: List[RetentionConfig] = []
- for purge_job_config in retention_config.get("purge_jobs", []):
- interval_config = purge_job_config.get("interval")
-
- if interval_config is None:
- raise ConfigError(
- "A retention policy's purge jobs configuration must have the"
- " 'interval' key set."
- )
-
- interval = self.parse_duration(interval_config)
-
- shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
-
- if shortest_max_lifetime is not None:
- shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
-
- longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
-
- if longest_max_lifetime is not None:
- longest_max_lifetime = self.parse_duration(longest_max_lifetime)
-
- if (
- shortest_max_lifetime is not None
- and longest_max_lifetime is not None
- and shortest_max_lifetime > longest_max_lifetime
- ):
- raise ConfigError(
- "A retention policy's purge jobs configuration's"
- " 'shortest_max_lifetime' value can not be greater than its"
- " 'longest_max_lifetime' value."
- )
-
- self.retention_purge_jobs.append(
- RetentionConfig(interval, shortest_max_lifetime, longest_max_lifetime)
- )
-
- if not self.retention_purge_jobs:
- self.retention_purge_jobs = [
- RetentionConfig(self.parse_duration("1d"), None, None)
- ]
-
         self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
 
# no_tls is not really supported any more, but let's grandfather it in
@@ -1255,75 +1123,6 @@ class ServerConfig(Config):
#
     #user_ips_max_age: 14d
 
- # Message retention policy at the server level.
- #
- # Room admins and mods can define a retention period for their rooms using the
- # 'm.room.retention' state event, and server admins can cap this period by setting
- # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
- #
- # If this feature is enabled, Synapse will regularly look for and purge events
- # which are older than the room's maximum retention period. Synapse will also
- # filter events received over federation so that events that should have been
- # purged are ignored and not stored again.
- #
- retention:
- # The message retention policies feature is disabled by default. Uncomment the
- # following line to enable it.
- #
- #enabled: true
-
- # Default retention policy. If set, Synapse will apply it to rooms that lack the
- # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
- # matter much because Synapse doesn't take it into account yet.
- #
- #default_policy:
- # min_lifetime: 1d
- # max_lifetime: 1y
-
- # Retention policy limits. If set, and the state of a room contains a
- # 'm.room.retention' event in its state which contains a 'min_lifetime' or a
- # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
- # to these limits when running purge jobs.
- #
- #allowed_lifetime_min: 1d
- #allowed_lifetime_max: 1y
-
- # Server admins can define the settings of the background jobs purging the
- # events which lifetime has expired under the 'purge_jobs' section.
- #
- # If no configuration is provided, a single job will be set up to delete expired
- # events in every room daily.
- #
- # Each job's configuration defines which range of message lifetimes the job
- # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
- # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
- # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
- # lower than or equal to 3 days. Both the minimum and the maximum value of a
- # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
- # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
- # which 'max_lifetime' is lower than or equal to three days.
- #
- # The rationale for this per-job configuration is that some rooms might have a
- # retention policy with a low 'max_lifetime', where history needs to be purged
- # of outdated messages on a more frequent basis than for the rest of the rooms
- # (e.g. every 12h), but not want that purge to be performed by a job that's
- # iterating over every room it knows, which could be heavy on the server.
- #
- # If any purge job is configured, it is strongly recommended to have at least
- # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
- # set, or one job without 'shortest_max_lifetime' and one job without
- # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
- # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
- # room's policy to these values is done after the policies are retrieved from
- # Synapse's database (which is done using the range specified in a purge job's
- # configuration).
- #
- #purge_jobs:
- # - longest_max_lifetime: 3d
- # interval: 12h
- # - shortest_max_lifetime: 3d
- # interval: 1d
-
# Inhibits the /requestToken endpoints from returning an error that might leak
# information about whether an e-mail address is in use or not on this
# homeserver.