diff --git a/synapse/config/server.py b/synapse/config/server.py
index 7f8d315954..f57eefc99c 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -19,6 +19,7 @@ import logging
import os.path
import re
from textwrap import indent
+from typing import Dict, List, Optional
import attr
import yaml
@@ -40,7 +41,7 @@ logger = logging.Logger(__name__)
# in the list.
DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]
-DEFAULT_ROOM_VERSION = "4"
+DEFAULT_ROOM_VERSION = "5"
ROOM_COMPLEXITY_TOO_GREAT = (
"Your homeserver is unable to join rooms this large or complex. "
@@ -48,8 +49,17 @@ ROOM_COMPLEXITY_TOO_GREAT = (
"to join this room."
)
+METRICS_PORT_WARNING = """\
+The metrics_port configuration option is deprecated in Synapse 0.31 in favour of
+a listener. Please see
+https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
+on how to configure the new listener.
+--------------------------------------------------------------------------------"""
+
class ServerConfig(Config):
+ section = "server"
+
def read_config(self, config, **kwargs):
self.server_name = config["server_name"]
self.server_context = config.get("server_context", None)
@@ -92,6 +102,12 @@ class ServerConfig(Config):
"require_auth_for_profile_requests", False
)
+ # Whether to require sharing a room with a user to retrieve their
+ # profile data
+ self.limit_profile_requests_to_users_who_share_rooms = config.get(
+ "limit_profile_requests_to_users_who_share_rooms", False,
+ )
+
if "restrict_public_rooms_to_local_users" in config and (
"allow_public_rooms_without_auth" in config
or "allow_public_rooms_over_federation" in config
@@ -108,15 +124,16 @@ class ServerConfig(Config):
self.allow_public_rooms_without_auth = False
self.allow_public_rooms_over_federation = False
else:
- # If set to 'False', requires authentication to access the server's public
- # rooms directory through the client API. Defaults to 'True'.
+ # If set to 'true', removes the need for authentication to access the server's
+ # public rooms directory through the client API, meaning that anyone can
+ # query the room directory. Defaults to 'false'.
self.allow_public_rooms_without_auth = config.get(
- "allow_public_rooms_without_auth", True
+ "allow_public_rooms_without_auth", False
)
- # If set to 'False', forbids any other homeserver to fetch the server's public
- # rooms directory via federation. Defaults to 'True'.
+ # If set to 'true', allows any other homeserver to fetch the server's public
+ # rooms directory via federation. Defaults to 'false'.
self.allow_public_rooms_over_federation = config.get(
- "allow_public_rooms_over_federation", True
+ "allow_public_rooms_over_federation", False
)
default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)
@@ -161,6 +178,7 @@ class ServerConfig(Config):
)
self.mau_trial_days = config.get("mau_trial_days", 0)
+ self.mau_limit_alerting = config.get("mau_limit_alerting", True)
# How long to keep redacted events in the database in unredacted form
# before redacting them.
@@ -172,17 +190,23 @@ class ServerConfig(Config):
else:
self.redaction_retention_period = None
+ # How long to keep entries in the `user_ips` table.
+ user_ips_max_age = config.get("user_ips_max_age", "28d")
+ if user_ips_max_age is not None:
+ self.user_ips_max_age = self.parse_duration(user_ips_max_age)
+ else:
+ self.user_ips_max_age = None
+
# Options to disable HS
self.hs_disabled = config.get("hs_disabled", False)
self.hs_disabled_message = config.get("hs_disabled_message", "")
- self.hs_disabled_limit_type = config.get("hs_disabled_limit_type", "")
# Admin uri to direct users at should their instance become blocked
# due to resource constraints
self.admin_contact = config.get("admin_contact", None)
# FIXME: federation_domain_whitelist needs sytests
- self.federation_domain_whitelist = None
+ self.federation_domain_whitelist = None # type: Optional[dict]
federation_domain_whitelist = config.get("federation_domain_whitelist", None)
if federation_domain_whitelist is not None:
@@ -206,7 +230,7 @@ class ServerConfig(Config):
self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
except Exception as e:
raise ConfigError(
- "Invalid range(s) provided in " "federation_ip_range_blacklist: %s" % e
+ "Invalid range(s) provided in federation_ip_range_blacklist: %s" % e
)
if self.public_baseurl is not None:
@@ -229,7 +253,133 @@ class ServerConfig(Config):
# events with profile information that differ from the target's global profile.
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
- self.listeners = []
+ retention_config = config.get("retention")
+ if retention_config is None:
+ retention_config = {}
+
+ self.retention_enabled = retention_config.get("enabled", False)
+
+ retention_default_policy = retention_config.get("default_policy")
+
+ if retention_default_policy is not None:
+ self.retention_default_min_lifetime = retention_default_policy.get(
+ "min_lifetime"
+ )
+ if self.retention_default_min_lifetime is not None:
+ self.retention_default_min_lifetime = self.parse_duration(
+ self.retention_default_min_lifetime
+ )
+
+ self.retention_default_max_lifetime = retention_default_policy.get(
+ "max_lifetime"
+ )
+ if self.retention_default_max_lifetime is not None:
+ self.retention_default_max_lifetime = self.parse_duration(
+ self.retention_default_max_lifetime
+ )
+
+ if (
+ self.retention_default_min_lifetime is not None
+ and self.retention_default_max_lifetime is not None
+ and (
+ self.retention_default_min_lifetime
+ > self.retention_default_max_lifetime
+ )
+ ):
+ raise ConfigError(
+ "The default retention policy's 'min_lifetime' can not be greater"
+ " than its 'max_lifetime'"
+ )
+ else:
+ self.retention_default_min_lifetime = None
+ self.retention_default_max_lifetime = None
+
+ if self.retention_enabled:
+ logger.info(
+ "Message retention policies support enabled with the following default"
+ " policy: min_lifetime = %s ; max_lifetime = %s",
+ self.retention_default_min_lifetime,
+ self.retention_default_max_lifetime,
+ )
+
+ self.retention_allowed_lifetime_min = retention_config.get(
+ "allowed_lifetime_min"
+ )
+ if self.retention_allowed_lifetime_min is not None:
+ self.retention_allowed_lifetime_min = self.parse_duration(
+ self.retention_allowed_lifetime_min
+ )
+
+ self.retention_allowed_lifetime_max = retention_config.get(
+ "allowed_lifetime_max"
+ )
+ if self.retention_allowed_lifetime_max is not None:
+ self.retention_allowed_lifetime_max = self.parse_duration(
+ self.retention_allowed_lifetime_max
+ )
+
+ if (
+ self.retention_allowed_lifetime_min is not None
+ and self.retention_allowed_lifetime_max is not None
+ and self.retention_allowed_lifetime_min
+ > self.retention_allowed_lifetime_max
+ ):
+ raise ConfigError(
+ "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
+ " greater than 'allowed_lifetime_max'"
+ )
+
+ self.retention_purge_jobs = [] # type: List[Dict[str, Optional[int]]]
+ for purge_job_config in retention_config.get("purge_jobs", []):
+ interval_config = purge_job_config.get("interval")
+
+ if interval_config is None:
+ raise ConfigError(
+ "A retention policy's purge jobs configuration must have the"
+ " 'interval' key set."
+ )
+
+ interval = self.parse_duration(interval_config)
+
+ shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
+
+ if shortest_max_lifetime is not None:
+ shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
+
+ longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
+
+ if longest_max_lifetime is not None:
+ longest_max_lifetime = self.parse_duration(longest_max_lifetime)
+
+ if (
+ shortest_max_lifetime is not None
+ and longest_max_lifetime is not None
+ and shortest_max_lifetime > longest_max_lifetime
+ ):
+ raise ConfigError(
+ "A retention policy's purge jobs configuration's"
+ " 'shortest_max_lifetime' value can not be greater than its"
+ " 'longest_max_lifetime' value."
+ )
+
+ self.retention_purge_jobs.append(
+ {
+ "interval": interval,
+ "shortest_max_lifetime": shortest_max_lifetime,
+ "longest_max_lifetime": longest_max_lifetime,
+ }
+ )
+
+ if not self.retention_purge_jobs:
+ self.retention_purge_jobs = [
+ {
+ "interval": self.parse_duration("1d"),
+ "shortest_max_lifetime": None,
+ "longest_max_lifetime": None,
+ }
+ ]
+
+ self.listeners = [] # type: List[dict]
for listener in config.get("listeners", []):
if not isinstance(listener.get("port", None), int):
raise ConfigError(
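To make the shape of the parsed data concrete, this is what `self.retention_purge_jobs` would hold for the two-job `purge_jobs` example given in the sample config later in this patch (assuming `parse_duration` returns milliseconds):

```python
DAY_MS = 24 * 60 * 60 * 1000

retention_purge_jobs = [
    {
        "interval": 12 * 60 * 60 * 1000,       # "12h"
        "shortest_max_lifetime": 1 * DAY_MS,   # "1d"
        "longest_max_lifetime": 3 * DAY_MS,    # "3d"
    },
    {
        "interval": 1 * DAY_MS,                # "1d"
        "shortest_max_lifetime": 3 * DAY_MS,   # "3d"
        "longest_max_lifetime": 365 * DAY_MS,  # "1y"
    },
]
```

If no `purge_jobs` are configured at all, the fallback above installs a single daily job with both lifetime bounds left open, so every room is covered.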
@@ -273,7 +423,10 @@ class ServerConfig(Config):
validator=attr.validators.instance_of(bool), default=False
)
complexity = attr.ib(
- validator=attr.validators.instance_of((int, float)), default=1.0
+ validator=attr.validators.instance_of(
+ (float, int) # type: ignore[arg-type] # noqa
+ ),
+ default=1.0,
)
complexity_error = attr.ib(
validator=attr.validators.instance_of(str),
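The `# type: ignore` is needed presumably because the type stubs for `attr.validators.instance_of` don't cover the tuple-of-types form, even though attrs accepts it at runtime. A minimal illustration of the runtime behaviour (the class name is reused for clarity; this is a sketch, not the patched class):

```python
import attr

@attr.s
class LimitRemoteRoomsConfig:
    complexity = attr.ib(
        validator=attr.validators.instance_of((float, int)), default=1.0
    )

LimitRemoteRoomsConfig(complexity=2)    # accepted: int
LimitRemoteRoomsConfig(complexity=0.5)  # accepted: float
# LimitRemoteRoomsConfig(complexity="1.0") would raise TypeError
```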
@@ -281,7 +434,7 @@ class ServerConfig(Config):
)
self.limit_remote_rooms = LimitRemoteRoomsConfig(
- **config.get("limit_remote_rooms", {})
+ **(config.get("limit_remote_rooms") or {})
)
bind_port = config.get("bind_port")
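The switch from `config.get("limit_remote_rooms", {})` to `config.get("limit_remote_rooms") or {}` is subtle but important: a key that is present in the YAML with no value parses to an explicit `None`, and `dict.get`'s default only applies when the key is absent entirely. This matters because the sample config below now ships `limit_remote_rooms:` uncommented with all of its sub-options commented out.

```python
import yaml

config = yaml.safe_load("limit_remote_rooms:\n")  # key present, value null

print(config.get("limit_remote_rooms", {}))    # None -> **None would raise TypeError
print(config.get("limit_remote_rooms") or {})  # {}   -> safe to splat into the config class
```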
@@ -334,14 +487,7 @@ class ServerConfig(Config):
metrics_port = config.get("metrics_port")
if metrics_port:
- logger.warn(
- (
- "The metrics_port configuration option is deprecated in Synapse 0.31 "
- "in favour of a listener. Please see "
- "http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md"
- " on how to configure the new listener."
- )
- )
+ logger.warning(METRICS_PORT_WARNING)
self.listeners.append(
{
@@ -355,14 +501,28 @@ class ServerConfig(Config):
_check_resource_config(self.listeners)
- # An experimental option to try and periodically clean up extremities
- # by sending dummy events.
self.cleanup_extremities_with_dummy_events = config.get(
- "cleanup_extremities_with_dummy_events", False
+ "cleanup_extremities_with_dummy_events", True
+ )
+
+ # The number of forward extremities in a room needed to send a dummy event.
+ self.dummy_events_threshold = config.get("dummy_events_threshold", 10)
+
+ self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)
+
+ # Inhibits the /requestToken endpoints from returning an error that might leak
+ # information about whether an e-mail address is in use or not on this
+ # homeserver. Instead, these endpoints return a 200 with a fake sid when such
+ # an error would otherwise be returned, without sending anything.
+ # This is a compromise between sending an email, which could be a spam vector,
+ # and letting the client know which email address is bound to an account and
+ # which one isn't.
+ self.request_token_inhibit_3pid_errors = config.get(
+ "request_token_inhibit_3pid_errors", False,
)
- def has_tls_listener(self):
- return any(l["tls"] for l in self.listeners)
+ def has_tls_listener(self) -> bool:
+ return any(listener["tls"] for listener in self.listeners)
def generate_config_section(
self, server_name, data_dir_path, open_private_ports, listeners, **kwargs
@@ -458,10 +618,15 @@ class ServerConfig(Config):
#
pid_file: %(pid_file)s
- # The path to the web client which will be served at /_matrix/client/
- # if 'webclient' is configured under the 'listeners' configuration.
+ # The absolute URL to the web client which /_matrix/client will redirect
+ # to if 'webclient' is configured under the 'listeners' configuration.
+ #
+ # This option can also be set to the filesystem path to the web client
+ # which will be served at /_matrix/client/ if 'webclient' is configured
+ # under the 'listeners' configuration, however this is a security risk:
+ # https://github.com/matrix-org/synapse#security-note
#
- #web_client_location: "/path/to/web/root"
+ #web_client_location: https://riot.example.com/
# The public-facing base URL that clients use to access this HS
# (not including _matrix/...). This is the same URL a user would
@@ -489,15 +654,23 @@ class ServerConfig(Config):
#
#require_auth_for_profile_requests: true
- # If set to 'false', requires authentication to access the server's public rooms
- # directory through the client API. Defaults to 'true'.
+ # Uncomment to require a user to share a room with another user in order
+ # to retrieve their profile information. Only checked on Client-Server
+ # requests. Profile requests from other servers should be checked by the
+ # requesting server. Defaults to 'false'.
#
- #allow_public_rooms_without_auth: false
+ #limit_profile_requests_to_users_who_share_rooms: true
- # If set to 'false', forbids any other homeserver to fetch the server's public
- # rooms directory via federation. Defaults to 'true'.
+ # If set to 'true', removes the need for authentication to access the server's
+ # public rooms directory through the client API, meaning that anyone can
+ # query the room directory. Defaults to 'false'.
#
- #allow_public_rooms_over_federation: false
+ #allow_public_rooms_without_auth: true
+
+ # If set to 'true', allows any other homeserver to fetch the server's public
+ # rooms directory via federation. Defaults to 'false'.
+ #
+ #allow_public_rooms_over_federation: true
# The default room version for newly created rooms.
#
@@ -521,7 +694,7 @@ class ServerConfig(Config):
# Whether room invites to users on this server should be blocked
# (except those sent by local server admins). The default is False.
#
- #block_non_admin_invites: True
+ #block_non_admin_invites: true
# Room searching
#
@@ -545,6 +718,9 @@ class ServerConfig(Config):
# blacklist IP address CIDR ranges. If this option is not specified, or
# specified with an empty list, no ip range blacklist will be enforced.
#
+ # As of Synapse v1.4.0 this option also affects any outbound requests to identity
+ # servers specified by user input.
+ #
# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
# listed here, since they correspond to unroutable addresses.)
#
@@ -650,6 +826,18 @@ class ServerConfig(Config):
# bind_addresses: ['::1', '127.0.0.1']
# type: manhole
+ # Forward extremities can build up in a room due to networking delays between
+ # homeservers. Once this happens in a large room, calculation of the state of
+ # that room can become quite expensive. To mitigate this, once the number of
+ # forward extremities reaches a given threshold, Synapse will send an
+ # org.matrix.dummy_event event, which will reduce the forward extremities
+ # in the room.
+ #
+ # This setting defines the threshold (i.e. number of forward extremities in the
+ # room) at which dummy events are sent. The default value is 10.
+ #
+ #dummy_events_threshold: 5
+
## Homeserver blocking ##
@@ -659,9 +847,8 @@ class ServerConfig(Config):
# Global blocking
#
- #hs_disabled: False
+ #hs_disabled: false
#hs_disabled_message: 'Human readable reason for why the HS is blocked'
- #hs_disabled_limit_type: 'error code(str), to help clients decode reason'
# Monthly Active User Blocking
#
@@ -681,15 +868,22 @@ class ServerConfig(Config):
# sign up in a short space of time never to return after their initial
# session.
#
- #limit_usage_by_mau: False
+ # 'mau_limit_alerting' is a means of limiting client-side alerting
+ # should the mau limit be reached. This is useful for small instances
+ # where the admin has, say, 5 mau seats for 5 specific people and no
+ # interest in increasing the mau limit further. Defaults to 'true', which
+ # means that alerting is enabled.
+ #
+ #limit_usage_by_mau: false
#max_mau_value: 50
#mau_trial_days: 2
+ #mau_limit_alerting: false
# If enabled, the metrics for the number of monthly active users will
# be populated, however no one will be limited. If limit_usage_by_mau
# is true, this is implied to be true.
#
- #mau_stats_only: False
+ #mau_stats_only: false
# Sometimes the server admin will want to ensure certain accounts are
# never blocked by mau checking. These accounts are specified here.
@@ -701,22 +895,27 @@ class ServerConfig(Config):
# Used by phonehome stats to group together related servers.
#server_context: context
- # Resource-constrained Homeserver Settings
+ # Resource-constrained homeserver settings
#
- # If limit_remote_rooms.enabled is True, the room complexity will be
- # checked before a user joins a new remote room. If it is above
- # limit_remote_rooms.complexity, it will disallow joining or
- # instantly leave.
+ # When this is enabled, the room "complexity" will be checked before a user
+ # joins a new remote room. If it is above the complexity limit, the server will
+ # disallow joining, or will instantly leave the room.
#
- # limit_remote_rooms.complexity_error can be set to customise the text
- # displayed to the user when a room above the complexity threshold has
- # its join cancelled.
+ # Room complexity is an arbitrary measure based on factors such as the number of
+ # users in the room.
#
- # Uncomment the below lines to enable:
- #limit_remote_rooms:
- # enabled: True
- # complexity: 1.0
- # complexity_error: "This room is too complex."
+ limit_remote_rooms:
+ # Uncomment to enable room complexity checking.
+ #
+ #enabled: true
+
+ # The limit above which rooms cannot be joined. The default is 1.0.
+ #
+ #complexity: 0.5
+
+ # Override the error which is returned when the room is too complex.
+ #
+ #complexity_error: "This room is too complex."
# Whether to require a user to be in the room to add an alias to it.
# Defaults to 'true'.
@@ -734,7 +933,86 @@ class ServerConfig(Config):
#
# Defaults to `7d`. Set to `null` to disable.
#
- redaction_retention_period: 7d
+ #redaction_retention_period: 28d
+
+ # How long to track users' last seen time and IPs in the database.
+ #
+ # Defaults to `28d`. Set to `null` to disable clearing out of old rows.
+ #
+ #user_ips_max_age: 14d
+
+ # Message retention policy at the server level.
+ #
+ # Room admins and mods can define a retention period for their rooms using the
+ # 'm.room.retention' state event, and server admins can cap this period by setting
+ # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
+ #
+ # If this feature is enabled, Synapse will regularly look for and purge events
+ # which are older than the room's maximum retention period. Synapse will also
+ # filter events received over federation so that events that should have been
+ # purged are ignored and not stored again.
+ #
+ retention:
+ # The message retention policies feature is disabled by default. Uncomment the
+ # following line to enable it.
+ #
+ #enabled: true
+
+ # Default retention policy. If set, Synapse will apply it to rooms that lack the
+ # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
+ # matter much because Synapse doesn't take it into account yet.
+ #
+ #default_policy:
+ # min_lifetime: 1d
+ # max_lifetime: 1y
+
+ # Retention policy limits. If set, a user won't be able to send an
+ # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
+ # that's not within this range. This is especially useful in closed federations,
+ # in which server admins can make sure every federating server applies the same
+ # rules.
+ #
+ #allowed_lifetime_min: 1d
+ #allowed_lifetime_max: 1y
+
+ # Under the 'purge_jobs' section, server admins can define the settings of the
+ # background jobs purging the events whose lifetime has expired.
+ #
+ # If no configuration is provided, a single job will be set up to delete expired
+ # events in every room daily.
+ #
+ # Each job's configuration defines which range of message lifetimes the job
+ # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
+ # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
+ # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
+ # lower than or equal to 3 days. Both the minimum and the maximum value of a
+ # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
+ # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
+ # whose 'max_lifetime' is lower than or equal to three days.
+ #
+ # The rationale for this per-job configuration is that some rooms might have a
+ # retention policy with a low 'max_lifetime', where history needs to be purged
+ # of outdated messages on a more frequent basis than for the rest of the rooms
+ # (e.g. every 12h), while the admin might not want that purge to be performed
+ # by a job that's iterating over every room it knows, which could be heavy on
+ # the server.
+ #
+ #purge_jobs:
+ # - shortest_max_lifetime: 1d
+ # longest_max_lifetime: 3d
+ # interval: 12h
+ # - shortest_max_lifetime: 3d
+ # longest_max_lifetime: 1y
+ # interval: 1d
+
+ # Inhibits the /requestToken endpoints from returning an error that might leak
+ # information about whether an e-mail address is in use or not on this
+ # homeserver.
+ # Note that for some endpoints the error situation is the e-mail already being
+ # used, and for others it is the e-mail being unused.
+ # If this option is enabled, instead of returning an error, these endpoints will
+ # act as if no error happened and return a fake session ID ('sid') to clients.
+ #
+ #request_token_inhibit_3pid_errors: true
"""
% locals()
)
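For context, the 'm.room.retention' state event that the retention options above key off looks roughly like this (a sketch based on MSC1763, which this feature implements; the field names are per the proposal and lifetimes are in milliseconds):

```python
retention_event = {
    "type": "m.room.retention",
    "state_key": "",
    "content": {
        "min_lifetime": 86400000,    # 1 day
        "max_lifetime": 2419200000,  # 28 days
    },
}
```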
@@ -755,20 +1033,20 @@ class ServerConfig(Config):
"--daemonize",
action="store_true",
default=None,
- help="Daemonize the home server",
+ help="Daemonize the homeserver",
)
server_group.add_argument(
"--print-pidfile",
action="store_true",
default=None,
- help="Print the path to the pidfile just" " before daemonizing",
+ help="Print the path to the pidfile just before daemonizing",
)
server_group.add_argument(
"--manhole",
metavar="PORT",
dest="manhole",
type=int,
- help="Turn on the twisted telnet manhole" " service on the given port.",
+ help="Turn on the twisted telnet manhole service on the given port.",
)
@@ -834,12 +1112,12 @@ KNOWN_RESOURCES = (
def _check_resource_config(listeners):
- resource_names = set(
+ resource_names = {
res_name
for listener in listeners
for res in listener.get("resources", [])
for res_name in res.get("names", [])
- )
+ }
for resource in resource_names:
if resource not in KNOWN_RESOURCES:
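Finally, the `set(...)` call in `_check_resource_config` becomes a set comprehension; the two forms are equivalent, the literal just skips building an intermediate generator (and satisfies flake8-comprehensions' C401 check, presumably the motivation here):

```python
listeners = [{"resources": [{"names": ["client", "federation"]}]}]

old_style = set(
    name
    for listener in listeners
    for res in listener.get("resources", [])
    for name in res.get("names", [])
)
new_style = {
    name
    for listener in listeners
    for res in listener.get("resources", [])
    for name in res.get("names", [])
}
assert old_style == new_style == {"client", "federation"}
```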