diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py
index fca35b008c..65043d5b5b 100644
--- a/synapse/config/__main__.py
+++ b/synapse/config/__main__.py
@@ -16,6 +16,7 @@ from synapse.config._base import ConfigError
if __name__ == "__main__":
import sys
+
from synapse.config.homeserver import HomeServerConfig
action = sys.argv[1]
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 30d1050a91..ad5ab6ad62 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -18,12 +18,16 @@
import argparse
import errno
import os
+import time
+import urllib.parse
from collections import OrderedDict
+from hashlib import sha256
from textwrap import dedent
-from typing import Any, MutableMapping, Optional
-
-from six import integer_types
+from typing import Any, Callable, List, MutableMapping, Optional
+import attr
+import jinja2
+import pkg_resources
import yaml
@@ -84,7 +88,7 @@ def path_exists(file_path):
return False
-class Config(object):
+class Config:
"""
A configuration section, containing configuration keys and values.
@@ -100,6 +104,11 @@ class Config(object):
def __init__(self, root_config=None):
self.root = root_config
+ # Get the path to the default Synapse template directory
+ self.default_template_dir = pkg_resources.resource_filename(
+ "synapse", "res/templates"
+ )
+
def __getattr__(self, item: str) -> Any:
"""
Try and fetch a configuration option that does not exist on this class.
@@ -117,7 +126,7 @@ class Config(object):
@staticmethod
def parse_size(value):
- if isinstance(value, integer_types):
+ if isinstance(value, int):
return value
sizes = {"K": 1024, "M": 1024 * 1024}
size = 1
@@ -129,7 +138,7 @@ class Config(object):
@staticmethod
def parse_duration(value):
- if isinstance(value, integer_types):
+ if isinstance(value, int):
return value
second = 1000
minute = 60 * second
@@ -184,8 +193,97 @@ class Config(object):
with open(file_path) as file_stream:
return file_stream.read()
+ def read_templates(
+ self, filenames: List[str], custom_template_directory: Optional[str] = None,
+ ) -> List[jinja2.Template]:
+ """Load a list of template files from disk using the given variables.
+
+ This function will attempt to load the given templates from the default Synapse
+ template directory. If `custom_template_directory` is supplied, that directory
+ is tried first.
+
+ Files read are treated as Jinja templates. These templates are not rendered yet.
+
+ Args:
+ filenames: A list of template filenames to read.
+
+ custom_template_directory: A directory to try to look for the templates
+ before using the default Synapse template directory instead.
+
+ Raises:
+ ConfigError: if the file's path is incorrect or otherwise cannot be read.
+
+ Returns:
+ A list of jinja2 templates.
+ """
+ templates = []
+ search_directories = [self.default_template_dir]
+
+ # The loader will first look in the custom template directory (if specified) for the
+ # given filename. If it doesn't find it, it will use the default template dir instead
+ if custom_template_directory:
+ # Check that the given template directory exists
+ if not self.path_exists(custom_template_directory):
+ raise ConfigError(
+ "Configured template directory does not exist: %s"
+ % (custom_template_directory,)
+ )
+
+ # Search the custom template directory as well
+ search_directories.insert(0, custom_template_directory)
+
+ loader = jinja2.FileSystemLoader(search_directories)
+ env = jinja2.Environment(loader=loader, autoescape=True)
+
+ # Update the environment with our custom filters
+ env.filters.update(
+ {
+ "format_ts": _format_ts_filter,
+ "mxc_to_http": _create_mxc_to_http_filter(self.public_baseurl),
+ }
+ )
+
+ for filename in filenames:
+ # Load the template
+ template = env.get_template(filename)
+ templates.append(template)
+
+ return templates
-class RootConfig(object):
+
+def _format_ts_filter(value: int, format: str):
+ return time.strftime(format, time.localtime(value / 1000))
+
+
+def _create_mxc_to_http_filter(public_baseurl: str) -> Callable:
+ """Create and return a jinja2 filter that converts MXC urls to HTTP
+
+ Args:
+ public_baseurl: The public, accessible base URL of the homeserver
+ """
+
+ def mxc_to_http_filter(value, width, height, resize_method="crop"):
+ if value[0:6] != "mxc://":
+ return ""
+
+ server_and_media_id = value[6:]
+ fragment = None
+ if "#" in server_and_media_id:
+ server_and_media_id, fragment = server_and_media_id.split("#", 1)
+ fragment = "#" + fragment
+
+ params = {"width": width, "height": height, "method": resize_method}
+ return "%s_matrix/media/v1/thumbnail/%s?%s%s" % (
+ public_baseurl,
+ server_and_media_id,
+ urllib.parse.urlencode(params),
+ fragment or "",
+ )
+
+ return mxc_to_http_filter
+
+
+class RootConfig:
"""
Holder of an application's configuration.
@@ -719,4 +817,36 @@ def find_config_files(search_paths):
return config_files
-__all__ = ["Config", "RootConfig"]
+@attr.s
+class ShardedWorkerHandlingConfig:
+ """Algorithm for choosing which instance is responsible for handling some
+ sharded work.
+
+ For example, the federation senders use this to determine which instances
+ handles sending stuff to a given destination (which is used as the `key`
+ below).
+ """
+
+ instances = attr.ib(type=List[str])
+
+ def should_handle(self, instance_name: str, key: str) -> bool:
+ """Whether this instance is responsible for handling the given key.
+ """
+
+ # If multiple instances are not defined we always return true.
+ if not self.instances or len(self.instances) == 1:
+ return True
+
+ # We shard by taking the hash, modulo it by the number of instances and
+ # then checking whether this instance matches the instance at that
+ # index.
+ #
+ # (Technically this introduces some bias and is not entirely uniform,
+ # but since the hash is so large the bias is ridiculously small).
+ dest_hash = sha256(key.encode("utf8")).digest()
+ dest_int = int.from_bytes(dest_hash, byteorder="little")
+ remainder = dest_int % (len(self.instances))
+ return self.instances[remainder] == instance_name
+
+
+__all__ = ["Config", "RootConfig", "ShardedWorkerHandlingConfig"]
diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
index 9e576060d4..eb911e8f9f 100644
--- a/synapse/config/_base.pyi
+++ b/synapse/config/_base.pyi
@@ -137,3 +137,8 @@ class Config:
def read_config_files(config_files: List[str]): ...
def find_config_files(search_paths: List[str]): ...
+
+class ShardedWorkerHandlingConfig:
+ instances: List[str]
+ def __init__(self, instances: List[str]) -> None: ...
+ def should_handle(self, instance_name: str, key: str) -> bool: ...
diff --git a/synapse/config/_util.py b/synapse/config/_util.py
new file mode 100644
index 0000000000..cd31b1c3c9
--- /dev/null
+++ b/synapse/config/_util.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, List
+
+import jsonschema
+
+from synapse.config._base import ConfigError
+from synapse.types import JsonDict
+
+
+def validate_config(json_schema: JsonDict, config: Any, config_path: List[str]) -> None:
+ """Validates a config setting against a JsonSchema definition
+
+ This can be used to validate a section of the config file against a schema
+ definition. If the validation fails, a ConfigError is raised with a textual
+ description of the problem.
+
+ Args:
+ json_schema: the schema to validate against
+ config: the configuration value to be validated
+ config_path: the path within the config file. This will be used as a basis
+ for the error message.
+ """
+ try:
+ jsonschema.validate(config, json_schema)
+ except jsonschema.ValidationError as e:
+ # copy `config_path` before modifying it.
+ path = list(config_path)
+ for p in list(e.path):
+ if isinstance(p, int):
+ path.append("<item %i>" % p)
+ else:
+ path.append(str(p))
+
+ raise ConfigError(
+ "Unable to parse configuration: %s at %s" % (e.message, ".".join(path))
+ )
diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index ca43e96bd1..8ed3e24258 100644
--- a/synapse/config/appservice.py
+++ b/synapse/config/appservice.py
@@ -14,9 +14,7 @@
import logging
from typing import Dict
-
-from six import string_types
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
import yaml
from netaddr import IPSet
@@ -98,17 +96,14 @@ def load_appservices(hostname, config_files):
def _load_appservice(hostname, as_info, config_filename):
required_string_fields = ["id", "as_token", "hs_token", "sender_localpart"]
for field in required_string_fields:
- if not isinstance(as_info.get(field), string_types):
+ if not isinstance(as_info.get(field), str):
raise KeyError(
"Required string field: '%s' (%s)" % (field, config_filename)
)
# 'url' must either be a string or explicitly null, not missing
# to avoid accidentally turning off push for ASes.
- if (
- not isinstance(as_info.get("url"), string_types)
- and as_info.get("url", "") is not None
- ):
+ if not isinstance(as_info.get("url"), str) and as_info.get("url", "") is not None:
raise KeyError(
"Required string field or explicit null: 'url' (%s)" % (config_filename,)
)
@@ -138,7 +133,7 @@ def _load_appservice(hostname, as_info, config_filename):
ns,
regex_obj,
)
- if not isinstance(regex_obj.get("regex"), string_types):
+ if not isinstance(regex_obj.get("regex"), str):
raise ValueError("Missing/bad type 'regex' key in %s", regex_obj)
if not isinstance(regex_obj.get("exclusive"), bool):
raise ValueError(
diff --git a/synapse/config/cache.py b/synapse/config/cache.py
index 0672538796..8e03f14005 100644
--- a/synapse/config/cache.py
+++ b/synapse/config/cache.py
@@ -15,6 +15,7 @@
import os
import re
+import threading
from typing import Callable, Dict
from ._base import Config, ConfigError
@@ -25,11 +26,14 @@ _CACHE_PREFIX = "SYNAPSE_CACHE_FACTOR"
# Map from canonicalised cache name to cache.
_CACHES = {}
+# a lock on the contents of _CACHES
+_CACHES_LOCK = threading.Lock()
+
_DEFAULT_FACTOR_SIZE = 0.5
_DEFAULT_EVENT_CACHE_SIZE = "10K"
-class CacheProperties(object):
+class CacheProperties:
def __init__(self):
# The default factor size for all caches
self.default_factor_size = float(
@@ -66,7 +70,10 @@ def add_resizable_cache(cache_name: str, cache_resize_callback: Callable):
# Some caches have '*' in them which we strip out.
cache_name = _canonicalise_cache_name(cache_name)
- _CACHES[cache_name] = cache_resize_callback
+ # sometimes caches are initialised from background threads, so we need to make
+ # sure we don't conflict with another thread running a resize operation
+ with _CACHES_LOCK:
+ _CACHES[cache_name] = cache_resize_callback
# Ensure all loaded caches are sized appropriately
#
@@ -87,7 +94,8 @@ class CacheConfig(Config):
os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)
)
properties.resize_all_caches_func = None
- _CACHES.clear()
+ with _CACHES_LOCK:
+ _CACHES.clear()
def generate_config_section(self, **kwargs):
return """\
@@ -193,6 +201,8 @@ class CacheConfig(Config):
For each cache, run the mapped callback function with either
a specific cache factor or the default, global one.
"""
- for cache_name, callback in _CACHES.items():
- new_factor = self.cache_factors.get(cache_name, self.global_factor)
- callback(new_factor)
+ # block other threads from modifying _CACHES while we iterate it.
+ with _CACHES_LOCK:
+ for cache_name, callback in _CACHES.items():
+ new_factor = self.cache_factors.get(cache_name, self.global_factor)
+ callback(new_factor)
diff --git a/synapse/config/database.py b/synapse/config/database.py
index 1064c2697b..8a18a9ca2a 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -55,7 +55,7 @@ DEFAULT_CONFIG = """\
#database:
# name: psycopg2
# args:
-# user: synapse
+# user: synapse_user
# password: secretpassword
# database: synapse
# host: localhost
@@ -100,7 +100,10 @@ class DatabaseConnectionConfig:
self.name = name
self.config = db_config
- self.data_stores = data_stores
+
+ # The `data_stores` config is actually talking about `databases` (we
+ # changed the name).
+ self.databases = data_stores
class DatabaseConfig(Config):
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index ca61214454..7a796996c0 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -14,7 +14,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
from __future__ import print_function
# This file can't be called email.py because if it is, we cannot:
@@ -23,7 +22,7 @@ import os
from enum import Enum
from typing import Optional
-import pkg_resources
+import attr
from ._base import Config, ConfigError
@@ -33,6 +32,33 @@ Password reset emails are enabled on this homeserver due to a partial
%s
"""
+DEFAULT_SUBJECTS = {
+ "message_from_person_in_room": "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room...",
+ "message_from_person": "[%(app)s] You have a message on %(app)s from %(person)s...",
+ "messages_from_person": "[%(app)s] You have messages on %(app)s from %(person)s...",
+ "messages_in_room": "[%(app)s] You have messages on %(app)s in the %(room)s room...",
+ "messages_in_room_and_others": "[%(app)s] You have messages on %(app)s in the %(room)s room and others...",
+ "messages_from_person_and_others": "[%(app)s] You have messages on %(app)s from %(person)s and others...",
+ "invite_from_person": "[%(app)s] %(person)s has invited you to chat on %(app)s...",
+ "invite_from_person_to_room": "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s...",
+ "password_reset": "[%(server_name)s] Password reset",
+ "email_validation": "[%(server_name)s] Validate your email",
+}
+
+
+@attr.s
+class EmailSubjectConfig:
+ message_from_person_in_room = attr.ib(type=str)
+ message_from_person = attr.ib(type=str)
+ messages_from_person = attr.ib(type=str)
+ messages_in_room = attr.ib(type=str)
+ messages_in_room_and_others = attr.ib(type=str)
+ messages_from_person_and_others = attr.ib(type=str)
+ invite_from_person = attr.ib(type=str)
+ invite_from_person_to_room = attr.ib(type=str)
+ password_reset = attr.ib(type=str)
+ email_validation = attr.ib(type=str)
+
class EmailConfig(Config):
section = "email"
@@ -71,21 +97,18 @@ class EmailConfig(Config):
if parsed[1] == "":
raise RuntimeError("Invalid notif_from address")
+ # A user-configurable template directory
template_dir = email_config.get("template_dir")
- # we need an absolute path, because we change directory after starting (and
- # we don't yet know what auxilliary templates like mail.css we will need).
- # (Note that loading as package_resources with jinja.PackageLoader doesn't
- # work for the same reason.)
- if not template_dir:
- template_dir = pkg_resources.resource_filename("synapse", "res/templates")
-
- self.email_template_dir = os.path.abspath(template_dir)
+ if isinstance(template_dir, str):
+ # We need an absolute path, because we change directory after starting (and
+ # we don't yet know what auxiliary templates like mail.css we will need).
+ template_dir = os.path.abspath(template_dir)
+ elif template_dir is not None:
+ # If template_dir is something other than a str or None, warn the user
+ raise ConfigError("Config option email.template_dir must be type str")
self.email_enable_notifs = email_config.get("enable_notifs", False)
- account_validity_config = config.get("account_validity") or {}
- account_validity_renewal_enabled = account_validity_config.get("renew_at")
-
self.threepid_behaviour_email = (
# Have Synapse handle the email sending if account_threepid_delegates.email
# is not defined
@@ -139,19 +162,6 @@ class EmailConfig(Config):
email_config.get("validation_token_lifetime", "1h")
)
- if (
- self.email_enable_notifs
- or account_validity_renewal_enabled
- or self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
- ):
- # make sure we can import the required deps
- import jinja2
- import bleach
-
- # prevent unused warnings
- jinja2
- bleach
-
if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
missing = []
if not self.email_notif_from:
@@ -169,49 +179,49 @@ class EmailConfig(Config):
# These email templates have placeholders in them, and thus must be
# parsed using a templating engine during a request
- self.email_password_reset_template_html = email_config.get(
+ password_reset_template_html = email_config.get(
"password_reset_template_html", "password_reset.html"
)
- self.email_password_reset_template_text = email_config.get(
+ password_reset_template_text = email_config.get(
"password_reset_template_text", "password_reset.txt"
)
- self.email_registration_template_html = email_config.get(
+ registration_template_html = email_config.get(
"registration_template_html", "registration.html"
)
- self.email_registration_template_text = email_config.get(
+ registration_template_text = email_config.get(
"registration_template_text", "registration.txt"
)
- self.email_add_threepid_template_html = email_config.get(
+ add_threepid_template_html = email_config.get(
"add_threepid_template_html", "add_threepid.html"
)
- self.email_add_threepid_template_text = email_config.get(
+ add_threepid_template_text = email_config.get(
"add_threepid_template_text", "add_threepid.txt"
)
- self.email_password_reset_template_failure_html = email_config.get(
+ password_reset_template_failure_html = email_config.get(
"password_reset_template_failure_html", "password_reset_failure.html"
)
- self.email_registration_template_failure_html = email_config.get(
+ registration_template_failure_html = email_config.get(
"registration_template_failure_html", "registration_failure.html"
)
- self.email_add_threepid_template_failure_html = email_config.get(
+ add_threepid_template_failure_html = email_config.get(
"add_threepid_template_failure_html", "add_threepid_failure.html"
)
# These templates do not support any placeholder variables, so we
# will read them from disk once during setup
- email_password_reset_template_success_html = email_config.get(
+ password_reset_template_success_html = email_config.get(
"password_reset_template_success_html", "password_reset_success.html"
)
- email_registration_template_success_html = email_config.get(
+ registration_template_success_html = email_config.get(
"registration_template_success_html", "registration_success.html"
)
- email_add_threepid_template_success_html = email_config.get(
+ add_threepid_template_success_html = email_config.get(
"add_threepid_template_success_html", "add_threepid_success.html"
)
- # Check templates exist
- for f in [
+ # Read all templates from disk
+ (
self.email_password_reset_template_html,
self.email_password_reset_template_text,
self.email_registration_template_html,
@@ -221,32 +231,36 @@ class EmailConfig(Config):
self.email_password_reset_template_failure_html,
self.email_registration_template_failure_html,
self.email_add_threepid_template_failure_html,
- email_password_reset_template_success_html,
- email_registration_template_success_html,
- email_add_threepid_template_success_html,
- ]:
- p = os.path.join(self.email_template_dir, f)
- if not os.path.isfile(p):
- raise ConfigError("Unable to find template file %s" % (p,))
-
- # Retrieve content of web templates
- filepath = os.path.join(
- self.email_template_dir, email_password_reset_template_success_html
- )
- self.email_password_reset_template_success_html = self.read_file(
- filepath, "email.password_reset_template_success_html"
+ password_reset_template_success_html_template,
+ registration_template_success_html_template,
+ add_threepid_template_success_html_template,
+ ) = self.read_templates(
+ [
+ password_reset_template_html,
+ password_reset_template_text,
+ registration_template_html,
+ registration_template_text,
+ add_threepid_template_html,
+ add_threepid_template_text,
+ password_reset_template_failure_html,
+ registration_template_failure_html,
+ add_threepid_template_failure_html,
+ password_reset_template_success_html,
+ registration_template_success_html,
+ add_threepid_template_success_html,
+ ],
+ template_dir,
)
- filepath = os.path.join(
- self.email_template_dir, email_registration_template_success_html
- )
- self.email_registration_template_success_html_content = self.read_file(
- filepath, "email.registration_template_success_html"
+
+ # Render templates that do not contain any placeholders
+ self.email_password_reset_template_success_html_content = (
+ password_reset_template_success_html_template.render()
)
- filepath = os.path.join(
- self.email_template_dir, email_add_threepid_template_success_html
+ self.email_registration_template_success_html_content = (
+ registration_template_success_html_template.render()
)
- self.email_add_threepid_template_success_html_content = self.read_file(
- filepath, "email.add_threepid_template_success_html"
+ self.email_add_threepid_template_success_html_content = (
+ add_threepid_template_success_html_template.render()
)
if self.email_enable_notifs:
@@ -263,17 +277,19 @@ class EmailConfig(Config):
% (", ".join(missing),)
)
- self.email_notif_template_html = email_config.get(
+ notif_template_html = email_config.get(
"notif_template_html", "notif_mail.html"
)
- self.email_notif_template_text = email_config.get(
+ notif_template_text = email_config.get(
"notif_template_text", "notif_mail.txt"
)
- for f in self.email_notif_template_text, self.email_notif_template_html:
- p = os.path.join(self.email_template_dir, f)
- if not os.path.isfile(p):
- raise ConfigError("Unable to find email template file %s" % (p,))
+ (
+ self.email_notif_template_html,
+ self.email_notif_template_text,
+ ) = self.read_templates(
+ [notif_template_html, notif_template_text], template_dir,
+ )
self.email_notif_for_new_users = email_config.get(
"notif_for_new_users", True
@@ -282,21 +298,32 @@ class EmailConfig(Config):
"client_base_url", email_config.get("riot_base_url", None)
)
- if account_validity_renewal_enabled:
- self.email_expiry_template_html = email_config.get(
+ if self.account_validity.renew_by_email_enabled:
+ expiry_template_html = email_config.get(
"expiry_template_html", "notice_expiry.html"
)
- self.email_expiry_template_text = email_config.get(
+ expiry_template_text = email_config.get(
"expiry_template_text", "notice_expiry.txt"
)
- for f in self.email_expiry_template_text, self.email_expiry_template_html:
- p = os.path.join(self.email_template_dir, f)
- if not os.path.isfile(p):
- raise ConfigError("Unable to find email template file %s" % (p,))
+ (
+ self.account_validity_template_html,
+ self.account_validity_template_text,
+ ) = self.read_templates(
+ [expiry_template_html, expiry_template_text], template_dir,
+ )
+
+ subjects_config = email_config.get("subjects", {})
+ subjects = {}
+
+ for key, default in DEFAULT_SUBJECTS.items():
+ subjects[key] = subjects_config.get(key, default)
+
+ self.email_subjects = EmailSubjectConfig(**subjects)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
- return """\
+ return (
+ """\
# Configuration for sending emails from Synapse.
#
email:
@@ -324,17 +351,17 @@ class EmailConfig(Config):
# notif_from defines the "From" address to use when sending emails.
# It must be set if email sending is enabled.
#
- # The placeholder '%(app)s' will be replaced by the application name,
+ # The placeholder '%%(app)s' will be replaced by the application name,
# which is normally 'app_name' (below), but may be overridden by the
# Matrix client application.
#
- # Note that the placeholder must be written '%(app)s', including the
+ # Note that the placeholder must be written '%%(app)s', including the
# trailing 's'.
#
- #notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
+ #notif_from: "Your Friendly %%(app)s homeserver <noreply@example.com>"
- # app_name defines the default value for '%(app)s' in notif_from. It
- # defaults to 'Matrix'.
+ # app_name defines the default value for '%%(app)s' in notif_from and email
+ # subjects. It defaults to 'Matrix'.
#
#app_name: my_branded_matrix_server
@@ -364,9 +391,7 @@ class EmailConfig(Config):
# Directory in which Synapse will try to find the template files below.
# If not set, default templates from within the Synapse package will be used.
#
- # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
- # If you *do* uncomment it, you will need to make sure that all the templates
- # below are in the directory.
+ # Do not uncomment this setting unless you want to customise the templates.
#
# Synapse will look for the following templates in this directory:
#
@@ -402,7 +427,76 @@ class EmailConfig(Config):
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
#
#template_dir: "res/templates"
+
+ # Subjects to use when sending emails from Synapse.
+ #
+ # The placeholder '%%(app)s' will be replaced with the value of the 'app_name'
+ # setting above, or by a value dictated by the Matrix client application.
+ #
+ # If a subject isn't overridden in this configuration file, the default value
+ # shown in its example will be used.
+ #
+ #subjects:
+
+ # Subjects for notification emails.
+ #
+ # On top of the '%%(app)s' placeholder, these can use the following
+ # placeholders:
+ #
+ # * '%%(person)s', which will be replaced by the display name of the user(s)
+ # that sent the message(s), e.g. "Alice and Bob".
+ # * '%%(room)s', which will be replaced by the name of the room the
+ # message(s) have been sent to, e.g. "My super room".
+ #
+ # See the example provided for each setting to see which placeholder can be
+ # used and how to use them.
+ #
+ # Subject to use to notify about one message from one or more user(s) in a
+ # room which has a name.
+ #message_from_person_in_room: "%(message_from_person_in_room)s"
+ #
+ # Subject to use to notify about one message from one or more user(s) in a
+ # room which doesn't have a name.
+ #message_from_person: "%(message_from_person)s"
+ #
+ # Subject to use to notify about multiple messages from one or more users in
+ # a room which doesn't have a name.
+ #messages_from_person: "%(messages_from_person)s"
+ #
+ # Subject to use to notify about multiple messages in a room which has a
+ # name.
+ #messages_in_room: "%(messages_in_room)s"
+ #
+ # Subject to use to notify about multiple messages in multiple rooms.
+ #messages_in_room_and_others: "%(messages_in_room_and_others)s"
+ #
+ # Subject to use to notify about multiple messages from multiple persons in
+ # multiple rooms. This is similar to the setting above except it's used when
+ # the room in which the notification was triggered has no name.
+ #messages_from_person_and_others: "%(messages_from_person_and_others)s"
+ #
+ # Subject to use to notify about an invite to a room which has a name.
+ #invite_from_person_to_room: "%(invite_from_person_to_room)s"
+ #
+ # Subject to use to notify about an invite to a room which doesn't have a
+ # name.
+ #invite_from_person: "%(invite_from_person)s"
+
+ # Subject for emails related to account administration.
+ #
+ # On top of the '%%(app)s' placeholder, these can use the
+ # '%%(server_name)s' placeholder, which will be replaced by the value of the
+ # 'server_name' setting in your Synapse configuration.
+ #
+ # Subject to use when sending a password reset email.
+ #password_reset: "%(password_reset)s"
+ #
+ # Subject to use when sending a verification email to assert an address's
+ # ownership.
+ #email_validation: "%(email_validation)s"
"""
+ % DEFAULT_SUBJECTS
+ )
class ThreepidBehaviour(Enum):
diff --git a/synapse/config/federation.py b/synapse/config/federation.py
new file mode 100644
index 0000000000..2c77d8f85b
--- /dev/null
+++ b/synapse/config/federation.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional
+
+from netaddr import IPSet
+
+from ._base import Config, ConfigError
+
+
+class FederationConfig(Config):
+ section = "federation"
+
+ def read_config(self, config, **kwargs):
+ # FIXME: federation_domain_whitelist needs sytests
+ self.federation_domain_whitelist = None # type: Optional[dict]
+ federation_domain_whitelist = config.get("federation_domain_whitelist", None)
+
+ if federation_domain_whitelist is not None:
+ # turn the whitelist into a hash for speed of lookup
+ self.federation_domain_whitelist = {}
+
+ for domain in federation_domain_whitelist:
+ self.federation_domain_whitelist[domain] = True
+
+ self.federation_ip_range_blacklist = config.get(
+ "federation_ip_range_blacklist", []
+ )
+
+ # Attempt to create an IPSet from the given ranges
+ try:
+ self.federation_ip_range_blacklist = IPSet(
+ self.federation_ip_range_blacklist
+ )
+
+ # Always blacklist 0.0.0.0, ::
+ self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
+ except Exception as e:
+ raise ConfigError(
+ "Invalid range(s) provided in federation_ip_range_blacklist: %s" % e
+ )
+
+ def generate_config_section(self, config_dir_path, server_name, **kwargs):
+ return """\
+ # Restrict federation to the following whitelist of domains.
+ # N.B. we recommend also firewalling your federation listener to limit
+ # inbound federation traffic as early as possible, rather than relying
+ # purely on this application-layer restriction. If not specified, the
+ # default is to whitelist everything.
+ #
+ #federation_domain_whitelist:
+ # - lon.example.com
+ # - nyc.example.com
+ # - syd.example.com
+
+ # Prevent federation requests from being sent to the following
+ # blacklist IP address CIDR ranges. If this option is not specified, or
+ # specified with an empty list, no ip range blacklist will be enforced.
+ #
+ # As of Synapse v1.4.0 this option also affects any outbound requests to identity
+ # servers provided by user input.
+ #
+ # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+ # listed here, since they correspond to unroutable addresses.)
+ #
+ federation_ip_range_blacklist:
+ - '127.0.0.0/8'
+ - '10.0.0.0/8'
+ - '172.16.0.0/12'
+ - '192.168.0.0/16'
+ - '100.64.0.0/10'
+ - '169.254.0.0/16'
+ - '::1/128'
+ - 'fe80::/64'
+ - 'fc00::/7'
+ """
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 2c7b3a699f..556e291495 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -23,6 +23,7 @@ from .cas import CasConfig
from .consent_config import ConsentConfig
from .database import DatabaseConfig
from .emailconfig import EmailConfig
+from .federation import FederationConfig
from .groups import GroupsConfig
from .jwt_config import JWTConfig
from .key import KeyConfig
@@ -36,6 +37,7 @@ from .ratelimiting import RatelimitConfig
from .redis import RedisConfig
from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig
+from .room import RoomConfig
from .room_directory import RoomDirectoryConfig
from .saml2_config import SAML2Config
from .server import ServerConfig
@@ -56,6 +58,7 @@ class HomeServerConfig(RootConfig):
config_classes = [
ServerConfig,
TlsConfig,
+ FederationConfig,
CacheConfig,
DatabaseConfig,
LoggingConfig,
@@ -75,10 +78,10 @@ class HomeServerConfig(RootConfig):
JWTConfig,
PasswordConfig,
EmailConfig,
- WorkerConfig,
PasswordAuthProviderConfig,
PushConfig,
SpamCheckerConfig,
+ RoomConfig,
GroupsConfig,
UserDirectoryConfig,
ConsentConfig,
@@ -87,5 +90,6 @@ class HomeServerConfig(RootConfig):
         RoomDirectoryConfig,
         ThirdPartyRulesConfig,
         TracerConfig,
+        WorkerConfig,
         RedisConfig,
     ]
diff --git a/synapse/config/jwt_config.py b/synapse/config/jwt_config.py
index a568726985..3252ad9e7f 100644
--- a/synapse/config/jwt_config.py
+++ b/synapse/config/jwt_config.py
@@ -32,6 +32,11 @@ class JWTConfig(Config):
self.jwt_secret = jwt_config["secret"]
self.jwt_algorithm = jwt_config["algorithm"]
+ # The issuer and audiences are optional, if provided, it is asserted
+ # that the claims exist on the JWT.
+ self.jwt_issuer = jwt_config.get("issuer")
+ self.jwt_audiences = jwt_config.get("audiences")
+
try:
import jwt
@@ -42,13 +47,63 @@ class JWTConfig(Config):
self.jwt_enabled = False
self.jwt_secret = None
self.jwt_algorithm = None
+ self.jwt_issuer = None
+ self.jwt_audiences = None
def generate_config_section(self, **kwargs):
return """\
- # The JWT needs to contain a globally unique "sub" (subject) claim.
+ # JSON web token integration. The following settings can be used to make
+ # Synapse JSON web tokens for authentication, instead of its internal
+ # password database.
+ #
+ # Each JSON Web Token needs to contain a "sub" (subject) claim, which is
+ # used as the localpart of the mxid.
+ #
+ # Additionally, the expiration time ("exp"), not before time ("nbf"),
+ # and issued at ("iat") claims are validated if present.
+ #
+ # Note that this is a non-standard login type and client support is
+        # expected to be non-existent.
+ #
+ # See https://github.com/matrix-org/synapse/blob/master/docs/jwt.md.
#
#jwt_config:
- # enabled: true
- # secret: "a secret"
- # algorithm: "HS256"
+ # Uncomment the following to enable authorization using JSON web
+ # tokens. Defaults to false.
+ #
+ #enabled: true
+
+ # This is either the private shared secret or the public key used to
+ # decode the contents of the JSON web token.
+ #
+ # Required if 'enabled' is true.
+ #
+ #secret: "provided-by-your-issuer"
+
+ # The algorithm used to sign the JSON web token.
+ #
+ # Supported algorithms are listed at
+ # https://pyjwt.readthedocs.io/en/latest/algorithms.html
+ #
+ # Required if 'enabled' is true.
+ #
+ #algorithm: "provided-by-your-issuer"
+
+ # The issuer to validate the "iss" claim against.
+ #
+ # Optional, if provided the "iss" claim will be required and
+ # validated for all JSON web tokens.
+ #
+ #issuer: "provided-by-your-issuer"
+
+ # A list of audiences to validate the "aud" claim against.
+ #
+ # Optional, if provided the "aud" claim will be required and
+ # validated for all JSON web tokens.
+ #
+ # Note that if the "aud" claim is included in a JSON web token then
+ # validation will fail without configuring audiences.
+ #
+ #audiences:
+ # - "provided-by-your-issuer"
"""
diff --git a/synapse/config/key.py b/synapse/config/key.py
index b529ea5da0..de964dff13 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -82,7 +82,7 @@ logger = logging.getLogger(__name__)
@attr.s
-class TrustedKeyServer(object):
+class TrustedKeyServer:
# string: name of the server.
server_name = attr.ib()
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 49f6c32beb..c96e6ef62a 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -55,24 +55,33 @@ formatters:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - \
%(request)s - %(message)s'
-filters:
- context:
- (): synapse.logging.context.LoggingContextFilter
- request: ""
-
handlers:
file:
- class: logging.handlers.RotatingFileHandler
+ class: logging.handlers.TimedRotatingFileHandler
formatter: precise
filename: ${log_file}
- maxBytes: 104857600
- backupCount: 10
- filters: [context]
+ when: midnight
+ backupCount: 3 # Does not include the current log file.
encoding: utf8
+
+ # Default to buffering writes to log file for efficiency. This means that
+ # will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
+ # logs will still be flushed immediately.
+ buffer:
+ class: logging.handlers.MemoryHandler
+ target: file
+ # The capacity is the number of log lines that are buffered before
+ # being written to disk. Increasing this will lead to better
+    # performance, at the expense of it taking longer for log lines to
+ # be written to disk.
+ capacity: 10
+ flushLevel: 30 # Flush for WARNING logs as well
+
+ # A handler that writes logs to stderr. Unused by default, but can be used
+ # instead of "buffer" and "file" in the logger handlers.
console:
class: logging.StreamHandler
formatter: precise
- filters: [context]
loggers:
synapse.storage.SQL:
@@ -80,9 +89,24 @@ loggers:
# information such as access tokens.
level: INFO
+ twisted:
+ # We send the twisted logging directly to the file handler,
+ # to work around https://github.com/matrix-org/synapse/issues/3471
+ # when using "buffer" logger. Use "console" to log to stderr instead.
+ handlers: [file]
+ propagate: false
+
root:
level: INFO
- handlers: [file, console]
+
+ # Write logs to the `buffer` handler, which will buffer them together in memory,
+ # then write them to a file.
+ #
+ # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
+    # also need to update the configuration for the `twisted` logger above, in
+ # this case.)
+ #
+ handlers: [buffer]
disable_existing_loggers: false
"""
@@ -168,11 +192,26 @@ def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner):
handler = logging.StreamHandler()
handler.setFormatter(formatter)
- handler.addFilter(LoggingContextFilter(request=""))
logger.addHandler(handler)
else:
logging.config.dictConfig(log_config)
+ # We add a log record factory that runs all messages through the
+ # LoggingContextFilter so that we get the context *at the time we log*
+ # rather than when we write to a handler. This can be done in config using
+    # filter options, but care must be taken when using e.g. MemoryHandler to buffer
+ # writes.
+
+ log_filter = LoggingContextFilter(request="")
+ old_factory = logging.getLogRecordFactory()
+
+ def factory(*args, **kwargs):
+ record = old_factory(*args, **kwargs)
+ log_filter.filter(record)
+ return record
+
+ logging.setLogRecordFactory(factory)
+
# Route Twisted's native logging through to the standard library logging
# system.
observer = STDLibLogObserver()
@@ -214,7 +253,7 @@ def setup_logging(
Set up the logging subsystem.
Args:
- config (LoggingConfig | synapse.config.workers.WorkerConfig):
+ config (LoggingConfig | synapse.config.worker.WorkerConfig):
configuration data
use_worker_options (bool): True to use the 'worker_log_config' option
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index 6aad0d37c0..dfd27e1523 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -22,7 +22,7 @@ from ._base import Config, ConfigError
@attr.s
-class MetricsFlags(object):
+class MetricsFlags:
known_servers = attr.ib(default=False, validator=attr.validators.instance_of(bool))
@classmethod
diff --git a/synapse/config/oidc_config.py b/synapse/config/oidc_config.py
index e24dd637bc..e0939bce84 100644
--- a/synapse/config/oidc_config.py
+++ b/synapse/config/oidc_config.py
@@ -89,7 +89,7 @@ class OIDCConfig(Config):
# use an OpenID Connect Provider for authentication, instead of its internal
# password database.
#
- # See https://github.com/matrix-org/synapse/blob/master/openid.md.
+ # See https://github.com/matrix-org/synapse/blob/master/docs/openid.md.
#
oidc_config:
# Uncomment the following to enable authorization against an OpenID Connect
diff --git a/synapse/config/push.py b/synapse/config/push.py
index 6f2b3a7faa..a1f3752c8a 100644
--- a/synapse/config/push.py
+++ b/synapse/config/push.py
@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from ._base import Config
+from ._base import Config, ShardedWorkerHandlingConfig
class PushConfig(Config):
@@ -24,6 +24,9 @@ class PushConfig(Config):
push_config = config.get("push", {})
self.push_include_content = push_config.get("include_content", True)
+ pusher_instances = config.get("pusher_instances") or []
+ self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)
+
# There was a a 'redact_content' setting but mistakenly read from the
# 'email'section'. Check for the flag in the 'push' section, and log,
# but do not honour it to avoid nasty surprises when people upgrade.
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 2dd94bae2b..14b8836197 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -17,7 +17,7 @@ from typing import Dict
from ._base import Config
-class RateLimitConfig(object):
+class RateLimitConfig:
def __init__(
self,
config: Dict[str, float],
@@ -27,7 +27,7 @@ class RateLimitConfig(object):
self.burst_count = config.get("burst_count", defaults["burst_count"])
-class FederationRateLimitConfig(object):
+class FederationRateLimitConfig:
_items_and_default = {
"window_size": 1000,
"sleep_limit": 10,
@@ -93,6 +93,15 @@ class RatelimitConfig(Config):
if rc_admin_redaction:
self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction)
+ self.rc_joins_local = RateLimitConfig(
+ config.get("rc_joins", {}).get("local", {}),
+ defaults={"per_second": 0.1, "burst_count": 3},
+ )
+ self.rc_joins_remote = RateLimitConfig(
+ config.get("rc_joins", {}).get("remote", {}),
+ defaults={"per_second": 0.01, "burst_count": 3},
+ )
+
def generate_config_section(self, **kwargs):
return """\
## Ratelimiting ##
@@ -118,6 +127,10 @@ class RatelimitConfig(Config):
# - one for ratelimiting redactions by room admins. If this is not explicitly
# set then it uses the same ratelimiting as per rc_message. This is useful
# to allow room admins to deal with abuse quickly.
+ # - two for ratelimiting number of rooms a user can join, "local" for when
+ # users are joining rooms the server is already in (this is cheap) vs
+ # "remote" for when users are trying to join rooms not on the server (which
+ # can be more expensive)
#
# The defaults are as shown below.
#
@@ -143,6 +156,14 @@ class RatelimitConfig(Config):
#rc_admin_redaction:
# per_second: 1
# burst_count: 50
+ #
+ #rc_joins:
+ # local:
+ # per_second: 0.1
+ # burst_count: 3
+ # remote:
+ # per_second: 0.01
+ # burst_count: 3
# Ratelimiting settings for incoming federation
diff --git a/synapse/config/redis.py b/synapse/config/redis.py
index d5d3ca1c9e..1373302335 100644
--- a/synapse/config/redis.py
+++ b/synapse/config/redis.py
@@ -21,7 +21,7 @@ class RedisConfig(Config):
section = "redis"
def read_config(self, config, **kwargs):
- redis_config = config.get("redis", {})
+ redis_config = config.get("redis") or {}
self.redis_enabled = redis_config.get("enabled", False)
if not self.redis_enabled:
@@ -32,3 +32,24 @@ class RedisConfig(Config):
self.redis_host = redis_config.get("host", "localhost")
self.redis_port = redis_config.get("port", 6379)
self.redis_password = redis_config.get("password")
+
+ def generate_config_section(self, config_dir_path, server_name, **kwargs):
+ return """\
+ # Configuration for Redis when using workers. This *must* be enabled when
+ # using workers (unless using old style direct TCP configuration).
+ #
+ redis:
+ # Uncomment the below to enable Redis support.
+ #
+ #enabled: true
+
+ # Optional host and port to use to connect to redis. Defaults to
+ # localhost and 6379
+ #
+ #host: localhost
+ #port: 6379
+
+ # Optional password if configured on the Redis instance
+ #
+ #password: <secret_password>
+ """
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index c7487178dc..a670080fe2 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -18,8 +18,9 @@ from distutils.util import strtobool
import pkg_resources
+from synapse.api.constants import RoomCreationPreset
from synapse.config._base import Config, ConfigError
-from synapse.types import RoomAlias
+from synapse.types import RoomAlias, UserID
from synapse.util.stringutils import random_string_with_symbols
@@ -130,7 +131,50 @@ class RegistrationConfig(Config):
for room_alias in self.auto_join_rooms:
if not RoomAlias.is_valid(room_alias):
raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,))
+
+ # Options for creating auto-join rooms if they do not exist yet.
self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
+ self.autocreate_auto_join_rooms_federated = config.get(
+ "autocreate_auto_join_rooms_federated", True
+ )
+ self.autocreate_auto_join_room_preset = (
+ config.get("autocreate_auto_join_room_preset")
+ or RoomCreationPreset.PUBLIC_CHAT
+ )
+ self.auto_join_room_requires_invite = self.autocreate_auto_join_room_preset in {
+ RoomCreationPreset.PRIVATE_CHAT,
+ RoomCreationPreset.TRUSTED_PRIVATE_CHAT,
+ }
+
+        # Pull the creator/inviter from the configuration, this gets used to
+ # send invites for invite-only rooms.
+ mxid_localpart = config.get("auto_join_mxid_localpart")
+ self.auto_join_user_id = None
+ if mxid_localpart:
+ # Convert the localpart to a full mxid.
+ self.auto_join_user_id = UserID(
+ mxid_localpart, self.server_name
+ ).to_string()
+
+ if self.autocreate_auto_join_rooms:
+ # Ensure the preset is a known value.
+ if self.autocreate_auto_join_room_preset not in {
+ RoomCreationPreset.PUBLIC_CHAT,
+ RoomCreationPreset.PRIVATE_CHAT,
+ RoomCreationPreset.TRUSTED_PRIVATE_CHAT,
+ }:
+ raise ConfigError("Invalid value for autocreate_auto_join_room_preset")
+ # If the preset requires invitations to be sent, ensure there's a
+ # configured user to send them from.
+ if self.auto_join_room_requires_invite:
+ if not mxid_localpart:
+ raise ConfigError(
+ "The configuration option `auto_join_mxid_localpart` is required if "
+ "`autocreate_auto_join_room_preset` is set to private_chat or trusted_private_chat, such that "
+ "Synapse knows who to send invitations from. Please "
+ "configure `auto_join_mxid_localpart`."
+ )
+
self.auto_join_rooms_for_guests = config.get("auto_join_rooms_for_guests", True)
self.enable_set_displayname = config.get("enable_set_displayname", True)
@@ -297,24 +341,6 @@ class RegistrationConfig(Config):
#
#default_identity_server: https://matrix.org
- # The list of identity servers trusted to verify third party
- # identifiers by this server.
- #
- # Also defines the ID server which will be called when an account is
- # deactivated (one will be picked arbitrarily).
- #
- # Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity
- # server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a
- # background migration script, informing itself that the identity server all of its
- # 3PIDs have been bound to is likely one of the below.
- #
- # As of Synapse v1.4.0, all other functionality of this option has been deprecated, and
- # it is now solely used for the purposes of the background migration script, and can be
- # removed once it has run.
- #trusted_third_party_id_servers:
- # - matrix.org
- # - vector.im
-
# Handle threepid (email/phone etc) registration and password resets through a set of
# *trusted* identity servers. Note that this allows the configured identity server to
# reset passwords for accounts!
@@ -365,7 +391,11 @@ class RegistrationConfig(Config):
#enable_3pid_changes: false
# Users who register on this homeserver will automatically be joined
- # to these rooms
+ # to these rooms.
+ #
+ # By default, any room aliases included in this list will be created
+ # as a publicly joinable room when the first user registers for the
+ # homeserver. This behaviour can be customised with the settings below.
#
#auto_join_rooms:
# - "#example:example.com"
@@ -373,10 +403,62 @@ class RegistrationConfig(Config):
# Where auto_join_rooms are specified, setting this flag ensures that the
# the rooms exist by creating them when the first user on the
# homeserver registers.
+ #
+ # By default the auto-created rooms are publicly joinable from any federated
+ # server. Use the autocreate_auto_join_rooms_federated and
+ # autocreate_auto_join_room_preset settings below to customise this behaviour.
+ #
# Setting to false means that if the rooms are not manually created,
# users cannot be auto-joined since they do not exist.
#
- #autocreate_auto_join_rooms: true
+ # Defaults to true. Uncomment the following line to disable automatically
+ # creating auto-join rooms.
+ #
+ #autocreate_auto_join_rooms: false
+
+ # Whether the auto_join_rooms that are auto-created are available via
+ # federation. Only has an effect if autocreate_auto_join_rooms is true.
+ #
+ # Note that whether a room is federated cannot be modified after
+ # creation.
+ #
+ # Defaults to true: the room will be joinable from other servers.
+ # Uncomment the following to prevent users from other homeservers from
+ # joining these rooms.
+ #
+ #autocreate_auto_join_rooms_federated: false
+
+ # The room preset to use when auto-creating one of auto_join_rooms. Only has an
+ # effect if autocreate_auto_join_rooms is true.
+ #
+ # This can be one of "public_chat", "private_chat", or "trusted_private_chat".
+ # If a value of "private_chat" or "trusted_private_chat" is used then
+ # auto_join_mxid_localpart must also be configured.
+ #
+ # Defaults to "public_chat", meaning that the room is joinable by anyone, including
+ # federated servers if autocreate_auto_join_rooms_federated is true (the default).
+ # Uncomment the following to require an invitation to join these rooms.
+ #
+ #autocreate_auto_join_room_preset: private_chat
+
+ # The local part of the user id which is used to create auto_join_rooms if
+ # autocreate_auto_join_rooms is true. If this is not provided then the
+ # initial user account that registers will be used to create the rooms.
+ #
+ # The user id is also used to invite new users to any auto-join rooms which
+ # are set to invite-only.
+ #
+ # It *must* be configured if autocreate_auto_join_room_preset is set to
+ # "private_chat" or "trusted_private_chat".
+ #
+ # Note that this must be specified in order for new users to be correctly
+ # invited to any auto-join rooms which have been set to invite-only (either
+ # at the time of creation or subsequently).
+ #
+ # Note that, if the room already exists, this user must be joined and
+ # have the appropriate permissions to invite new members.
+ #
+ #auto_join_mxid_localpart: system
# When auto_join_rooms is specified, setting this flag to false prevents
# guest accounts from being automatically joined to the rooms.
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index b751d02d37..01009f3924 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -94,6 +94,12 @@ class ContentRepositoryConfig(Config):
else:
self.can_load_media_repo = True
+ # Whether this instance should be the one to run the background jobs to
+ # e.g clean up old URL previews.
+ self.media_instance_running_background_jobs = config.get(
+ "media_instance_running_background_jobs",
+ )
+
self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M"))
self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))
diff --git a/synapse/config/room.py b/synapse/config/room.py
new file mode 100644
index 0000000000..692d7a1936
--- /dev/null
+++ b/synapse/config/room.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.api.constants import RoomCreationPreset
+
+from ._base import Config, ConfigError
+
+logger = logging.getLogger(__name__)
+
+
+class RoomDefaultEncryptionTypes:
+    """Possible values for the encryption_enabled_by_default_for_room_type config option"""
+
+    ALL = "all"
+    INVITE = "invite"
+    OFF = "off"
+
+
+class RoomConfig(Config):
+    section = "room"
+
+    def read_config(self, config, **kwargs):
+        # Whether new, locally-created rooms should have encryption enabled
+        encryption_for_room_type = config.get(
+            "encryption_enabled_by_default_for_room_type",
+            RoomDefaultEncryptionTypes.OFF,
+        )
+        if encryption_for_room_type == RoomDefaultEncryptionTypes.ALL:
+            self.encryption_enabled_by_default_for_room_presets = [
+                RoomCreationPreset.PRIVATE_CHAT,
+                RoomCreationPreset.TRUSTED_PRIVATE_CHAT,
+                RoomCreationPreset.PUBLIC_CHAT,
+            ]
+        elif encryption_for_room_type == RoomDefaultEncryptionTypes.INVITE:
+            self.encryption_enabled_by_default_for_room_presets = [
+                RoomCreationPreset.PRIVATE_CHAT,
+                RoomCreationPreset.TRUSTED_PRIVATE_CHAT,
+            ]
+        elif (
+            encryption_for_room_type == RoomDefaultEncryptionTypes.OFF
+            or encryption_for_room_type is False
+        ):
+            # PyYAML translates "off" into False if it's unquoted, so we also need to
+            # check for encryption_for_room_type being False.
+            self.encryption_enabled_by_default_for_room_presets = []
+        else:
+            raise ConfigError(
+                "Invalid value for encryption_enabled_by_default_for_room_type"
+            )
+
+    def generate_config_section(self, **kwargs):
+        return """\
+        ## Rooms ##
+
+        # Controls whether locally-created rooms should be end-to-end encrypted by
+        # default.
+        #
+        # Possible options are "all", "invite", and "off". They are defined as:
+        #
+        # * "all": any locally-created room
+        # * "invite": any room created with the "private_chat" or "trusted_private_chat"
+        #   room creation presets
+        # * "off": this option will take no effect
+        #
+        # The default value is "off".
+        #
+        # Note that this option will only affect rooms created after it is set. It
+        # will also not affect rooms created by other servers.
+        #
+        #encryption_enabled_by_default_for_room_type: invite
+        """
diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py
index 7ac7699676..6de1f9d103 100644
--- a/synapse/config/room_directory.py
+++ b/synapse/config/room_directory.py
@@ -149,7 +149,7 @@ class RoomDirectoryConfig(Config):
return False
-class _RoomDirectoryRule(object):
+class _RoomDirectoryRule:
"""Helper class to test whether a room directory action is allowed, like
creating an alias or publishing a room.
"""
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
index d0a19751e8..cc7401888b 100644
--- a/synapse/config/saml2_config.py
+++ b/synapse/config/saml2_config.py
@@ -15,14 +15,15 @@
# limitations under the License.
import logging
+from typing import Any, List
-import jinja2
-import pkg_resources
+import attr
from synapse.python_dependencies import DependencyException, check_requirements
from synapse.util.module_loader import load_module, load_python_module
from ._base import Config, ConfigError
+from ._util import validate_config
logger = logging.getLogger(__name__)
@@ -80,6 +81,11 @@ class SAML2Config(Config):
self.saml2_enabled = True
+ attribute_requirements = saml2_config.get("attribute_requirements") or []
+ self.attribute_requirements = _parse_attribute_requirements_def(
+ attribute_requirements
+ )
+
self.saml2_grandfathered_mxid_source_attribute = saml2_config.get(
"grandfathered_mxid_source_attribute", "uid"
)
@@ -160,18 +166,12 @@ class SAML2Config(Config):
# session lifetime: in milliseconds
self.saml2_session_lifetime = self.parse_duration(
- saml2_config.get("saml_session_lifetime", "5m")
+ saml2_config.get("saml_session_lifetime", "15m")
)
- template_dir = saml2_config.get("template_dir")
- if not template_dir:
- template_dir = pkg_resources.resource_filename("synapse", "res/templates",)
-
- loader = jinja2.FileSystemLoader(template_dir)
- # enable auto-escape here, to having to remember to escape manually in the
- # template
- env = jinja2.Environment(loader=loader, autoescape=True)
- self.saml2_error_html_template = env.get_template("saml_error.html")
+ self.saml2_error_html_template = self.read_templates(
+ ["saml_error.html"], saml2_config.get("template_dir")
+ )[0]
def _default_saml_config_dict(
self, required_attributes: set, optional_attributes: set
@@ -286,7 +286,7 @@ class SAML2Config(Config):
# The lifetime of a SAML session. This defines how long a user has to
# complete the authentication process, if allow_unsolicited is unset.
- # The default is 5 minutes.
+ # The default is 15 minutes.
#
#saml_session_lifetime: 5m
@@ -341,6 +341,17 @@ class SAML2Config(Config):
#
#grandfathered_mxid_source_attribute: upn
+ # It is possible to configure Synapse to only allow logins if SAML attributes
+ # match particular values. The requirements can be listed under
+ # `attribute_requirements` as shown below. All of the listed attributes must
+ # match for the login to be permitted.
+ #
+ #attribute_requirements:
+ # - attribute: userGroup
+ # value: "staff"
+ # - attribute: department
+ # value: "sales"
+
# Directory in which Synapse will try to find the template files below.
# If not set, default templates from within the Synapse package will be used.
#
@@ -368,3 +379,34 @@ class SAML2Config(Config):
""" % {
"config_dir_path": config_dir_path
}
+
+
+@attr.s(frozen=True)
+class SamlAttributeRequirement:
+ """Object describing a single requirement for SAML attributes."""
+
+ attribute = attr.ib(type=str)
+ value = attr.ib(type=str)
+
+ JSON_SCHEMA = {
+ "type": "object",
+ "properties": {"attribute": {"type": "string"}, "value": {"type": "string"}},
+ "required": ["attribute", "value"],
+ }
+
+
+ATTRIBUTE_REQUIREMENTS_SCHEMA = {
+ "type": "array",
+ "items": SamlAttributeRequirement.JSON_SCHEMA,
+}
+
+
+def _parse_attribute_requirements_def(
+ attribute_requirements: Any,
+) -> List[SamlAttributeRequirement]:
+ validate_config(
+ ATTRIBUTE_REQUIREMENTS_SCHEMA,
+ attribute_requirements,
+ config_path=["saml2_config", "attribute_requirements"],
+ )
+ return [SamlAttributeRequirement(**x) for x in attribute_requirements]
diff --git a/synapse/config/server.py b/synapse/config/server.py
index f57eefc99c..e85c6a0840 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -19,15 +19,13 @@ import logging
import os.path
import re
from textwrap import indent
-from typing import Dict, List, Optional
+from typing import Any, Dict, Iterable, List, Optional
import attr
import yaml
-from netaddr import IPSet
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.http.endpoint import parse_and_validate_server_name
-from synapse.python_dependencies import DependencyException, check_requirements
from ._base import Config, ConfigError
@@ -57,6 +55,64 @@ on how to configure the new listener.
--------------------------------------------------------------------------------"""
+KNOWN_LISTENER_TYPES = {
+ "http",
+ "metrics",
+ "manhole",
+ "replication",
+}
+
+KNOWN_RESOURCES = {
+ "client",
+ "consent",
+ "federation",
+ "keys",
+ "media",
+ "metrics",
+ "openid",
+ "replication",
+ "static",
+ "webclient",
+}
+
+
+@attr.s(frozen=True)
+class HttpResourceConfig:
+ names = attr.ib(
+ type=List[str],
+ factory=list,
+ validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)), # type: ignore
+ )
+ compress = attr.ib(
+ type=bool,
+ default=False,
+ validator=attr.validators.optional(attr.validators.instance_of(bool)), # type: ignore[arg-type]
+ )
+
+
+@attr.s(frozen=True)
+class HttpListenerConfig:
+ """Object describing the http-specific parts of the config of a listener"""
+
+ x_forwarded = attr.ib(type=bool, default=False)
+ resources = attr.ib(type=List[HttpResourceConfig], factory=list)
+ additional_resources = attr.ib(type=Dict[str, dict], factory=dict)
+ tag = attr.ib(type=str, default=None)
+
+
+@attr.s(frozen=True)
+class ListenerConfig:
+ """Object describing the configuration of a single listener."""
+
+ port = attr.ib(type=int, validator=attr.validators.instance_of(int))
+ bind_addresses = attr.ib(type=List[str])
+ type = attr.ib(type=str, validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
+ tls = attr.ib(type=bool, default=False)
+
+ # http_options is only populated if type=http
+ http_options = attr.ib(type=Optional[HttpListenerConfig], default=None)
+
+
class ServerConfig(Config):
section = "server"
@@ -78,11 +134,6 @@ class ServerConfig(Config):
self.use_frozen_dicts = config.get("use_frozen_dicts", False)
self.public_baseurl = config.get("public_baseurl")
- # Whether to send federation traffic out in this process. This only
- # applies to some federation traffic, and so shouldn't be used to
- # "disable" federation
- self.send_federation = config.get("send_federation", True)
-
# Whether to enable user presence.
self.use_presence = config.get("use_presence", True)
@@ -155,7 +206,7 @@ class ServerConfig(Config):
# errors when attempting to search for messages.
self.enable_search = config.get("enable_search", True)
- self.filter_timeline_limit = config.get("filter_timeline_limit", -1)
+ self.filter_timeline_limit = config.get("filter_timeline_limit", 100)
# Whether we should block invites sent to users on this server
# (other than those sent by local server admins)
@@ -205,34 +256,6 @@ class ServerConfig(Config):
# due to resource constraints
self.admin_contact = config.get("admin_contact", None)
- # FIXME: federation_domain_whitelist needs sytests
- self.federation_domain_whitelist = None # type: Optional[dict]
- federation_domain_whitelist = config.get("federation_domain_whitelist", None)
-
- if federation_domain_whitelist is not None:
- # turn the whitelist into a hash for speed of lookup
- self.federation_domain_whitelist = {}
-
- for domain in federation_domain_whitelist:
- self.federation_domain_whitelist[domain] = True
-
- self.federation_ip_range_blacklist = config.get(
- "federation_ip_range_blacklist", []
- )
-
- # Attempt to create an IPSet from the given ranges
- try:
- self.federation_ip_range_blacklist = IPSet(
- self.federation_ip_range_blacklist
- )
-
- # Always blacklist 0.0.0.0, ::
- self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
- except Exception as e:
- raise ConfigError(
- "Invalid range(s) provided in federation_ip_range_blacklist: %s" % e
- )
-
if self.public_baseurl is not None:
if self.public_baseurl[-1] != "/":
self.public_baseurl += "/"
@@ -379,38 +402,21 @@ class ServerConfig(Config):
}
]
- self.listeners = [] # type: List[dict]
- for listener in config.get("listeners", []):
- if not isinstance(listener.get("port", None), int):
- raise ConfigError(
- "Listener configuration is lacking a valid 'port' option"
- )
+ self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
- if listener.setdefault("tls", False):
- # no_tls is not really supported any more, but let's grandfather it in
- # here.
- if config.get("no_tls", False):
+ # no_tls is not really supported any more, but let's grandfather it in
+ # here.
+ if config.get("no_tls", False):
+ l2 = []
+ for listener in self.listeners:
+ if listener.tls:
logger.info(
- "Ignoring TLS-enabled listener on port %i due to no_tls"
+ "Ignoring TLS-enabled listener on port %i due to no_tls",
+ listener.port,
)
- continue
-
- bind_address = listener.pop("bind_address", None)
- bind_addresses = listener.setdefault("bind_addresses", [])
-
- # if bind_address was specified, add it to the list of addresses
- if bind_address:
- bind_addresses.append(bind_address)
-
- # if we still have an empty list of addresses, use the default list
- if not bind_addresses:
- if listener["type"] == "metrics":
- # the metrics listener doesn't support IPv6
- bind_addresses.append("0.0.0.0")
else:
- bind_addresses.extend(DEFAULT_BIND_ADDRESSES)
-
- self.listeners.append(listener)
+ l2.append(listener)
+ self.listeners = l2
if not self.web_client_location:
_warn_if_webclient_configured(self.listeners)
@@ -418,7 +424,7 @@ class ServerConfig(Config):
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
@attr.s
- class LimitRemoteRoomsConfig(object):
+ class LimitRemoteRoomsConfig:
enabled = attr.ib(
validator=attr.validators.instance_of(bool), default=False
)
@@ -432,6 +438,9 @@ class ServerConfig(Config):
validator=attr.validators.instance_of(str),
default=ROOM_COMPLEXITY_TOO_GREAT,
)
+ admins_can_join = attr.ib(
+ validator=attr.validators.instance_of(bool), default=False
+ )
self.limit_remote_rooms = LimitRemoteRoomsConfig(
**(config.get("limit_remote_rooms") or {})
@@ -446,43 +455,41 @@ class ServerConfig(Config):
bind_host = config.get("bind_host", "")
gzip_responses = config.get("gzip_responses", True)
+ http_options = HttpListenerConfig(
+ resources=[
+ HttpResourceConfig(names=["client"], compress=gzip_responses),
+ HttpResourceConfig(names=["federation"]),
+ ],
+ )
+
self.listeners.append(
- {
- "port": bind_port,
- "bind_addresses": [bind_host],
- "tls": True,
- "type": "http",
- "resources": [
- {"names": ["client"], "compress": gzip_responses},
- {"names": ["federation"], "compress": False},
- ],
- }
+ ListenerConfig(
+ port=bind_port,
+ bind_addresses=[bind_host],
+ tls=True,
+ type="http",
+ http_options=http_options,
+ )
)
unsecure_port = config.get("unsecure_port", bind_port - 400)
if unsecure_port:
self.listeners.append(
- {
- "port": unsecure_port,
- "bind_addresses": [bind_host],
- "tls": False,
- "type": "http",
- "resources": [
- {"names": ["client"], "compress": gzip_responses},
- {"names": ["federation"], "compress": False},
- ],
- }
+ ListenerConfig(
+ port=unsecure_port,
+ bind_addresses=[bind_host],
+ tls=False,
+ type="http",
+ http_options=http_options,
+ )
)
manhole = config.get("manhole")
if manhole:
self.listeners.append(
- {
- "port": manhole,
- "bind_addresses": ["127.0.0.1"],
- "type": "manhole",
- "tls": False,
- }
+ ListenerConfig(
+ port=manhole, bind_addresses=["127.0.0.1"], type="manhole",
+ )
)
metrics_port = config.get("metrics_port")
@@ -490,17 +497,16 @@ class ServerConfig(Config):
logger.warning(METRICS_PORT_WARNING)
self.listeners.append(
- {
- "port": metrics_port,
- "bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
- "tls": False,
- "type": "http",
- "resources": [{"names": ["metrics"], "compress": False}],
- }
+ ListenerConfig(
+ port=metrics_port,
+ bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")],
+ type="http",
+ http_options=HttpListenerConfig(
+ resources=[HttpResourceConfig(names=["metrics"])]
+ ),
+ )
)
- _check_resource_config(self.listeners)
-
self.cleanup_extremities_with_dummy_events = config.get(
"cleanup_extremities_with_dummy_events", True
)
@@ -521,8 +527,23 @@ class ServerConfig(Config):
"request_token_inhibit_3pid_errors", False,
)
+ # List of users trialing the new experimental default push rules. This setting is
+ # not included in the sample configuration file on purpose as it's a temporary
+ # hack, so that some users can trial the new defaults without impacting every
+ # user on the homeserver.
+ users_new_default_push_rules = (
+ config.get("users_new_default_push_rules") or []
+ ) # type: list
+ if not isinstance(users_new_default_push_rules, list):
+ raise ConfigError("'users_new_default_push_rules' must be a list")
+
+ # Turn the list into a set to improve lookup speed.
+ self.users_new_default_push_rules = set(
+ users_new_default_push_rules
+ ) # type: set
+
def has_tls_listener(self) -> bool:
- return any(listener["tls"] for listener in self.listeners)
+ return any(listener.tls for listener in self.listeners)
def generate_config_section(
self, server_name, data_dir_path, open_private_ports, listeners, **kwargs
@@ -687,7 +708,9 @@ class ServerConfig(Config):
#gc_thresholds: [700, 10, 10]
# Set the limit on the returned events in the timeline in the get
- # and sync operations. The default value is -1, means no upper limit.
+ # and sync operations. The default value is 100. -1 means no upper limit.
+ #
+ # Uncomment the following to increase the limit to 5000.
#
#filter_timeline_limit: 5000
@@ -703,38 +726,6 @@ class ServerConfig(Config):
#
#enable_search: false
- # Restrict federation to the following whitelist of domains.
- # N.B. we recommend also firewalling your federation listener to limit
- # inbound federation traffic as early as possible, rather than relying
- # purely on this application-layer restriction. If not specified, the
- # default is to whitelist everything.
- #
- #federation_domain_whitelist:
- # - lon.example.com
- # - nyc.example.com
- # - syd.example.com
-
- # Prevent federation requests from being sent to the following
- # blacklist IP address CIDR ranges. If this option is not specified, or
- # specified with an empty list, no ip range blacklist will be enforced.
- #
- # As of Synapse v1.4.0 this option also affects any outbound requests to identity
- # servers provided by user input.
- #
- # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
- # listed here, since they correspond to unroutable addresses.)
- #
- federation_ip_range_blacklist:
- - '127.0.0.0/8'
- - '10.0.0.0/8'
- - '172.16.0.0/12'
- - '192.168.0.0/16'
- - '100.64.0.0/10'
- - '169.254.0.0/16'
- - '::1/128'
- - 'fe80::/64'
- - 'fc00::/7'
-
# List of ports that Synapse should listen on, their purpose and their
# configuration.
#
@@ -763,7 +754,7 @@ class ServerConfig(Config):
# names: a list of names of HTTP resources. See below for a list of
# valid resource names.
#
- # compress: set to true to enable HTTP comression for this resource.
+ # compress: set to true to enable HTTP compression for this resource.
#
# additional_resources: Only valid for an 'http' listener. A map of
# additional endpoints which should be loaded via dynamic modules.
@@ -856,7 +847,7 @@ class ServerConfig(Config):
# number of monthly active users.
#
# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
- # anabled and a limit is reached the server returns a 'ResourceLimitError'
+ # enabled and a limit is reached the server returns a 'ResourceLimitError'
# with error type Codes.RESOURCE_LIMIT_EXCEEDED
#
# 'max_mau_value' is the hard limit of monthly active users above which
@@ -917,6 +908,10 @@ class ServerConfig(Config):
#
#complexity_error: "This room is too complex."
+ # Allow server admins to join complex rooms. Default is false.
+ #
+ #admins_can_join: true
+
# Whether to require a user to be in the room to add an alias to it.
# Defaults to 'true'.
#
@@ -966,11 +961,10 @@ class ServerConfig(Config):
# min_lifetime: 1d
# max_lifetime: 1y
- # Retention policy limits. If set, a user won't be able to send a
- # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
- # that's not within this range. This is especially useful in closed federations,
- # in which server admins can make sure every federating server applies the same
- # rules.
+ # Retention policy limits. If set, and the state of a room contains a
+ # 'm.room.retention' event in its state which contains a 'min_lifetime' or a
+ # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
+ # to these limits when running purge jobs.
#
#allowed_lifetime_min: 1d
#allowed_lifetime_max: 1y
@@ -996,12 +990,19 @@ class ServerConfig(Config):
# (e.g. every 12h), but not want that purge to be performed by a job that's
# iterating over every room it knows, which could be heavy on the server.
#
+ # If any purge job is configured, it is strongly recommended to have at least
+ # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
+ # set, or one job without 'shortest_max_lifetime' and one job without
+ # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
+ # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
+ # room's policy to these values is done after the policies are retrieved from
+ # Synapse's database (which is done using the range specified in a purge job's
+ # configuration).
+ #
#purge_jobs:
- # - shortest_max_lifetime: 1d
- # longest_max_lifetime: 3d
+ # - longest_max_lifetime: 3d
# interval: 12h
# - shortest_max_lifetime: 3d
- # longest_max_lifetime: 1y
# interval: 1d
# Inhibits the /requestToken endpoints from returning an error that might leak
@@ -1081,6 +1082,44 @@ def read_gc_thresholds(thresholds):
)
+def parse_listener_def(listener: Any) -> ListenerConfig:
+ """parse a listener config from the config file"""
+ listener_type = listener["type"]
+
+ port = listener.get("port")
+ if not isinstance(port, int):
+ raise ConfigError("Listener configuration is lacking a valid 'port' option")
+
+ tls = listener.get("tls", False)
+
+ bind_addresses = listener.get("bind_addresses", [])
+ bind_address = listener.get("bind_address")
+ # if bind_address was specified, add it to the list of addresses
+ if bind_address:
+ bind_addresses.append(bind_address)
+
+ # if we still have an empty list of addresses, use the default list
+ if not bind_addresses:
+ if listener_type == "metrics":
+ # the metrics listener doesn't support IPv6
+ bind_addresses.append("0.0.0.0")
+ else:
+ bind_addresses.extend(DEFAULT_BIND_ADDRESSES)
+
+ http_config = None
+ if listener_type == "http":
+ http_config = HttpListenerConfig(
+ x_forwarded=listener.get("x_forwarded", False),
+ resources=[
+ HttpResourceConfig(**res) for res in listener.get("resources", [])
+ ],
+ additional_resources=listener.get("additional_resources", {}),
+ tag=listener.get("tag"),
+ )
+
+ return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
+
+
NO_MORE_WEB_CLIENT_WARNING = """
Synapse no longer includes a web client. To enable a web client, configure
web_client_location. To remove this warning, remove 'webclient' from the 'listeners'
@@ -1088,42 +1127,12 @@ configuration.
"""
-def _warn_if_webclient_configured(listeners):
+def _warn_if_webclient_configured(listeners: Iterable[ListenerConfig]) -> None:
for listener in listeners:
- for res in listener.get("resources", []):
- for name in res.get("names", []):
+ if not listener.http_options:
+ continue
+ for res in listener.http_options.resources:
+ for name in res.names:
if name == "webclient":
logger.warning(NO_MORE_WEB_CLIENT_WARNING)
return
-
-
-KNOWN_RESOURCES = (
- "client",
- "consent",
- "federation",
- "keys",
- "media",
- "metrics",
- "openid",
- "replication",
- "static",
- "webclient",
-)
-
-
-def _check_resource_config(listeners):
- resource_names = {
- res_name
- for listener in listeners
- for res in listener.get("resources", [])
- for res_name in res.get("names", [])
- }
-
- for resource in resource_names:
- if resource not in KNOWN_RESOURCES:
- raise ConfigError("Unknown listener resource '%s'" % (resource,))
- if resource == "consent":
- try:
- check_requirements("resources.consent")
- except DependencyException as e:
- raise ConfigError(e.message)
diff --git a/synapse/config/sso.py b/synapse/config/sso.py
index 73b7296399..4427676167 100644
--- a/synapse/config/sso.py
+++ b/synapse/config/sso.py
@@ -12,11 +12,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import os
from typing import Any, Dict
-import pkg_resources
-
from ._base import Config
@@ -29,22 +26,32 @@ class SSOConfig(Config):
def read_config(self, config, **kwargs):
sso_config = config.get("sso") or {} # type: Dict[str, Any]
- # Pick a template directory in order of:
- # * The sso-specific template_dir
- # * /path/to/synapse/install/res/templates
+ # The sso-specific template_dir
template_dir = sso_config.get("template_dir")
- if not template_dir:
- template_dir = pkg_resources.resource_filename("synapse", "res/templates",)
- self.sso_template_dir = template_dir
- self.sso_account_deactivated_template = self.read_file(
- os.path.join(self.sso_template_dir, "sso_account_deactivated.html"),
- "sso_account_deactivated_template",
+ # Read templates from disk
+ (
+ self.sso_redirect_confirm_template,
+ self.sso_auth_confirm_template,
+ self.sso_error_template,
+ sso_account_deactivated_template,
+ sso_auth_success_template,
+ ) = self.read_templates(
+ [
+ "sso_redirect_confirm.html",
+ "sso_auth_confirm.html",
+ "sso_error.html",
+ "sso_account_deactivated.html",
+ "sso_auth_success.html",
+ ],
+ template_dir,
)
- self.sso_auth_success_template = self.read_file(
- os.path.join(self.sso_template_dir, "sso_auth_success.html"),
- "sso_auth_success_template",
+
+ # These templates have no placeholders, so render them here
+ self.sso_account_deactivated_template = (
+ sso_account_deactivated_template.render()
)
+ self.sso_auth_success_template = sso_auth_success_template.render()
self.sso_client_whitelist = sso_config.get("client_whitelist") or []
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index a65538562b..e368ea564d 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -20,8 +20,6 @@ from datetime import datetime
from hashlib import sha256
from typing import List
-import six
-
from unpaddedbase64 import encode_base64
from OpenSSL import SSL, crypto
@@ -59,7 +57,7 @@ class TlsConfig(Config):
logger.warning(ACME_SUPPORT_ENABLED_WARN)
# hyperlink complains on py2 if this is not a Unicode
- self.acme_url = six.text_type(
+ self.acme_url = str(
acme_config.get("url", "https://acme-v01.api.letsencrypt.org/directory")
)
self.acme_port = acme_config.get("port", 80)
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index ed06b91a54..c784a71508 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -15,7 +15,8 @@
import attr
-from ._base import Config, ConfigError
+from ._base import Config, ConfigError, ShardedWorkerHandlingConfig
+from .server import ListenerConfig, parse_listener_def
@attr.s
@@ -33,9 +34,11 @@ class WriterLocations:
Attributes:
events: The instance that writes to the event and backfill streams.
+ typing: The instance that writes to the typing stream.
"""
events = attr.ib(default="master", type=str)
+ typing = attr.ib(default="master", type=str)
class WorkerConfig(Config):
@@ -52,7 +55,9 @@ class WorkerConfig(Config):
if self.worker_app == "synapse.app.homeserver":
self.worker_app = None
- self.worker_listeners = config.get("worker_listeners", [])
+ self.worker_listeners = [
+ parse_listener_def(x) for x in config.get("worker_listeners", [])
+ ]
self.worker_daemonize = config.get("worker_daemonize")
self.worker_pid_file = config.get("worker_pid_file")
self.worker_log_config = config.get("worker_log_config")
@@ -75,23 +80,20 @@ class WorkerConfig(Config):
manhole = config.get("worker_manhole")
if manhole:
self.worker_listeners.append(
- {
- "port": manhole,
- "bind_addresses": ["127.0.0.1"],
- "type": "manhole",
- "tls": False,
- }
+ ListenerConfig(
+ port=manhole, bind_addresses=["127.0.0.1"], type="manhole",
+ )
)
- if self.worker_listeners:
- for listener in self.worker_listeners:
- bind_address = listener.pop("bind_address", None)
- bind_addresses = listener.setdefault("bind_addresses", [])
+ # Whether to send federation traffic out in this process. This only
+ # applies to some federation traffic, and so shouldn't be used to
+ # "disable" federation
+ self.send_federation = config.get("send_federation", True)
- if bind_address:
- bind_addresses.append(bind_address)
- elif not bind_addresses:
- bind_addresses.append("")
+ federation_sender_instances = config.get("federation_sender_instances") or []
+ self.federation_shard_config = ShardedWorkerHandlingConfig(
+ federation_sender_instances
+ )
# A map from instance name to host/port of their HTTP replication endpoint.
instance_map = config.get("instance_map") or {}
@@ -103,16 +105,52 @@ class WorkerConfig(Config):
writers = config.get("stream_writers") or {}
self.writers = WriterLocations(**writers)
- # Check that the configured writer for events also appears in
+ # Check that the configured writer for events and typing also appears in
# `instance_map`.
- if (
- self.writers.events != "master"
- and self.writers.events not in self.instance_map
- ):
- raise ConfigError(
- "Instance %r is configured to write events but does not appear in `instance_map` config."
- % (self.writers.events,)
- )
+ for stream in ("events", "typing"):
+ instance = getattr(self.writers, stream)
+ if instance != "master" and instance not in self.instance_map:
+ raise ConfigError(
+ "Instance %r is configured to write %s but does not appear in `instance_map` config."
+ % (instance, stream)
+ )
+
+ def generate_config_section(self, config_dir_path, server_name, **kwargs):
+ return """\
+ ## Workers ##
+
+ # Disables sending of outbound federation transactions on the main process.
+ # Uncomment if using a federation sender worker.
+ #
+ #send_federation: false
+
+ # It is possible to run multiple federation sender workers, in which case the
+ # work is balanced across them.
+ #
+ # This configuration must be shared between all federation sender workers, and if
+ # changed all federation sender workers must be stopped at the same time and then
+ # started, to ensure that all instances are running with the same config (otherwise
+ # events may be dropped).
+ #
+ #federation_sender_instances:
+ # - federation_sender1
+
+ # When using workers this should be a map from `worker_name` to the
+ # HTTP replication listener of the worker, if configured.
+ #
+ #instance_map:
+ # worker1:
+ # host: localhost
+ # port: 8034
+
+ # Experimental: When using workers you can define which workers should
+ # handle event persistence and typing notifications. Any worker
+ # specified here must also be in the `instance_map`.
+ #
+ #stream_writers:
+ # events: worker1
+ # typing: worker1
+ """
def read_arguments(self, args):
# We support a bunch of command line arguments that override options in
|