From b25e387c0d0eef77e0ba20cb9f12b67272664bae Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 8 Apr 2019 15:47:39 +0100 Subject: add context to phonehome stats (#5020) add context to phonehome stats --- synapse/config/server.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'synapse/config/server.py') diff --git a/synapse/config/server.py b/synapse/config/server.py index 08e4e45482..c5e5679d52 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -37,6 +37,7 @@ class ServerConfig(Config): def read_config(self, config): self.server_name = config["server_name"] + self.server_context = config.get("server_context", None) try: parse_and_validate_server_name(self.server_name) @@ -484,6 +485,9 @@ class ServerConfig(Config): #mau_limit_reserved_threepids: # - medium: 'email' # address: 'reserved_user@example.com' + + # Used by phonehome stats to group together related servers. + #server_context: context """ % locals() def read_arguments(self, args): -- cgit 1.5.1 From 6824ddd93df1cfc347e4c8f423d54fab5bb732fb Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 25 Apr 2019 06:22:49 -0700 Subject: Config option for verifying federation certificates (MSC 1711) (#4967) --- changelog.d/4967.feature | 1 + docs/MSC1711_certificates_FAQ.md | 1 - docs/sample_config.yaml | 34 ++++++++ synapse/config/server.py | 6 +- synapse/config/tls.py | 95 ++++++++++++++++++++-- synapse/crypto/context_factory.py | 33 ++++++-- synapse/http/federation/matrix_federation_agent.py | 2 +- .../federation/test_matrix_federation_agent.py | 3 +- 8 files changed, 158 insertions(+), 17 deletions(-) create mode 100644 changelog.d/4967.feature (limited to 'synapse/config/server.py') diff --git a/changelog.d/4967.feature b/changelog.d/4967.feature new file mode 100644 index 0000000000..7f9f81f849 --- /dev/null +++ b/changelog.d/4967.feature @@ -0,0 +1 @@ +Implementation of [MSC1711](https://github.com/matrix-org/matrix-doc/pull/1711) including config options for requiring valid TLS certificates for federation traffic, the ability to disable TLS validation for specific domains, and the ability to specify your own list of CA certificates. diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md index 8eb22656db..ebfb20f5c8 100644 --- a/docs/MSC1711_certificates_FAQ.md +++ b/docs/MSC1711_certificates_FAQ.md @@ -177,7 +177,6 @@ You can do this with a `.well-known` file as follows: on `customer.example.net:8000` it correctly handles HTTP requests with Host header set to `customer.example.net:8000`. - ## FAQ ### Synapse 0.99.0 has just been released, what do I need to do right now? diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index ab02e8f20e..a7f6bf31ac 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -260,6 +260,40 @@ listeners: # #tls_private_key_path: "CONFDIR/SERVERNAME.tls.key" +# Whether to verify TLS certificates when sending federation traffic. +# +# This currently defaults to `false`, however this will change in +# Synapse 1.0 when valid federation certificates will be required. +# +#federation_verify_certificates: true + +# Skip federation certificate verification on the following whitelist +# of domains. +# +# This setting should only be used in very specific cases, such as +# federation over Tor hidden services and similar. For private networks +# of homeservers, you likely want to use a private CA instead. +# +# Only effective if federation_verify_certicates is `true`. 
+# +#federation_certificate_verification_whitelist: +# - lon.example.com +# - *.domain.com +# - *.onion + +# List of custom certificate authorities for federation traffic. +# +# This setting should only normally be used within a private network of +# homeservers. +# +# Note that this list will replace those that are provided by your +# operating environment. Certificates must be in PEM format. +# +#federation_custom_ca_list: +# - myCA1.pem +# - myCA2.pem +# - myCA3.pem + # ACME support: This will configure Synapse to request a valid TLS certificate # for your configured `server_name` via Let's Encrypt. # diff --git a/synapse/config/server.py b/synapse/config/server.py index c5e5679d52..cdf1e4d286 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -114,11 +114,13 @@ class ServerConfig(Config): # FIXME: federation_domain_whitelist needs sytests self.federation_domain_whitelist = None federation_domain_whitelist = config.get( - "federation_domain_whitelist", None + "federation_domain_whitelist", None, ) - # turn the whitelist into a hash for speed of lookup + if federation_domain_whitelist is not None: + # turn the whitelist into a hash for speed of lookup self.federation_domain_whitelist = {} + for domain in federation_domain_whitelist: self.federation_domain_whitelist[domain] = True diff --git a/synapse/config/tls.py b/synapse/config/tls.py index f0014902da..72dd5926f9 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -24,8 +24,10 @@ import six from unpaddedbase64 import encode_base64 from OpenSSL import crypto +from twisted.internet._sslverify import Certificate, trustRootFromCertificates from synapse.config._base import Config, ConfigError +from synapse.util import glob_to_regex logger = logging.getLogger(__name__) @@ -70,6 +72,53 @@ class TlsConfig(Config): self.tls_fingerprints = list(self._original_tls_fingerprints) + # Whether to verify certificates on outbound federation traffic + self.federation_verify_certificates = config.get( + "federation_verify_certificates", False, + ) + + # Whitelist of domains to not verify certificates for + fed_whitelist_entries = config.get( + "federation_certificate_verification_whitelist", [], + ) + + # Support globs (*) in whitelist values + self.federation_certificate_verification_whitelist = [] + for entry in fed_whitelist_entries: + # Convert globs to regex + entry_regex = glob_to_regex(entry) + self.federation_certificate_verification_whitelist.append(entry_regex) + + # List of custom certificate authorities for federation traffic validation + custom_ca_list = config.get( + "federation_custom_ca_list", None, + ) + + # Read in and parse custom CA certificates + self.federation_ca_trust_root = None + if custom_ca_list is not None: + if len(custom_ca_list) == 0: + # A trustroot cannot be generated without any CA certificates. + # Raise an error if this option has been specified without any + # corresponding certificates. + raise ConfigError("federation_custom_ca_list specified without " + "any certificate files") + + certs = [] + for ca_file in custom_ca_list: + logger.debug("Reading custom CA certificate file: %s", ca_file) + content = self.read_file(ca_file) + + # Parse the CA certificates + try: + cert_base = Certificate.loadPEM(content) + certs.append(cert_base) + except Exception as e: + raise ConfigError("Error parsing custom CA certificate file %s: %s" + % (ca_file, e)) + + self.federation_ca_trust_root = trustRootFromCertificates(certs) + # This config option applies to non-federation HTTP clients # (e.g. 
for talking to recaptcha, identity servers, and such) # It should never be used in production, and is intended for @@ -99,15 +148,15 @@ class TlsConfig(Config): try: with open(self.tls_certificate_file, 'rb') as f: cert_pem = f.read() - except Exception: - logger.exception("Failed to read existing certificate off disk!") - raise + except Exception as e: + raise ConfigError("Failed to read existing certificate file %s: %s" + % (self.tls_certificate_file, e)) try: tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem) - except Exception: - logger.exception("Failed to parse existing certificate off disk!") - raise + except Exception as e: + raise ConfigError("Failed to parse existing certificate file %s: %s" + % (self.tls_certificate_file, e)) if not allow_self_signed: if tls_certificate.get_subject() == tls_certificate.get_issuer(): @@ -192,6 +241,40 @@ class TlsConfig(Config): # #tls_private_key_path: "%(tls_private_key_path)s" + # Whether to verify TLS certificates when sending federation traffic. + # + # This currently defaults to `false`, however this will change in + # Synapse 1.0 when valid federation certificates will be required. + # + #federation_verify_certificates: true + + # Skip federation certificate verification on the following whitelist + # of domains. + # + # This setting should only be used in very specific cases, such as + # federation over Tor hidden services and similar. For private networks + # of homeservers, you likely want to use a private CA instead. + # + # Only effective if federation_verify_certicates is `true`. + # + #federation_certificate_verification_whitelist: + # - lon.example.com + # - *.domain.com + # - *.onion + + # List of custom certificate authorities for federation traffic. + # + # This setting should only normally be used within a private network of + # homeservers. + # + # Note that this list will replace those that are provided by your + # operating environment. Certificates must be in PEM format. + # + #federation_custom_ca_list: + # - myCA1.pem + # - myCA2.pem + # - myCA3.pem + # ACME support: This will configure Synapse to request a valid TLS certificate # for your configured `server_name` via Let's Encrypt. # diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 49cbc7098f..59ea087e66 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -18,10 +18,10 @@ import logging from zope.interface import implementer from OpenSSL import SSL, crypto -from twisted.internet._sslverify import _defaultCurveName +from twisted.internet._sslverify import ClientTLSOptions, _defaultCurveName from twisted.internet.abstract import isIPAddress, isIPv6Address from twisted.internet.interfaces import IOpenSSLClientConnectionCreator -from twisted.internet.ssl import CertificateOptions, ContextFactory +from twisted.internet.ssl import CertificateOptions, ContextFactory, platformTrust from twisted.python.failure import Failure logger = logging.getLogger(__name__) @@ -90,7 +90,7 @@ def _tolerateErrors(wrapped): @implementer(IOpenSSLClientConnectionCreator) -class ClientTLSOptions(object): +class ClientTLSOptionsNoVerify(object): """ Client creator for TLS without certificate identity verification. 
This is a copy of twisted.internet._sslverify.ClientTLSOptions with the identity @@ -127,9 +127,30 @@ class ClientTLSOptionsFactory(object): to remote servers for federation.""" def __init__(self, config): - # We don't use config options yet - self._options = CertificateOptions(verify=False) + self._config = config + self._options_noverify = CertificateOptions() + + # Check if we're using a custom list of a CA certificates + trust_root = config.federation_ca_trust_root + if trust_root is None: + # Use CA root certs provided by OpenSSL + trust_root = platformTrust() + + self._options_verify = CertificateOptions(trustRoot=trust_root) def get_options(self, host): # Use _makeContext so that we get a fresh OpenSSL CTX each time. - return ClientTLSOptions(host, self._options._makeContext()) + + # Check if certificate verification has been enabled + should_verify = self._config.federation_verify_certificates + + # Check if we've disabled certificate verification for this host + if should_verify: + for regex in self._config.federation_certificate_verification_whitelist: + if regex.match(host): + should_verify = False + break + + if should_verify: + return ClientTLSOptions(host, self._options_verify._makeContext()) + return ClientTLSOptionsNoVerify(host, self._options_noverify._makeContext()) diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 1334c630cc..b4cbe97b41 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -149,7 +149,7 @@ class MatrixFederationAgent(object): tls_options = None else: tls_options = self._tls_client_options_factory.get_options( - res.tls_server_name.decode("ascii") + res.tls_server_name.decode("ascii"), ) # make sure that the Host header is set correctly diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index dcf184d3cf..e9eb662c4c 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -39,6 +39,7 @@ from synapse.util.logcontext import LoggingContext from tests.http import ServerTLSContext from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.unittest import TestCase +from tests.utils import default_config logger = logging.getLogger(__name__) @@ -53,7 +54,7 @@ class MatrixFederationAgentTests(TestCase): self.agent = MatrixFederationAgent( reactor=self.reactor, - tls_client_options_factory=ClientTLSOptionsFactory(None), + tls_client_options_factory=ClientTLSOptionsFactory(default_config("test")), _well_known_tls_policy=TrustingTLSPolicyForHTTPS(), _srv_resolver=self.mock_resolver, _well_known_cache=self.well_known_cache, -- cgit 1.5.1 From 8e9ca8353730949290460f9183c5ee48941d1f75 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 May 2019 15:18:58 +0100 Subject: Move admin API to a new prefix --- docs/admin_api/account_validity.rst | 2 +- docs/admin_api/delete_group.md | 2 +- docs/admin_api/media_admin_api.md | 2 +- docs/admin_api/purge_history_api.rst | 4 +- docs/admin_api/purge_remote_media.rst | 2 +- docs/admin_api/register_api.rst | 4 +- docs/admin_api/user_admin_api.rst | 6 +-- docs/admin_api/version_api.rst | 2 +- synapse/app/homeserver.py | 2 + synapse/config/server.py | 4 +- synapse/rest/__init__.py | 12 +++++- synapse/rest/client/v1/admin.py | 70 +++++++++++++++++++++++++---------- 12 files changed, 76 insertions(+), 36 deletions(-) (limited 
to 'synapse/config/server.py') diff --git a/docs/admin_api/account_validity.rst b/docs/admin_api/account_validity.rst index c26353752f..7559de4c57 100644 --- a/docs/admin_api/account_validity.rst +++ b/docs/admin_api/account_validity.rst @@ -13,7 +13,7 @@ This API extends the validity of an account by as much time as configured in the The API is:: - POST /_matrix/client/unstable/admin/account_validity/validity + POST /_synapse/admin/v1/account_validity/validity with the following body: diff --git a/docs/admin_api/delete_group.md b/docs/admin_api/delete_group.md index d703d108b0..1710488ea8 100644 --- a/docs/admin_api/delete_group.md +++ b/docs/admin_api/delete_group.md @@ -8,7 +8,7 @@ being deleted. The API is: ``` -POST /_matrix/client/r0/admin/delete_group/ +POST /_synapse/admin/v1/delete_group/ ``` including an `access_token` of a server admin. diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md index abdbc1ea86..5e9f8e5d84 100644 --- a/docs/admin_api/media_admin_api.md +++ b/docs/admin_api/media_admin_api.md @@ -4,7 +4,7 @@ This API gets a list of known media in a room. The API is: ``` -GET /_matrix/client/r0/admin/room//media +GET /_synapse/admin/v1/room//media ``` including an `access_token` of a server admin. diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.rst index a5c3dc8149..f7be226fd9 100644 --- a/docs/admin_api/purge_history_api.rst +++ b/docs/admin_api/purge_history_api.rst @@ -10,7 +10,7 @@ paginate further back in the room from the point being purged from. The API is: -``POST /_matrix/client/r0/admin/purge_history/[/]`` +``POST /_synapse/admin/v1/purge_history/[/]`` including an ``access_token`` of a server admin. @@ -49,7 +49,7 @@ Purge status query It is possible to poll for updates on recent purges with a second API; -``GET /_matrix/client/r0/admin/purge_history_status/`` +``GET /_synapse/admin/v1/purge_history_status/`` (again, with a suitable ``access_token``). This API returns a JSON body like the following: diff --git a/docs/admin_api/purge_remote_media.rst b/docs/admin_api/purge_remote_media.rst index 5deb02a3df..dacd5bc8fb 100644 --- a/docs/admin_api/purge_remote_media.rst +++ b/docs/admin_api/purge_remote_media.rst @@ -6,7 +6,7 @@ media. The API is:: - POST /_matrix/client/r0/admin/purge_media_cache?before_ts=&access_token= + POST /_synapse/admin/v1/purge_media_cache?before_ts=&access_token= {} diff --git a/docs/admin_api/register_api.rst b/docs/admin_api/register_api.rst index 084e74ebf5..3a63109aa0 100644 --- a/docs/admin_api/register_api.rst +++ b/docs/admin_api/register_api.rst @@ -12,7 +12,7 @@ is not enabled. To fetch the nonce, you need to request one from the API:: - > GET /_matrix/client/r0/admin/register + > GET /_synapse/admin/v1/register < {"nonce": "thisisanonce"} @@ -22,7 +22,7 @@ body containing the nonce, username, password, whether they are an admin As an example:: - > POST /_matrix/client/r0/admin/register + > POST /_synapse/admin/v1/register > { "nonce": "thisisanonce", "username": "pepper_roni", diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index d17121a188..8aca4f158d 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -5,7 +5,7 @@ This API returns information about a specific user account. The api is:: - GET /_matrix/client/r0/admin/whois/ + GET /_synapse/admin/v1/whois/ including an ``access_token`` of a server admin. @@ -50,7 +50,7 @@ references to it). 
The api is:: - POST /_matrix/client/r0/admin/deactivate/ + POST /_synapse/admin/v1/deactivate/ with a body of: @@ -73,7 +73,7 @@ Changes the password of another user. The api is:: - POST /_matrix/client/r0/admin/reset_password/ + POST /_synapse/admin/v1/reset_password/ with a body of: diff --git a/docs/admin_api/version_api.rst b/docs/admin_api/version_api.rst index 30a91b5f43..6d66543b96 100644 --- a/docs/admin_api/version_api.rst +++ b/docs/admin_api/version_api.rst @@ -8,7 +8,7 @@ contains Synapse version information). The api is:: - GET /_matrix/client/r0/admin/server_version + GET /_synapse/admin/v1/server_version including an ``access_token`` of a server admin. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 79be977ea6..3d7db61d14 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -62,6 +62,7 @@ from synapse.python_dependencies import check_requirements from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.rest import ClientRestResource +from synapse.rest.client.v1.admin import AdminRestResource from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.rest.well_known import WellKnownResource @@ -180,6 +181,7 @@ class SynapseHomeServer(HomeServer): "/_matrix/client/v2_alpha": client_resource, "/_matrix/client/versions": client_resource, "/.well-known/matrix/client": WellKnownResource(self), + "/_synapse/admin": AdminRestResource(self), }) if self.get_config().saml2_enabled: diff --git a/synapse/config/server.py b/synapse/config/server.py index cdf1e4d286..cc232422b9 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -388,8 +388,8 @@ class ServerConfig(Config): # # Valid resource names are: # - # client: the client-server API (/_matrix/client). Also implies 'media' and - # 'static'. + # client: the client-server API (/_matrix/client), and the synapse admin + # API (/_synapse/admin). Also implies 'media' and 'static'. # # consent: user consent forms (/_matrix/consent). See # docs/consent_tracking.md. diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index a66885d349..6bc50f78e1 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -58,8 +58,14 @@ from synapse.rest.client.v2_alpha import ( class ClientRestResource(JsonResource): - """A resource for version 1 of the matrix client API.""" + """Matrix Client API REST resource. 
+ This gets mounted at various points under /_matrix/client, including: + * /_matrix/client/r0 + * /_matrix/client/api/v1 + * /_matrix/client/unstable + * etc + """ def __init__(self, hs): JsonResource.__init__(self, hs, canonical_json=False) self.register_servlets(self, hs) @@ -82,7 +88,6 @@ class ClientRestResource(JsonResource): presence.register_servlets(hs, client_resource) directory.register_servlets(hs, client_resource) voip.register_servlets(hs, client_resource) - admin.register_servlets(hs, client_resource) pusher.register_servlets(hs, client_resource) push_rule.register_servlets(hs, client_resource) logout.register_servlets(hs, client_resource) @@ -111,3 +116,6 @@ class ClientRestResource(JsonResource): room_upgrade_rest_servlet.register_servlets(hs, client_resource) capabilities.register_servlets(hs, client_resource) account_validity.register_servlets(hs, client_resource) + + # moving to /_synapse/admin + admin.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index 8efa457d15..d4f83e4ae8 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -18,6 +18,7 @@ import hashlib import hmac import logging import platform +import re from six import text_type from six.moves import http_client @@ -27,6 +28,7 @@ from twisted.internet import defer import synapse from synapse.api.constants import Membership, UserTypes from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError +from synapse.http.server import JsonResource from synapse.http.servlet import ( RestServlet, assert_params_in_dict, @@ -37,13 +39,33 @@ from synapse.http.servlet import ( from synapse.types import UserID, create_requester from synapse.util.versionstring import get_version_string -from .base import client_path_patterns - logger = logging.getLogger(__name__) +def historical_admin_path_patterns(path_regex): + """Returns the list of patterns for an admin endpoint, including historical ones + + This is a backwards-compatibility hack. Previously, the Admin API was exposed at + various paths under /_matrix/client. This function returns a list of patterns + matching those paths (as well as the new one), so that existing scripts which rely + on the endpoints being available there are not broken. + + Note that this should only be used for existing endpoints: new ones should just + register for the /_synapse/admin path. + """ + return list( + re.compile(prefix + path_regex) + for prefix in ( + "^/_synapse/admin/v1", + "^/_matrix/client/api/v1/admin", + "^/_matrix/client/unstable/admin", + "^/_matrix/client/r0/admin" + ) + ) + + class UsersRestServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/users/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/users/(?P[^/]*)") def __init__(self, hs): self.hs = hs @@ -72,7 +94,7 @@ class UsersRestServlet(RestServlet): class VersionServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/server_version") + PATTERNS = historical_admin_path_patterns("/server_version") def __init__(self, hs): self.auth = hs.get_auth() @@ -100,7 +122,7 @@ class UserRegisterServlet(RestServlet): nonces (dict[str, int]): The nonces that we will accept. A dict of nonce to the time it was generated, in int seconds. 
""" - PATTERNS = client_path_patterns("/admin/register") + PATTERNS = historical_admin_path_patterns("/register") NONCE_TIMEOUT = 60 def __init__(self, hs): @@ -231,7 +253,7 @@ class UserRegisterServlet(RestServlet): class WhoisRestServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/whois/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/whois/(?P[^/]*)") def __init__(self, hs): self.hs = hs @@ -257,7 +279,7 @@ class WhoisRestServlet(RestServlet): class PurgeMediaCacheRestServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/purge_media_cache") + PATTERNS = historical_admin_path_patterns("/purge_media_cache") def __init__(self, hs): self.media_repository = hs.get_media_repository() @@ -280,8 +302,8 @@ class PurgeMediaCacheRestServlet(RestServlet): class PurgeHistoryRestServlet(RestServlet): - PATTERNS = client_path_patterns( - "/admin/purge_history/(?P[^/]*)(/(?P[^/]+))?" + PATTERNS = historical_admin_path_patterns( + "/purge_history/(?P[^/]*)(/(?P[^/]+))?" ) def __init__(self, hs): @@ -377,8 +399,8 @@ class PurgeHistoryRestServlet(RestServlet): class PurgeHistoryStatusRestServlet(RestServlet): - PATTERNS = client_path_patterns( - "/admin/purge_history_status/(?P[^/]+)" + PATTERNS = historical_admin_path_patterns( + "/purge_history_status/(?P[^/]+)" ) def __init__(self, hs): @@ -406,7 +428,7 @@ class PurgeHistoryStatusRestServlet(RestServlet): class DeactivateAccountRestServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/deactivate/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/deactivate/(?P[^/]*)") def __init__(self, hs): self._deactivate_account_handler = hs.get_deactivate_account_handler() @@ -449,7 +471,7 @@ class ShutdownRoomRestServlet(RestServlet): to a new room created by `new_room_user_id` and kicked users will be auto joined to the new room. """ - PATTERNS = client_path_patterns("/admin/shutdown_room/(?P[^/]+)") + PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P[^/]+)") DEFAULT_MESSAGE = ( "Sharing illegal content on this server is not permitted and rooms in" @@ -574,7 +596,7 @@ class QuarantineMediaInRoom(RestServlet): """Quarantines all media in a room so that no one can download it via this server. """ - PATTERNS = client_path_patterns("/admin/quarantine_media/(?P[^/]+)") + PATTERNS = historical_admin_path_patterns("/quarantine_media/(?P[^/]+)") def __init__(self, hs): self.store = hs.get_datastore() @@ -597,7 +619,7 @@ class QuarantineMediaInRoom(RestServlet): class ListMediaInRoom(RestServlet): """Lists all of the media in a given room. """ - PATTERNS = client_path_patterns("/admin/room/(?P[^/]+)/media") + PATTERNS = historical_admin_path_patterns("/room/(?P[^/]+)/media") def __init__(self, hs): self.store = hs.get_datastore() @@ -627,7 +649,7 @@ class ResetPasswordRestServlet(RestServlet): Returns: 200 OK with empty object if success otherwise an error. """ - PATTERNS = client_path_patterns("/admin/reset_password/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/reset_password/(?P[^/]*)") def __init__(self, hs): self.store = hs.get_datastore() @@ -666,7 +688,7 @@ class GetUsersPaginatedRestServlet(RestServlet): Returns: 200 OK with json object {list[dict[str, Any]], count} or empty object. 
""" - PATTERNS = client_path_patterns("/admin/users_paginate/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/users_paginate/(?P[^/]*)") def __init__(self, hs): self.store = hs.get_datastore() @@ -749,7 +771,7 @@ class SearchUsersRestServlet(RestServlet): Returns: 200 OK with json object {list[dict[str, Any]], count} or empty object. """ - PATTERNS = client_path_patterns("/admin/search_users/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/search_users/(?P[^/]*)") def __init__(self, hs): self.store = hs.get_datastore() @@ -789,7 +811,7 @@ class SearchUsersRestServlet(RestServlet): class DeleteGroupAdminRestServlet(RestServlet): """Allows deleting of local groups """ - PATTERNS = client_path_patterns("/admin/delete_group/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/delete_group/(?P[^/]*)") def __init__(self, hs): self.group_server = hs.get_groups_server_handler() @@ -812,7 +834,7 @@ class DeleteGroupAdminRestServlet(RestServlet): class AccountValidityRenewServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/account_validity/validity$") + PATTERNS = historical_admin_path_patterns("/account_validity/validity$") def __init__(self, hs): """ @@ -847,6 +869,14 @@ class AccountValidityRenewServlet(RestServlet): defer.returnValue((200, res)) +class AdminRestResource(JsonResource): + """The REST resource which gets mounted at /_synapse/admin""" + + def __init__(self, hs): + JsonResource.__init__(self, hs, canonical_json=False) + register_servlets(hs, self) + + def register_servlets(hs, http_server): WhoisRestServlet(hs).register(http_server) PurgeMediaCacheRestServlet(hs).register(http_server) -- cgit 1.5.1 From 84196cb2310ca55100f32cfecbc62810085418e5 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 2 May 2019 09:21:29 +0100 Subject: Add some limitations to alias creation --- changelog.d/5124.bugfix | 1 + docs/sample_config.yaml | 5 +++++ synapse/config/server.py | 11 +++++++++++ synapse/handlers/directory.py | 22 +++++++++++++++++++++- synapse/handlers/room.py | 3 ++- 5 files changed, 40 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5124.bugfix (limited to 'synapse/config/server.py') diff --git a/changelog.d/5124.bugfix b/changelog.d/5124.bugfix new file mode 100644 index 0000000000..46df1e9fd5 --- /dev/null +++ b/changelog.d/5124.bugfix @@ -0,0 +1 @@ +Add some missing limitations to room alias creation. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index a7f6bf31ac..ec18516b5c 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -239,6 +239,11 @@ listeners: # Used by phonehome stats to group together related servers. #server_context: context +# Whether to require a user to be in the room to add an alias to it. +# Defaults to 'true'. +# +#require_membership_for_aliases: false + ## TLS ## diff --git a/synapse/config/server.py b/synapse/config/server.py index cdf1e4d286..b1c9d25c71 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -134,6 +134,12 @@ class ServerConfig(Config): # sending out any replication updates. self.replication_torture_level = config.get("replication_torture_level") + # Whether to require a user to be in the room to add an alias to it. + # Defaults to True. 
+ self.require_membership_for_aliases = config.get( + "require_membership_for_aliases", True, + ) + self.listeners = [] for listener in config.get("listeners", []): if not isinstance(listener.get("port", None), int): @@ -490,6 +496,11 @@ class ServerConfig(Config): # Used by phonehome stats to group together related servers. #server_context: context + + # Whether to require a user to be in the room to add an alias to it. + # Defaults to 'true'. + # + #require_membership_for_aliases: false """ % locals() def read_arguments(self, args): diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 27bd06df5d..50c587aa61 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -36,6 +36,7 @@ logger = logging.getLogger(__name__) class DirectoryHandler(BaseHandler): + MAX_ALIAS_LENGTH = 255 def __init__(self, hs): super(DirectoryHandler, self).__init__(hs) @@ -43,8 +44,10 @@ class DirectoryHandler(BaseHandler): self.state = hs.get_state_handler() self.appservice_handler = hs.get_application_service_handler() self.event_creation_handler = hs.get_event_creation_handler() + self.store = hs.get_datastore() self.config = hs.config self.enable_room_list_search = hs.config.enable_room_list_search + self.require_membership = hs.config.require_membership_for_aliases self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -83,7 +86,7 @@ class DirectoryHandler(BaseHandler): @defer.inlineCallbacks def create_association(self, requester, room_alias, room_id, servers=None, - send_event=True): + send_event=True, check_membership=True): """Attempt to create a new alias Args: @@ -93,6 +96,8 @@ class DirectoryHandler(BaseHandler): servers (list[str]|None): List of servers that others servers should try and join via send_event (bool): Whether to send an updated m.room.aliases event + check_membership (bool): Whether to check if the user is in the room + before the alias can be set (if the server's config requires it). 
Returns: Deferred @@ -100,6 +105,13 @@ class DirectoryHandler(BaseHandler): user_id = requester.user.to_string() + if len(room_alias.to_string()) > self.MAX_ALIAS_LENGTH: + raise SynapseError( + 400, + "Can't create aliases longer than %s characters" % self.MAX_ALIAS_LENGTH, + Codes.INVALID_PARAM, + ) + service = requester.app_service if service: if not service.is_interested_in_alias(room_alias.to_string()): @@ -108,6 +120,14 @@ class DirectoryHandler(BaseHandler): " this kind of alias.", errcode=Codes.EXCLUSIVE ) else: + if self.require_membership and check_membership: + rooms_for_user = yield self.store.get_rooms_for_user(user_id) + if room_id not in rooms_for_user: + raise AuthError( + 403, + "You must be in the room to create an alias for it", + ) + if not self.spam_checker.user_may_create_room_alias(user_id, room_alias): raise AuthError( 403, "This user is not permitted to create this alias", diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 17628e2684..e37ae96899 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -402,7 +402,7 @@ class RoomCreationHandler(BaseHandler): yield directory_handler.create_association( requester, RoomAlias.from_string(alias), new_room_id, servers=(self.hs.hostname, ), - send_event=False, + send_event=False, check_membership=False, ) logger.info("Moved alias %s to new room", alias) except SynapseError as e: @@ -538,6 +538,7 @@ class RoomCreationHandler(BaseHandler): room_alias=room_alias, servers=[self.hs.hostname], send_event=False, + check_membership=False, ) preset_config = config.get( -- cgit 1.5.1 From c0e0740bef0db661abce352afaf6c958e276f11d Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 8 May 2019 18:26:56 +0100 Subject: add options to require an access_token to GET /profile and /publicRooms on CS API (#5083) This commit adds two config options: * `restrict_public_rooms_to_local_users` Requires auth to fetch the public rooms directory through the CS API and disables fetching it through the federation API. * `require_auth_for_profile_requests` When set to `true`, requires that requests to `/profile` over the CS API are authenticated, and only returns the user's profile if the requester shares a room with the profile's owner, as per MSC1301. MSC1301 also specifies a behaviour for federation (only returning the profile if the server asking for it shares a room with the profile's owner), but that's currently really non-trivial to do in a not too expensive way. Next step is writing down a MSC that allows a HS to specify which user sent the profile query. In this implementation, Synapse won't send a profile query over federation if it doesn't believe it already shares a room with the profile's owner, though. Groups have been intentionally omitted from this commit. 
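As an illustrative aside (not part of the commit): with both options enabled, a homeserver.yaml would contain something like the sketch below. Both settings default to `false`, so existing deployments keep the old behaviour unless they opt in; the option names and semantics are taken from the commit message above and the diff that follows.

```yaml
# Hypothetical homeserver.yaml excerpt enabling the restrictions added here.

# Clients must authenticate to read other users' profiles via the CS API,
# and only receive a profile if they share a room with its owner.
require_auth_for_profile_requests: true

# /publicRooms requires authentication on the CS API and is no longer
# served to other homeservers over federation.
restrict_public_rooms_to_local_users: true
```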
--- changelog.d/5083.feature | 1 + docs/sample_config.yaml | 14 ++++++ synapse/config/server.py | 27 ++++++++++ synapse/federation/transport/server.py | 10 ++++ synapse/handlers/profile.py | 43 ++++++++++++++++ synapse/rest/client/v1/profile.py | 40 ++++++++++----- synapse/rest/client/v1/room.py | 6 +++ tests/rest/client/v1/test_profile.py | 92 +++++++++++++++++++++++++++++++++- tests/rest/client/v1/test_rooms.py | 32 ++++++++++++ 9 files changed, 252 insertions(+), 13 deletions(-) create mode 100644 changelog.d/5083.feature (limited to 'synapse/config/server.py') diff --git a/changelog.d/5083.feature b/changelog.d/5083.feature new file mode 100644 index 0000000000..55d114b3fe --- /dev/null +++ b/changelog.d/5083.feature @@ -0,0 +1 @@ +Add an configuration option to require authentication on /publicRooms and /profile endpoints. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index b6b9da6e41..bdfc34c6bd 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -69,6 +69,20 @@ pid_file: DATADIR/homeserver.pid # #use_presence: false +# Whether to require authentication to retrieve profile data (avatars, +# display names) of other users through the client API. Defaults to +# 'false'. Note that profile data is also available via the federation +# API, so this setting is of limited value if federation is enabled on +# the server. +# +#require_auth_for_profile_requests: true + +# If set to 'true', requires authentication to access the server's +# public rooms directory through the client API, and forbids any other +# homeserver to fetch it via federation. Defaults to 'false'. +# +#restrict_public_rooms_to_local_users: true + # The GC threshold parameters to pass to `gc.set_threshold`, if defined # #gc_thresholds: [700, 10, 10] diff --git a/synapse/config/server.py b/synapse/config/server.py index 147a976485..8dce75c56a 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -72,6 +72,19 @@ class ServerConfig(Config): # master, potentially causing inconsistency. self.enable_media_repo = config.get("enable_media_repo", True) + # Whether to require authentication to retrieve profile data (avatars, + # display names) of other users through the client API. + self.require_auth_for_profile_requests = config.get( + "require_auth_for_profile_requests", False, + ) + + # If set to 'True', requires authentication to access the server's + # public rooms directory through the client API, and forbids any other + # homeserver to fetch it via federation. + self.restrict_public_rooms_to_local_users = config.get( + "restrict_public_rooms_to_local_users", False, + ) + # whether to enable search. If disabled, new entries will not be inserted # into the search tables and they will not be indexed. Users will receive # errors when attempting to search for messages. @@ -327,6 +340,20 @@ class ServerConfig(Config): # #use_presence: false + # Whether to require authentication to retrieve profile data (avatars, + # display names) of other users through the client API. Defaults to + # 'false'. Note that profile data is also available via the federation + # API, so this setting is of limited value if federation is enabled on + # the server. + # + #require_auth_for_profile_requests: true + + # If set to 'true', requires authentication to access the server's + # public rooms directory through the client API, and forbids any other + # homeserver to fetch it via federation. Defaults to 'false'. 
+ # + #restrict_public_rooms_to_local_users: true + # The GC threshold parameters to pass to `gc.set_threshold`, if defined # #gc_thresholds: [700, 10, 10] diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 452599e1a1..9030eb18c5 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -716,8 +716,17 @@ class PublicRoomList(BaseFederationServlet): PATH = "/publicRooms" + def __init__(self, handler, authenticator, ratelimiter, server_name, deny_access): + super(PublicRoomList, self).__init__( + handler, authenticator, ratelimiter, server_name, + ) + self.deny_access = deny_access + @defer.inlineCallbacks def on_GET(self, origin, content, query): + if self.deny_access: + raise FederationDeniedError(origin) + limit = parse_integer_from_args(query, "limit", 0) since_token = parse_string_from_args(query, "since", None) include_all_networks = parse_boolean_from_args( @@ -1417,6 +1426,7 @@ def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=N authenticator=authenticator, ratelimiter=ratelimiter, server_name=hs.hostname, + deny_access=hs.config.restrict_public_rooms_to_local_users, ).register(resource) if "group_server" in servlet_groups: diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index a65c98ff5c..91fc718ff8 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -53,6 +53,7 @@ class BaseProfileHandler(BaseHandler): @defer.inlineCallbacks def get_profile(self, user_id): target_user = UserID.from_string(user_id) + if self.hs.is_mine(target_user): try: displayname = yield self.store.get_profile_displayname( @@ -283,6 +284,48 @@ class BaseProfileHandler(BaseHandler): room_id, str(e) ) + @defer.inlineCallbacks + def check_profile_query_allowed(self, target_user, requester=None): + """Checks whether a profile query is allowed. If the + 'require_auth_for_profile_requests' config flag is set to True and a + 'requester' is provided, the query is only allowed if the two users + share a room. + + Args: + target_user (UserID): The owner of the queried profile. + requester (None|UserID): The user querying for the profile. + + Raises: + SynapseError(403): The two users share no room, or ne user couldn't + be found to be in any room the server is in, and therefore the query + is denied. + """ + # Implementation of MSC1301: don't allow looking up profiles if the + # requester isn't in the same room as the target. We expect requester to + # be None when this function is called outside of a profile query, e.g. + # when building a membership event. In this case, we must allow the + # lookup. + if not self.hs.config.require_auth_for_profile_requests or not requester: + return + + try: + requester_rooms = yield self.store.get_rooms_for_user( + requester.to_string() + ) + target_user_rooms = yield self.store.get_rooms_for_user( + target_user.to_string(), + ) + + # Check if the room lists have no elements in common. + if requester_rooms.isdisjoint(target_user_rooms): + raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN) + except StoreError as e: + if e.code == 404: + # This likely means that one of the users doesn't exist, + # so we act as if we couldn't find the profile. 
+ raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN) + raise + class MasterProfileHandler(BaseProfileHandler): PROFILE_UPDATE_MS = 60 * 1000 diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index a23edd8fe5..eac1966c5e 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -31,11 +31,17 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): + requester_user = None + + if self.hs.config.require_auth_for_profile_requests: + requester = yield self.auth.get_user_by_req(request) + requester_user = requester.user + user = UserID.from_string(user_id) - displayname = yield self.profile_handler.get_displayname( - user, - ) + yield self.profile_handler.check_profile_query_allowed(user, requester_user) + + displayname = yield self.profile_handler.get_displayname(user) ret = {} if displayname is not None: @@ -74,11 +80,17 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): + requester_user = None + + if self.hs.config.require_auth_for_profile_requests: + requester = yield self.auth.get_user_by_req(request) + requester_user = requester.user + user = UserID.from_string(user_id) - avatar_url = yield self.profile_handler.get_avatar_url( - user, - ) + yield self.profile_handler.check_profile_query_allowed(user, requester_user) + + avatar_url = yield self.profile_handler.get_avatar_url(user) ret = {} if avatar_url is not None: @@ -116,14 +128,18 @@ class ProfileRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): + requester_user = None + + if self.hs.config.require_auth_for_profile_requests: + requester = yield self.auth.get_user_by_req(request) + requester_user = requester.user + user = UserID.from_string(user_id) - displayname = yield self.profile_handler.get_displayname( - user, - ) - avatar_url = yield self.profile_handler.get_avatar_url( - user, - ) + yield self.profile_handler.check_profile_query_allowed(user, requester_user) + + displayname = yield self.profile_handler.get_displayname(user) + avatar_url = yield self.profile_handler.get_avatar_url(user) ret = {} if displayname is not None: diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 48da4d557f..fab04965cb 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -301,6 +301,12 @@ class PublicRoomListRestServlet(ClientV1RestServlet): try: yield self.auth.get_user_by_req(request, allow_guest=True) except AuthError as e: + # Option to allow servers to require auth when accessing + # /publicRooms via CS API. This is especially helpful in private + # federations. + if self.hs.config.restrict_public_rooms_to_local_users: + raise + # We allow people to not be authed if they're just looking at our # room list, but require auth when we proxy the request. 
# In both cases we call the auth function, as that has the side diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index 1eab9c3bdb..5d13de11e6 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -20,7 +20,7 @@ from twisted.internet import defer import synapse.types from synapse.api.errors import AuthError, SynapseError -from synapse.rest.client.v1 import profile +from synapse.rest.client.v1 import admin, login, profile, room from tests import unittest @@ -42,6 +42,7 @@ class ProfileTestCase(unittest.TestCase): "set_displayname", "get_avatar_url", "set_avatar_url", + "check_profile_query_allowed", ] ) @@ -155,3 +156,92 @@ class ProfileTestCase(unittest.TestCase): self.assertEquals(mocked_set.call_args[0][0].localpart, "1234ABCD") self.assertEquals(mocked_set.call_args[0][1].user.localpart, "1234ABCD") self.assertEquals(mocked_set.call_args[0][2], "http://my.server/pic.gif") + + +class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + login.register_servlets, + profile.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + + config = self.default_config() + config.require_auth_for_profile_requests = True + self.hs = self.setup_test_homeserver(config=config) + + return self.hs + + def prepare(self, reactor, clock, hs): + # User owning the requested profile. + self.owner = self.register_user("owner", "pass") + self.owner_tok = self.login("owner", "pass") + self.profile_url = "/profile/%s" % (self.owner) + + # User requesting the profile. + self.requester = self.register_user("requester", "pass") + self.requester_tok = self.login("requester", "pass") + + self.room_id = self.helper.create_room_as(self.owner, tok=self.owner_tok) + + def test_no_auth(self): + self.try_fetch_profile(401) + + def test_not_in_shared_room(self): + self.ensure_requester_left_room() + + self.try_fetch_profile(403, access_token=self.requester_tok) + + def test_in_shared_room(self): + self.ensure_requester_left_room() + + self.helper.join( + room=self.room_id, + user=self.requester, + tok=self.requester_tok, + ) + + self.try_fetch_profile(200, self.requester_tok) + + def try_fetch_profile(self, expected_code, access_token=None): + self.request_profile( + expected_code, + access_token=access_token + ) + + self.request_profile( + expected_code, + url_suffix="/displayname", + access_token=access_token, + ) + + self.request_profile( + expected_code, + url_suffix="/avatar_url", + access_token=access_token, + ) + + def request_profile(self, expected_code, url_suffix="", access_token=None): + request, channel = self.make_request( + "GET", + self.profile_url + url_suffix, + access_token=access_token, + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) + + def ensure_requester_left_room(self): + try: + self.helper.leave( + room=self.room_id, + user=self.requester, + tok=self.requester_tok, + ) + except AssertionError: + # We don't care whether the leave request didn't return a 200 (e.g. + # if the user isn't already in the room), because we only want to + # make sure the user isn't in the room. 
+ pass diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 521ac80f9a..28fbf6ae52 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -904,3 +904,35 @@ class RoomSearchTestCase(unittest.HomeserverTestCase): self.assertEqual( context["profile_info"][self.other_user_id]["displayname"], "otheruser" ) + + +class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + + self.url = b"/_matrix/client/r0/publicRooms" + + config = self.default_config() + config.restrict_public_rooms_to_local_users = True + self.hs = self.setup_test_homeserver(config=config) + + return self.hs + + def test_restricted_no_auth(self): + request, channel = self.make_request("GET", self.url) + self.render(request) + self.assertEqual(channel.code, 401, channel.result) + + def test_restricted_auth(self): + self.register_user("user", "pass") + tok = self.login("user", "pass") + + request, channel = self.make_request("GET", self.url, access_token=tok) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) -- cgit 1.5.1 From 5a4b328f522e9d08248dc03613fb0529f7529dbb Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 13 May 2019 11:05:06 -0700 Subject: Add ability to blacklist ip ranges for federation traffic (#5043) --- changelog.d/5043.feature | 1 + docs/sample_config.yaml | 18 +++++++++ synapse/config/server.py | 38 ++++++++++++++++++ synapse/http/client.py | 6 +-- synapse/http/matrixfederationclient.py | 48 ++++++++++++++++++----- tests/http/test_fedclient.py | 71 ++++++++++++++++++++++++++++++++++ 6 files changed, 168 insertions(+), 14 deletions(-) create mode 100644 changelog.d/5043.feature (limited to 'synapse/config/server.py') diff --git a/changelog.d/5043.feature b/changelog.d/5043.feature new file mode 100644 index 0000000000..0f1e0ee30e --- /dev/null +++ b/changelog.d/5043.feature @@ -0,0 +1 @@ +Add ability to blacklist IP ranges for the federation client. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index bdfc34c6bd..c4e5c4cf39 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -115,6 +115,24 @@ pid_file: DATADIR/homeserver.pid # - nyc.example.com # - syd.example.com +# Prevent federation requests from being sent to the following +# blacklist IP address CIDR ranges. If this option is not specified, or +# specified with an empty list, no ip range blacklist will be enforced. +# +# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly +# listed here, since they correspond to unroutable addresses.) +# +federation_ip_range_blacklist: + - '127.0.0.0/8' + - '10.0.0.0/8' + - '172.16.0.0/12' + - '192.168.0.0/16' + - '100.64.0.0/10' + - '169.254.0.0/16' + - '::1/128' + - 'fe80::/64' + - 'fc00::/7' + # List of ports that Synapse should listen on, their purpose and their # configuration. 
# diff --git a/synapse/config/server.py b/synapse/config/server.py index 8dce75c56a..7874cd9da7 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -17,6 +17,8 @@ import logging import os.path +from netaddr import IPSet + from synapse.http.endpoint import parse_and_validate_server_name from synapse.python_dependencies import DependencyException, check_requirements @@ -137,6 +139,24 @@ class ServerConfig(Config): for domain in federation_domain_whitelist: self.federation_domain_whitelist[domain] = True + self.federation_ip_range_blacklist = config.get( + "federation_ip_range_blacklist", [], + ) + + # Attempt to create an IPSet from the given ranges + try: + self.federation_ip_range_blacklist = IPSet( + self.federation_ip_range_blacklist + ) + + # Always blacklist 0.0.0.0, :: + self.federation_ip_range_blacklist.update(["0.0.0.0", "::"]) + except Exception as e: + raise ConfigError( + "Invalid range(s) provided in " + "federation_ip_range_blacklist: %s" % e + ) + if self.public_baseurl is not None: if self.public_baseurl[-1] != '/': self.public_baseurl += '/' @@ -386,6 +406,24 @@ class ServerConfig(Config): # - nyc.example.com # - syd.example.com + # Prevent federation requests from being sent to the following + # blacklist IP address CIDR ranges. If this option is not specified, or + # specified with an empty list, no ip range blacklist will be enforced. + # + # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly + # listed here, since they correspond to unroutable addresses.) + # + federation_ip_range_blacklist: + - '127.0.0.0/8' + - '10.0.0.0/8' + - '172.16.0.0/12' + - '192.168.0.0/16' + - '100.64.0.0/10' + - '169.254.0.0/16' + - '::1/128' + - 'fe80::/64' + - 'fc00::/7' + # List of ports that Synapse should listen on, their purpose and their # configuration. # diff --git a/synapse/http/client.py b/synapse/http/client.py index ddbfb72228..77fe68818b 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -165,7 +165,8 @@ class BlacklistingAgentWrapper(Agent): ip_address, self._ip_whitelist, self._ip_blacklist ): logger.info( - "Blocking access to %s because of blacklist" % (ip_address,) + "Blocking access to %s due to blacklist" % + (ip_address,) ) e = SynapseError(403, "IP address blocked by IP blacklist entry") return defer.fail(Failure(e)) @@ -263,9 +264,6 @@ class SimpleHttpClient(object): uri (str): URI to query. data (bytes): Data to send in the request body, if applicable. headers (t.w.http_headers.Headers): Request headers. - - Raises: - SynapseError: If the IP is blacklisted. 
""" # A small wrapper around self.agent.request() so we can easily attach # counters to it diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index ff63d0b2a8..7eefc7b1fc 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -27,9 +27,11 @@ import treq from canonicaljson import encode_canonical_json from prometheus_client import Counter from signedjson.sign import sign_json +from zope.interface import implementer from twisted.internet import defer, protocol from twisted.internet.error import DNSLookupError +from twisted.internet.interfaces import IReactorPluggableNameResolver from twisted.internet.task import _EPSILON, Cooperator from twisted.web._newclient import ResponseDone from twisted.web.http_headers import Headers @@ -44,6 +46,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.http import QuieterFileBodyProducer +from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.util.async_helpers import timeout_deferred from synapse.util.logcontext import make_deferred_yieldable @@ -172,19 +175,44 @@ class MatrixFederationHttpClient(object): self.hs = hs self.signing_key = hs.config.signing_key[0] self.server_name = hs.hostname - reactor = hs.get_reactor() + + real_reactor = hs.get_reactor() + + # We need to use a DNS resolver which filters out blacklisted IP + # addresses, to prevent DNS rebinding. + nameResolver = IPBlacklistingResolver( + real_reactor, None, hs.config.federation_ip_range_blacklist, + ) + + @implementer(IReactorPluggableNameResolver) + class Reactor(object): + def __getattr__(_self, attr): + if attr == "nameResolver": + return nameResolver + else: + return getattr(real_reactor, attr) + + self.reactor = Reactor() self.agent = MatrixFederationAgent( - hs.get_reactor(), + self.reactor, tls_client_options_factory, ) + + # Use a BlacklistingAgentWrapper to prevent circumventing the IP + # blacklist via IP literals in server names + self.agent = BlacklistingAgentWrapper( + self.agent, self.reactor, + ip_blacklist=hs.config.federation_ip_range_blacklist, + ) + self.clock = hs.get_clock() self._store = hs.get_datastore() self.version_string_bytes = hs.version_string.encode('ascii') self.default_timeout = 60 def schedule(x): - reactor.callLater(_EPSILON, x) + self.reactor.callLater(_EPSILON, x) self._cooperator = Cooperator(scheduler=schedule) @@ -370,7 +398,7 @@ class MatrixFederationHttpClient(object): request_deferred = timeout_deferred( request_deferred, timeout=_sec_timeout, - reactor=self.hs.get_reactor(), + reactor=self.reactor, ) response = yield request_deferred @@ -397,7 +425,7 @@ class MatrixFederationHttpClient(object): d = timeout_deferred( d, timeout=_sec_timeout, - reactor=self.hs.get_reactor(), + reactor=self.reactor, ) try: @@ -586,7 +614,7 @@ class MatrixFederationHttpClient(object): ) body = yield _handle_json_response( - self.hs.get_reactor(), self.default_timeout, request, response, + self.reactor, self.default_timeout, request, response, ) defer.returnValue(body) @@ -645,7 +673,7 @@ class MatrixFederationHttpClient(object): _sec_timeout = self.default_timeout body = yield _handle_json_response( - self.hs.get_reactor(), _sec_timeout, request, response, + self.reactor, _sec_timeout, request, response, ) defer.returnValue(body) @@ -704,7 +732,7 @@ class MatrixFederationHttpClient(object): ) body = yield _handle_json_response( - 
self.hs.get_reactor(), self.default_timeout, request, response, + self.reactor, self.default_timeout, request, response, ) defer.returnValue(body) @@ -753,7 +781,7 @@ class MatrixFederationHttpClient(object): ) body = yield _handle_json_response( - self.hs.get_reactor(), self.default_timeout, request, response, + self.reactor, self.default_timeout, request, response, ) defer.returnValue(body) @@ -801,7 +829,7 @@ class MatrixFederationHttpClient(object): try: d = _readBodyToFile(response, output_stream, max_size) - d.addTimeout(self.default_timeout, self.hs.get_reactor()) + d.addTimeout(self.default_timeout, self.reactor) length = yield make_deferred_yieldable(d) except Exception as e: logger.warn( diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index 279e456614..ee767f3a5a 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -15,6 +15,8 @@ from mock import Mock +from netaddr import IPSet + from twisted.internet import defer from twisted.internet.defer import TimeoutError from twisted.internet.error import ConnectingCancelledError, DNSLookupError @@ -209,6 +211,75 @@ class FederationClientTests(HomeserverTestCase): self.assertIsInstance(f.value, RequestSendFailed) self.assertIsInstance(f.value.inner_exception, ResponseNeverReceived) + def test_client_ip_range_blacklist(self): + """Ensure that Synapse does not try to connect to blacklisted IPs""" + + # Set up the ip_range blacklist + self.hs.config.federation_ip_range_blacklist = IPSet([ + "127.0.0.0/8", + "fe80::/64", + ]) + self.reactor.lookups["internal"] = "127.0.0.1" + self.reactor.lookups["internalv6"] = "fe80:0:0:0:0:8a2e:370:7337" + self.reactor.lookups["fine"] = "10.20.30.40" + cl = MatrixFederationHttpClient(self.hs, None) + + # Try making a GET request to a blacklisted IPv4 address + # ------------------------------------------------------ + # Make the request + d = cl.get_json("internal:8008", "foo/bar", timeout=10000) + + # Nothing happened yet + self.assertNoResult(d) + + self.pump(1) + + # Check that it was unable to resolve the address + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 0) + + f = self.failureResultOf(d) + self.assertIsInstance(f.value, RequestSendFailed) + self.assertIsInstance(f.value.inner_exception, DNSLookupError) + + # Try making a POST request to a blacklisted IPv6 address + # ------------------------------------------------------- + # Make the request + d = cl.post_json("internalv6:8008", "foo/bar", timeout=10000) + + # Nothing has happened yet + self.assertNoResult(d) + + # Move the reactor forwards + self.pump(1) + + # Check that it was unable to resolve the address + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 0) + + # Check that it was due to a blacklisted DNS lookup + f = self.failureResultOf(d, RequestSendFailed) + self.assertIsInstance(f.value.inner_exception, DNSLookupError) + + # Try making a GET request to a non-blacklisted IPv4 address + # ---------------------------------------------------------- + # Make the request + d = cl.post_json("fine:8008", "foo/bar", timeout=10000) + + # Nothing has happened yet + self.assertNoResult(d) + + # Move the reactor forwards + self.pump(1) + + # Check that it was able to resolve the address + clients = self.reactor.tcpClients + self.assertNotEqual(len(clients), 0) + + # Connection will still fail as this IP address does not resolve to anything + f = self.failureResultOf(d, RequestSendFailed) + self.assertIsInstance(f.value.inner_exception, 
ConnectingCancelledError) + def test_client_gets_headers(self): """ Once the client gets the headers, _request returns successfully. -- cgit 1.5.1 From a0603523d2e210cf59f887bd75e1a755720cb7a8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 May 2019 16:59:21 +0100 Subject: Add aggregations API --- synapse/config/server.py | 5 + synapse/events/utils.py | 34 +++- synapse/rest/client/v2_alpha/relations.py | 141 ++++++++++++++- synapse/storage/relations.py | 225 +++++++++++++++++++++++- tests/rest/client/v2_alpha/test_relations.py | 251 ++++++++++++++++++++++++++- 5 files changed, 643 insertions(+), 13 deletions(-) (limited to 'synapse/config/server.py') diff --git a/synapse/config/server.py b/synapse/config/server.py index 7874cd9da7..f403477b54 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -100,6 +100,11 @@ class ServerConfig(Config): "block_non_admin_invites", False, ) + # Whether to enable experimental MSC1849 (aka relations) support + self.experimental_msc1849_support_enabled = config.get( + "experimental_msc1849_support_enabled", False, + ) + # Options to control access by tracking MAU self.limit_usage_by_mau = config.get("limit_usage_by_mau", False) self.max_mau_value = 0 diff --git a/synapse/events/utils.py b/synapse/events/utils.py index a5454556cc..16d0c64372 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -21,7 +21,7 @@ from frozendict import frozendict from twisted.internet import defer -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, RelationTypes from synapse.util.async_helpers import yieldable_gather_results from . import EventBase @@ -324,8 +324,12 @@ class EventClientSerializer(object): """ def __init__(self, hs): - pass + self.store = hs.get_datastore() + self.experimental_msc1849_support_enabled = ( + hs.config.experimental_msc1849_support_enabled + ) + @defer.inlineCallbacks def serialize_event(self, event, time_now, **kwargs): """Serializes a single event. @@ -337,8 +341,32 @@ class EventClientSerializer(object): Returns: Deferred[dict]: The serialized event """ + # To handle the case of presence events and the like + if not isinstance(event, EventBase): + defer.returnValue(event) + + event_id = event.event_id event = serialize_event(event, time_now, **kwargs) - return defer.succeed(event) + + # If MSC1849 is enabled then we need to look if thre are any relations + # we need to bundle in with the event + if self.experimental_msc1849_support_enabled: + annotations = yield self.store.get_aggregation_groups_for_event( + event_id, + ) + references = yield self.store.get_relations_for_event( + event_id, RelationTypes.REFERENCES, direction="f", + ) + + if annotations.chunk: + r = event["unsigned"].setdefault("m.relations", {}) + r[RelationTypes.ANNOTATION] = annotations.to_dict() + + if references.chunk: + r = event["unsigned"].setdefault("m.relations", {}) + r[RelationTypes.REFERENCES] = references.to_dict() + + defer.returnValue(event) def serialize_events(self, events, time_now, **kwargs): """Serializes multiple events. 
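To make the serializer change above concrete: once `experimental_msc1849_support_enabled` is set to true in the homeserver config, relations are bundled into the parent event's `unsigned` section. The sketch below (the event IDs are illustrative placeholders) shows the resulting shape, mirroring the assertions added to test_relations.py later in this patch:

    from synapse.api.constants import RelationTypes

    # Hypothetical event IDs of two reference relations pointing at the parent.
    reply_1 = "$reply1:example.com"
    reply_2 = "$reply2:example.com"

    # What EventClientSerializer.serialize_event adds to the parent event's
    # "unsigned" section when MSC1849 support is enabled: annotations are
    # aggregated into (type, key, count) groups, while references are listed
    # as bare event IDs.
    bundled = {
        "m.relations": {
            RelationTypes.ANNOTATION: {
                "chunk": [
                    {"type": "m.reaction", "key": "a", "count": 2},
                    {"type": "m.reaction", "key": "b", "count": 1},
                ]
            },
            RelationTypes.REFERENCES: {
                "chunk": [{"event_id": reply_1}, {"event_id": reply_2}]
            },
        }
    }
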
diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index c3ac73b8c7..f468409b68 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -23,7 +23,7 @@ import logging from twisted.internet import defer -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, RelationTypes from synapse.api.errors import SynapseError from synapse.http.servlet import ( RestServlet, @@ -141,12 +141,16 @@ class RelationPaginationServlet(RestServlet): ) limit = parse_integer(request, "limit", default=5) + from_token = parse_string(request, "from") + to_token = parse_string(request, "to") result = yield self.store.get_relations_for_event( event_id=parent_id, relation_type=relation_type, event_type=event_type, limit=limit, + from_token=from_token, + to_token=to_token, ) events = yield self.store.get_events_as_list( @@ -162,6 +166,141 @@ class RelationPaginationServlet(RestServlet): defer.returnValue((200, return_value)) +class RelationAggregationPaginationServlet(RestServlet): + """API to paginate aggregation groups of relations, e.g. paginate the + types and counts of the reactions on the events. + + Example request and response: + + GET /rooms/{room_id}/aggregations/{parent_id} + + { + chunk: [ + { + "type": "m.reaction", + "key": "👍", + "count": 3 + } + ] + } + """ + + PATTERNS = client_v2_patterns( + "/rooms/(?P[^/]*)/aggregations/(?P[^/]*)" + "(/(?P[^/]*)(/(?P[^/]*))?)?$", + releases=(), + ) + + def __init__(self, hs): + super(RelationAggregationPaginationServlet, self).__init__() + self.auth = hs.get_auth() + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=None): + requester = yield self.auth.get_user_by_req(request, allow_guest=True) + + yield self.auth.check_in_room_or_world_readable( + room_id, requester.user.to_string() + ) + + if relation_type not in (RelationTypes.ANNOTATION, None): + raise SynapseError(400, "Relation type must be 'annotation'") + + limit = parse_integer(request, "limit", default=5) + from_token = parse_string(request, "from") + to_token = parse_string(request, "to") + + res = yield self.store.get_aggregation_groups_for_event( + event_id=parent_id, + event_type=event_type, + limit=limit, + from_token=from_token, + to_token=to_token, + ) + + defer.returnValue((200, res.to_dict())) + + +class RelationAggregationGroupPaginationServlet(RestServlet): + """API to paginate within an aggregation group of relations, e.g. paginate + all the 👍 reactions on an event. + + Example request and response: + + GET /rooms/{room_id}/aggregations/{parent_id}/m.annotation/m.reaction/👍 + + { + chunk: [ + { + "type": "m.reaction", + "content": { + "m.relates_to": { + "rel_type": "m.annotation", + "key": "👍" + } + } + }, + ... 
+ ] + } + """ + + PATTERNS = client_v2_patterns( + "/rooms/(?P[^/]*)/aggregations/(?P[^/]*)" + "/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)$", + releases=(), + ) + + def __init__(self, hs): + super(RelationAggregationGroupPaginationServlet, self).__init__() + self.auth = hs.get_auth() + self.store = hs.get_datastore() + self.clock = hs.get_clock() + self._event_serializer = hs.get_event_client_serializer() + + @defer.inlineCallbacks + def on_GET(self, request, room_id, parent_id, relation_type, event_type, key): + requester = yield self.auth.get_user_by_req(request, allow_guest=True) + + yield self.auth.check_in_room_or_world_readable( + room_id, requester.user.to_string() + ) + + if relation_type != RelationTypes.ANNOTATION: + raise SynapseError(400, "Relation type must be 'annotation'") + + limit = parse_integer(request, "limit", default=5) + from_token = parse_string(request, "from") + to_token = parse_string(request, "to") + + result = yield self.store.get_relations_for_event( + event_id=parent_id, + relation_type=relation_type, + event_type=event_type, + aggregation_key=key, + limit=limit, + from_token=from_token, + to_token=to_token, + ) + + events = yield self.store.get_events_as_list( + [c["event_id"] for c in result.chunk] + ) + + now = self.clock.time_msec() + events = yield self._event_serializer.serialize_events(events, now) + + return_value = result.to_dict() + return_value["chunk"] = events + + defer.returnValue((200, return_value)) + + defer.returnValue((200, return_value)) + + def register_servlets(hs, http_server): RelationSendServlet(hs).register(http_server) RelationPaginationServlet(hs).register(http_server) + RelationAggregationPaginationServlet(hs).register(http_server) + RelationAggregationGroupPaginationServlet(hs).register(http_server) diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 31ef6679af..db4b842c97 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -18,7 +18,9 @@ import logging import attr from synapse.api.constants import RelationTypes +from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore +from synapse.storage.stream import generate_pagination_where_clause logger = logging.getLogger(__name__) @@ -29,19 +31,94 @@ class PaginationChunk(object): Attributes: chunk (list): The rows returned by pagination + next_batch (Any|None): Token to fetch next set of results with, if + None then there are no more results. + prev_batch (Any|None): Token to fetch previous set of results with, if + None then there are no previous results. """ chunk = attr.ib() + next_batch = attr.ib(default=None) + prev_batch = attr.ib(default=None) def to_dict(self): d = {"chunk": self.chunk} + if self.next_batch: + d["next_batch"] = self.next_batch.to_string() + + if self.prev_batch: + d["prev_batch"] = self.prev_batch.to_string() + return d +@attr.s +class RelationPaginationToken(object): + """Pagination token for relation pagination API. + + As the results are order by topological ordering, we can use the + `topological_ordering` and `stream_ordering` fields of the events at the + boundaries of the chunk as pagination tokens. + + Attributes: + topological (int): The topological ordering of the boundary event + stream (int): The stream ordering of the boundary event. 
+ """ + + topological = attr.ib() + stream = attr.ib() + + @staticmethod + def from_string(string): + try: + t, s = string.split("-") + return RelationPaginationToken(int(t), int(s)) + except ValueError: + raise SynapseError(400, "Invalid token") + + def to_string(self): + return "%d-%d" % (self.topological, self.stream) + + +@attr.s +class AggregationPaginationToken(object): + """Pagination token for relation aggregation pagination API. + + As the results are order by count and then MAX(stream_ordering) of the + aggregation groups, we can just use them as our pagination token. + + Attributes: + count (int): The count of relations in the boundar group. + stream (int): The MAX stream ordering in the boundary group. + """ + + count = attr.ib() + stream = attr.ib() + + @staticmethod + def from_string(string): + try: + c, s = string.split("-") + return AggregationPaginationToken(int(c), int(s)) + except ValueError: + raise SynapseError(400, "Invalid token") + + def to_string(self): + return "%d-%d" % (self.count, self.stream) + + class RelationsStore(SQLBaseStore): def get_relations_for_event( - self, event_id, relation_type=None, event_type=None, limit=5, direction="b" + self, + event_id, + relation_type=None, + event_type=None, + aggregation_key=None, + limit=5, + direction="b", + from_token=None, + to_token=None, ): """Get a list of relations for an event, ordered by topological ordering. @@ -51,16 +128,26 @@ class RelationsStore(SQLBaseStore): type, if given. event_type (str|None): Only fetch events with this event type, if given. + aggregation_key (str|None): Only fetch events with this aggregation + key, if given. limit (int): Only fetch the most recent `limit` events. direction (str): Whether to fetch the most recent first (`"b"`) or the oldest first (`"f"`). + from_token (RelationPaginationToken|None): Fetch rows from the given + token, or from the start if None. + to_token (RelationPaginationToken|None): Fetch rows up to the given + token, or up to the end if None. Returns: Deferred[PaginationChunk]: List of event IDs that match relations requested. The rows are of the form `{"event_id": "..."}`. 
""" - # TODO: Pagination tokens + if from_token: + from_token = RelationPaginationToken.from_string(from_token) + + if to_token: + to_token = RelationPaginationToken.from_string(to_token) where_clause = ["relates_to_id = ?"] where_args = [event_id] @@ -73,12 +160,29 @@ class RelationsStore(SQLBaseStore): where_clause.append("type = ?") where_args.append(event_type) - order = "ASC" + if aggregation_key: + where_clause.append("aggregation_key = ?") + where_args.append(aggregation_key) + + pagination_clause = generate_pagination_where_clause( + direction=direction, + column_names=("topological_ordering", "stream_ordering"), + from_token=attr.astuple(from_token) if from_token else None, + to_token=attr.astuple(to_token) if to_token else None, + engine=self.database_engine, + ) + + if pagination_clause: + where_clause.append(pagination_clause) + if direction == "b": order = "DESC" + else: + order = "ASC" sql = """ - SELECT event_id FROM event_relations + SELECT event_id, topological_ordering, stream_ordering + FROM event_relations INNER JOIN events USING (event_id) WHERE %s ORDER BY topological_ordering %s, stream_ordering %s @@ -92,16 +196,125 @@ class RelationsStore(SQLBaseStore): def _get_recent_references_for_event_txn(txn): txn.execute(sql, where_args + [limit + 1]) - events = [{"event_id": row[0]} for row in txn] + last_topo_id = None + last_stream_id = None + events = [] + for row in txn: + events.append({"event_id": row[0]}) + last_topo_id = row[1] + last_stream_id = row[2] + + next_batch = None + if len(events) > limit and last_topo_id and last_stream_id: + next_batch = RelationPaginationToken(last_topo_id, last_stream_id) return PaginationChunk( - chunk=list(events[:limit]), + chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token ) return self.runInteraction( "get_recent_references_for_event", _get_recent_references_for_event_txn ) + def get_aggregation_groups_for_event( + self, + event_id, + event_type=None, + limit=5, + direction="b", + from_token=None, + to_token=None, + ): + """Get a list of annotations on the event, grouped by event type and + aggregation key, sorted by count. + + This is used e.g. to get the what and how many reactions have happend + on an event. + + Args: + event_id (str): Fetch events that relate to this event ID. + event_type (str|None): Only fetch events with this event type, if + given. + limit (int): Only fetch the `limit` groups. + direction (str): Whether to fetch the highest count first (`"b"`) or + the lowest count first (`"f"`). + from_token (AggregationPaginationToken|None): Fetch rows from the + given token, or from the start if None. + to_token (AggregationPaginationToken|None): Fetch rows up to the + given token, or up to the end if None. + + + Returns: + Deferred[PaginationChunk]: List of groups of annotations that + match. Each row is a dict with `type`, `key` and `count` fields. 
+ """ + + if from_token: + from_token = AggregationPaginationToken.from_string(from_token) + + if to_token: + to_token = AggregationPaginationToken.from_string(to_token) + + where_clause = ["relates_to_id = ?", "relation_type = ?"] + where_args = [event_id, RelationTypes.ANNOTATION] + + if event_type: + where_clause.append("type = ?") + where_args.append(event_type) + + having_clause = generate_pagination_where_clause( + direction=direction, + column_names=("COUNT(*)", "MAX(stream_ordering)"), + from_token=attr.astuple(from_token) if from_token else None, + to_token=attr.astuple(to_token) if to_token else None, + engine=self.database_engine, + ) + + if direction == "b": + order = "DESC" + else: + order = "ASC" + + if having_clause: + having_clause = "HAVING " + having_clause + else: + having_clause = "" + + sql = """ + SELECT type, aggregation_key, COUNT(*), MAX(stream_ordering) + FROM event_relations + INNER JOIN events USING (event_id) + WHERE {where_clause} + GROUP BY relation_type, type, aggregation_key + {having_clause} + ORDER BY COUNT(*) {order}, MAX(stream_ordering) {order} + LIMIT ? + """.format( + where_clause=" AND ".join(where_clause), + order=order, + having_clause=having_clause, + ) + + def _get_aggregation_groups_for_event_txn(txn): + txn.execute(sql, where_args + [limit + 1]) + + next_batch = None + events = [] + for row in txn: + events.append({"type": row[0], "key": row[1], "count": row[2]}) + next_batch = AggregationPaginationToken(row[2], row[3]) + + if len(events) <= limit: + next_batch = None + + return PaginationChunk( + chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token + ) + + return self.runInteraction( + "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn + ) + def _handle_event_relations(self, txn, event): """Handles inserting relation data during peristence of events diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index bcc1c1bb85..661dd45755 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import itertools + import six from synapse.api.constants import EventTypes, RelationTypes @@ -30,6 +32,12 @@ class RelationsTestCase(unittest.HomeserverTestCase): login.register_servlets, ] + def make_homeserver(self, reactor, clock): + # We need to enable msc1849 support for aggregations + config = self.default_config() + config["experimental_msc1849_support_enabled"] = True + return self.setup_test_homeserver(config=config) + def prepare(self, reactor, clock, hs): self.room = self.helper.create_room_as(self.user_id) res = self.helper.send(self.room, body="Hi!") @@ -40,7 +48,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): creates the right shape of event. """ - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key=u"👍") self.assertEquals(200, channel.code, channel.json_body) event_id = channel.json_body["event_id"] @@ -72,7 +80,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self._send_relation(RelationTypes.ANNOTATION, EventTypes.Member) self.assertEquals(400, channel.code, channel.json_body) - def test_paginate(self): + def test_basic_paginate_relations(self): """Tests that calling pagination API corectly the latest relations. 
""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction") @@ -102,6 +110,243 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel.json_body["chunk"][0], ) + # Make sure next_batch has something in it that looks like it could be a + # valid token. + self.assertIsInstance( + channel.json_body.get("next_batch"), six.string_types, channel.json_body + ) + + def test_repeated_paginate_relations(self): + """Test that if we paginate using a limit and tokens then we get the + expected events. + """ + + expected_event_ids = [] + for _ in range(10): + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction") + self.assertEquals(200, channel.code, channel.json_body) + expected_event_ids.append(channel.json_body["event_id"]) + + prev_token = None + found_event_ids = [] + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/relations/%s?limit=1%s" + % (self.room, self.parent_id, from_token), + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) + next_batch = channel.json_body.get("next_batch") + + self.assertNotEquals(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEquals(found_event_ids, expected_event_ids) + + def test_aggregation_pagination_groups(self): + """Test that we can paginate annotation groups correctly. + """ + + sent_groups = {u"👍": 10, u"a": 7, u"b": 5, u"c": 3, u"d": 2, u"e": 1} + for key in itertools.chain.from_iterable( + itertools.repeat(key, num) for key, num in sent_groups.items() + ): + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", key=key + ) + self.assertEquals(200, channel.code, channel.json_body) + + prev_token = None + found_groups = {} + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/aggregations/%s?limit=1%s" + % (self.room, self.parent_id, from_token), + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + + for groups in channel.json_body["chunk"]: + # We only expect reactions + self.assertEqual(groups["type"], "m.reaction", channel.json_body) + + # We should only see each key once + self.assertNotIn(groups["key"], found_groups, channel.json_body) + + found_groups[groups["key"]] = groups["count"] + + next_batch = channel.json_body.get("next_batch") + + self.assertNotEquals(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + self.assertEquals(sent_groups, found_groups) + + def test_aggregation_pagination_within_group(self): + """Test that we can paginate within an annotation group. 
+ """ + + expected_event_ids = [] + for _ in range(10): + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", key=u"👍" + ) + self.assertEquals(200, channel.code, channel.json_body) + expected_event_ids.append(channel.json_body["event_id"]) + + # Also send a different type of reaction so that we test we don't see it + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") + self.assertEquals(200, channel.code, channel.json_body) + + prev_token = None + found_event_ids = [] + encoded_key = six.moves.urllib.parse.quote_plus(u"👍".encode("utf-8")) + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s" + "/aggregations/%s/%s/m.reaction/%s?limit=1%s" + % ( + self.room, + self.parent_id, + RelationTypes.ANNOTATION, + encoded_key, + from_token, + ), + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) + + next_batch = channel.json_body.get("next_batch") + + self.assertNotEquals(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEquals(found_event_ids, expected_event_ids) + + def test_aggregation(self): + """Test that annotations get correctly aggregated. + """ + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") + self.assertEquals(200, channel.code, channel.json_body) + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/aggregations/%s" + % (self.room, self.parent_id), + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertEquals( + channel.json_body, + { + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 2}, + {"type": "m.reaction", "key": "b", "count": 1}, + ] + }, + ) + + def test_aggregation_must_be_annotation(self): + """Test that aggregations must be annotations. + """ + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/aggregations/m.replaces/%s?limit=1" + % (self.room, self.parent_id), + ) + self.render(request) + self.assertEquals(400, channel.code, channel.json_body) + + def test_aggregation_get_event(self): + """Test that annotations and references get correctly bundled when + getting the parent event. 
+ """ + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.REFERENCES, "m.room.test") + self.assertEquals(200, channel.code, channel.json_body) + reply_1 = channel.json_body["event_id"] + + channel = self._send_relation(RelationTypes.REFERENCES, "m.room.test") + self.assertEquals(200, channel.code, channel.json_body) + reply_2 = channel.json_body["event_id"] + + request, channel = self.make_request( + "GET", "/rooms/%s/event/%s" % (self.room, self.parent_id) + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.maxDiff = None + + self.assertEquals( + channel.json_body["unsigned"].get("m.relations"), + { + RelationTypes.ANNOTATION: { + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 2}, + {"type": "m.reaction", "key": "b", "count": 1}, + ] + }, + RelationTypes.REFERENCES: { + "chunk": [{"event_id": reply_1}, {"event_id": reply_2}] + }, + }, + ) + def _send_relation(self, relation_type, event_type, key=None): """Helper function to send a relation pointing at `self.parent_id` @@ -116,7 +361,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): """ query = "" if key: - query = "?key=" + six.moves.urllib.parse.quote_plus(key) + query = "?key=" + six.moves.urllib.parse.quote_plus(key.encode("utf-8")) request, channel = self.make_request( "POST", -- cgit 1.5.1 From cd32375846397ed15f27a4f6602bf20999d2b8b3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 16 May 2019 14:26:41 +0100 Subject: Add option to disable per-room profiles --- synapse/config/server.py | 11 +++++++++++ synapse/handlers/room_member.py | 9 +++++++++ 2 files changed, 20 insertions(+) (limited to 'synapse/config/server.py') diff --git a/synapse/config/server.py b/synapse/config/server.py index 7874cd9da7..1b8968608e 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -173,6 +174,10 @@ class ServerConfig(Config): "require_membership_for_aliases", True, ) + # Whether to allow per-room membership profiles through the send of membership + # events with profile information that differ from the target's global profile. + self.allow_per_room_profiles = config.get("allow_per_room_profiles", True) + self.listeners = [] for listener in config.get("listeners", []): if not isinstance(listener.get("port", None), int): @@ -566,6 +571,12 @@ class ServerConfig(Config): # Defaults to 'true'. # #require_membership_for_aliases: false + + # Whether to allow per-room membership profiles through the send of membership + # events with profile information that differ from the target's global profile. + # Defaults to 'true'. 
+ # + #allow_per_room_profiles: false """ % locals() def read_arguments(self, args): diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 3e86b9c690..ffc588d454 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -73,6 +74,7 @@ class RoomMemberHandler(object): self.spam_checker = hs.get_spam_checker() self._server_notices_mxid = self.config.server_notices_mxid self._enable_lookup = hs.config.enable_3pid_lookup + self.allow_per_room_profiles = self.config.allow_per_room_profiles # This is only used to get at ratelimit function, and # maybe_kick_guest_users. It's fine there are multiple of these as @@ -357,6 +359,13 @@ class RoomMemberHandler(object): # later on. content = dict(content) + if not self.allow_per_room_profiles: + # Strip profile data, knowing that new profile data will be added to the + # event's content in event_creation_handler.create_event() using the target's + # global profile. + content.pop("displayname", None) + content.pop("avatar_url", None) + effective_membership_state = action if action in ["kick", "unban"]: effective_membership_state = "leave" -- cgit 1.5.1 From 6368150a748e9303f34948873af360d8a62347b6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 23 May 2019 15:00:20 +0100 Subject: Add config option for setting homeserver's default room version (#5223) Replaces DEFAULT_ROOM_VERSION constant with a method that first checks the config, then returns a hardcoded value if the option is not present. That hardcoded value is now located in the server.py config file. --- changelog.d/5223.feature | 1 + docs/sample_config.yaml | 9 +++++++ synapse/api/room_versions.py | 4 ---- synapse/config/server.py | 32 +++++++++++++++++++++++++ synapse/handlers/room.py | 9 +++++-- synapse/rest/client/v2_alpha/capabilities.py | 5 ++-- tests/rest/client/v2_alpha/test_capabilities.py | 7 ++++-- 7 files changed, 57 insertions(+), 10 deletions(-) create mode 100644 changelog.d/5223.feature (limited to 'synapse/config/server.py') diff --git a/changelog.d/5223.feature b/changelog.d/5223.feature new file mode 100644 index 0000000000..cfdf1ad41b --- /dev/null +++ b/changelog.d/5223.feature @@ -0,0 +1 @@ +Ability to configure default room version. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 559fbcdd01..2a5a514d61 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -83,6 +83,15 @@ pid_file: DATADIR/homeserver.pid # #restrict_public_rooms_to_local_users: true +# The default room version for newly created rooms. +# +# Known room versions are listed here: +# https://matrix.org/docs/spec/#complete-list-of-room-versions +# +# For example, for room version 1, default_room_version should be set +# to "1". 
+#default_room_version: "1" + # The GC threshold parameters to pass to `gc.set_threshold`, if defined # #gc_thresholds: [700, 10, 10] diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index b2895355a8..4085bd10b9 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -85,10 +85,6 @@ class RoomVersions(object): ) -# the version we will give rooms which are created on this server -DEFAULT_ROOM_VERSION = RoomVersions.V1 - - KNOWN_ROOM_VERSIONS = { v.identifier: v for v in ( RoomVersions.V1, diff --git a/synapse/config/server.py b/synapse/config/server.py index f34aa42afa..e9120d4d75 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -20,6 +20,7 @@ import os.path from netaddr import IPSet +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.http.endpoint import parse_and_validate_server_name from synapse.python_dependencies import DependencyException, check_requirements @@ -35,6 +36,8 @@ logger = logging.Logger(__name__) # in the list. DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0'] +DEFAULT_ROOM_VERSION = "1" + class ServerConfig(Config): @@ -88,6 +91,22 @@ class ServerConfig(Config): "restrict_public_rooms_to_local_users", False, ) + default_room_version = config.get( + "default_room_version", DEFAULT_ROOM_VERSION, + ) + + # Ensure room version is a str + default_room_version = str(default_room_version) + + if default_room_version not in KNOWN_ROOM_VERSIONS: + raise ConfigError( + "Unknown default_room_version: %s, known room versions: %s" % + (default_room_version, list(KNOWN_ROOM_VERSIONS.keys())) + ) + + # Get the actual room version object rather than just the identifier + self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version] + # whether to enable search. If disabled, new entries will not be inserted # into the search tables and they will not be indexed. Users will receive # errors when attempting to search for messages. @@ -310,6 +329,10 @@ class ServerConfig(Config): unsecure_port = 8008 pid_file = os.path.join(data_dir_path, "homeserver.pid") + + # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the + # default config string + default_room_version = DEFAULT_ROOM_VERSION return """\ ## Server ## @@ -384,6 +407,15 @@ class ServerConfig(Config): # #restrict_public_rooms_to_local_users: true + # The default room version for newly created rooms. + # + # Known room versions are listed here: + # https://matrix.org/docs/spec/#complete-list-of-room-versions + # + # For example, for room version 1, default_room_version should be set + # to "1". 
+ #default_room_version: "%(default_room_version)s" + # The GC threshold parameters to pass to `gc.set_threshold`, if defined # #gc_thresholds: [700, 10, 10] diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index e37ae96899..4a17911a87 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -27,7 +27,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError -from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.storage.state import StateFilter from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID from synapse.util import stringutils @@ -70,6 +70,7 @@ class RoomCreationHandler(BaseHandler): self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() + self.config = hs.config # linearizer to stop two upgrades happening at once self._upgrade_linearizer = Linearizer("room_upgrade_linearizer") @@ -475,7 +476,11 @@ class RoomCreationHandler(BaseHandler): if ratelimit: yield self.ratelimit(requester) - room_version = config.get("room_version", DEFAULT_ROOM_VERSION.identifier) + room_version = config.get( + "room_version", + self.config.default_room_version.identifier, + ) + if not isinstance(room_version, string_types): raise SynapseError( 400, diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index a868d06098..2b4892330c 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -16,7 +16,7 @@ import logging from twisted.internet import defer -from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.http.servlet import RestServlet from ._base import client_v2_patterns @@ -36,6 +36,7 @@ class CapabilitiesRestServlet(RestServlet): """ super(CapabilitiesRestServlet, self).__init__() self.hs = hs + self.config = hs.config self.auth = hs.get_auth() self.store = hs.get_datastore() @@ -48,7 +49,7 @@ class CapabilitiesRestServlet(RestServlet): response = { "capabilities": { "m.room_versions": { - "default": DEFAULT_ROOM_VERSION.identifier, + "default": self.config.default_room_version.identifier, "available": { v.identifier: v.disposition for v in KNOWN_ROOM_VERSIONS.values() diff --git a/tests/rest/client/v2_alpha/test_capabilities.py b/tests/rest/client/v2_alpha/test_capabilities.py index f3ef977404..bce5b0cf4c 100644 --- a/tests/rest/client/v2_alpha/test_capabilities.py +++ b/tests/rest/client/v2_alpha/test_capabilities.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import synapse.rest.admin -from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.rest.client.v1 import login from synapse.rest.client.v2_alpha import capabilities @@ -32,6 +32,7 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): self.url = b"/_matrix/client/r0/capabilities" hs = self.setup_test_homeserver() self.store = hs.get_datastore() + self.config = hs.config return hs def test_check_auth_required(self): @@ -51,8 +52,10 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) for room_version in capabilities['m.room_versions']['available'].keys(): self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, "" + room_version) + self.assertEqual( - DEFAULT_ROOM_VERSION.identifier, capabilities['m.room_versions']['default'] + self.config.default_room_version.identifier, + capabilities['m.room_versions']['default'], ) def test_get_change_password_capabilities(self): -- cgit 1.5.1 From dba9152d1593c9de176e969153b30b4ef19f29ae Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 24 May 2019 14:12:38 +0100 Subject: Add missing blank line in config (#5249) --- changelog.d/5249.feature | 1 + docs/sample_config.yaml | 1 + synapse/config/server.py | 1 + 3 files changed, 3 insertions(+) create mode 100644 changelog.d/5249.feature (limited to 'synapse/config/server.py') diff --git a/changelog.d/5249.feature b/changelog.d/5249.feature new file mode 100644 index 0000000000..cfdf1ad41b --- /dev/null +++ b/changelog.d/5249.feature @@ -0,0 +1 @@ +Ability to configure default room version. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 2a5a514d61..421ae96f04 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -90,6 +90,7 @@ pid_file: DATADIR/homeserver.pid # # For example, for room version 1, default_room_version should be set # to "1". +# #default_room_version: "1" # The GC threshold parameters to pass to `gc.set_threshold`, if defined diff --git a/synapse/config/server.py b/synapse/config/server.py index e9120d4d75..e763e19e15 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -414,6 +414,7 @@ class ServerConfig(Config): # # For example, for room version 1, default_room_version should be set # to "1". + # #default_room_version: "%(default_room_version)s" # The GC threshold parameters to pass to `gc.set_threshold`, if defined -- cgit 1.5.1 From 26713515de97c98dda99a9b06325781fe09b1cbe Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 5 Jun 2019 13:16:23 +0100 Subject: Neilj/mau tracking config explainer (#5284) Improve documentation of monthly active user blocking and mau_trial_days --- changelog.d/5284.misc | 1 + docs/sample_config.yaml | 16 ++++++++++++++++ synapse/config/server.py | 16 ++++++++++++++++ 3 files changed, 33 insertions(+) create mode 100644 changelog.d/5284.misc (limited to 'synapse/config/server.py') diff --git a/changelog.d/5284.misc b/changelog.d/5284.misc new file mode 100644 index 0000000000..c4d42ca3d9 --- /dev/null +++ b/changelog.d/5284.misc @@ -0,0 +1 @@ +Improve sample config for monthly active user blocking. 
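As a quick aside on how the new `default_room_version` setting surfaces to clients: the capabilities endpoint exercised in the test above reports both the configured default and the set of known room versions. A minimal client-side sketch, assuming a reachable homeserver (the URL and access token below are placeholders, not taken from the patch):

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ACCESS_TOKEN = "syt_placeholder_token"     # placeholder

    # GET /_matrix/client/r0/capabilities returns, among other things,
    # m.room_versions with the server's default and available versions.
    resp = requests.get(
        HOMESERVER + "/_matrix/client/r0/capabilities",
        headers={"Authorization": "Bearer " + ACCESS_TOKEN},
    )
    resp.raise_for_status()

    room_versions = resp.json()["capabilities"]["m.room_versions"]
    print("default room version:", room_versions["default"])
    print("available:", sorted(room_versions["available"]))
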
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 493ea9ee9e..0960b9b5ed 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -261,6 +261,22 @@ listeners: # Monthly Active User Blocking # +# Used in cases where the admin or server owner wants to limit to the +# number of monthly active users. +# +# 'limit_usage_by_mau' disables/enables monthly active user blocking. When +# anabled and a limit is reached the server returns a 'ResourceLimitError' +# with error type Codes.RESOURCE_LIMIT_EXCEEDED +# +# 'max_mau_value' is the hard limit of monthly active users above which +# the server will start blocking user actions. +# +# 'mau_trial_days' is a means to add a grace period for active users. It +# means that users must be active for this number of days before they +# can be considered active and guards against the case where lots of users +# sign up in a short space of time never to return after their initial +# session. +# #limit_usage_by_mau: False #max_mau_value: 50 #mau_trial_days: 2 diff --git a/synapse/config/server.py b/synapse/config/server.py index e763e19e15..334921d421 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -585,6 +585,22 @@ class ServerConfig(Config): # Monthly Active User Blocking # + # Used in cases where the admin or server owner wants to limit to the + # number of monthly active users. + # + # 'limit_usage_by_mau' disables/enables monthly active user blocking. When + # anabled and a limit is reached the server returns a 'ResourceLimitError' + # with error type Codes.RESOURCE_LIMIT_EXCEEDED + # + # 'max_mau_value' is the hard limit of monthly active users above which + # the server will start blocking user actions. + # + # 'mau_trial_days' is a means to add a grace period for active users. It + # means that users must be active for this number of days before they + # can be considered active and guards against the case where lots of users + # sign up in a short space of time never to return after their initial + # session. + # #limit_usage_by_mau: False #max_mau_value: 50 #mau_trial_days: 2 -- cgit 1.5.1 From a11865016e4fc8f691ce94ec25e8f40290df8329 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 6 Jun 2019 20:13:47 +0100 Subject: Set default room version to v4. (#5379) Set default room version to v4. --- changelog.d/5379.feature | 1 + docs/sample_config.yaml | 2 +- synapse/config/server.py | 2 +- tests/storage/test_cleanup_extrems.py | 6 ++++++ tests/utils.py | 3 ++- 5 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5379.feature (limited to 'synapse/config/server.py') diff --git a/changelog.d/5379.feature b/changelog.d/5379.feature new file mode 100644 index 0000000000..7b64786fe6 --- /dev/null +++ b/changelog.d/5379.feature @@ -0,0 +1 @@ +Set default room version to v4. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index ea73306fb9..4d7e6f3eb5 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -91,7 +91,7 @@ pid_file: DATADIR/homeserver.pid # For example, for room version 1, default_room_version should be set # to "1". # -#default_room_version: "1" +#default_room_version: "4" # The GC threshold parameters to pass to `gc.set_threshold`, if defined # diff --git a/synapse/config/server.py b/synapse/config/server.py index 334921d421..7d56e2d141 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -36,7 +36,7 @@ logger = logging.Logger(__name__) # in the list. 
DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0'] -DEFAULT_ROOM_VERSION = "1" +DEFAULT_ROOM_VERSION = "4" class ServerConfig(Config): diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 6dda66ecd3..6aa8b8b3c6 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -25,6 +25,11 @@ from tests.unittest import HomeserverTestCase class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): """Test the background update to clean forward extremities table. """ + def make_homeserver(self, reactor, clock): + # Hack until we understand why test_forked_graph_cleanup fails with v4 + config = self.default_config() + config['default_room_version'] = '1' + return self.setup_test_homeserver(config=config) def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() @@ -220,6 +225,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): Where SF* are soft failed, and with them A, B and C marked as extremities. This should resolve to B and C being marked as extremity. """ + # Create the room graph event_id_a = self.create_and_send_event() event_id_b = self.create_and_send_event() diff --git a/tests/utils.py b/tests/utils.py index b2817cf22c..f8c7ad2604 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -31,6 +31,7 @@ from synapse.api.constants import EventTypes from synapse.api.errors import CodeMessageException, cs_error from synapse.api.room_versions import RoomVersions from synapse.config.homeserver import HomeServerConfig +from synapse.config.server import DEFAULT_ROOM_VERSION from synapse.federation.transport import server as federation_server from synapse.http.server import HttpServer from synapse.server import HomeServer @@ -173,7 +174,7 @@ def default_config(name, parse=False): "use_frozen_dicts": False, # We need a sane default_room_version, otherwise attempts to create # rooms will fail. - "default_room_version": "1", + "default_room_version": DEFAULT_ROOM_VERSION, # disable user directory updates, because they get done in the # background, which upsets the test runner. "update_user_directory": False, -- cgit 1.5.1
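Taken together with the capabilities change earlier in the series, the practical effect of bumping DEFAULT_ROOM_VERSION is felt at room-creation time: RoomCreationHandler consults `default_room_version` only when the client omits `room_version` from the /createRoom body, and an explicit value always wins. A minimal sketch of both paths (homeserver URL and access token are placeholders):

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ACCESS_TOKEN = "syt_placeholder_token"     # placeholder

    def create_room(room_version=None):
        # With no explicit "room_version", the server falls back to its
        # configured default_room_version (now "4" unless homeserver.yaml
        # overrides it).
        body = {}
        if room_version is not None:
            body["room_version"] = room_version
        resp = requests.post(
            HOMESERVER + "/_matrix/client/r0/createRoom",
            headers={"Authorization": "Bearer " + ACCESS_TOKEN},
            json=body,
        )
        resp.raise_for_status()
        return resp.json()["room_id"]

    create_room()                  # created with the server default (v4)
    create_room(room_version="1")  # explicitly pins room version 1
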