Diffstat (limited to 'synapse')
-rw-r--r--  synapse/app/_base.py            5
-rw-r--r--  synapse/config/_base.py         9
-rw-r--r--  synapse/config/server.py       48
-rw-r--r--  synapse/config/tls.py          63
-rw-r--r--  synapse/storage/client_ips.py  63
5 files changed, 134 insertions, 54 deletions
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 50fd17c0be..5b0ca312e2 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -213,12 +213,13 @@ def refresh_certificate(hs):
     Refresh the TLS certificates that Synapse is using by re-reading them from
     disk and updating the TLS context factories to use them.
     """
-    hs.config.read_certificate_from_disk()
 
     if not hs.config.has_tls_listener():
-        # nothing else to do here
+        # attempt to reload the certs for the good of the tls_fingerprints
+        hs.config.read_certificate_from_disk(require_cert_and_key=False)
         return
 
+    hs.config.read_certificate_from_disk(require_cert_and_key=True)
     hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)
 
     if hs._listening_services:
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 5858fb92b4..5aec43b702 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -257,7 +257,7 @@ class Config(object):
             "--keys-directory",
             metavar="DIRECTORY",
             help="Used with 'generate-*' options to specify where files such as"
-            " certs and signing keys should be stored in, unless explicitly"
+            " signing keys should be stored, unless explicitly"
            " specified in the config.",
         )
         config_parser.add_argument(
@@ -313,16 +313,11 @@ class Config(object):
                 print(
                     (
                         "A config file has been generated in %r for server name"
-                        " %r with corresponding SSL keys and self-signed"
-                        " certificates. Please review this file and customise it"
+                        " %r. Please review this file and customise it"
                         " to your needs."
                     )
                     % (config_path, server_name)
                 )
-                print(
-                    "If this server name is incorrect, you will need to"
-                    " regenerate the SSL certificates"
-                )
                 return
             else:
                 print(
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 767897c419..c5c3aac8ed 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -387,47 +387,47 @@ class ServerConfig(Config):
         #   webclient: A web client. Requires web_client_location to be set.
         #
         listeners:
-          # Main HTTPS listener.
-          # For when matrix traffic is sent directly to synapse.
-          - port: %(bind_port)s
+          # TLS-enabled listener: for when matrix traffic is sent directly to synapse.
+          #
+          # Disabled by default. To enable it, uncomment the following. (Note that you
+          # will also need to give Synapse a TLS key and certificate: see the TLS section
+          # below.)
+          #
+          # - port: %(bind_port)s
+          #   type: http
+          #   tls: true
+          #   resources:
+          #     - names: [client, federation]
+
+          # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
+          # that unwraps TLS.
+          #
+          # If you plan to use a reverse proxy, please see
+          # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
+          #
+          - port: %(unsecure_port)s
+            tls: false
+            bind_addresses: ['::1', '127.0.0.1']
             type: http
-            tls: true
+            x_forwarded: true
 
-            # List of HTTP resources to serve on this listener.
             resources:
-              - names: [client]
-                compress: true
-              - names: [federation]
+              - names: [client, federation]
                 compress: false
 
-            # example addional_resources:
+            # example additonal_resources:
             #
             # additional_resources:
             #   "/_matrix/my/custom/endpoint":
             #     module: my_module.CustomRequestHandler
             #     config: {}
 
-          # Unsecure HTTP listener
-          # For when matrix traffic passes through a reverse-proxy that unwraps TLS.
-          - port: %(unsecure_port)s
-            tls: false
-            bind_addresses: ['::1', '127.0.0.1']
-            type: http
-            x_forwarded: true
-
-            resources:
-              - names: [client]
-                compress: true
-              - names: [federation]
-                compress: false
-
           # Turn on the twisted ssh manhole service on localhost on the given
           # port.
           # - port: 9000
           #   bind_addresses: ['::1', '127.0.0.1']
           #   type: manhole
 
-
         # Homeserver blocking
         #
         # How to reach the server admin, used in ResourceLimitError
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 86e6eb80db..5fb3486db1 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -23,7 +23,7 @@
 from unpaddedbase64 import encode_base64
 
 from OpenSSL import crypto
 
-from synapse.config._base import Config
+from synapse.config._base import Config, ConfigError
 
 logger = logging.getLogger(__name__)
 
@@ -45,6 +45,19 @@ class TlsConfig(Config):
         self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
         self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
+
+        if self.has_tls_listener():
+            if not self.tls_certificate_file:
+                raise ConfigError(
+                    "tls_certificate_path must be specified if TLS-enabled listeners are "
+                    "configured."
+                )
+            if not self.tls_private_key_file:
+                raise ConfigError(
+                    "tls_private_key_path must be specified if TLS-enabled listeners are "
+                    "configured."
+                )
 
         self._original_tls_fingerprints = config.get("tls_fingerprints", [])
 
         if self._original_tls_fingerprints is None:
@@ -105,26 +118,40 @@ class TlsConfig(Config):
         days_remaining = (expires_on - now).days
         return days_remaining
 
-    def read_certificate_from_disk(self):
-        """
-        Read the certificates from disk.
+    def read_certificate_from_disk(self, require_cert_and_key):
         """
-        self.tls_certificate = self.read_tls_certificate()
+        Read the certificates and private key from disk.
 
-        if self.has_tls_listener():
+        Args:
+            require_cert_and_key (bool): set to True to throw an error if the certificate
+                and key file are not given
+        """
+        if require_cert_and_key:
             self.tls_private_key = self.read_tls_private_key()
+            self.tls_certificate = self.read_tls_certificate()
+        elif self.tls_certificate_file:
+            # we only need the certificate for the tls_fingerprints. Reload it if we
+            # can, but it's not a fatal error if we can't.
+            try:
+                self.tls_certificate = self.read_tls_certificate()
+            except Exception as e:
+                logger.info(
+                    "Unable to read TLS certificate (%s). Ignoring as no "
+                    "tls listeners enabled.", e,
+                )
 
         self.tls_fingerprints = list(self._original_tls_fingerprints)
 
-        # Check that our own certificate is included in the list of fingerprints
-        # and include it if it is not.
-        x509_certificate_bytes = crypto.dump_certificate(
-            crypto.FILETYPE_ASN1, self.tls_certificate
-        )
-        sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
-        sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
-        if sha256_fingerprint not in sha256_fingerprints:
-            self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
+        if self.tls_certificate:
+            # Check that our own certificate is included in the list of fingerprints
+            # and include it if it is not.
+            x509_certificate_bytes = crypto.dump_certificate(
+                crypto.FILETYPE_ASN1, self.tls_certificate
+            )
+            sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
+            sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
+            if sha256_fingerprint not in sha256_fingerprints:
+                self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
 
     def default_config(self, config_dir_path, server_name, **kwargs):
         base_key_name = os.path.join(config_dir_path, server_name)
@@ -149,10 +176,10 @@ class TlsConfig(Config):
         # See 'ACME support' below to enable auto-provisioning this certificate via
         # Let's Encrypt.
         #
-        tls_certificate_path: "%(tls_certificate_path)s"
+        # tls_certificate_path: "%(tls_certificate_path)s"
 
         # PEM-encoded private key for TLS
-        tls_private_key_path: "%(tls_private_key_path)s"
+        # tls_private_key_path: "%(tls_private_key_path)s"
 
         # ACME support: This will configure Synapse to request a valid TLS certificate
         # for your configured `server_name` via Let's Encrypt.
@@ -177,7 +204,7 @@ class TlsConfig(Config):
        # acme:
 
            # ACME support is disabled by default. Uncomment the following line
-           # to enable it.
+           # (and tls_certificate_path and tls_private_key_path above) to enable it.
           #
           # enabled: true
 
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index cc23d7cdbe..9c21362226 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -191,12 +191,16 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
                 clause = "? <= last_seen AND last_seen < ?"
                 args = (begin_last_seen, end_last_seen)
 
+            # (Note: The DISTINCT in the inner query is important to ensure that
+            # the COUNT(*) is accurate, otherwise double counting may happen due
+            # to the join effectively being a cross product)
             txn.execute(
                 """
                 SELECT user_id, access_token, ip,
-                       MAX(device_id), MAX(user_agent), MAX(last_seen)
+                       MAX(device_id), MAX(user_agent), MAX(last_seen),
+                       COUNT(*)
                 FROM (
-                    SELECT user_id, access_token, ip
+                    SELECT DISTINCT user_id, access_token, ip
                     FROM user_ips
                     WHERE {}
                 ) c
@@ -210,7 +214,60 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
             # We've got some duplicates
             for i in res:
-                user_id, access_token, ip, device_id, user_agent, last_seen = i
+                user_id, access_token, ip, device_id, user_agent, last_seen, count = i
+
+                # We want to delete the duplicates so we end up with only a
+                # single row.
+                #
+                # The naive way of doing this would be just to delete all rows
+                # and reinsert a constructed row. However, if there are a lot of
+                # duplicate rows this can cause the table to grow a lot, which
+                # can be problematic in two ways:
+                #     1. If user_ips is already large then this can cause the
+                #        table to rapidly grow, potentially filling the disk.
+                #     2. Reinserting a lot of rows can confuse the table
+                #        statistics for postgres, causing it to not use the
+                #        correct indices for the query above, resulting in a full
+                #        table scan. This is incredibly slow for large tables and
+                #        can kill database performance. (This seems to mainly
+                #        happen for the last query where the clause is simply `? <
+                #        last_seen`)
+                #
+                # So instead we want to delete all but *one* of the duplicate
+                # rows. That is hard to do reliably, so we cheat and do a two
+                # step process:
+                #     1. Delete all rows with a last_seen strictly less than the
+                #        max last_seen. This hopefully results in deleting all but
+                #        one row the majority of the time, but there may be
+                #        duplicate last_seen
+                #     2. If multiple rows remain, we fall back to the naive method
+                #        and simply delete all rows and reinsert.
+                #
+                # Note that this relies on no new duplicate rows being inserted,
+                # but if that is happening then this entire process is futile
+                # anyway.
+
+                # Do step 1:
+
+                txn.execute(
+                    """
+                    DELETE FROM user_ips
+                    WHERE user_id = ? AND access_token = ? AND ip = ? AND last_seen < ?
+                    """,
+                    (user_id, access_token, ip, last_seen)
+                )
+                if txn.rowcount == count - 1:
+                    # We deleted all but one of the duplicate rows, i.e. there
+                    # is exactly one remaining and so there is nothing left to
+                    # do.
+                    continue
+                elif txn.rowcount >= count:
+                    raise Exception(
+                        "We deleted more duplicate rows from 'user_ips' than expected",
+                    )
+
+                # The previous step didn't delete enough rows, so we fallback to
+                # step 2:
 
                 # Drop all the duplicates
                 txn.execute(
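
For reference, the fingerprint handling that read_certificate_from_disk now performs only when a certificate was actually loaded can be reproduced on its own. The sketch below is illustrative rather than Synapse's code: the certificate path and the starting fingerprint list are placeholders, and it assumes pyOpenSSL and unpaddedbase64 are installed (both are already imported by synapse/config/tls.py). As in the diff, the hash is taken over the DER (ASN.1) serialisation of the certificate rather than the PEM text.

from hashlib import sha256

from OpenSSL import crypto
from unpaddedbase64 import encode_base64


def fingerprint_pem_certificate(cert_pem_path):
    """Return the unpadded-base64 sha256 fingerprint of a PEM certificate,
    in the form stored in tls_fingerprints."""
    with open(cert_pem_path, "rb") as f:
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())

    # Hash the DER (ASN.1) form, matching the dump_certificate(FILETYPE_ASN1, ...)
    # call in TlsConfig.read_certificate_from_disk.
    der_bytes = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
    return encode_base64(sha256(der_bytes).digest())


# Append our own certificate's fingerprint if it is not already listed,
# mirroring the check at the end of read_certificate_from_disk.
tls_fingerprints = []  # placeholder: whatever came from the config file
own_fingerprint = fingerprint_pem_certificate("/path/to/tls.crt")  # placeholder path
if own_fingerprint not in set(f["sha256"] for f in tls_fingerprints):
    tls_fingerprints.append({"sha256": own_fingerprint})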
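
The two-step de-duplication strategy described in the long comment added to client_ips.py can also be exercised in isolation. The following is a simplified sketch against an in-memory SQLite database, not the real background update: the table carries only the six columns the query touches, the sample rows are invented, and the time-window WHERE clause of the real code is omitted. It shows why the DISTINCT in the inner select keeps COUNT(*) honest, step 1's delete of everything older than the newest duplicate, and the step 2 fallback of deleting the remainder and reinserting one constructed row. The second sample tuple deliberately ties on last_seen, which is exactly the case step 1 cannot resolve.

import sqlite3

conn = sqlite3.connect(":memory:")
txn = conn.cursor()
txn.execute(
    "CREATE TABLE user_ips ("
    " user_id TEXT, access_token TEXT, ip TEXT,"
    " device_id TEXT, user_agent TEXT, last_seen INTEGER)"
)

# Invented sample data: two (user, token, ip) tuples with duplicate rows.
txn.executemany(
    "INSERT INTO user_ips VALUES (?, ?, ?, ?, ?, ?)",
    [
        ("@a:hs", "tok_a", "10.0.0.1", "DEV1", "UA1", 100),
        ("@a:hs", "tok_a", "10.0.0.1", "DEV2", "UA2", 200),
        ("@b:hs", "tok_b", "10.0.0.2", "DEV3", "UA3", 300),
        ("@b:hs", "tok_b", "10.0.0.2", "DEV4", "UA4", 300),
    ],
)

# Find duplicated tuples. The DISTINCT inner select matters: joining the
# aggregate against plain user_ips would multiply the rows and inflate COUNT(*).
txn.execute(
    """
    SELECT user_id, access_token, ip,
           MAX(device_id), MAX(user_agent), MAX(last_seen), COUNT(*)
    FROM (SELECT DISTINCT user_id, access_token, ip FROM user_ips) c
    INNER JOIN user_ips USING (user_id, access_token, ip)
    GROUP BY user_id, access_token, ip
    HAVING COUNT(*) > 1
    """
)

for user_id, token, ip, device_id, user_agent, last_seen, count in txn.fetchall():
    # Step 1: delete every duplicate strictly older than the newest one.
    txn.execute(
        "DELETE FROM user_ips"
        " WHERE user_id = ? AND access_token = ? AND ip = ? AND last_seen < ?",
        (user_id, token, ip, last_seen),
    )
    if txn.rowcount == count - 1:
        continue  # exactly one row left, nothing more to do

    # Step 2 fallback: ties on last_seen survived step 1, so drop the rest and
    # reinsert a single row built from the aggregated values.
    txn.execute(
        "DELETE FROM user_ips WHERE user_id = ? AND access_token = ? AND ip = ?",
        (user_id, token, ip),
    )
    txn.execute(
        "INSERT INTO user_ips VALUES (?, ?, ?, ?, ?, ?)",
        (user_id, token, ip, device_id, user_agent, last_seen),
    )

print(txn.execute("SELECT * FROM user_ips ORDER BY user_id").fetchall())
# [('@a:hs', 'tok_a', '10.0.0.1', 'DEV2', 'UA2', 200),
#  ('@b:hs', 'tok_b', '10.0.0.2', 'DEV4', 'UA4', 300)]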