diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 043b48f8f3..5070094cad 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -48,6 +48,7 @@ from synapse.rest.client.v1.room import (
RoomMemberListRestServlet,
RoomStateRestServlet,
)
+from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
@@ -96,6 +97,7 @@ class ClientReaderServer(HomeServer):
RoomEventContextServlet(self).register(resource)
RegisterRestServlet(self).register(resource)
LoginRestServlet(self).register(resource)
+ ThreepidRestServlet(self).register(resource)
resources.update({
"/_matrix/client/r0": resource,
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index b116c17669..7da79dc827 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -21,7 +21,7 @@ from twisted.web.resource import NoResource
import synapse
from synapse import events
-from synapse.api.urls import FEDERATION_PREFIX
+from synapse.api.urls import FEDERATION_PREFIX, SERVER_KEY_V2_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
@@ -44,6 +44,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
@@ -99,6 +100,9 @@ class FederationReaderServer(HomeServer):
),
})
+ if name in ["keys", "federation"]:
+ resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 569eb277a9..81f3b4b1ff 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -886,6 +886,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
def on_edu(self, edu_type, origin, content):
"""Overrides FederationHandlerRegistry
"""
+ if not self.config.use_presence and edu_type == "m.presence":
+ return
+
handler = self.edu_handlers.get(edu_type)
if handler:
return super(ReplicationFederationHandlerRegistry, self).on_edu(
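Aside, not part of the patch: this extends the presence gate to the worker-side registry, so with use_presence disabled in the homeserver config, inbound m.presence EDUs are dropped before any handler lookup or forwarding. A stripped-down sketch of the control flow, where edu_handlers and forward_to_master are placeholders for the registry's real plumbing:

def on_edu_sketch(config, edu_handlers, forward_to_master, edu_type, origin, content):
    # Sketch only. With presence disabled, drop presence EDUs outright.
    if not config.use_presence and edu_type == "m.presence":
        return

    handler = edu_handlers.get(edu_type)
    if handler is not None:
        # A handler is registered on this worker, so dispatch locally.
        return handler(origin, content)

    # Placeholder: otherwise the replication registry hands the EDU onwards.
    return forward_to_master(edu_type, origin, content)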
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 5ba94be2ec..ebb81be377 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -393,7 +393,7 @@ class FederationStateServlet(BaseFederationServlet):
return self.handler.on_context_state_request(
origin,
context,
- parse_string_from_args(query, "event_id", None),
+ parse_string_from_args(query, "event_id", None, required=True),
)
@@ -404,7 +404,7 @@ class FederationStateIdsServlet(BaseFederationServlet):
return self.handler.on_state_ids_request(
origin,
room_id,
- parse_string_from_args(query, "event_id", None),
+ parse_string_from_args(query, "event_id", None, required=True),
)
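For illustration only: with required=True, a missing event_id query parameter becomes a client error instead of passing None into the state handlers. A minimal sketch of that behaviour, using a hypothetical MissingParamError in place of Synapse's real 400 error:

class MissingParamError(Exception):
    """Hypothetical stand-in for the 400 error a missing required param raises."""


def parse_string_from_args_sketch(args, name, default=None, required=False):
    # `args` is the parsed query string, as dict[bytes, list[bytes]].
    values = args.get(name.encode("ascii"))
    if values:
        return values[0].decode("ascii")
    if required:
        raise MissingParamError("Missing string query parameter %r" % (name,))
    return default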
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 384d8a37a2..1334c630cc 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -68,9 +68,13 @@ class MatrixFederationAgent(object):
TLS policy to use for fetching .well-known files. None to use a default
(browser-like) implementation.
- srv_resolver (SrvResolver|None):
+ _srv_resolver (SrvResolver|None):
SRVResolver impl to use for looking up SRV records. None to use a default
implementation.
+
+ _well_known_cache (TTLCache|None):
+ TTLCache impl for storing cached well-known lookups. None to use a default
+ implementation.
"""
def __init__(
diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py
index 92447b00d4..9e530defe0 100644
--- a/synapse/replication/slave/storage/presence.py
+++ b/synapse/replication/slave/storage/presence.py
@@ -54,8 +54,11 @@ class SlavedPresenceStore(BaseSlavedStore):
def stream_positions(self):
result = super(SlavedPresenceStore, self).stream_positions()
- position = self._presence_id_gen.get_current_token()
- result["presence"] = position
+
+ if self.hs.config.use_presence:
+ position = self._presence_id_gen.get_current_token()
+ result["presence"] = position
+
return result
def process_replication_rows(self, stream_name, token, rows):
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 586dddb40b..e558f90e1a 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -39,7 +39,7 @@ class ReplicationClientFactory(ReconnectingClientFactory):
Accepts a handler that will be called when new data is available or data
is required.
"""
- maxDelay = 5 # Try at least once every N seconds
+ maxDelay = 30 # Try at least once every N seconds
def __init__(self, hs, client_name, handler):
self.client_name = client_name
@@ -54,7 +54,6 @@ class ReplicationClientFactory(ReconnectingClientFactory):
def buildProtocol(self, addr):
logger.info("Connected to replication: %r", addr)
- self.resetDelay()
return ClientReplicationStreamProtocol(
self.client_name, self.server_name, self._clock, self.handler
)
@@ -90,15 +89,18 @@ class ReplicationClientHandler(object):
# Used for tests.
self.awaiting_syncs = {}
+ # The factory used to create connections.
+ self.factory = None
+
def start_replication(self, hs):
"""Helper method to start a replication connection to the remote server
using TCP.
"""
client_name = hs.config.worker_name
- factory = ReplicationClientFactory(hs, client_name, self)
+ self.factory = ReplicationClientFactory(hs, client_name, self)
host = hs.config.worker_replication_host
port = hs.config.worker_replication_port
- hs.get_reactor().connectTCP(host, port, factory)
+ hs.get_reactor().connectTCP(host, port, self.factory)
def on_rdata(self, stream_name, token, rows):
"""Called when we get new replication data. By default this just pokes
@@ -140,6 +142,7 @@ class ReplicationClientHandler(object):
args["account_data"] = user_account_data
elif room_account_data:
args["account_data"] = room_account_data
+
return args
def get_currently_syncing_users(self):
@@ -204,3 +207,14 @@ class ReplicationClientHandler(object):
for cmd in self.pending_commands:
connection.send_command(cmd)
self.pending_commands = []
+
+ def finished_connecting(self):
+ """Called when we have successfully subscribed and caught up to all
+ streams we're interested in.
+ """
+ logger.info("Finished connecting to server")
+
+ # We don't reset the delay any earlier, as otherwise, if there is a
+ # problem during start-up, we'll end up tight-looping connecting to the
+ # server.
+ self.factory.resetDelay()
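Aside for reviewers, not part of the patch: dropping resetDelay() from buildProtocol() means the factory's reconnect backoff is only reset once finished_connecting() fires, so a worker that connects but keeps failing during start-up backs off (up to the new 30s maxDelay) instead of tight-looping. A self-contained sketch of the backoff behaviour this relies on, with jitter omitted:

class BackoffSketch(object):
    """Simplified model of a reconnecting factory's retry delay."""

    initialDelay = 1.0
    factor = 2.7
    maxDelay = 30  # mirrors the new ReplicationClientFactory.maxDelay

    def __init__(self):
        self.delay = self.initialDelay

    def connection_failed(self):
        # Each failed attempt grows the delay, capped at maxDelay.
        self.delay = min(self.delay * self.factor, self.maxDelay)
        return self.delay

    def resetDelay(self):
        # Now only called from finished_connecting(), i.e. once every
        # subscribed stream has caught up, not merely on TCP connect.
        self.delay = self.initialDelay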
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 327556f6a1..2098c32a77 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -127,8 +127,11 @@ class RdataCommand(Command):
class PositionCommand(Command):
- """Sent by the client to tell the client the stream postition without
+ """Sent by the server to tell the client the stream postition without
needing to send an RDATA.
+
+ Sent to the client after all missing updates for a stream have been
+ sent, at which point the client is up to date.
"""
NAME = "POSITION"
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 429471c345..49ae5b3355 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -526,6 +526,11 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
self.server_name = server_name
self.handler = handler
+ # Set of stream names that have been subscribed to, but haven't yet
+ # caught up with. This is used to track when the client has been fully
+ # connected to the remote.
+ self.streams_connecting = set()
+
# Map of stream to batched updates. See RdataCommand for info on how
# batching works.
self.pending_batches = {}
@@ -548,6 +553,10 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
# We've now finished connecting to so inform the client handler
self.handler.update_connection(self)
+ # This will happen if we don't actually subscribe to any streams
+ if not self.streams_connecting:
+ self.handler.finished_connecting()
+
def on_SERVER(self, cmd):
if cmd.data != self.server_name:
logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data)
@@ -577,6 +586,12 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
return self.handler.on_rdata(stream_name, cmd.token, rows)
def on_POSITION(self, cmd):
+ # When we get a `POSITION` command it means we've finished getting
+ # missing updates for the given stream, and are now up to date.
+ self.streams_connecting.discard(cmd.stream_name)
+ if not self.streams_connecting:
+ self.handler.finished_connecting()
+
return self.handler.on_position(cmd.stream_name, cmd.token)
def on_SYNC(self, cmd):
@@ -593,6 +608,8 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
self.id(), stream_name, token
)
+ self.streams_connecting.add(stream_name)
+
self.send_command(ReplicateCommand(stream_name, token))
def on_connection_closed(self):
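For context (not part of the patch): combined with the client.py change above, the lifecycle is that each REPLICATE adds its stream to streams_connecting, each POSITION removes it, and finished_connecting() fires once the set is empty (immediately, if nothing was subscribed). A condensed sketch of that bookkeeping:

class StreamTrackerSketch(object):
    """Condensed model of the streams_connecting bookkeeping above."""

    def __init__(self, handler):
        # `handler` is anything exposing finished_connecting(), e.g. the
        # ReplicationClientHandler from synapse/replication/tcp/client.py.
        self.handler = handler
        self.streams_connecting = set()

    def subscribe(self, stream_name):
        self.streams_connecting.add(stream_name)

    def on_position(self, stream_name):
        # POSITION means the stream's backlog has been sent in full.
        self.streams_connecting.discard(stream_name)
        if not self.streams_connecting:
            self.handler.finished_connecting()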
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index d16a30acd8..fece1ef0b8 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -213,8 +214,7 @@ def get_filename_from_headers(headers):
Content-Disposition HTTP header.
Args:
- headers (twisted.web.http_headers.Headers): The HTTP
- request headers.
+ headers (dict[bytes, list[bytes]]): The HTTP request headers.
Returns:
A Unicode string of the filename, or None.
@@ -225,23 +225,12 @@ def get_filename_from_headers(headers):
if not content_disposition[0]:
return
- # dict of unicode: bytes, corresponding to the key value sections of the
- # Content-Disposition header.
- params = {}
- parts = content_disposition[0].split(b";")
- for i in parts:
- # Split into key-value pairs, if able
- # We don't care about things like `inline`, so throw it out
- if b"=" not in i:
- continue
-
- key, value = i.strip().split(b"=")
- params[key.decode('ascii')] = value
+ _, params = _parse_header(content_disposition[0])
upload_name = None
# First check if there is a valid UTF-8 filename
- upload_name_utf8 = params.get("filename*", None)
+ upload_name_utf8 = params.get(b"filename*", None)
if upload_name_utf8:
if upload_name_utf8.lower().startswith(b"utf-8''"):
upload_name_utf8 = upload_name_utf8[7:]
@@ -267,12 +256,68 @@ def get_filename_from_headers(headers):
# If there isn't check for an ascii name.
if not upload_name:
- upload_name_ascii = params.get("filename", None)
+ upload_name_ascii = params.get(b"filename", None)
if upload_name_ascii and is_ascii(upload_name_ascii):
- # Make sure there's no %-quoted bytes. If there is, reject it as
- # non-valid ASCII.
- if b"%" not in upload_name_ascii:
- upload_name = upload_name_ascii.decode('ascii')
+ upload_name = upload_name_ascii.decode('ascii')
# This may be None here, indicating we did not find a matching name.
return upload_name
+
+
+def _parse_header(line):
+ """Parse a Content-type like header.
+
+ Cargo-culted from `cgi`, but works on bytes rather than strings.
+
+ Args:
+ line (bytes): header to be parsed
+
+ Returns:
+ Tuple[bytes, dict[bytes, bytes]]:
+ the main content-type, followed by the parameter dictionary
+ """
+ parts = _parseparam(b';' + line)
+ key = next(parts)
+ pdict = {}
+ for p in parts:
+ i = p.find(b'=')
+ if i >= 0:
+ name = p[:i].strip().lower()
+ value = p[i + 1:].strip()
+
+ # strip double-quotes
+ if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
+ value = value[1:-1]
+ value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
+ pdict[name] = value
+
+ return key, pdict
+
+
+def _parseparam(s):
+ """Generator which splits the input on ;, respecting double-quoted sequences
+
+ Cargo-culted from `cgi`, but works on bytes rather than strings.
+
+ Args:
+ s (bytes): header to be parsed
+
+ Returns:
+ Iterable[bytes]: the split input
+ """
+ while s[:1] == b';':
+ s = s[1:]
+
+ # look for the next ;
+ end = s.find(b';')
+
+ # if there is an odd number of " marks between here and the next ;, skip to the
+ # next ; instead
+ while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
+ end = s.find(b';', end + 1)
+
+ if end < 0:
+ end = len(s)
+ f = s[:end]
+ yield f.strip()
+ s = s[end:]
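Worked example, not part of the patch: quoted values may contain ';', surrounding quotes are stripped, and both filename and filename* come back as bytes keyed by bytes, with filename* then preferred by get_filename_from_headers as shown in the hunk above.

header = b'inline; filename="foo; bar.png"; filename*=utf-8\'\'b%C3%A4z.png'
key, params = _parse_header(header)
# key                  -> b'inline'
# params[b'filename']  -> b'foo; bar.png'   (the quoted ';' is not a separator)
# params[b'filename*'] -> b"utf-8''b%C3%A4z.png"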
diff --git a/synapse/server.pyi b/synapse/server.pyi
index 06cd083a74..fb8df56cd5 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -7,9 +7,9 @@ import synapse.handlers.auth
import synapse.handlers.deactivate_account
import synapse.handlers.device
import synapse.handlers.e2e_keys
+import synapse.handlers.message
import synapse.handlers.room
import synapse.handlers.room_member
-import synapse.handlers.message
import synapse.handlers.set_password
import synapse.rest.media.v1.media_repository
import synapse.server_notices.server_notices_manager
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 9b9572890b..9b6c28892c 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -295,6 +295,39 @@ class RegistrationWorkerStore(SQLBaseStore):
return ret['user_id']
return None
+ @defer.inlineCallbacks
+ def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
+ yield self._simple_upsert("user_threepids", {
+ "medium": medium,
+ "address": address,
+ }, {
+ "user_id": user_id,
+ "validated_at": validated_at,
+ "added_at": added_at,
+ })
+
+ @defer.inlineCallbacks
+ def user_get_threepids(self, user_id):
+ ret = yield self._simple_select_list(
+ "user_threepids", {
+ "user_id": user_id
+ },
+ ['medium', 'address', 'validated_at', 'added_at'],
+ 'user_get_threepids'
+ )
+ defer.returnValue(ret)
+
+ def user_delete_threepid(self, user_id, medium, address):
+ return self._simple_delete(
+ "user_threepids",
+ keyvalues={
+ "user_id": user_id,
+ "medium": medium,
+ "address": address,
+ },
+ desc="user_delete_threepids",
+ )
+
class RegistrationStore(RegistrationWorkerStore,
background_updates.BackgroundUpdateStore):
@@ -633,39 +666,6 @@ class RegistrationStore(RegistrationWorkerStore,
defer.returnValue(res if res else False)
@defer.inlineCallbacks
- def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
- yield self._simple_upsert("user_threepids", {
- "medium": medium,
- "address": address,
- }, {
- "user_id": user_id,
- "validated_at": validated_at,
- "added_at": added_at,
- })
-
- @defer.inlineCallbacks
- def user_get_threepids(self, user_id):
- ret = yield self._simple_select_list(
- "user_threepids", {
- "user_id": user_id
- },
- ['medium', 'address', 'validated_at', 'added_at'],
- 'user_get_threepids'
- )
- defer.returnValue(ret)
-
- def user_delete_threepid(self, user_id, medium, address):
- return self._simple_delete(
- "user_threepids",
- keyvalues={
- "user_id": user_id,
- "medium": medium,
- "address": address,
- },
- desc="user_delete_threepids",
- )
-
- @defer.inlineCallbacks
def save_or_get_3pid_guest_access_token(
self, medium, address, access_token, inviter_user_id
):
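Reviewer note, not part of the patch: moving these three methods up into RegistrationWorkerStore is what lets a worker process such as client_reader, which only sees the worker store classes, back the ThreepidRestServlet registered earlier in this diff. A hedged sketch of the read path a worker can now serve, with a hypothetical helper name:

from twisted.internet import defer


@defer.inlineCallbacks
def get_threepids_for_user(store, user_id):
    # Hypothetical helper for illustration: `store` is any datastore derived
    # from RegistrationWorkerStore, so this also works on worker processes.
    threepids = yield store.user_get_threepids(user_id)
    # Each row carries the columns selected above:
    #   medium, address, validated_at, added_at
    defer.returnValue({"threepids": threepids})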