| author | Richard van der Hoff <richard@matrix.org> | 2019-09-19 18:13:31 +0100 |
|---|---|---|
| committer | Richard van der Hoff <richard@matrix.org> | 2019-09-19 18:13:31 +0100 |
| commit | b65327ff661b87991566314cf07088a7e789b69b (patch) | |
| tree | 75da1c79db8165ee6b3676b784a0d567551cb82d /synapse | |
| parent | better logging (diff) | |
| parent | fix sample config (diff) | |
| download | synapse-b65327ff661b87991566314cf07088a7e789b69b.tar.xz | |
Merge branch 'develop' into rav/saml_mapping_work
Diffstat (limited to 'synapse'): 181 files changed, 6021 insertions, 2622 deletions
diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 179644852a..9e445cd808 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -22,9 +22,10 @@ from netaddr import IPAddress from twisted.internet import defer +import synapse.logging.opentracing as opentracing import synapse.types from synapse import event_auth -from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.api.constants import EventTypes, JoinRules, Membership, UserTypes from synapse.api.errors import ( AuthError, Codes, @@ -178,6 +179,7 @@ class Auth(object): def get_public_keys(self, invite_event): return event_auth.get_public_keys(invite_event) + @opentracing.trace @defer.inlineCallbacks def get_user_by_req( self, request, allow_guest=False, rights="access", allow_expired=False @@ -209,6 +211,7 @@ class Auth(object): user_id, app_service = yield self._get_appservice_user_id(request) if user_id: request.authenticated_entity = user_id + opentracing.set_tag("authenticated_entity", user_id) if ip_addr and self.hs.config.track_appservice_user_ips: yield self.store.insert_client_ip( @@ -259,6 +262,7 @@ class Auth(object): ) request.authenticated_entity = user.to_string() + opentracing.set_tag("authenticated_entity", user.to_string()) return synapse.types.create_requester( user, token_id, is_guest, device_id, app_service=app_service @@ -272,25 +276,25 @@ class Auth(object): self.get_access_token_from_request(request) ) if app_service is None: - return (None, None) + return None, None if app_service.ip_range_whitelist: ip_address = IPAddress(self.hs.get_ip_from_request(request)) if ip_address not in app_service.ip_range_whitelist: - return (None, None) + return None, None if b"user_id" not in request.args: - return (app_service.sender, app_service) + return app_service.sender, app_service user_id = request.args[b"user_id"][0].decode("utf8") if app_service.sender == user_id: - return (app_service.sender, app_service) + return app_service.sender, app_service if not app_service.is_interested_in_user(user_id): raise AuthError(403, "Application service cannot masquerade as this user.") if not (yield self.store.get_user_by_id(user_id)): raise AuthError(403, "Application service has not registered this user") - return (user_id, app_service) + return user_id, app_service @defer.inlineCallbacks def get_user_by_access_token(self, token, rights="access"): @@ -690,7 +694,7 @@ class Auth(object): # * The user is a guest user, and has joined the room # else it will throw. member_event = yield self.check_user_was_in_room(room_id, user_id) - return (member_event.membership, member_event.event_id) + return member_event.membership, member_event.event_id except AuthError: visibility = yield self.state.get_current_state( room_id, EventTypes.RoomHistoryVisibility, "" @@ -699,14 +703,13 @@ class Auth(object): visibility and visibility.content["history_visibility"] == "world_readable" ): - return (Membership.JOIN, None) - return + return Membership.JOIN, None raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN ) @defer.inlineCallbacks - def check_auth_blocking(self, user_id=None, threepid=None): + def check_auth_blocking(self, user_id=None, threepid=None, user_type=None): """Checks if the user should be rejected for some external reason, such as monthly active user limiting or global disable flag @@ -719,6 +722,9 @@ class Auth(object): with a MAU blocked server, normally they would be rejected but their threepid is on the reserved list. 
user_id and threepid should never be set at the same time. + + user_type(str|None): If present, is used to decide whether to check against + certain blocking reasons like MAU. """ # Never fail an auth check for the server notices users or support user @@ -756,6 +762,10 @@ class Auth(object): self.hs.config.mau_limits_reserved_threepids, threepid ): return + elif user_type == UserTypes.SUPPORT: + # If the user does not exist yet and is of type "support", + # allow registration. Support users are excluded from MAU checks. + return # Else if there is no room in the MAU bucket, bail current_mau = yield self.store.get_monthly_active_count() if current_mau >= self.hs.config.max_mau_value: diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 3ffde0d7fc..f29bce560c 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -122,7 +122,8 @@ class UserTypes(object): """ SUPPORT = "support" - ALL_USER_TYPES = (SUPPORT,) + BOT = "bot" + ALL_USER_TYPES = (SUPPORT, BOT) class RelationTypes(object): diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 69dcf3523f..c30fdeee9a 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -36,18 +36,20 @@ from synapse.util.versionstring import get_version_string logger = logging.getLogger(__name__) +# list of tuples of function, args list, kwargs dict _sighup_callbacks = [] -def register_sighup(func): +def register_sighup(func, *args, **kwargs): """ Register a function to be called when a SIGHUP occurs. Args: func (function): Function to be called when sent a SIGHUP signal. - Will be called with a single argument, the homeserver. + Will be called with a single default argument, the homeserver. + *args, **kwargs: args and kwargs to be passed to the target function. """ - _sighup_callbacks.append(func) + _sighup_callbacks.append((func, args, kwargs)) def start_worker_reactor(appname, config, run_command=reactor.run): @@ -248,8 +250,8 @@ def start(hs, listeners=None): # we're not using systemd. 
sdnotify(b"RELOADING=1") - for i in _sighup_callbacks: - i(hs) + for i, args, kwargs in _sighup_callbacks: + i(hs, *args, **kwargs) sdnotify(b"READY=1") diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 1fd52a5526..04751a6a5e 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -227,8 +227,6 @@ def start(config_options): config.start_pushers = False config.send_federation = False - setup_logging(config, use_worker_options=True) - synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -241,6 +239,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() # We use task.react as the basic run command as it correctly handles tearing diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 54bb114dec..767b87d2db 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -141,8 +141,6 @@ def start(config_options): assert config.worker_app == "synapse.app.appservice" - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -167,6 +165,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ps, config, use_worker_options=True) + ps.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ps, config.worker_listeners diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 721bb5b119..dbcc414c42 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -119,7 +119,7 @@ class ClientReaderServer(HomeServer): KeyChangesServlet(self).register(resource) VoipRestServlet(self).register(resource) PushRuleRestServlet(self).register(resource) - VersionsRestServlet().register(resource) + VersionsRestServlet(self).register(resource) resources.update({"/_matrix/client": resource}) @@ -179,8 +179,6 @@ def start(config_options): assert config.worker_app == "synapse.app.client_reader" - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -193,6 +191,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 473c8895d0..c67fe69a50 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -175,8 +175,6 @@ def start(config_options): assert config.worker_replication_http_port is not None - setup_logging(config, use_worker_options=True) - # This should only be done on the user directory worker or the master config.update_user_directory = False @@ -192,6 +190,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 5255d9e8cc..1ef027a88c 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -160,8 +160,6 @@ def start(config_options): assert config.worker_app == "synapse.app.federation_reader" - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = 
config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -174,6 +172,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index c5a2880e69..04fbb407af 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -171,8 +171,6 @@ def start(config_options): assert config.worker_app == "synapse.app.federation_sender" - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -197,6 +195,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index e2822ca848..9504bfbc70 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -70,12 +70,12 @@ class PresenceStatusStubServlet(RestServlet): except HttpResponseException as e: raise e.to_synapse_error() - return (200, result) + return 200, result @defer.inlineCallbacks def on_PUT(self, request, user_id): yield self.auth.get_user_by_req(request) - return (200, {}) + return 200, {} class KeyUploadServlet(RestServlet): @@ -126,11 +126,11 @@ class KeyUploadServlet(RestServlet): self.main_uri + request.uri.decode("ascii"), body, headers=headers ) - return (200, result) + return 200, result else: # Just interested in counts. result = yield self.store.count_e2e_one_time_keys(user_id, device_id) - return (200, {"one_time_key_counts": result}) + return 200, {"one_time_key_counts": result} class FrontendProxySlavedStore( @@ -232,8 +232,6 @@ def start(config_options): assert config.worker_main_http_uri is not None - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -246,6 +244,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 8233905844..774326dff9 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -341,8 +341,6 @@ def setup(config_options): # generating config files and shouldn't try to continue. 
sys.exit(0) - synapse.config.logger.setup_logging(config, use_worker_options=False) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -356,6 +354,8 @@ def setup(config_options): database_engine=database_engine, ) + synapse.config.logger.setup_logging(hs, config, use_worker_options=False) + logger.info("Preparing database: %s...", config.database_config["name"]) try: @@ -561,10 +561,12 @@ def run(hs): stats["database_engine"] = hs.get_datastore().database_engine_name stats["database_server_version"] = hs.get_datastore().get_server_version() - logger.info("Reporting stats to matrix.org: %s" % (stats,)) + logger.info( + "Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats) + ) try: yield hs.get_simple_http_client().put_json( - "https://matrix.org/report-usage-stats/push", stats + hs.config.report_stats_endpoint, stats ) except Exception as e: logger.warn("Error reporting stats: %s", e) diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 3a168577c7..2ac783ffa3 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -155,8 +155,6 @@ def start(config_options): "Please add ``enable_media_repo: false`` to the main config\n" ) - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -169,6 +167,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 692ffa2f04..d84732ee3c 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -184,8 +184,6 @@ def start(config_options): assert config.worker_app == "synapse.app.pusher" - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts if config.start_pushers: @@ -210,6 +208,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ps, config, use_worker_options=True) + ps.setup() def start(): diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index a1c3b162f7..473026fce5 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -435,8 +435,6 @@ def start(config_options): assert config.worker_app == "synapse.app.synchrotron" - setup_logging(config, use_worker_options=True) - synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -450,6 +448,8 @@ def start(config_options): application_service_handler=SynchrotronApplicationService(), ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index cb29a1afab..e01afb39f2 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -197,8 +197,6 @@ def start(config_options): assert config.worker_app == "synapse.app.user_dir" - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -223,6 +221,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, 
config.worker_listeners diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 007ca75a94..3e25bf5747 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -107,7 +107,6 @@ class ApplicationServiceApi(SimpleHttpClient): except CodeMessageException as e: if e.code == 404: return False - return logger.warning("query_user to %s received %s", uri, e.code) except Exception as ex: logger.warning("query_user to %s threw exception %s", uri, ex) @@ -127,7 +126,6 @@ class ApplicationServiceApi(SimpleHttpClient): logger.warning("query_alias to %s received %s", uri, e.code) if e.code == 404: return False - return except Exception as ex: logger.warning("query_alias to %s threw exception %s", uri, ex) return False @@ -230,7 +228,6 @@ class ApplicationServiceApi(SimpleHttpClient): sent_transactions_counter.labels(service.id).inc() sent_events_counter.labels(service.id).inc(len(events)) return True - return except CodeMessageException as e: logger.warning("push_bulk to %s received %s", uri, e.code) except Exception as ex: diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 42a350bff8..9998f822f1 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -70,35 +70,37 @@ class ApplicationServiceScheduler(object): self.store = hs.get_datastore() self.as_api = hs.get_application_service_api() - def create_recoverer(service, callback): - return _Recoverer(self.clock, self.store, self.as_api, service, callback) - - self.txn_ctrl = _TransactionController( - self.clock, self.store, self.as_api, create_recoverer - ) + self.txn_ctrl = _TransactionController(self.clock, self.store, self.as_api) self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock) @defer.inlineCallbacks def start(self): logger.info("Starting appservice scheduler") + # check for any DOWN ASes and start recoverers for them. - recoverers = yield _Recoverer.start( - self.clock, self.store, self.as_api, self.txn_ctrl.on_recovered + services = yield self.store.get_appservices_by_state( + ApplicationServiceState.DOWN ) - self.txn_ctrl.add_recoverers(recoverers) + + for service in services: + self.txn_ctrl.start_recoverer(service) def submit_event_for_as(self, service, event): self.queuer.enqueue(service, event) class _ServiceQueuer(object): - """Queues events for the same application service together, sending - transactions as soon as possible. Once a transaction is sent successfully, - this schedules any other events in the queue to run. + """Queue of events waiting to be sent to appservices. + + Groups events into transactions per-appservice, and sends them on to the + TransactionController. Makes sure that we only have one transaction in flight per + appservice at a given time. """ def __init__(self, txn_ctrl, clock): self.queued_events = {} # dict of {service_id: [events]} + + # the appservices which currently have a transaction in flight self.requests_in_flight = set() self.txn_ctrl = txn_ctrl self.clock = clock @@ -136,13 +138,29 @@ class _ServiceQueuer(object): class _TransactionController(object): - def __init__(self, clock, store, as_api, recoverer_fn): + """Transaction manager. + + Builds AppServiceTransactions and runs their lifecycle. Also starts a Recoverer + if a transaction fails. + + (Note we have only have one of these in the homeserver.) 
+ + Args: + clock (synapse.util.Clock): + store (synapse.storage.DataStore): + as_api (synapse.appservice.api.ApplicationServiceApi): + """ + + def __init__(self, clock, store, as_api): self.clock = clock self.store = store self.as_api = as_api - self.recoverer_fn = recoverer_fn - # keep track of how many recoverers there are - self.recoverers = [] + + # map from service id to recoverer instance + self.recoverers = {} + + # for UTs + self.RECOVERER_CLASS = _Recoverer @defer.inlineCallbacks def send(self, service, events): @@ -154,42 +172,45 @@ class _TransactionController(object): if sent: yield txn.complete(self.store) else: - run_in_background(self._start_recoverer, service) + run_in_background(self._on_txn_fail, service) except Exception: logger.exception("Error creating appservice transaction") - run_in_background(self._start_recoverer, service) + run_in_background(self._on_txn_fail, service) @defer.inlineCallbacks def on_recovered(self, recoverer): - self.recoverers.remove(recoverer) logger.info( "Successfully recovered application service AS ID %s", recoverer.service.id ) + self.recoverers.pop(recoverer.service.id) logger.info("Remaining active recoverers: %s", len(self.recoverers)) yield self.store.set_appservice_state( recoverer.service, ApplicationServiceState.UP ) - def add_recoverers(self, recoverers): - for r in recoverers: - self.recoverers.append(r) - if len(recoverers) > 0: - logger.info("New active recoverers: %s", len(self.recoverers)) - @defer.inlineCallbacks - def _start_recoverer(self, service): + def _on_txn_fail(self, service): try: yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN) - logger.info( - "Application service falling behind. Starting recoverer. AS ID %s", - service.id, - ) - recoverer = self.recoverer_fn(service, self.on_recovered) - self.add_recoverers([recoverer]) - recoverer.recover() + self.start_recoverer(service) except Exception: logger.exception("Error starting AS recoverer") + def start_recoverer(self, service): + """Start a Recoverer for the given service + + Args: + service (synapse.appservice.ApplicationService): + """ + logger.info("Starting recoverer for AS ID %s", service.id) + assert service.id not in self.recoverers + recoverer = self.RECOVERER_CLASS( + self.clock, self.store, self.as_api, service, self.on_recovered + ) + self.recoverers[service.id] = recoverer + recoverer.recover() + logger.info("Now %i active recoverers", len(self.recoverers)) + @defer.inlineCallbacks def _is_service_up(self, service): state = yield self.store.get_appservice_state(service) @@ -197,18 +218,17 @@ class _TransactionController(object): class _Recoverer(object): - @staticmethod - @defer.inlineCallbacks - def start(clock, store, as_api, callback): - services = yield store.get_appservices_by_state(ApplicationServiceState.DOWN) - recoverers = [_Recoverer(clock, store, as_api, s, callback) for s in services] - for r in recoverers: - logger.info( - "Starting recoverer for AS ID %s which was marked as " "DOWN", - r.service.id, - ) - r.recover() - return recoverers + """Manages retries and backoff for a DOWN appservice. + + We have one of these for each appservice which is currently considered DOWN. + + Args: + clock (synapse.util.Clock): + store (synapse.storage.DataStore): + as_api (synapse.appservice.api.ApplicationServiceApi): + service (synapse.appservice.ApplicationService): the service we are managing + callback (callable[_Recoverer]): called once the service recovers. 
+ """ def __init__(self, clock, store, as_api, service, callback): self.clock = clock @@ -224,7 +244,9 @@ class _Recoverer(object): "as-recoverer-%s" % (self.service.id,), self.retry ) - self.clock.call_later((2 ** self.backoff_counter), _retry) + delay = 2 ** self.backoff_counter + logger.info("Scheduling retries on %s in %fs", self.service.id, delay) + self.clock.call_later(delay, _retry) def _backoff(self): # cap the backoff to be around 8.5min => (2^9) = 512 secs @@ -234,25 +256,30 @@ class _Recoverer(object): @defer.inlineCallbacks def retry(self): + logger.info("Starting retries on %s", self.service.id) try: - txn = yield self.store.get_oldest_unsent_txn(self.service) - if txn: + while True: + txn = yield self.store.get_oldest_unsent_txn(self.service) + if not txn: + # nothing left: we're done! + self.callback(self) + return + logger.info( "Retrying transaction %s for AS ID %s", txn.id, txn.service.id ) sent = yield txn.send(self.as_api) - if sent: - yield txn.complete(self.store) - # reset the backoff counter and retry immediately - self.backoff_counter = 1 - yield self.retry() - else: - self._backoff() - else: - self._set_service_recovered() - except Exception as e: - logger.exception(e) - self._backoff() - - def _set_service_recovered(self): - self.callback(self) + if not sent: + break + + yield txn.complete(self.store) + + # reset the backoff counter and then process the next transaction + self.backoff_counter = 1 + + except Exception: + logger.exception("Unexpected error running retries") + + # we didn't manage to send all of the transactions before we got an error of + # some flavour: reschedule the next retry. + self._backoff() diff --git a/synapse/config/__init__.py b/synapse/config/__init__.py index f2a5a41e92..1e76e9559d 100644 --- a/synapse/config/__init__.py +++ b/synapse/config/__init__.py @@ -13,8 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import ConfigError +from ._base import ConfigError, find_config_files -# export ConfigError if somebody does import * +# export ConfigError and find_config_files if somebody does +# import * # this is largely a fudge to stop PEP8 moaning about the import -__all__ = ["ConfigError"] +__all__ = ["ConfigError", "find_config_files"] diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 6ce5cd07fb..31f6530978 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -181,6 +181,11 @@ class Config(object): generate_secrets=False, report_stats=None, open_private_ports=False, + listeners=None, + database_conf=None, + tls_certificate_path=None, + tls_private_key_path=None, + acme_domain=None, ): """Build a default configuration file @@ -207,6 +212,33 @@ class Config(object): open_private_ports (bool): True to leave private ports (such as the non-TLS HTTP listener) open to the internet. + listeners (list(dict)|None): A list of descriptions of the listeners + synapse should start with each of which specifies a port (str), a list of + resources (list(str)), tls (bool) and type (str). For example: + [{ + "port": 8448, + "resources": [{"names": ["federation"]}], + "tls": True, + "type": "http", + }, + { + "port": 443, + "resources": [{"names": ["client"]}], + "tls": False, + "type": "http", + }], + + + database (str|None): The database type to configure, either `psycog2` + or `sqlite3`. + + tls_certificate_path (str|None): The path to the tls certificate. + + tls_private_key_path (str|None): The path to the tls private key. 
+ + acme_domain (str|None): The domain acme will try to validate. If + specified acme will be enabled. + Returns: str: the yaml config file """ @@ -220,6 +252,11 @@ class Config(object): generate_secrets=generate_secrets, report_stats=report_stats, open_private_ports=open_private_ports, + listeners=listeners, + database_conf=database_conf, + tls_certificate_path=tls_certificate_path, + tls_private_key_path=tls_private_key_path, + acme_domain=acme_domain, ) ) diff --git a/synapse/config/database.py b/synapse/config/database.py index 746a6cd1f4..118aafbd4a 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. import os +from textwrap import indent + +import yaml from ._base import Config @@ -38,20 +41,28 @@ class DatabaseConfig(Config): self.set_databasepath(config.get("database_path")) - def generate_config_section(self, data_dir_path, **kwargs): - database_path = os.path.join(data_dir_path, "homeserver.db") - return ( - """\ - ## Database ## - - database: - # The database engine name + def generate_config_section(self, data_dir_path, database_conf, **kwargs): + if not database_conf: + database_path = os.path.join(data_dir_path, "homeserver.db") + database_conf = ( + """# The database engine name name: "sqlite3" # Arguments to pass to the engine args: # Path to the database database: "%(database_path)s" + """ + % locals() + ) + else: + database_conf = indent(yaml.dump(database_conf), " " * 10).lstrip() + + return ( + """\ + ## Database ## + database: + %(database_conf)s # Number of events to cache in memory. # #event_cache_size: 10K diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 8381b8eb29..e5de768b0c 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -20,6 +20,7 @@ from __future__ import print_function # This file can't be called email.py because if it is, we cannot: import email.utils import os +from enum import Enum import pkg_resources @@ -74,19 +75,48 @@ class EmailConfig(Config): "renew_at" ) - email_trust_identity_server_for_password_resets = email_config.get( - "trust_identity_server_for_password_resets", False + self.threepid_behaviour_email = ( + # Have Synapse handle the email sending if account_threepid_delegates.email + # is not defined + # msisdn is currently always remote while Synapse does not support any method of + # sending SMS messages + ThreepidBehaviour.REMOTE + if self.account_threepid_delegate_email + else ThreepidBehaviour.LOCAL ) - self.email_password_reset_behaviour = ( - "remote" if email_trust_identity_server_for_password_resets else "local" - ) - self.password_resets_were_disabled_due_to_email_config = False - if self.email_password_reset_behaviour == "local" and email_config == {}: + # Prior to Synapse v1.4.0, there was another option that defined whether Synapse would + # use an identity server to password reset tokens on its behalf. We now warn the user + # if they have this set and tell them to use the updated option, while using a default + # identity server in the process. 
+ self.using_identity_server_from_trusted_list = False + if ( + not self.account_threepid_delegate_email + and config.get("trust_identity_server_for_password_resets", False) is True + ): + # Use the first entry in self.trusted_third_party_id_servers instead + if self.trusted_third_party_id_servers: + # XXX: It's a little confusing that account_threepid_delegate_email is modified + # both in RegistrationConfig and here. We should factor this bit out + self.account_threepid_delegate_email = self.trusted_third_party_id_servers[ + 0 + ] + self.using_identity_server_from_trusted_list = True + else: + raise ConfigError( + "Attempted to use an identity server from" + '"trusted_third_party_id_servers" but it is empty.' + ) + + self.local_threepid_handling_disabled_due_to_email_config = False + if ( + self.threepid_behaviour_email == ThreepidBehaviour.LOCAL + and email_config == {} + ): # We cannot warn the user this has happened here # Instead do so when a user attempts to reset their password - self.password_resets_were_disabled_due_to_email_config = True + self.local_threepid_handling_disabled_due_to_email_config = True - self.email_password_reset_behaviour = "off" + self.threepid_behaviour_email = ThreepidBehaviour.OFF # Get lifetime of a validation token in milliseconds self.email_validation_token_lifetime = self.parse_duration( @@ -96,7 +126,7 @@ class EmailConfig(Config): if ( self.email_enable_notifs or account_validity_renewal_enabled - or self.email_password_reset_behaviour == "local" + or self.threepid_behaviour_email == ThreepidBehaviour.LOCAL ): # make sure we can import the required deps import jinja2 @@ -106,7 +136,7 @@ class EmailConfig(Config): jinja2 bleach - if self.email_password_reset_behaviour == "local": + if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL: required = ["smtp_host", "smtp_port", "notif_from"] missing = [] @@ -115,7 +145,7 @@ class EmailConfig(Config): missing.append("email." 
+ k) if config.get("public_baseurl") is None: - missing.append("public_base_url") + missing.append("public_baseurl") if len(missing) > 0: raise RuntimeError( @@ -125,28 +155,45 @@ class EmailConfig(Config): % (", ".join(missing),) ) - # Templates for password reset emails + # These email templates have placeholders in them, and thus must be + # parsed using a templating engine during a request self.email_password_reset_template_html = email_config.get( "password_reset_template_html", "password_reset.html" ) self.email_password_reset_template_text = email_config.get( "password_reset_template_text", "password_reset.txt" ) - self.email_password_reset_failure_template = email_config.get( - "password_reset_failure_template", "password_reset_failure.html" + self.email_registration_template_html = email_config.get( + "registration_template_html", "registration.html" ) - # This template does not support any replaceable variables, so we will - # read it from the disk once during setup - email_password_reset_success_template = email_config.get( - "password_reset_success_template", "password_reset_success.html" + self.email_registration_template_text = email_config.get( + "registration_template_text", "registration.txt" + ) + self.email_password_reset_template_failure_html = email_config.get( + "password_reset_template_failure_html", "password_reset_failure.html" + ) + self.email_registration_template_failure_html = email_config.get( + "registration_template_failure_html", "registration_failure.html" + ) + + # These templates do not support any placeholder variables, so we + # will read them from disk once during setup + email_password_reset_template_success_html = email_config.get( + "password_reset_template_success_html", "password_reset_success.html" + ) + email_registration_template_success_html = email_config.get( + "registration_template_success_html", "registration_success.html" ) # Check templates exist for f in [ self.email_password_reset_template_html, self.email_password_reset_template_text, - self.email_password_reset_failure_template, - email_password_reset_success_template, + self.email_registration_template_html, + self.email_registration_template_text, + self.email_password_reset_template_failure_html, + email_password_reset_template_success_html, + email_registration_template_success_html, ]: p = os.path.join(self.email_template_dir, f) if not os.path.isfile(p): @@ -154,11 +201,17 @@ class EmailConfig(Config): # Retrieve content of web templates filepath = os.path.join( - self.email_template_dir, email_password_reset_success_template + self.email_template_dir, email_password_reset_template_success_html ) - self.email_password_reset_success_html_content = self.read_file( + self.email_password_reset_template_success_html = self.read_file( filepath, "email.password_reset_template_success_html" ) + filepath = os.path.join( + self.email_template_dir, email_registration_template_success_html + ) + self.email_registration_template_success_html_content = self.read_file( + filepath, "email.registration_template_success_html" + ) if self.email_enable_notifs: required = [ @@ -239,19 +292,6 @@ class EmailConfig(Config): # # # riot_base_url: "http://localhost/riot" # - # # Enable sending password reset emails via the configured, trusted - # # identity servers - # # - # # IMPORTANT! This will give a malicious or overtaken identity server - # # the ability to reset passwords for your users! Make absolutely sure - # # that you want to do this! 
It is strongly recommended that password - # # reset emails be sent by the homeserver instead - # # - # # If this option is set to false and SMTP options have not been - # # configured, resetting user passwords via email will be disabled - # # - # #trust_identity_server_for_password_resets: false - # # # Configure the time that a validation email or text message code # # will expire after sending # # @@ -283,9 +323,35 @@ class EmailConfig(Config): # #password_reset_template_html: password_reset.html # #password_reset_template_text: password_reset.txt # + # # Templates for registration emails sent by the homeserver + # # + # #registration_template_html: registration.html + # #registration_template_text: registration.txt + # # # Templates for password reset success and failure pages that a user # # will see after attempting to reset their password # # # #password_reset_template_success_html: password_reset_success.html # #password_reset_template_failure_html: password_reset_failure.html + # + # # Templates for registration success and failure pages that a user + # # will see after attempting to register using an email or phone + # # + # #registration_template_success_html: registration_success.html + # #registration_template_failure_html: registration_failure.html """ + + +class ThreepidBehaviour(Enum): + """ + Enum to define the behaviour of Synapse with regards to when it contacts an identity + server for 3pid registration and password resets + + REMOTE = use an external server to send tokens + LOCAL = send tokens ourselves + OFF = disable registration via 3pid and password resets + """ + + REMOTE = "remote" + LOCAL = "local" + OFF = "off" diff --git a/synapse/config/key.py b/synapse/config/key.py index fe8386985c..ba2199bceb 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -76,7 +76,7 @@ class KeyConfig(Config): config_dir_path, config["server_name"] + ".signing.key" ) - self.signing_key = self.read_signing_key(signing_key_path) + self.signing_key = self.read_signing_keys(signing_key_path, "signing_key") self.old_signing_keys = self.read_old_signing_keys( config.get("old_signing_keys", {}) @@ -85,6 +85,14 @@ class KeyConfig(Config): config.get("key_refresh_interval", "1d") ) + key_server_signing_keys_path = config.get("key_server_signing_keys_path") + if key_server_signing_keys_path: + self.key_server_signing_keys = self.read_signing_keys( + key_server_signing_keys_path, "key_server_signing_keys_path" + ) + else: + self.key_server_signing_keys = list(self.signing_key) + # if neither trusted_key_servers nor perspectives are given, use the default. if "perspectives" not in config and "trusted_key_servers" not in config: key_servers = [{"server_name": "matrix.org"}] @@ -210,16 +218,34 @@ class KeyConfig(Config): # #trusted_key_servers: # - server_name: "matrix.org" + # + + # The signing keys to use when acting as a trusted key server. If not specified + # defaults to the server signing key. + # + # Can contain multiple keys, one per line. + # + #key_server_signing_keys_path: "key_server_signing_keys.key" """ % locals() ) - def read_signing_key(self, signing_key_path): - signing_keys = self.read_file(signing_key_path, "signing_key") + def read_signing_keys(self, signing_key_path, name): + """Read the signing keys in the given path. 
+ + Args: + signing_key_path (str) + name (str): Associated config key name + + Returns: + list[SigningKey] + """ + + signing_keys = self.read_file(signing_key_path, name) try: return read_signing_keys(signing_keys.splitlines(True)) except Exception as e: - raise ConfigError("Error reading signing_key: %s" % (str(e))) + raise ConfigError("Error reading %s: %s" % (name, str(e))) def read_old_signing_keys(self, old_signing_keys): keys = {} diff --git a/synapse/config/logger.py b/synapse/config/logger.py index d321d00b80..767ecfdf09 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -21,10 +21,19 @@ from string import Template import yaml -from twisted.logger import STDLibLogObserver, globalLogBeginner +from twisted.logger import ( + ILogObserver, + LogBeginner, + STDLibLogObserver, + globalLogBeginner, +) import synapse from synapse.app import _base as appbase +from synapse.logging._structured import ( + reload_structured_logging, + setup_structured_logging, +) from synapse.logging.context import LoggingContextFilter from synapse.util.versionstring import get_version_string @@ -85,7 +94,8 @@ class LoggingConfig(Config): """\ ## Logging ## - # A yaml python logging config file + # A yaml python logging config file as described by + # https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema # log_config: "%(log_config)s" """ @@ -119,21 +129,10 @@ class LoggingConfig(Config): log_config_file.write(DEFAULT_LOG_CONFIG.substitute(log_file=log_file)) -def setup_logging(config, use_worker_options=False): - """ Set up python logging - - Args: - config (LoggingConfig | synapse.config.workers.WorkerConfig): - configuration data - - use_worker_options (bool): True to use the 'worker_log_config' option - instead of 'log_config'. - - register_sighup (func | None): Function to call to register a - sighup handler. +def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner): + """ + Set up Python stdlib logging. """ - log_config = config.worker_log_config if use_worker_options else config.log_config - if log_config is None: log_format = ( "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" @@ -151,35 +150,10 @@ def setup_logging(config, use_worker_options=False): handler.addFilter(LoggingContextFilter(request="")) logger.addHandler(handler) else: + logging.config.dictConfig(log_config) - def load_log_config(): - with open(log_config, "r") as f: - logging.config.dictConfig(yaml.safe_load(f)) - - def sighup(*args): - # it might be better to use a file watcher or something for this. 
- load_log_config() - logging.info("Reloaded log config from %s due to SIGHUP", log_config) - - load_log_config() - appbase.register_sighup(sighup) - - # make sure that the first thing we log is a thing we can grep backwards - # for - logging.warn("***** STARTING SERVER *****") - logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse)) - logging.info("Server hostname: %s", config.server_name) - - # It's critical to point twisted's internal logging somewhere, otherwise it - # stacks up and leaks kup to 64K object; - # see: https://twistedmatrix.com/trac/ticket/8164 - # - # Routing to the python logging framework could be a performance problem if - # the handlers blocked for a long time as python.logging is a blocking API - # see https://twistedmatrix.com/documents/current/core/howto/logger.html - # filed as https://github.com/matrix-org/synapse/issues/1727 - # - # However this may not be too much of a problem if we are just writing to a file. + # Route Twisted's native logging through to the standard library logging + # system. observer = STDLibLogObserver() def _log(event): @@ -196,8 +170,71 @@ def setup_logging(config, use_worker_options=False): return observer(event) - globalLogBeginner.beginLoggingTo( - [_log], redirectStandardIO=not config.no_redirect_stdio - ) + logBeginner.beginLoggingTo([_log], redirectStandardIO=not config.no_redirect_stdio) if not config.no_redirect_stdio: print("Redirected stdout/stderr to logs") + + return observer + + +def _reload_stdlib_logging(*args, log_config=None): + logger = logging.getLogger("") + + if not log_config: + logger.warn("Reloaded a blank config?") + + logging.config.dictConfig(log_config) + + +def setup_logging( + hs, config, use_worker_options=False, logBeginner: LogBeginner = globalLogBeginner +) -> ILogObserver: + """ + Set up the logging subsystem. + + Args: + config (LoggingConfig | synapse.config.workers.WorkerConfig): + configuration data + + use_worker_options (bool): True to use the 'worker_log_config' option + instead of 'log_config'. + + logBeginner: The Twisted logBeginner to use. + + Returns: + The "root" Twisted Logger observer, suitable for sending logs to from a + Logger instance. 
+ """ + log_config = config.worker_log_config if use_worker_options else config.log_config + + def read_config(*args, callback=None): + if log_config is None: + return None + + with open(log_config, "rb") as f: + log_config_body = yaml.safe_load(f.read()) + + if callback: + callback(log_config=log_config_body) + logging.info("Reloaded log config from %s due to SIGHUP", log_config) + + return log_config_body + + log_config_body = read_config() + + if log_config_body and log_config_body.get("structured") is True: + logger = setup_structured_logging( + hs, config, log_config_body, logBeginner=logBeginner + ) + appbase.register_sighup(read_config, callback=reload_structured_logging) + else: + logger = _setup_stdlib_logging(config, log_config_body, logBeginner=logBeginner) + appbase.register_sighup(read_config, callback=_reload_stdlib_logging) + + # make sure that the first thing we log is a thing we can grep backwards + # for + logging.warn("***** STARTING SERVER *****") + logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse)) + logging.info("Server hostname: %s", config.server_name) + + return logger diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py index 3698441963..ec35a6b868 100644 --- a/synapse/config/metrics.py +++ b/synapse/config/metrics.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,26 +14,47 @@ # See the License for the specific language governing permissions and # limitations under the License. +import attr + +from synapse.python_dependencies import DependencyException, check_requirements + from ._base import Config, ConfigError -MISSING_SENTRY = """Missing sentry-sdk library. This is required to enable sentry - integration. - """ + +@attr.s +class MetricsFlags(object): + known_servers = attr.ib(default=False, validator=attr.validators.instance_of(bool)) + + @classmethod + def all_off(cls): + """ + Instantiate the flags with all options set to off. + """ + return cls(**{x.name: False for x in attr.fields(cls)}) class MetricsConfig(Config): def read_config(self, config, **kwargs): self.enable_metrics = config.get("enable_metrics", False) self.report_stats = config.get("report_stats", None) + self.report_stats_endpoint = config.get( + "report_stats_endpoint", "https://matrix.org/report-usage-stats/push" + ) self.metrics_port = config.get("metrics_port") self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1") + if self.enable_metrics: + _metrics_config = config.get("metrics_flags") or {} + self.metrics_flags = MetricsFlags(**_metrics_config) + else: + self.metrics_flags = MetricsFlags.all_off() + self.sentry_enabled = "sentry" in config if self.sentry_enabled: try: - import sentry_sdk # noqa F401 - except ImportError: - raise ConfigError(MISSING_SENTRY) + check_requirements("sentry") + except DependencyException as e: + raise ConfigError(e.message) self.sentry_dsn = config["sentry"].get("dsn") if not self.sentry_dsn: @@ -58,6 +80,16 @@ class MetricsConfig(Config): #sentry: # dsn: "..." + # Flags to enable Prometheus metrics which are not suitable to be + # enabled by default, either for performance reasons or limited use. + # + metrics_flags: + # Publish synapse_federation_known_servers, a g auge of the number of + # servers this homeserver knows about, including itself. 
May cause + # performance problems on large homeservers. + # + #known_servers: true + # Whether or not to report anonymized homeserver usage statistics. """ @@ -66,4 +98,10 @@ class MetricsConfig(Config): else: res += "report_stats: %s\n" % ("true" if report_stats else "false") + res += """ + # The endpoint to report the anonymized homeserver usage statistics to. + # Defaults to https://matrix.org/report-usage-stats/push + # + #report_stats_endpoint: https://example.com/report-usage-stats/push + """ return res diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 33f31cf213..587e2862b7 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -80,6 +80,12 @@ class RatelimitConfig(Config): "federation_rr_transactions_per_room_per_second", 50 ) + rc_admin_redaction = config.get("rc_admin_redaction") + if rc_admin_redaction: + self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction) + else: + self.rc_admin_redaction = None + def generate_config_section(self, **kwargs): return """\ ## Ratelimiting ## @@ -102,6 +108,9 @@ class RatelimitConfig(Config): # - one for login that ratelimits login requests based on the account the # client is attempting to log into, based on the amount of failed login # attempts for this account. + # - one for ratelimiting redactions by room admins. If this is not explicitly + # set then it uses the same ratelimiting as per rc_message. This is useful + # to allow room admins to deal with abuse quickly. # # The defaults are as shown below. # @@ -123,6 +132,10 @@ class RatelimitConfig(Config): # failed_attempts: # per_second: 0.17 # burst_count: 3 + # + #rc_admin_redaction: + # per_second: 1 + # burst_count: 50 # Ratelimiting settings for incoming federation diff --git a/synapse/config/registration.py b/synapse/config/registration.py index e2bee3c116..9548560edb 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -99,6 +99,10 @@ class RegistrationConfig(Config): self.trusted_third_party_id_servers = config.get( "trusted_third_party_id_servers", ["matrix.org", "vector.im"] ) + account_threepid_delegates = config.get("account_threepid_delegates") or {} + self.account_threepid_delegate_email = account_threepid_delegates.get("email") + self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn") + self.default_identity_server = config.get("default_identity_server") self.allow_guest_access = config.get("allow_guest_access", False) @@ -257,10 +261,42 @@ class RegistrationConfig(Config): # Also defines the ID server which will be called when an account is # deactivated (one will be picked arbitrarily). # + # Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity + # server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a + # background migration script, informing itself that the identity server all of its + # 3PIDs have been bound to is likely one of the below. + # + # As of Synapse v1.4.0, all other functionality of this option has been deprecated, and + # it is now solely used for the purposes of the background migration script, and can be + # removed once it has run. #trusted_third_party_id_servers: # - matrix.org # - vector.im + # Handle threepid (email/phone etc) registration and password resets through a set of + # *trusted* identity servers. Note that this allows the configured identity server to + # reset passwords for accounts! 
+ # + # Be aware that if `email` is not set, and SMTP options have not been + # configured in the email config block, registration and user password resets via + # email will be globally disabled. + # + # Additionally, if `msisdn` is not set, registration and password resets via msisdn + # will be disabled regardless. This is due to Synapse currently not supporting any + # method of sending SMS messages on its own. + # + # To enable using an identity server for operations regarding a particular third-party + # identifier type, set the value to the URL of that identity server as shown in the + # examples below. + # + # Servers handling the these requests must answer the `/requestToken` endpoints defined + # by the Matrix Identity Service API specification: + # https://matrix.org/docs/spec/identity_service/latest + # + account_threepid_delegates: + #email: https://example.com # Delegate email sending to matrix.org + #msisdn: http://localhost:8090 # Delegate SMS sending to this local process + # Users who register on this homeserver will automatically be joined # to these rooms # diff --git a/synapse/config/repository.py b/synapse/config/repository.py index fdb1f246d0..34f1a9a92d 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -16,6 +16,7 @@ import os from collections import namedtuple +from synapse.python_dependencies import DependencyException, check_requirements from synapse.util.module_loader import load_module from ._base import Config, ConfigError @@ -34,17 +35,6 @@ THUMBNAIL_SIZE_YAML = """\ # method: %(method)s """ -MISSING_NETADDR = "Missing netaddr library. This is required for URL preview API." - -MISSING_LXML = """Missing lxml library. This is required for URL preview API. - - Install by running: - pip install lxml - - Requires libxslt1-dev system package. - """ - - ThumbnailRequirement = namedtuple( "ThumbnailRequirement", ["width", "height", "method", "media_type"] ) @@ -171,16 +161,10 @@ class ContentRepositoryConfig(Config): self.url_preview_enabled = config.get("url_preview_enabled", False) if self.url_preview_enabled: try: - import lxml - - lxml # To stop unused lint. - except ImportError: - raise ConfigError(MISSING_LXML) + check_requirements("url_preview") - try: - from netaddr import IPSet - except ImportError: - raise ConfigError(MISSING_NETADDR) + except DependencyException as e: + raise ConfigError(e.message) if "url_preview_ip_range_blacklist" not in config: raise ConfigError( @@ -189,6 +173,9 @@ class ContentRepositoryConfig(Config): "to work" ) + # netaddr is a dependency for url_preview + from netaddr import IPSet + self.url_preview_ip_range_blacklist = IPSet( config["url_preview_ip_range_blacklist"] ) diff --git a/synapse/config/server.py b/synapse/config/server.py index 15449695d1..7f8d315954 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -17,8 +17,11 @@ import logging import os.path +import re +from textwrap import indent import attr +import yaml from netaddr import IPSet from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -159,6 +162,16 @@ class ServerConfig(Config): self.mau_trial_days = config.get("mau_trial_days", 0) + # How long to keep redacted events in the database in unredacted form + # before redacting them. 
+ redaction_retention_period = config.get("redaction_retention_period", "7d") + if redaction_retention_period is not None: + self.redaction_retention_period = self.parse_duration( + redaction_retention_period + ) + else: + self.redaction_retention_period = None + # Options to disable HS self.hs_disabled = config.get("hs_disabled", False) self.hs_disabled_message = config.get("hs_disabled_message", "") @@ -325,7 +338,7 @@ class ServerConfig(Config): ( "The metrics_port configuration option is deprecated in Synapse 0.31 " "in favour of a listener. Please see " - "http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst" + "http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md" " on how to configure the new listener." ) ) @@ -352,7 +365,7 @@ class ServerConfig(Config): return any(l["tls"] for l in self.listeners) def generate_config_section( - self, server_name, data_dir_path, open_private_ports, **kwargs + self, server_name, data_dir_path, open_private_ports, listeners, **kwargs ): _, bind_port = parse_and_validate_server_name(server_name) if bind_port is not None: @@ -366,11 +379,68 @@ class ServerConfig(Config): # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the # default config string default_room_version = DEFAULT_ROOM_VERSION + secure_listeners = [] + unsecure_listeners = [] + private_addresses = ["::1", "127.0.0.1"] + if listeners: + for listener in listeners: + if listener["tls"]: + secure_listeners.append(listener) + else: + # If we don't want open ports we need to bind the listeners + # to some address other than 0.0.0.0. Here we chose to use + # localhost. + # If the addresses are already bound we won't overwrite them + # however. + if not open_private_ports: + listener.setdefault("bind_addresses", private_addresses) + + unsecure_listeners.append(listener) + + secure_http_bindings = indent( + yaml.dump(secure_listeners), " " * 10 + ).lstrip() + + unsecure_http_bindings = indent( + yaml.dump(unsecure_listeners), " " * 10 + ).lstrip() + + if not unsecure_listeners: + unsecure_http_bindings = ( + """- port: %(unsecure_port)s + tls: false + type: http + x_forwarded: true""" + % locals() + ) + + if not open_private_ports: + unsecure_http_bindings += ( + "\n bind_addresses: ['::1', '127.0.0.1']" + ) + + unsecure_http_bindings += """ + + resources: + - names: [client, federation] + compress: false""" + + if listeners: + # comment out this block + unsecure_http_bindings = "#" + re.sub( + "\n {10}", + lambda match: match.group(0) + "#", + unsecure_http_bindings, + ) - unsecure_http_binding = "port: %i\n tls: false" % (unsecure_port,) - if not open_private_ports: - unsecure_http_binding += ( - "\n bind_addresses: ['::1', '127.0.0.1']" + if not secure_listeners: + secure_http_bindings = ( + """#- port: %(bind_port)s + # type: http + # tls: true + # resources: + # - names: [client, federation]""" + % locals() ) return ( @@ -501,8 +571,8 @@ class ServerConfig(Config): # # type: the type of listener. Normally 'http', but other valid options are: # 'manhole' (see docs/manhole.md), - # 'metrics' (see docs/metrics-howto.rst), - # 'replication' (see docs/workers.rst). + # 'metrics' (see docs/metrics-howto.md), + # 'replication' (see docs/workers.md). # # tls: set to true to enable TLS for this listener. Will use the TLS # key/cert specified in tls_private_key_path / tls_certificate_path. @@ -537,12 +607,12 @@ class ServerConfig(Config): # # media: the media API (/_matrix/media). # - # metrics: the metrics interface. See docs/metrics-howto.rst. 
+ # metrics: the metrics interface. See docs/metrics-howto.md. # # openid: OpenID authentication. # # replication: the HTTP replication API (/_synapse/replication). See - # docs/workers.rst. + # docs/workers.md. # # static: static resources under synapse/static (/_matrix/static). (Mostly # useful for 'fallback authentication'.) @@ -556,25 +626,15 @@ class ServerConfig(Config): # will also need to give Synapse a TLS key and certificate: see the TLS section # below.) # - #- port: %(bind_port)s - # type: http - # tls: true - # resources: - # - names: [client, federation] + %(secure_http_bindings)s # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy # that unwraps TLS. # # If you plan to use a reverse proxy, please see - # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst. + # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md. # - - %(unsecure_http_binding)s - type: http - x_forwarded: true - - resources: - - names: [client, federation] - compress: false + %(unsecure_http_bindings)s # example additional_resources: # @@ -668,6 +728,13 @@ class ServerConfig(Config): # Defaults to 'true'. # #allow_per_room_profiles: false + + # How long to keep redacted events in unredacted form in the database. After + # this period redacted events get replaced with their redacted form in the DB. + # + # Defaults to `7d`. Set to `null` to disable. + # + redaction_retention_period: 7d """ % locals() ) diff --git a/synapse/config/stats.py b/synapse/config/stats.py index b518a3ed9c..b18ddbd1fa 100644 --- a/synapse/config/stats.py +++ b/synapse/config/stats.py @@ -27,19 +27,16 @@ class StatsConfig(Config): def read_config(self, config, **kwargs): self.stats_enabled = True - self.stats_bucket_size = 86400 + self.stats_bucket_size = 86400 * 1000 self.stats_retention = sys.maxsize stats_config = config.get("stats", None) if stats_config: self.stats_enabled = stats_config.get("enabled", self.stats_enabled) - self.stats_bucket_size = ( - self.parse_duration(stats_config.get("bucket_size", "1d")) / 1000 + self.stats_bucket_size = self.parse_duration( + stats_config.get("bucket_size", "1d") ) - self.stats_retention = ( - self.parse_duration( - stats_config.get("retention", "%ds" % (sys.maxsize,)) - ) - / 1000 + self.stats_retention = self.parse_duration( + stats_config.get("retention", "%ds" % (sys.maxsize,)) ) def generate_config_section(self, config_dir_path, server_name, **kwargs): diff --git a/synapse/config/tls.py b/synapse/config/tls.py index ca508a224f..fc47ba3e9a 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -110,8 +110,15 @@ class TlsConfig(Config): # Support globs (*) in whitelist values self.federation_certificate_verification_whitelist = [] for entry in fed_whitelist_entries: + try: + entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii")) + except UnicodeEncodeError: + raise ConfigError( + "IDNA domain names are not allowed in the " + "federation_certificate_verification_whitelist: %s" % (entry,) + ) + # Convert globs to regex - entry_regex = glob_to_regex(entry) self.federation_certificate_verification_whitelist.append(entry_regex) # List of custom certificate authorities for federation traffic validation @@ -239,12 +246,38 @@ class TlsConfig(Config): self.tls_fingerprints.append({"sha256": sha256_fingerprint}) def generate_config_section( - self, config_dir_path, server_name, data_dir_path, **kwargs + self, + config_dir_path, + server_name, + data_dir_path, + tls_certificate_path, + tls_private_key_path, 
+ acme_domain, + **kwargs ): + """If the acme_domain is specified acme will be enabled. + If the TLS paths are not specified the default will be certs in the + config directory""" + base_key_name = os.path.join(config_dir_path, server_name) - tls_certificate_path = base_key_name + ".tls.crt" - tls_private_key_path = base_key_name + ".tls.key" + if bool(tls_certificate_path) != bool(tls_private_key_path): + raise ConfigError( + "Please specify both a cert path and a key path or neither." + ) + + tls_enabled = ( + "" if tls_certificate_path and tls_private_key_path or acme_domain else "#" + ) + + if not tls_certificate_path: + tls_certificate_path = base_key_name + ".tls.crt" + if not tls_private_key_path: + tls_private_key_path = base_key_name + ".tls.key" + + acme_enabled = bool(acme_domain) + acme_domain = "matrix.example.com" + default_acme_account_file = os.path.join(data_dir_path, "acme_account.key") # this is to avoid the max line length. Sorrynotsorry @@ -269,11 +302,11 @@ class TlsConfig(Config): # instance, if using certbot, use `fullchain.pem` as your certificate, # not `cert.pem`). # - #tls_certificate_path: "%(tls_certificate_path)s" + %(tls_enabled)stls_certificate_path: "%(tls_certificate_path)s" # PEM-encoded private key for TLS # - #tls_private_key_path: "%(tls_private_key_path)s" + %(tls_enabled)stls_private_key_path: "%(tls_private_key_path)s" # Whether to verify TLS server certificates for outbound federation requests. # @@ -340,10 +373,10 @@ class TlsConfig(Config): # permission to listen on port 80. # acme: - # ACME support is disabled by default. Uncomment the following line - # (and tls_certificate_path and tls_private_key_path above) to enable it. + # ACME support is disabled by default. Set this to `true` and uncomment + # tls_certificate_path and tls_private_key_path above to enable it. # - #enabled: true + enabled: %(acme_enabled)s # Endpoint to use to request certificates. If you only want to test, # use Let's Encrypt's staging url: @@ -354,17 +387,17 @@ class TlsConfig(Config): # Port number to listen on for the HTTP-01 challenge. Change this if # you are forwarding connections through Apache/Nginx/etc. # - #port: 80 + port: 80 # Local addresses to listen on for incoming connections. # Again, you may want to change this if you are forwarding connections # through Apache/Nginx/etc. # - #bind_addresses: ['::', '0.0.0.0'] + bind_addresses: ['::', '0.0.0.0'] # How many days remaining on a certificate before it is renewed. # - #reprovision_threshold: 30 + reprovision_threshold: 30 # The domain that the certificate should be for. Normally this # should be the same as your Matrix domain (i.e., 'server_name'), but, @@ -378,7 +411,7 @@ class TlsConfig(Config): # # If not set, defaults to your 'server_name'. # - #domain: matrix.example.com + domain: %(acme_domain)s # file to use for the account key. This will be generated if it doesn't # exist. diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index 95e7ccb3a3..85d99a3166 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from synapse.python_dependencies import DependencyException, check_requirements + from ._base import Config, ConfigError @@ -32,6 +34,11 @@ class TracerConfig(Config): if not self.opentracer_enabled: return + try: + check_requirements("opentracing") + except DependencyException as e: + raise ConfigError(e.message) + # The tracer is enabled so sanitize the config self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", []) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 06e63a96b5..e93f0b3705 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -15,7 +15,6 @@ import logging -import idna from service_identity import VerificationError from service_identity.pyopenssl import verify_hostname, verify_ip_address from zope.interface import implementer @@ -114,14 +113,20 @@ class ClientTLSOptionsFactory(object): self._no_verify_ssl_context = self._no_verify_ssl.getContext() self._no_verify_ssl_context.set_info_callback(self._context_info_cb) - def get_options(self, host): + def get_options(self, host: bytes): + + # IPolicyForHTTPS.get_options takes bytes, but we want to compare + # against the str whitelist. The hostnames in the whitelist are already + # IDNA-encoded like the hosts will be here. + ascii_host = host.decode("ascii") + # Check if certificate verification has been enabled should_verify = self._config.federation_verify_certificates # Check if we've disabled certificate verification for this host if should_verify: for regex in self._config.federation_certificate_verification_whitelist: - if regex.match(host): + if regex.match(ascii_host): should_verify = False break @@ -162,7 +167,7 @@ class SSLClientConnectionCreator(object): Replaces twisted.internet.ssl.ClientTLSOptions """ - def __init__(self, hostname, ctx, verify_certs): + def __init__(self, hostname: bytes, ctx, verify_certs: bool): self._ctx = ctx self._verifier = ConnectionVerifier(hostname, verify_certs) @@ -190,21 +195,16 @@ class ConnectionVerifier(object): # This code is based on twisted.internet.ssl.ClientTLSOptions. - def __init__(self, hostname, verify_certs): + def __init__(self, hostname: bytes, verify_certs): self._verify_certs = verify_certs - if isIPAddress(hostname) or isIPv6Address(hostname): - self._hostnameBytes = hostname.encode("ascii") + _decoded = hostname.decode("ascii") + if isIPAddress(_decoded) or isIPv6Address(_decoded): self._is_ip_address = True else: - # twisted's ClientTLSOptions falls back to the stdlib impl here if - # idna is not installed, but points out that lacks support for - # IDNA2008 (http://bugs.python.org/issue17305). - # - # We can rely on having idna. 
- self._hostnameBytes = idna.encode(hostname) self._is_ip_address = False + self._hostnameBytes = hostname self._hostnameASCII = self._hostnameBytes.decode("ascii") def verify_context_info_cb(self, ssl_connection, where): diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index 41eabbe717..694fb2c816 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -83,7 +83,7 @@ def compute_content_hash(event_dict, hash_algorithm): event_json_bytes = encode_canonical_json(event_dict) hashed = hash_algorithm(event_json_bytes) - return (hashed.name, hashed.digest()) + return hashed.name, hashed.digest() def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256): @@ -106,7 +106,7 @@ def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256): event_dict.pop("unsigned", None) event_json_bytes = encode_canonical_json(event_dict) hashed = hash_algorithm(event_json_bytes) - return (hashed.name, hashed.digest()) + return hashed.name, hashed.digest() def compute_event_signature(event_dict, signature_name, signing_key): diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 6c3e885e72..7cfad192e8 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -18,7 +18,6 @@ import logging from collections import defaultdict import six -from six import raise_from from six.moves import urllib import attr @@ -30,7 +29,6 @@ from signedjson.key import ( from signedjson.sign import ( SignatureVerifyException, encode_canonical_json, - sign_json, signature_ids, verify_signed_json, ) @@ -540,13 +538,7 @@ class BaseV2KeyFetcher(object): verify_key=verify_key, valid_until_ts=key_data["expired_ts"] ) - # re-sign the json with our own key, so that it is ready if we are asked to - # give it out as a notary server - signed_key_json = sign_json( - response_json, self.config.server_name, self.config.signing_key[0] - ) - - signed_key_json_bytes = encode_canonical_json(signed_key_json) + key_json_bytes = encode_canonical_json(response_json) yield make_deferred_yieldable( defer.gatherResults( @@ -558,7 +550,7 @@ class BaseV2KeyFetcher(object): from_server=from_server, ts_now_ms=time_added_ms, ts_expires_ms=ts_valid_until_ms, - key_json_bytes=signed_key_json_bytes, + key_json_bytes=key_json_bytes, ) for key_id in verify_keys ], @@ -657,9 +649,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): }, ) except (NotRetryingDestination, RequestSendFailed) as e: - raise_from(KeyLookupError("Failed to connect to remote server"), e) + # these both have str() representations which we can't really improve upon + raise KeyLookupError(str(e)) except HttpResponseException as e: - raise_from(KeyLookupError("Remote server returned an error"), e) + raise KeyLookupError("Remote server returned an error: %s" % (e,)) keys = {} added_keys = [] @@ -821,9 +814,11 @@ class ServerKeyFetcher(BaseV2KeyFetcher): timeout=10000, ) except (NotRetryingDestination, RequestSendFailed) as e: - raise_from(KeyLookupError("Failed to connect to remote server"), e) + # these both have str() representations which we can't really improve + # upon + raise KeyLookupError(str(e)) except HttpResponseException as e: - raise_from(KeyLookupError("Remote server returned an error"), e) + raise KeyLookupError("Remote server returned an error: %s" % (e,)) if response["server_name"] != server_name: raise KeyLookupError( diff --git a/synapse/event_auth.py b/synapse/event_auth.py index cd52e3f867..4e91df60e6 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ 
-637,11 +637,11 @@ def auth_types_for_event(event): if event.type == EventTypes.Create: return [] - auth_types = [] - - auth_types.append((EventTypes.PowerLevels, "")) - auth_types.append((EventTypes.Member, event.sender)) - auth_types.append((EventTypes.Create, "")) + auth_types = [ + (EventTypes.PowerLevels, ""), + (EventTypes.Member, event.sender), + (EventTypes.Create, ""), + ] if event.type == EventTypes.Member: membership = event.content["membership"] diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index bec3080895..6ee6216660 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -355,7 +355,7 @@ class FederationClient(FederationBase): auth_chain.sort(key=lambda e: e.depth) - return (pdus, auth_chain) + return pdus, auth_chain except HttpResponseException as e: if e.code == 400 or e.code == 404: logger.info("Failed to use get_room_state_ids API, falling back") @@ -404,7 +404,7 @@ class FederationClient(FederationBase): signed_auth.sort(key=lambda e: e.depth) - return (signed_pdus, signed_auth) + return signed_pdus, signed_auth @defer.inlineCallbacks def get_events_from_store_or_dest(self, destination, room_id, event_ids): @@ -429,7 +429,7 @@ class FederationClient(FederationBase): missing_events.discard(k) if not missing_events: - return (signed_events, failed_to_fetch) + return signed_events, failed_to_fetch logger.debug( "Fetching unknown state/auth events %s for room %s", @@ -465,7 +465,7 @@ class FederationClient(FederationBase): # We removed all events we successfully fetched from `batch` failed_to_fetch.update(batch) - return (signed_events, failed_to_fetch) + return signed_events, failed_to_fetch @defer.inlineCallbacks @log_function diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index d216c46dfe..da06ab379d 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -43,6 +43,7 @@ from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction from synapse.http.endpoint import parse_server_name from synapse.logging.context import nested_logging_context +from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace from synapse.logging.utils import log_function from synapse.replication.http.federation import ( ReplicationFederationSendEduRestServlet, @@ -99,7 +100,7 @@ class FederationServer(FederationBase): res = self._transaction_from_pdus(pdus).get_dict() - return (200, res) + return 200, res @defer.inlineCallbacks @log_function @@ -162,7 +163,7 @@ class FederationServer(FederationBase): yield self.transaction_actions.set_response( origin, transaction, 400, response ) - return (400, response) + return 400, response received_pdus_counter.inc(len(transaction.pdus)) @@ -264,7 +265,7 @@ class FederationServer(FederationBase): logger.debug("Returning: %s", str(response)) yield self.transaction_actions.set_response(origin, transaction, 200, response) - return (200, response) + return 200, response @defer.inlineCallbacks def received_edu(self, origin, edu_type, content): @@ -297,7 +298,7 @@ class FederationServer(FederationBase): event_id, ) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_state_ids_request(self, origin, room_id, event_id): @@ -314,7 +315,7 @@ class FederationServer(FederationBase): state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id) auth_chain_ids = yield 
self.store.get_auth_chain_ids(state_ids) - return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}) + return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids} @defer.inlineCallbacks def _on_context_state_request_compute(self, room_id, event_id): @@ -344,15 +345,15 @@ class FederationServer(FederationBase): pdu = yield self.handler.get_persisted_pdu(origin, event_id) if pdu: - return (200, self._transaction_from_pdus([pdu]).get_dict()) + return 200, self._transaction_from_pdus([pdu]).get_dict() else: - return (404, "") + return 404, "" @defer.inlineCallbacks def on_query_request(self, query_type, args): received_queries_counter.labels(query_type).inc() resp = yield self.registry.on_query(query_type, args) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_make_join_request(self, origin, room_id, user_id, supported_versions): @@ -434,7 +435,7 @@ class FederationServer(FederationBase): logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures) yield self.handler.on_send_leave_request(origin, pdu) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_event_auth(self, origin, room_id, event_id): @@ -445,7 +446,7 @@ class FederationServer(FederationBase): time_now = self._clock.time_msec() auth_pdus = yield self.handler.on_event_auth(event_id) res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]} - return (200, res) + return 200, res @defer.inlineCallbacks def on_query_auth_request(self, origin, content, room_id, event_id): @@ -498,7 +499,7 @@ class FederationServer(FederationBase): "missing": ret.get("missing", []), } - return (200, send_content) + return 200, send_content @log_function def on_query_client_keys(self, origin, content): @@ -507,6 +508,7 @@ class FederationServer(FederationBase): def on_query_user_devices(self, origin, user_id): return self.on_query_request("user_devices", user_id) + @trace @defer.inlineCallbacks @log_function def on_claim_client_keys(self, origin, content): @@ -515,6 +517,7 @@ class FederationServer(FederationBase): for device_id, algorithm in device_keys.items(): query.append((user_id, device_id, algorithm)) + log_kv({"message": "Claiming one time keys.", "user, device pairs": query}) results = yield self.store.claim_e2e_one_time_keys(query) json_result = {} @@ -666,9 +669,9 @@ class FederationServer(FederationBase): return ret @defer.inlineCallbacks - def on_exchange_third_party_invite_request(self, origin, room_id, event_dict): + def on_exchange_third_party_invite_request(self, room_id, event_dict): ret = yield self.handler.on_exchange_third_party_invite_request( - origin, room_id, event_dict + room_id, event_dict ) return ret @@ -808,12 +811,13 @@ class FederationHandlerRegistry(object): if not handler: logger.warn("No handler registered for EDU type %s", edu_type) - try: - yield handler(origin, content) - except SynapseError as e: - logger.info("Failed to handle edu %r: %r", edu_type, e) - except Exception: - logger.exception("Failed to handle edu %r", edu_type) + with start_active_span_from_edu(content, "handle_edu"): + try: + yield handler(origin, content) + except SynapseError as e: + logger.info("Failed to handle edu %r: %r", edu_type, e) + except Exception: + logger.exception("Failed to handle edu %r", edu_type) def on_query(self, query_type, args): handler = self.query_handlers.get(query_type) diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 52706302f2..5b6c79c51a 100644 --- 
a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -14,11 +14,20 @@ # limitations under the License. import logging +from canonicaljson import json + from twisted.internet import defer from synapse.api.errors import HttpResponseException from synapse.federation.persistence import TransactionActions from synapse.federation.units import Transaction +from synapse.logging.opentracing import ( + extract_text_map, + set_tag, + start_active_span_follows_from, + tags, + whitelisted_homeserver, +) from synapse.util.metrics import measure_func logger = logging.getLogger(__name__) @@ -44,93 +53,115 @@ class TransactionManager(object): @defer.inlineCallbacks def send_new_transaction(self, destination, pending_pdus, pending_edus): - # Sort based on the order field - pending_pdus.sort(key=lambda t: t[1]) - pdus = [x[0] for x in pending_pdus] - edus = pending_edus - - success = True - - logger.debug("TX [%s] _attempt_new_transaction", destination) - - txn_id = str(self._next_txn_id) - - logger.debug( - "TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)", - destination, - txn_id, - len(pdus), - len(edus), - ) - - transaction = Transaction.create_new( - origin_server_ts=int(self.clock.time_msec()), - transaction_id=txn_id, - origin=self._server_name, - destination=destination, - pdus=pdus, - edus=edus, - ) - - self._next_txn_id += 1 - - logger.info( - "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)", - destination, - txn_id, - transaction.transaction_id, - len(pdus), - len(edus), - ) - - # Actually send the transaction - - # FIXME (erikj): This is a bit of a hack to make the Pdu age - # keys work - def json_data_cb(): - data = transaction.get_dict() - now = int(self.clock.time_msec()) - if "pdus" in data: - for p in data["pdus"]: - if "age_ts" in p: - unsigned = p.setdefault("unsigned", {}) - unsigned["age"] = now - int(p["age_ts"]) - del p["age_ts"] - return data - - try: - response = yield self._transport_layer.send_transaction( - transaction, json_data_cb + # Make a transaction-sending opentracing span. This span follows on from + # all the edus in that transaction. This needs to be done since there is + # no active span here, so if the edus were not received by the remote the + # span would have no causality and it would be forgotten. + # The span_contexts is a generator so that it won't be evaluated if + # opentracing is disabled. (Yay speed!) 
+ + span_contexts = [] + keep_destination = whitelisted_homeserver(destination) + + for edu in pending_edus: + context = edu.get_context() + if context: + span_contexts.append(extract_text_map(json.loads(context))) + if keep_destination: + edu.strip_context() + + with start_active_span_follows_from("send_transaction", span_contexts): + + # Sort based on the order field + pending_pdus.sort(key=lambda t: t[1]) + pdus = [x[0] for x in pending_pdus] + edus = pending_edus + + success = True + + logger.debug("TX [%s] _attempt_new_transaction", destination) + + txn_id = str(self._next_txn_id) + + logger.debug( + "TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)", + destination, + txn_id, + len(pdus), + len(edus), + ) + + transaction = Transaction.create_new( + origin_server_ts=int(self.clock.time_msec()), + transaction_id=txn_id, + origin=self._server_name, + destination=destination, + pdus=pdus, + edus=edus, ) - code = 200 - except HttpResponseException as e: - code = e.code - response = e.response - if e.code in (401, 404, 429) or 500 <= e.code: - logger.info("TX [%s] {%s} got %d response", destination, txn_id, code) - raise e + self._next_txn_id += 1 - logger.info("TX [%s] {%s} got %d response", destination, txn_id, code) + logger.info( + "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)", + destination, + txn_id, + transaction.transaction_id, + len(pdus), + len(edus), + ) - if code == 200: - for e_id, r in response.get("pdus", {}).items(): - if "error" in r: + # Actually send the transaction + + # FIXME (erikj): This is a bit of a hack to make the Pdu age + # keys work + def json_data_cb(): + data = transaction.get_dict() + now = int(self.clock.time_msec()) + if "pdus" in data: + for p in data["pdus"]: + if "age_ts" in p: + unsigned = p.setdefault("unsigned", {}) + unsigned["age"] = now - int(p["age_ts"]) + del p["age_ts"] + return data + + try: + response = yield self._transport_layer.send_transaction( + transaction, json_data_cb + ) + code = 200 + except HttpResponseException as e: + code = e.code + response = e.response + + if e.code in (401, 404, 429) or 500 <= e.code: + logger.info( + "TX [%s] {%s} got %d response", destination, txn_id, code + ) + raise e + + logger.info("TX [%s] {%s} got %d response", destination, txn_id, code) + + if code == 200: + for e_id, r in response.get("pdus", {}).items(): + if "error" in r: + logger.warn( + "TX [%s] {%s} Remote returned error for %s: %s", + destination, + txn_id, + e_id, + r, + ) + else: + for p in pdus: logger.warn( - "TX [%s] {%s} Remote returned error for %s: %s", + "TX [%s] {%s} Failed to send event %s", destination, txn_id, - e_id, - r, + p.event_id, ) - else: - for p in pdus: - logger.warn( - "TX [%s] {%s} Failed to send event %s", - destination, - txn_id, - p.event_id, - ) - success = False + success = False - return success + set_tag(tags.ERROR, not success) + return success diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 0cea0d2a10..482a101c09 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -327,21 +327,37 @@ class TransportLayerClient(object): include_all_networks=False, third_party_instance_id=None, ): - path = _create_v1_path("/publicRooms") - - args = {"include_all_networks": "true" if include_all_networks else "false"} - if third_party_instance_id: - args["third_party_instance_id"] = (third_party_instance_id,) - if limit: - args["limit"] = [str(limit)] - if since_token: - args["since"] = [since_token] 
- - # TODO(erikj): Actually send the search_filter across federation. - - response = yield self.client.get_json( - destination=remote_server, path=path, args=args, ignore_backoff=True - ) + if search_filter: + # this uses MSC2197 (Search Filtering over Federation) + path = _create_v1_path("/publicRooms") + + data = {"include_all_networks": "true" if include_all_networks else "false"} + if third_party_instance_id: + data["third_party_instance_id"] = third_party_instance_id + if limit: + data["limit"] = str(limit) + if since_token: + data["since"] = since_token + + data["filter"] = search_filter + + response = yield self.client.post_json( + destination=remote_server, path=path, data=data, ignore_backoff=True + ) + else: + path = _create_v1_path("/publicRooms") + + args = {"include_all_networks": "true" if include_all_networks else "false"} + if third_party_instance_id: + args["third_party_instance_id"] = (third_party_instance_id,) + if limit: + args["limit"] = [str(limit)] + if since_token: + args["since"] = [since_token] + + response = yield self.client.get_json( + destination=remote_server, path=path, args=args, ignore_backoff=True + ) return response diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 9a86bd0263..7dc696c7ae 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -22,7 +22,6 @@ import re from twisted.internet.defer import maybeDeferred import synapse -import synapse.logging.opentracing as opentracing from synapse.api.errors import Codes, FederationDeniedError, SynapseError from synapse.api.room_versions import RoomVersions from synapse.api.urls import ( @@ -39,6 +38,12 @@ from synapse.http.servlet import ( parse_string_from_args, ) from synapse.logging.context import run_in_background +from synapse.logging.opentracing import ( + start_active_span, + start_active_span_from_request, + tags, + whitelisted_homeserver, +) from synapse.types import ThirdPartyInstanceID, get_domain_from_id from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.versionstring import get_version_string @@ -288,19 +293,28 @@ class BaseFederationServlet(object): logger.warn("authenticate_request failed: %s", e) raise - # Start an opentracing span - with opentracing.start_active_span_from_context( - request.requestHeaders, - "incoming-federation-request", - tags={ - "request_id": request.get_request_id(), - opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_SERVER, - opentracing.tags.HTTP_METHOD: request.get_method(), - opentracing.tags.HTTP_URL: request.get_redacted_uri(), - opentracing.tags.PEER_HOST_IPV6: request.getClientIP(), - "authenticated_entity": origin, - }, - ): + request_tags = { + "request_id": request.get_request_id(), + tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, + tags.HTTP_METHOD: request.get_method(), + tags.HTTP_URL: request.get_redacted_uri(), + tags.PEER_HOST_IPV6: request.getClientIP(), + "authenticated_entity": origin, + "servlet_name": request.request_metrics.name, + } + + # Only accept the span context if the origin is authenticated + # and whitelisted + if origin and whitelisted_homeserver(origin): + scope = start_active_span_from_request( + request, "incoming-federation-request", tags=request_tags + ) + else: + scope = start_active_span( + "incoming-federation-request", tags=request_tags + ) + + with scope: if origin: with ratelimiter.ratelimit(origin) as d: await d @@ -328,7 +342,11 @@ class BaseFederationServlet(object): continue server.register_paths( 
- method, (pattern,), self._wrap(code), self.__class__.__name__ + method, + (pattern,), + self._wrap(code), + self.__class__.__name__, + trace=False, ) @@ -557,7 +575,7 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet): async def on_PUT(self, origin, content, query, room_id): content = await self.handler.on_exchange_third_party_invite_request( - origin, room_id, content + room_id, content ) return 200, content @@ -756,6 +774,42 @@ class PublicRoomList(BaseFederationServlet): ) return 200, data + async def on_POST(self, origin, content, query): + # This implements MSC2197 (Search Filtering over Federation) + if not self.allow_access: + raise FederationDeniedError(origin) + + limit = int(content.get("limit", 100)) + since_token = content.get("since", None) + search_filter = content.get("filter", None) + + include_all_networks = content.get("include_all_networks", False) + third_party_instance_id = content.get("third_party_instance_id", None) + + if include_all_networks: + network_tuple = None + if third_party_instance_id is not None: + raise SynapseError( + 400, "Can't use include_all_networks with an explicit network" + ) + elif third_party_instance_id is None: + network_tuple = ThirdPartyInstanceID(None, None) + else: + network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id) + + if search_filter is None: + logger.warning("Nonefilter") + + data = await self.handler.get_local_public_room_list( + limit=limit, + since_token=since_token, + search_filter=search_filter, + network_tuple=network_tuple, + from_federation=True, + ) + + return 200, data + class FederationVersionServlet(BaseFederationServlet): PATH = "/version" diff --git a/synapse/federation/units.py b/synapse/federation/units.py index 14aad8f09d..b4d743cde7 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py @@ -38,6 +38,12 @@ class Edu(JsonEncodedObject): internal_keys = ["origin", "destination"] + def get_context(self): + return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}") + + def strip_context(self): + getattr(self, "content", {})["org.matrix.opentracing_context"] = "{}" + class Transaction(JsonEncodedObject): """ A transaction is a list of Pdus and Edus to be sent to a remote home diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index c29c78bd65..d15c6282fb 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -45,6 +45,7 @@ class BaseHandler(object): self.state_handler = hs.get_state_handler() self.distributor = hs.get_distributor() self.ratelimiter = hs.get_ratelimiter() + self.admin_redaction_ratelimiter = hs.get_admin_redaction_ratelimiter() self.clock = hs.get_clock() self.hs = hs @@ -53,7 +54,7 @@ class BaseHandler(object): self.event_builder_factory = hs.get_event_builder_factory() @defer.inlineCallbacks - def ratelimit(self, requester, update=True): + def ratelimit(self, requester, update=True, is_admin_redaction=False): """Ratelimits requests. Args: @@ -62,6 +63,9 @@ class BaseHandler(object): Set to False when doing multiple checks for one request (e.g. to check up front if we would reject the request), and set to True for the last call for a given request. + is_admin_redaction (bool): Whether this is a room admin/moderator + redacting an event. If so then we may apply different + ratelimits depending on config. 
Raises: LimitExceededError if the request should be ratelimited @@ -90,16 +94,33 @@ class BaseHandler(object): messages_per_second = override.messages_per_second burst_count = override.burst_count else: - messages_per_second = self.hs.config.rc_message.per_second - burst_count = self.hs.config.rc_message.burst_count - - allowed, time_allowed = self.ratelimiter.can_do_action( - user_id, - time_now, - rate_hz=messages_per_second, - burst_count=burst_count, - update=update, - ) + # We default to different values if this is an admin redaction and + # the config is set + if is_admin_redaction and self.hs.config.rc_admin_redaction: + messages_per_second = self.hs.config.rc_admin_redaction.per_second + burst_count = self.hs.config.rc_admin_redaction.burst_count + else: + messages_per_second = self.hs.config.rc_message.per_second + burst_count = self.hs.config.rc_message.burst_count + + if is_admin_redaction and self.hs.config.rc_admin_redaction: + # If we have separate config for admin redactions we use a separate + # ratelimiter + allowed, time_allowed = self.admin_redaction_ratelimiter.can_do_action( + user_id, + time_now, + rate_hz=messages_per_second, + burst_count=burst_count, + update=update, + ) + else: + allowed, time_allowed = self.ratelimiter.can_do_action( + user_id, + time_now, + rate_hz=messages_per_second, + burst_count=burst_count, + update=update, + ) if not allowed: raise LimitExceededError( retry_after_ms=int(1000 * (time_allowed - time_now)) diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 8acd9f9a83..38bc67191c 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -51,8 +51,8 @@ class AccountDataEventSource(object): {"type": account_data_type, "content": content, "room_id": room_id} ) - return (results, current_stream_id) + return results, current_stream_id @defer.inlineCallbacks def get_pagination_rows(self, user, config, key): - return ([], config.to_id) + return [], config.to_id diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 34574f1a12..d04e0fe576 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -38,6 +38,7 @@ logger = logging.getLogger(__name__) class AccountValidityHandler(object): def __init__(self, hs): self.hs = hs + self.config = hs.config self.store = self.hs.get_datastore() self.sendmail = self.hs.get_sendmail() self.clock = self.hs.get_clock() @@ -62,9 +63,14 @@ class AccountValidityHandler(object): self._raw_from = email.utils.parseaddr(self._from_string)[1] self._template_html, self._template_text = load_jinja2_templates( - config=self.hs.config, - template_html_name=self.hs.config.email_expiry_template_html, - template_text_name=self.hs.config.email_expiry_template_text, + self.config.email_template_dir, + [ + self.config.email_expiry_template_html, + self.config.email_expiry_template_text, + ], + apply_format_ts_filter=True, + apply_mxc_to_http_filter=True, + public_baseurl=self.config.public_baseurl, ) # Check the renewal emails to send and send them every 30min. diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 2f22f56ca4..1a87b58838 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -94,6 +94,25 @@ class AdminHandler(BaseHandler): return ret + def get_user_server_admin(self, user): + """ + Get the admin bit on a user. 
+ + Args: + user_id (UserID): the (necessarily local) user to manipulate + """ + return self.store.is_server_admin(user) + + def set_user_server_admin(self, user, admin): + """ + Set the admin bit on a user. + + Args: + user_id (UserID): the (necessarily local) user to manipulate + admin (bool): whether or not the user should be an admin of this server + """ + return self.store.set_server_admin(user, admin) + @defer.inlineCallbacks def export_user_data(self, user_id, writer): """Write all data we have on the user to the given writer. diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index d1a51df6f9..3e9b298154 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -294,12 +294,10 @@ class ApplicationServicesHandler(object): # we don't know if they are unknown or not since it isn't one of our # users. We can't poke ASes. return False - return user_info = yield self.store.get_user_by_id(user_id) if user_info: return False - return # user not found; could be the AS though, so check. services = self.store.get_app_services() diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 0f3ebf7ef8..374372b69e 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -38,6 +38,7 @@ from synapse.api.errors import ( UserDeactivatedError, ) from synapse.api.ratelimiting import Ratelimiter +from synapse.config.emailconfig import ThreepidBehaviour from synapse.logging.context import defer_to_thread from synapse.module_api import ModuleApi from synapse.types import UserID @@ -158,7 +159,7 @@ class AuthHandler(BaseHandler): return params @defer.inlineCallbacks - def check_auth(self, flows, clientdict, clientip, password_servlet=False): + def check_auth(self, flows, clientdict, clientip): """ Takes a dictionary sent by the client in the login / registration protocol and handles the User-Interactive Auth flow. @@ -182,16 +183,6 @@ class AuthHandler(BaseHandler): clientip (str): The IP address of the client. - password_servlet (bool): Whether the request originated from - PasswordRestServlet. - XXX: This is a temporary hack to distinguish between checking - for threepid validations locally (in the case of password - resets) and using the identity server (in the case of binding - a 3PID during registration). Once we start using the - homeserver for both tasks, this distinction will no longer be - necessary. - - Returns: defer.Deferred[dict, dict, str]: a deferred tuple of (creds, params, session_id). 
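The check_auth docstring that ends just above describes the User-Interactive Authentication entry point which the following hunks simplify by dropping the password_servlet special case. As a rough, minimal sketch of the shapes it works with (the flow names follow the Matrix spec, but the literal values below are invented for illustration and are not part of this diff):

    # Sketch only: example inputs for AuthHandler.check_auth, per the docstring above.
    flows = [["m.login.password"], ["m.login.email.identity"]]

    clientdict = {
        "username": "alice",
        "auth": {
            "type": "m.login.password",               # the stage being attempted
            "session": "GnpAebCRmdtcsgHDuNgKnInF",    # session id handed out on the initial 401
            "password": "correct horse battery staple",
        },
    }

    # creds, params, session_id = yield auth_handler.check_auth(flows, clientdict, clientip)
    # 'creds' maps each completed login type to its result (see creds[login_type] = result
    # below); roughly speaking, the check passes once every stage of one of the 'flows'
    # lists has an entry in 'creds'.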
@@ -247,9 +238,7 @@ class AuthHandler(BaseHandler): if "type" in authdict: login_type = authdict["type"] try: - result = yield self._check_auth_dict( - authdict, clientip, password_servlet=password_servlet - ) + result = yield self._check_auth_dict(authdict, clientip) if result: creds[login_type] = result self._save_session(session) @@ -280,7 +269,7 @@ class AuthHandler(BaseHandler): creds, list(clientdict), ) - return (creds, clientdict, session["id"]) + return creds, clientdict, session["id"] ret = self._auth_dict_for_flows(flows, session) ret["completed"] = list(creds) @@ -356,7 +345,7 @@ class AuthHandler(BaseHandler): return sess.setdefault("serverdict", {}).get(key, default) @defer.inlineCallbacks - def _check_auth_dict(self, authdict, clientip, password_servlet=False): + def _check_auth_dict(self, authdict, clientip): """Attempt to validate the auth dict provided by a client Args: @@ -374,11 +363,7 @@ class AuthHandler(BaseHandler): login_type = authdict["type"] checker = self.checkers.get(login_type) if checker is not None: - # XXX: Temporary workaround for having Synapse handle password resets - # See AuthHandler.check_auth for further details - res = yield checker( - authdict, clientip=clientip, password_servlet=password_servlet - ) + res = yield checker(authdict, clientip=clientip) return res # build a v1-login-style dict out of the authdict and fall back to the @@ -449,7 +434,7 @@ class AuthHandler(BaseHandler): return defer.succeed(True) @defer.inlineCallbacks - def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs): + def _check_threepid(self, medium, authdict, **kwargs): if "threepid_creds" not in authdict: raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM) @@ -458,12 +443,18 @@ class AuthHandler(BaseHandler): identity_handler = self.hs.get_handlers().identity_handler logger.info("Getting validated threepid. 
threepidcreds: %r", (threepid_creds,)) - if ( - not password_servlet - or self.hs.config.email_password_reset_behaviour == "remote" - ): - threepid = yield identity_handler.threepid_from_creds(threepid_creds) - elif self.hs.config.email_password_reset_behaviour == "local": + if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + if medium == "email": + threepid = yield identity_handler.threepid_from_creds( + self.hs.config.account_threepid_delegate_email, threepid_creds + ) + elif medium == "msisdn": + threepid = yield identity_handler.threepid_from_creds( + self.hs.config.account_threepid_delegate_msisdn, threepid_creds + ) + else: + raise SynapseError(400, "Unrecognized threepid medium: %s" % (medium,)) + elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: row = yield self.store.get_threepid_validation_session( medium, threepid_creds["client_secret"], @@ -722,7 +713,7 @@ class AuthHandler(BaseHandler): known_login_type = True is_valid = yield provider.check_password(qualified_user_id, password) if is_valid: - return (qualified_user_id, None) + return qualified_user_id, None if not hasattr(provider, "get_supported_login_types") or not hasattr( provider, "check_auth" @@ -766,7 +757,7 @@ class AuthHandler(BaseHandler): ) if canonical_user_id: - return (canonical_user_id, None) + return canonical_user_id, None if not known_login_type: raise SynapseError(400, "Unknown login type %s" % login_type) @@ -816,7 +807,7 @@ class AuthHandler(BaseHandler): result = (result, None) return result - return (None, None) + return None, None @defer.inlineCallbacks def _check_local_password(self, user_id, password): diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 5c1cf83c9d..71a8f33da3 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -25,6 +25,7 @@ from synapse.api.errors import ( HttpResponseException, RequestSendFailed, ) +from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.types import RoomStreamToken, get_domain_from_id from synapse.util import stringutils from synapse.util.async_helpers import Linearizer @@ -45,6 +46,7 @@ class DeviceWorkerHandler(BaseHandler): self.state = hs.get_state_handler() self._auth_handler = hs.get_auth_handler() + @trace @defer.inlineCallbacks def get_devices_by_user(self, user_id): """ @@ -56,6 +58,7 @@ class DeviceWorkerHandler(BaseHandler): defer.Deferred: list[dict[str, X]]: info on each device """ + set_tag("user_id", user_id) device_map = yield self.store.get_devices_by_user(user_id) ips = yield self.store.get_last_client_ip_by_device(user_id, device_id=None) @@ -64,8 +67,10 @@ class DeviceWorkerHandler(BaseHandler): for device in devices: _update_device_from_client_ips(device, ips) + log_kv(device_map) return devices + @trace @defer.inlineCallbacks def get_device(self, user_id, device_id): """ Retrieve the given device @@ -85,9 +90,14 @@ class DeviceWorkerHandler(BaseHandler): raise errors.NotFoundError ips = yield self.store.get_last_client_ip_by_device(user_id, device_id) _update_device_from_client_ips(device, ips) + + set_tag("device", device) + set_tag("ips", ips) + return device @measure_func("device.get_user_ids_changed") + @trace @defer.inlineCallbacks def get_user_ids_changed(self, user_id, from_token): """Get list of users that have had the devices updated, or have newly @@ -97,6 +107,9 @@ class DeviceWorkerHandler(BaseHandler): user_id (str) from_token (StreamToken) """ + + set_tag("user_id", user_id) + set_tag("from_token", from_token) 
now_room_key = yield self.store.get_room_events_max_id() room_ids = yield self.store.get_rooms_for_user(user_id) @@ -148,6 +161,9 @@ class DeviceWorkerHandler(BaseHandler): # special-case for an empty prev state: include all members # in the changed list if not event_ids: + log_kv( + {"event": "encountered empty previous state", "room_id": room_id} + ) for key, event_id in iteritems(current_state_ids): etype, state_key = key if etype != EventTypes.Member: @@ -200,7 +216,11 @@ class DeviceWorkerHandler(BaseHandler): possibly_joined = [] possibly_left = [] - return {"changed": list(possibly_joined), "left": list(possibly_left)} + result = {"changed": list(possibly_joined), "left": list(possibly_left)} + + log_kv(result) + + return result class DeviceHandler(DeviceWorkerHandler): @@ -267,6 +287,7 @@ class DeviceHandler(DeviceWorkerHandler): raise errors.StoreError(500, "Couldn't generate a device ID.") + @trace @defer.inlineCallbacks def delete_device(self, user_id, device_id): """ Delete the given device @@ -284,6 +305,10 @@ class DeviceHandler(DeviceWorkerHandler): except errors.StoreError as e: if e.code == 404: # no match + set_tag("error", True) + log_kv( + {"reason": "User doesn't have device id.", "device_id": device_id} + ) pass else: raise @@ -296,6 +321,7 @@ class DeviceHandler(DeviceWorkerHandler): yield self.notify_device_update(user_id, [device_id]) + @trace @defer.inlineCallbacks def delete_all_devices_for_user(self, user_id, except_device_id=None): """Delete all of the user's devices @@ -331,6 +357,8 @@ class DeviceHandler(DeviceWorkerHandler): except errors.StoreError as e: if e.code == 404: # no match + set_tag("error", True) + set_tag("reason", "User doesn't have that device id.") pass else: raise @@ -371,6 +399,7 @@ class DeviceHandler(DeviceWorkerHandler): else: raise + @trace @measure_func("notify_device_update") @defer.inlineCallbacks def notify_device_update(self, user_id, device_ids): @@ -386,6 +415,8 @@ class DeviceHandler(DeviceWorkerHandler): hosts.update(get_domain_from_id(u) for u in users_who_share_room) hosts.discard(self.server_name) + set_tag("target_hosts", hosts) + position = yield self.store.add_device_change_to_streams( user_id, device_ids, list(hosts) ) @@ -405,6 +436,7 @@ class DeviceHandler(DeviceWorkerHandler): ) for host in hosts: self.federation_sender.send_device_messages(host) + log_kv({"message": "sent device update to host", "host": host}) @defer.inlineCallbacks def on_federation_query_user_devices(self, user_id): @@ -451,12 +483,15 @@ class DeviceListUpdater(object): iterable=True, ) + @trace @defer.inlineCallbacks def incoming_device_list_update(self, origin, edu_content): """Called on incoming device list update from federation. Responsible for parsing the EDU and adding to pending updates list. """ + set_tag("origin", origin) + set_tag("edu_content", edu_content) user_id = edu_content.pop("user_id") device_id = edu_content.pop("device_id") stream_id = str(edu_content.pop("stream_id")) # They may come as ints @@ -471,12 +506,30 @@ class DeviceListUpdater(object): device_id, origin, ) + + set_tag("error", True) + log_kv( + { + "message": "Got a device list update edu from a user and " + "device which does not match the origin of the request.", + "user_id": user_id, + "device_id": device_id, + } + ) return room_ids = yield self.store.get_rooms_for_user(user_id) if not room_ids: # We don't share any rooms with this user. Ignore update, as we # probably won't get any further updates. 
+ set_tag("error", True) + log_kv( + { + "message": "Got an update from a user for which " + "we don't share any rooms", + "other user_id": user_id, + } + ) logger.warning( "Got device list update edu for %r/%r, but don't share a room", user_id, @@ -578,6 +631,7 @@ class DeviceListUpdater(object): request: https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid """ + log_kv({"message": "Doing resync to update device list."}) # Fetch all devices for the user. origin = get_domain_from_id(user_id) try: @@ -594,13 +648,20 @@ class DeviceListUpdater(object): # eventually become consistent. return except FederationDeniedError as e: + set_tag("error", True) + log_kv({"reason": "FederationDeniedError"}) logger.info(e) return - except Exception: + except Exception as e: # TODO: Remember that we are now out of sync and try again # later + set_tag("error", True) + log_kv( + {"message": "Exception raised by federation request", "exception": e} + ) logger.exception("Failed to handle device list update for %s", user_id) return + log_kv({"result": result}) stream_id = result["stream_id"] devices = result["devices"] diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index e1ebb6346c..0043cbea17 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -15,9 +15,17 @@ import logging +from canonicaljson import json + from twisted.internet import defer from synapse.api.errors import SynapseError +from synapse.logging.opentracing import ( + get_active_span_text_map, + log_kv, + set_tag, + start_active_span, +) from synapse.types import UserID, get_domain_from_id from synapse.util.stringutils import random_string @@ -78,7 +86,8 @@ class DeviceMessageHandler(object): @defer.inlineCallbacks def send_device_message(self, sender_user_id, message_type, messages): - + set_tag("number_of_messages", len(messages)) + set_tag("sender", sender_user_id) local_messages = {} remote_messages = {} for user_id, by_device in messages.items(): @@ -100,15 +109,21 @@ class DeviceMessageHandler(object): message_id = random_string(16) + context = get_active_span_text_map() + remote_edu_contents = {} for destination, messages in remote_messages.items(): - remote_edu_contents[destination] = { - "messages": messages, - "sender": sender_user_id, - "type": message_type, - "message_id": message_id, - } + with start_active_span("to_device_for_user"): + set_tag("destination", destination) + remote_edu_contents[destination] = { + "messages": messages, + "sender": sender_user_id, + "type": message_type, + "message_id": message_id, + "org.matrix.opentracing_context": json.dumps(context), + } + log_kv({"local_messages": local_messages}) stream_id = yield self.store.add_messages_to_device_inbox( local_messages, remote_edu_contents ) @@ -117,6 +132,7 @@ class DeviceMessageHandler(object): "to_device_key", stream_id, users=local_messages.keys() ) + log_kv({"remote_messages": remote_messages}) for destination in remote_messages.keys(): # Enqueue a new federation transaction to send the new # device messages to each remote destination. 
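The devicemessage.py and units.py hunks above carry an opentracing span context across federation by embedding it in the to-device EDU content under the org.matrix.opentracing_context key as a JSON string. A minimal sketch of that wire shape follows; the user IDs, device ID and tracer field are invented for illustration, and in synapse the actual injection and extraction are done by get_active_span_text_map() and extract_text_map() from synapse.logging.opentracing, as imported in these hunks:

    # Sketch only: a to-device EDU content with a bundled span context.
    import json

    context = {"uber-trace-id": "463ac35c9f6413ad:a2fb4a1d1a96d312:0:1"}  # whatever the tracer injected

    edu_content = {
        "messages": {"@bob:example.org": {"DEVICEID": {"body": "hello"}}},
        "sender": "@alice:example.org",
        "type": "m.example.to_device",
        "message_id": "nCgcYYXWyEKfhOGO",
        "org.matrix.opentracing_context": json.dumps(context),
    }

    # Edu.get_context() (units.py above) reads this field back so that
    # TransactionManager.send_new_transaction can start its "send_transaction"
    # span following on from it, and Edu.strip_context() blanks the field again
    # depending on the destination whitelist check.
    recovered = json.loads(edu_content.get("org.matrix.opentracing_context", "{}"))
    assert recovered == context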
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 1f90b0d278..056fb97acb 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -24,6 +24,7 @@ from twisted.internet import defer from synapse.api.errors import CodeMessageException, SynapseError from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace from synapse.types import UserID, get_domain_from_id from synapse.util import unwrapFirstError from synapse.util.retryutils import NotRetryingDestination @@ -46,6 +47,7 @@ class E2eKeysHandler(object): "client_keys", self.on_federation_query_client_keys ) + @trace @defer.inlineCallbacks def query_devices(self, query_body, timeout): """ Handle a device key query from a client @@ -81,6 +83,9 @@ class E2eKeysHandler(object): else: remote_queries[user_id] = device_ids + set_tag("local_key_query", local_query) + set_tag("remote_key_query", remote_queries) + # First get local devices. failures = {} results = {} @@ -121,6 +126,7 @@ class E2eKeysHandler(object): r[user_id] = remote_queries[user_id] # Now fetch any devices that we don't have in our cache + @trace @defer.inlineCallbacks def do_remote_query(destination): """This is called when we are querying the device list of a user on @@ -185,6 +191,8 @@ class E2eKeysHandler(object): except Exception as e: failure = _exception_to_failure(e) failures[destination] = failure + set_tag("error", True) + set_tag("reason", failure) yield make_deferred_yieldable( defer.gatherResults( @@ -198,6 +206,7 @@ class E2eKeysHandler(object): return {"device_keys": results, "failures": failures} + @trace @defer.inlineCallbacks def query_local_devices(self, query): """Get E2E device keys for local users @@ -210,6 +219,7 @@ class E2eKeysHandler(object): defer.Deferred: (resolves to dict[string, dict[string, dict]]): map from user_id -> device_id -> device details """ + set_tag("local_query", query) local_query = [] result_dict = {} @@ -217,6 +227,14 @@ class E2eKeysHandler(object): # we use UserID.from_string to catch invalid user ids if not self.is_mine(UserID.from_string(user_id)): logger.warning("Request for keys for non-local user %s", user_id) + log_kv( + { + "message": "Requested a local key for a user which" + " was not local to the homeserver", + "user_id": user_id, + } + ) + set_tag("error", True) raise SynapseError(400, "Not a user here") if not device_ids: @@ -241,6 +259,7 @@ class E2eKeysHandler(object): r["unsigned"]["device_display_name"] = display_name result_dict[user_id][device_id] = r + log_kv(results) return result_dict @defer.inlineCallbacks @@ -251,6 +270,7 @@ class E2eKeysHandler(object): res = yield self.query_local_devices(device_keys_query) return {"device_keys": res} + @trace @defer.inlineCallbacks def claim_one_time_keys(self, query, timeout): local_query = [] @@ -265,6 +285,9 @@ class E2eKeysHandler(object): domain = get_domain_from_id(user_id) remote_queries.setdefault(domain, {})[user_id] = device_keys + set_tag("local_key_query", local_query) + set_tag("remote_key_query", remote_queries) + results = yield self.store.claim_e2e_one_time_keys(local_query) json_result = {} @@ -276,8 +299,10 @@ class E2eKeysHandler(object): key_id: json.loads(json_bytes) } + @trace @defer.inlineCallbacks def claim_client_keys(destination): + set_tag("destination", destination) device_keys = remote_queries[destination] try: remote_result = yield self.federation.claim_client_keys( @@ -290,6 +315,8 @@ class 
E2eKeysHandler(object): except Exception as e: failure = _exception_to_failure(e) failures[destination] = failure + set_tag("error", True) + set_tag("reason", failure) yield make_deferred_yieldable( defer.gatherResults( @@ -313,9 +340,11 @@ class E2eKeysHandler(object): ), ) + log_kv({"one_time_keys": json_result, "failures": failures}) return {"one_time_keys": json_result, "failures": failures} @defer.inlineCallbacks + @tag_args def upload_keys_for_user(self, user_id, device_id, keys): time_now = self.clock.time_msec() @@ -329,6 +358,13 @@ class E2eKeysHandler(object): user_id, time_now, ) + log_kv( + { + "message": "Updating device_keys for user.", + "user_id": user_id, + "device_id": device_id, + } + ) # TODO: Sign the JSON with the server key changed = yield self.store.set_e2e_device_keys( user_id, device_id, time_now, device_keys @@ -336,12 +372,24 @@ class E2eKeysHandler(object): if changed: # Only notify about device updates *if* the keys actually changed yield self.device_handler.notify_device_update(user_id, [device_id]) - + else: + log_kv({"message": "Not updating device_keys for user", "user_id": user_id}) one_time_keys = keys.get("one_time_keys", None) if one_time_keys: + log_kv( + { + "message": "Updating one_time_keys for device.", + "user_id": user_id, + "device_id": device_id, + } + ) yield self._upload_one_time_keys_for_user( user_id, device_id, time_now, one_time_keys ) + else: + log_kv( + {"message": "Did not update one_time_keys", "reason": "no keys given"} + ) # the device should have been registered already, but it may have been # deleted due to a race with a DELETE request. Or we may be using an @@ -352,6 +400,7 @@ class E2eKeysHandler(object): result = yield self.store.count_e2e_one_time_keys(user_id, device_id) + set_tag("one_time_key_counts", result) return {"one_time_key_counts": result} @defer.inlineCallbacks @@ -395,6 +444,7 @@ class E2eKeysHandler(object): (algorithm, key_id, encode_canonical_json(key).decode("ascii")) ) + log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys}) yield self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 41b871fc59..a9d80f708c 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -26,6 +26,7 @@ from synapse.api.errors import ( StoreError, SynapseError, ) +from synapse.logging.opentracing import log_kv, trace from synapse.util.async_helpers import Linearizer logger = logging.getLogger(__name__) @@ -49,6 +50,7 @@ class E2eRoomKeysHandler(object): # changed. 
self._upload_linearizer = Linearizer("upload_room_keys_lock") + @trace @defer.inlineCallbacks def get_room_keys(self, user_id, version, room_id=None, session_id=None): """Bulk get the E2E room keys for a given backup, optionally filtered to a given @@ -84,8 +86,10 @@ class E2eRoomKeysHandler(object): user_id, version, room_id, session_id ) + log_kv(results) return results + @trace @defer.inlineCallbacks def delete_room_keys(self, user_id, version, room_id=None, session_id=None): """Bulk delete the E2E room keys for a given backup, optionally filtered to a given @@ -107,6 +111,7 @@ class E2eRoomKeysHandler(object): with (yield self._upload_linearizer.queue(user_id)): yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id) + @trace @defer.inlineCallbacks def upload_room_keys(self, user_id, version, room_keys): """Bulk upload a list of room keys into a given backup version, asserting @@ -186,7 +191,14 @@ class E2eRoomKeysHandler(object): session_id(str): the session whose room_key we're setting room_key(dict): the room_key being set """ - + log_kv( + { + "message": "Trying to upload room key", + "room_id": room_id, + "session_id": session_id, + "user_id": user_id, + } + ) # get the room_key for this particular row current_room_key = None try: @@ -195,14 +207,23 @@ class E2eRoomKeysHandler(object): ) except StoreError as e: if e.code == 404: - pass + log_kv( + { + "message": "Room key not found.", + "room_id": room_id, + "user_id": user_id, + } + ) else: raise if self._should_replace_room_key(current_room_key, room_key): + log_kv({"message": "Replacing room key."}) yield self.store.set_e2e_room_key( user_id, version, room_id, session_id, room_key ) + else: + log_kv({"message": "Not replacing room_key."}) @staticmethod def _should_replace_room_key(current_room_key, room_key): @@ -236,6 +257,7 @@ class E2eRoomKeysHandler(object): return False return True + @trace @defer.inlineCallbacks def create_version(self, user_id, version_info): """Create a new backup version. This automatically becomes the new @@ -294,6 +316,7 @@ class E2eRoomKeysHandler(object): raise return res + @trace @defer.inlineCallbacks def delete_version(self, user_id, version=None): """Deletes a given version of the user's e2e_room_keys backup @@ -314,6 +337,7 @@ class E2eRoomKeysHandler(object): else: raise + @trace @defer.inlineCallbacks def update_version(self, user_id, version, version_info): """Update the info about a given version of the user's backup diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 2f1f10a9af..5e748687e3 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -167,7 +167,6 @@ class EventHandler(BaseHandler): if not event: return None - return users = yield self.store.get_users_in_room(event.room_id) is_peeking = user.to_string() not in users diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c86903b98b..f72b81d419 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -326,8 +326,9 @@ class FederationHandler(BaseHandler): ours = yield self.store.get_state_groups_ids(room_id, seen) # state_maps is a list of mappings from (type, state_key) to event_id - # type: list[dict[tuple[str, str], str]] - state_maps = list(ours.values()) + state_maps = list( + ours.values() + ) # type: list[dict[tuple[str, str], str]] # we don't need this any more, let's delete it. 
del ours @@ -1427,7 +1428,7 @@ class FederationHandler(BaseHandler): assert event.user_id == user_id assert event.state_key == user_id assert event.room_id == room_id - return (origin, event, format_ver) + return origin, event, format_ver @defer.inlineCallbacks @log_function @@ -2529,12 +2530,17 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks @log_function - def on_exchange_third_party_invite_request(self, origin, room_id, event_dict): + def on_exchange_third_party_invite_request(self, room_id, event_dict): """Handle an exchange_third_party_invite request from a remote server The remote server will call this when it wants to turn a 3pid invite into a normal m.room.member invite. + Args: + room_id (str): The ID of the room. + + event_dict (dict[str, Any]): Dictionary containing the event body. + Returns: Deferred: resolves (to None) """ diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index d199521b58..512f38e5a6 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -29,6 +29,7 @@ from synapse.api.errors import ( HttpResponseException, SynapseError, ) +from synapse.util.stringutils import random_string from ._base import BaseHandler @@ -41,86 +42,130 @@ class IdentityHandler(BaseHandler): self.http_client = hs.get_simple_http_client() self.federation_http_client = hs.get_http_client() + self.hs = hs - self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers) - self.trust_any_id_server_just_for_testing_do_not_use = ( - hs.config.use_insecure_ssl_client_just_for_testing_do_not_use - ) + def _extract_items_from_creds_dict(self, creds): + """ + Retrieve entries from a "credentials" dictionary - def _should_trust_id_server(self, id_server): - if id_server not in self.trusted_id_servers: - if self.trust_any_id_server_just_for_testing_do_not_use: - logger.warn( - "Trusting untrustworthy ID server %r even though it isn't" - " in the trusted id list for testing because" - " 'use_insecure_ssl_client_just_for_testing_do_not_use'" - " is set in the config", - id_server, - ) - else: - return False - return True + Args: + creds (dict[str, str]): Dictionary of credentials that contain the following keys: + * client_secret|clientSecret: A unique secret str provided by the client + * id_server|idServer: the domain of the identity server to query + * id_access_token: The access token to authenticate to the identity + server with. + + Returns: + tuple(str, str, str|None): A tuple containing the client_secret, the id_server, + and the id_access_token value if available. 
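# A standalone sketch of the creds parsing documented above: accept both the
# snake_case and camelCase spellings of each key and treat id_access_token as
# optional. ValueError stands in for the SynapseError raised in the real code,
# and the function name is illustrative only.
def extract_creds_sketch(creds):
    client_secret = creds.get("client_secret") or creds.get("clientSecret")
    if not client_secret:
        raise ValueError("No client_secret in creds")
    id_server = creds.get("id_server") or creds.get("idServer")
    if not id_server:
        raise ValueError("No id_server in creds")
    return client_secret, id_server, creds.get("id_access_token")

# e.g. extract_creds_sketch({"clientSecret": "abc", "id_server": "id.example.com"})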
+ """ + client_secret = creds.get("client_secret") or creds.get("clientSecret") + if not client_secret: + raise SynapseError( + 400, "No client_secret in creds", errcode=Codes.MISSING_PARAM + ) + + id_server = creds.get("id_server") or creds.get("idServer") + if not id_server: + raise SynapseError( + 400, "No id_server in creds", errcode=Codes.MISSING_PARAM + ) + + id_access_token = creds.get("id_access_token") + return client_secret, id_server, id_access_token @defer.inlineCallbacks - def threepid_from_creds(self, creds): - if "id_server" in creds: - id_server = creds["id_server"] - elif "idServer" in creds: - id_server = creds["idServer"] - else: - raise SynapseError(400, "No id_server in creds") + def threepid_from_creds(self, id_server, creds): + """ + Retrieve and validate a threepid identifier from a "credentials" dictionary against a + given identity server - if "client_secret" in creds: - client_secret = creds["client_secret"] - elif "clientSecret" in creds: - client_secret = creds["clientSecret"] - else: - raise SynapseError(400, "No client_secret in creds") + Args: + id_server (str|None): The identity server to validate 3PIDs against. If None, + we will attempt to extract id_server creds - if not self._should_trust_id_server(id_server): - logger.warn( - "%s is not a trusted ID server: rejecting 3pid " + "credentials", - id_server, - ) - return None + creds (dict[str, str]): Dictionary containing the following keys: + * id_server|idServer: An optional domain name of an identity server + * client_secret|clientSecret: A unique secret str provided by the client + * sid: The ID of the validation session - try: - data = yield self.http_client.get_json( - "https://%s%s" - % (id_server, "/_matrix/identity/api/v1/3pid/getValidated3pid"), - {"sid": creds["sid"], "client_secret": client_secret}, + Returns: + Deferred[dict[str,str|int]|None]: A dictionary consisting of response params to + the /getValidated3pid endpoint of the Identity Service API, or None if the + threepid was not found + """ + client_secret = creds.get("client_secret") or creds.get("clientSecret") + if not client_secret: + raise SynapseError( + 400, "Missing param client_secret in creds", errcode=Codes.MISSING_PARAM ) - except HttpResponseException as e: - logger.info("getValidated3pid failed with Matrix error: %r", e) - raise e.to_synapse_error() + session_id = creds.get("sid") + if not session_id: + raise SynapseError( + 400, "Missing param session_id in creds", errcode=Codes.MISSING_PARAM + ) + if not id_server: + # Attempt to get the id_server from the creds dict + id_server = creds.get("id_server") or creds.get("idServer") + if not id_server: + raise SynapseError( + 400, "Missing param id_server in creds", errcode=Codes.MISSING_PARAM + ) - if "medium" in data: - return data - return None + query_params = {"sid": session_id, "client_secret": client_secret} + + url = "https://%s%s" % ( + id_server, + "/_matrix/identity/api/v1/3pid/getValidated3pid", + ) + + data = yield self.http_client.get_json(url, query_params) + return data if "medium" in data else None @defer.inlineCallbacks - def bind_threepid(self, creds, mxid): + def bind_threepid(self, creds, mxid, use_v2=True): + """Bind a 3PID to an identity server + + Args: + creds (dict[str, str]): Dictionary of credentials that contain the following keys: + * client_secret|clientSecret: A unique secret str provided by the client + * id_server|idServer: the domain of the identity server to query + * id_access_token: The access token to authenticate to the identity + server 
with. Required if use_v2 is true + mxid (str): The MXID to bind the 3PID to + use_v2 (bool): Whether to use v2 Identity Service API endpoints + + Returns: + Deferred[dict]: The response from the identity server + """ logger.debug("binding threepid %r to %s", creds, mxid) - data = None - if "id_server" in creds: - id_server = creds["id_server"] - elif "idServer" in creds: - id_server = creds["idServer"] - else: - raise SynapseError(400, "No id_server in creds") + client_secret, id_server, id_access_token = self._extract_items_from_creds_dict( + creds + ) - if "client_secret" in creds: - client_secret = creds["client_secret"] - elif "clientSecret" in creds: - client_secret = creds["clientSecret"] + sid = creds.get("sid") + if not sid: + raise SynapseError( + 400, "No sid in three_pid_creds", errcode=Codes.MISSING_PARAM + ) + + # If an id_access_token is not supplied, force usage of v1 + if id_access_token is None: + use_v2 = False + + # Decide which API endpoint URLs to use + headers = {} + bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid} + if use_v2: + bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,) + headers["Authorization"] = create_id_access_token_header(id_access_token) else: - raise SynapseError(400, "No client_secret in creds") + bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,) try: data = yield self.http_client.post_json_get_json( - "https://%s%s" % (id_server, "/_matrix/identity/api/v1/3pid/bind"), - {"sid": creds["sid"], "client_secret": client_secret, "mxid": mxid}, + bind_url, bind_data, headers=headers ) logger.debug("bound threepid %r to %s", creds, mxid) @@ -131,13 +176,23 @@ class IdentityHandler(BaseHandler): address=data["address"], id_server=id_server, ) + + return data + except HttpResponseException as e: + if e.code != 404 or not use_v2: + logger.error("3PID bind failed with Matrix error: %r", e) + raise e.to_synapse_error() except CodeMessageException as e: data = json.loads(e.msg) # XXX WAT? 
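# A rough, synchronous sketch of the bind flow above, using the well-known
# `requests` library instead of Synapse's SimpleHttpClient. The function name and
# the use of `requests` are illustrative assumptions; the v2-then-v1 URL shapes
# and the Bearer header mirror the diff.
import requests

def bind_threepid_sketch(id_server, sid, client_secret, mxid, id_access_token=None):
    use_v2 = id_access_token is not None
    body = {"sid": sid, "client_secret": client_secret, "mxid": mxid}
    headers = {}
    if use_v2:
        url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,)
        headers["Authorization"] = "Bearer %s" % (id_access_token,)
    else:
        url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,)
    resp = requests.post(url, json=body, headers=headers)
    if resp.status_code == 404 and use_v2:
        # Old identity server with no v2 API: retry against the v1 endpoint.
        return bind_threepid_sketch(id_server, sid, client_secret, mxid)
    resp.raise_for_status()
    return resp.json()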
- return data + return data + + logger.info("Got 404 when POSTing JSON %s, falling back to v1 URL", bind_url) + return (yield self.bind_threepid(creds, mxid, use_v2=False)) @defer.inlineCallbacks def try_unbind_threepid(self, mxid, threepid): - """Removes a binding from an identity server + """Attempt to remove a 3PID from an identity server, or if one is not provided, all + identity servers we're aware the binding is present on Args: mxid (str): Matrix user ID of binding to be removed @@ -188,6 +243,8 @@ class IdentityHandler(BaseHandler): server doesn't support unbinding """ url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,) + url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii") + content = { "mxid": mxid, "threepid": {"medium": threepid["medium"], "address": threepid["address"]}, @@ -199,7 +256,7 @@ class IdentityHandler(BaseHandler): auth_headers = self.federation_http_client.build_auth_headers( destination=None, method="POST", - url_bytes="/_matrix/identity/api/v1/3pid/unbind".encode("ascii"), + url_bytes=url_bytes, content=content, destination_is=id_server, ) @@ -227,27 +284,121 @@ class IdentityHandler(BaseHandler): return changed @defer.inlineCallbacks + def send_threepid_validation( + self, + email_address, + client_secret, + send_attempt, + send_email_func, + next_link=None, + ): + """Send a threepid validation email for password reset or + registration purposes + + Args: + email_address (str): The user's email address + client_secret (str): The provided client secret + send_attempt (int): Which send attempt this is + send_email_func (func): A function that takes an email address, token, + client_secret and session_id, sends an email + and returns a Deferred. + next_link (str|None): The URL to redirect the user to after validation + + Returns: + The new session_id upon success + + Raises: + SynapseError is an error occurred when sending the email + """ + # Check that this email/client_secret/send_attempt combo is new or + # greater than what we've seen previously + session = yield self.store.get_threepid_validation_session( + "email", client_secret, address=email_address, validated=False + ) + + # Check to see if a session already exists and that it is not yet + # marked as validated + if session and session.get("validated_at") is None: + session_id = session["session_id"] + last_send_attempt = session["last_send_attempt"] + + # Check that the send_attempt is higher than previous attempts + if send_attempt <= last_send_attempt: + # If not, just return a success without sending an email + return session_id + else: + # An non-validated session does not exist yet. 
+ # Generate a session id + session_id = random_string(16) + + # Generate a new validation token + token = random_string(32) + + # Send the mail with the link containing the token, client_secret + # and session_id + try: + yield send_email_func(email_address, token, client_secret, session_id) + except Exception: + logger.exception( + "Error sending threepid validation email to %s", email_address + ) + raise SynapseError(500, "An error was encountered when sending the email") + + token_expires = ( + self.hs.clock.time_msec() + self.hs.config.email_validation_token_lifetime + ) + + yield self.store.start_or_continue_validation_session( + "email", + email_address, + session_id, + client_secret, + send_attempt, + next_link, + token, + token_expires, + ) + + return session_id + + @defer.inlineCallbacks def requestEmailToken( self, id_server, email, client_secret, send_attempt, next_link=None ): - if not self._should_trust_id_server(id_server): - raise SynapseError( - 400, "Untrusted ID server '%s'" % id_server, Codes.SERVER_NOT_TRUSTED - ) + """ + Request an external server send an email on our behalf for the purposes of threepid + validation. + + Args: + id_server (str): The identity server to proxy to + email (str): The email to send the message to + client_secret (str): The unique client_secret sends by the user + send_attempt (int): Which attempt this is + next_link: A link to redirect the user to once they submit the token + Returns: + The json response body from the server + """ params = { "email": email, "client_secret": client_secret, "send_attempt": send_attempt, } - if next_link: - params.update({"next_link": next_link}) + params["next_link"] = next_link + + if self.hs.config.using_identity_server_from_trusted_list: + # Warn that a deprecated config option is in use + logger.warn( + 'The config option "trust_identity_server_for_password_resets" ' + 'has been replaced by "account_threepid_delegate". ' + "Please consult the sample config at docs/sample_config.yaml for " + "details and update your config file." + ) try: data = yield self.http_client.post_json_get_json( - "https://%s%s" - % (id_server, "/_matrix/identity/api/v1/validate/email/requestToken"), + id_server + "/_matrix/identity/api/v1/validate/email/requestToken", params, ) return data @@ -257,28 +408,85 @@ class IdentityHandler(BaseHandler): @defer.inlineCallbacks def requestMsisdnToken( - self, id_server, country, phone_number, client_secret, send_attempt, **kwargs + self, + id_server, + country, + phone_number, + client_secret, + send_attempt, + next_link=None, ): - if not self._should_trust_id_server(id_server): - raise SynapseError( - 400, "Untrusted ID server '%s'" % id_server, Codes.SERVER_NOT_TRUSTED - ) + """ + Request an external server send an SMS message on our behalf for the purposes of + threepid validation. 
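# A distilled sketch of the session reuse / token generation above: reuse an
# existing unvalidated session, and only skip sending mail when send_attempt has
# not increased. secrets.token_hex is a stand-in for Synapse's random_string
# helper, and the function name is invented for illustration.
import secrets

def start_validation_sketch(existing_session, send_attempt):
    if existing_session and existing_session.get("validated_at") is None:
        session_id = existing_session["session_id"]
        if send_attempt <= existing_session["last_send_attempt"]:
            # Nothing new to send: hand back the same session id, no new token.
            return session_id, None
    else:
        # No unvalidated session yet: mint a new session id.
        session_id = secrets.token_hex(8)
    # A fresh validation token is issued whenever mail is actually sent.
    token = secrets.token_hex(16)
    return session_id, token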
+ Args: + id_server (str): The identity server to proxy to + country (str): The country code of the phone number + phone_number (str): The number to send the message to + client_secret (str): The unique client_secret sends by the user + send_attempt (int): Which attempt this is + next_link: A link to redirect the user to once they submit the token + Returns: + The json response body from the server + """ params = { "country": country, "phone_number": phone_number, "client_secret": client_secret, "send_attempt": send_attempt, } - params.update(kwargs) + if next_link: + params["next_link"] = next_link + + if self.hs.config.using_identity_server_from_trusted_list: + # Warn that a deprecated config option is in use + logger.warn( + 'The config option "trust_identity_server_for_password_resets" ' + 'has been replaced by "account_threepid_delegate". ' + "Please consult the sample config at docs/sample_config.yaml for " + "details and update your config file." + ) try: data = yield self.http_client.post_json_get_json( - "https://%s%s" - % (id_server, "/_matrix/identity/api/v1/validate/msisdn/requestToken"), + id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken", params, ) return data except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() + + +def create_id_access_token_header(id_access_token): + """Create an Authorization header for passing to SimpleHttpClient as the header value + of an HTTP request. + + Args: + id_access_token (str): An identity server access token. + + Returns: + list[str]: The ascii-encoded bearer token encased in a list. + """ + # Prefix with Bearer + bearer_token = "Bearer %s" % id_access_token + + # Encode headers to standard ascii + bearer_token.encode("ascii") + + # Return as a list as that's how SimpleHttpClient takes header values + return [bearer_token] + + +class LookupAlgorithm: + """ + Supported hashing algorithms when performing a 3PID lookup. + + SHA256 - Hashing an (address, medium, pepper) combo with sha256, then url-safe base64 + encoding + NONE - Not performing any hashing. Simply sending an (address, medium) combo in plaintext + """ + + SHA256 = "sha256" + NONE = "none" diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 42d6650ed9..f991efeee3 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -449,8 +449,7 @@ class InitialSyncHandler(BaseHandler): # * The user is a guest user, and has joined the room # else it will throw. 
member_event = yield self.auth.check_user_was_in_room(room_id, user_id) - return (member_event.membership, member_event.event_id) - return + return member_event.membership, member_event.event_id except AuthError: visibility = yield self.state_handler.get_current_state( room_id, EventTypes.RoomHistoryVisibility, "" @@ -459,8 +458,7 @@ class InitialSyncHandler(BaseHandler): visibility and visibility.content["history_visibility"] == "world_readable" ): - return (Membership.JOIN, None) - return + return Membership.JOIN, None raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index a5e23c4caf..1f8272784e 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -24,7 +24,7 @@ from twisted.internet import defer from twisted.internet.defer import succeed from synapse import event_auth -from synapse.api.constants import EventTypes, Membership, RelationTypes +from synapse.api.constants import EventTypes, Membership, RelationTypes, UserTypes from synapse.api.errors import ( AuthError, Codes, @@ -469,6 +469,9 @@ class EventCreationHandler(object): u = yield self.store.get_user_by_id(user_id) assert u is not None + if u["user_type"] in (UserTypes.SUPPORT, UserTypes.BOT): + # support and bot users are not required to consent + return if u["appservice_id"] is not None: # users registered by an appservice are exempt return @@ -726,7 +729,27 @@ class EventCreationHandler(object): assert not self.config.worker_app if ratelimit: - yield self.base_handler.ratelimit(requester) + # We check if this is a room admin redacting an event so that we + # can apply different ratelimiting. We do this by simply checking + # it's not a self-redaction (to avoid having to look up whether the + # user is actually admin or not). 
+ is_admin_redaction = False + if event.type == EventTypes.Redaction: + original_event = yield self.store.get_event( + event.redacts, + check_redacted=False, + get_prev_content=False, + allow_rejected=False, + allow_none=True, + ) + + is_admin_redaction = ( + original_event and event.sender != original_event.sender + ) + + yield self.base_handler.ratelimit( + requester, is_admin_redaction=is_admin_redaction + ) yield self.base_handler.maybe_kick_guest_users(event, context) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index d83aab3f74..5744f4579d 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -70,6 +70,7 @@ class PaginationHandler(object): self.auth = hs.get_auth() self.store = hs.get_datastore() self.clock = hs.get_clock() + self._server_name = hs.hostname self.pagination_lock = ReadWriteLock() self._purges_in_progress_by_room = set() @@ -153,6 +154,22 @@ class PaginationHandler(object): """ return self._purges_by_id.get(purge_id) + async def purge_room(self, room_id): + """Purge the given room from the database""" + with (await self.pagination_lock.write(room_id)): + # check we know about the room + await self.store.get_room_version(room_id) + + # first check that we have no users in this room + joined = await defer.maybeDeferred( + self.store.is_host_joined, room_id, self._server_name + ) + + if joined: + raise SynapseError(400, "Users are still joined to this room") + + await self.store.purge_room(room_id) + @defer.inlineCallbacks def get_messages( self, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 94a9ca0357..053cf66b28 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -255,7 +255,7 @@ class PresenceHandler(object): self.unpersisted_users_changes = set() if unpersisted: - logger.info("Persisting %d upersisted presence updates", len(unpersisted)) + logger.info("Persisting %d unpersisted presence updates", len(unpersisted)) yield self.store.update_presence( [self.user_to_current_state[user_id] for user_id in unpersisted] ) @@ -1032,7 +1032,7 @@ class PresenceEventSource(object): # # Hence this guard where we just return nothing so that the sync # doesn't return. C.f. #5503. 
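# The purge_room checks above, reduced to their essentials. `store` is a
# hypothetical stand-in exposing the same datastore calls that appear in the
# diff; in Synapse the whole block also runs under the pagination write lock.
async def purge_room_sketch(store, room_id, server_name):
    await store.get_room_version(room_id)  # raises if the room is unknown
    if await store.is_host_joined(room_id, server_name):
        raise RuntimeError("Users are still joined to this room")
    await store.purge_room(room_id)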
- return ([], max_token) + return [], max_token presence = self.get_presence_handler() stream_change_cache = self.store.presence_stream_cache @@ -1279,7 +1279,7 @@ def get_interested_parties(store, states): # Always notify self users_to_states.setdefault(state.user_id, []).append(state) - return (room_ids_to_states, users_to_states) + return room_ids_to_states, users_to_states @defer.inlineCallbacks diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 2cc237e6a5..8690f69d45 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -34,7 +34,7 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) -MAX_DISPLAYNAME_LEN = 100 +MAX_DISPLAYNAME_LEN = 256 MAX_AVATAR_URL_LEN = 1000 diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 73973502a4..6854c751a6 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -148,7 +148,7 @@ class ReceiptEventSource(object): to_key = yield self.get_current_key() if from_key == to_key: - return ([], to_key) + return [], to_key events = yield self.store.get_linearized_receipts_for_rooms( room_ids, from_key=from_key, to_key=to_key diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 4631fab94e..06bd03b77c 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -24,13 +24,11 @@ from synapse.api.errors import ( AuthError, Codes, ConsentNotGivenError, - InvalidCaptchaError, LimitExceededError, RegistrationError, SynapseError, ) from synapse.config.server import is_threepid_reserved -from synapse.http.client import CaptchaServerHttpClient from synapse.http.servlet import assert_params_in_dict from synapse.replication.http.login import RegisterDeviceReplicationServlet from synapse.replication.http.register import ( @@ -39,7 +37,6 @@ from synapse.replication.http.register import ( ) from synapse.types import RoomAlias, RoomID, UserID, create_requester from synapse.util.async_helpers import Linearizer -from synapse.util.threepids import check_3pid_allowed from ._base import BaseHandler @@ -59,7 +56,6 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() - self.captcha_client = CaptchaServerHttpClient(hs) self.identity_handler = self.hs.get_handlers().identity_handler self.ratelimiter = hs.get_registration_ratelimiter() @@ -279,16 +275,12 @@ class RegistrationHandler(BaseHandler): fake_requester = create_requester(user_id) # try to create the room if we're the first real user on the server. Note - # that an auto-generated support user is not a real user and will never be + # that an auto-generated support or bot user is not a real user and will never be # the user to create the room should_auto_create_rooms = False - is_support = yield self.store.is_support_user(user_id) - # There is an edge case where the first user is the support user, then - # the room is never created, though this seems unlikely and - # recoverable from given the support user being involved in the first - # place. 
- if self.hs.config.autocreate_auto_join_rooms and not is_support: - count = yield self.store.count_all_users() + is_real_user = yield self.store.is_real_user(user_id) + if self.hs.config.autocreate_auto_join_rooms and is_real_user: + count = yield self.store.count_real_users() should_auto_create_rooms = count == 1 for r in self.hs.config.auto_join_rooms: logger.info("Auto-joining %s to %s", user_id, r) @@ -362,70 +354,6 @@ class RegistrationHandler(BaseHandler): ) return user_id - @defer.inlineCallbacks - def check_recaptcha(self, ip, private_key, challenge, response): - """ - Checks a recaptcha is correct. - - Used only by c/s api v1 - """ - - captcha_response = yield self._validate_captcha( - ip, private_key, challenge, response - ) - if not captcha_response["valid"]: - logger.info( - "Invalid captcha entered from %s. Error: %s", - ip, - captcha_response["error_url"], - ) - raise InvalidCaptchaError(error_url=captcha_response["error_url"]) - else: - logger.info("Valid captcha entered from %s", ip) - - @defer.inlineCallbacks - def register_email(self, threepidCreds): - """ - Registers emails with an identity server. - - Used only by c/s api v1 - """ - - for c in threepidCreds: - logger.info( - "validating threepidcred sid %s on id server %s", - c["sid"], - c["idServer"], - ) - try: - threepid = yield self.identity_handler.threepid_from_creds(c) - except Exception: - logger.exception("Couldn't validate 3pid") - raise RegistrationError(400, "Couldn't validate 3pid") - - if not threepid: - raise RegistrationError(400, "Couldn't validate 3pid") - logger.info( - "got threepid with medium '%s' and address '%s'", - threepid["medium"], - threepid["address"], - ) - - if not check_3pid_allowed(self.hs, threepid["medium"], threepid["address"]): - raise RegistrationError(403, "Third party identifier is not allowed") - - @defer.inlineCallbacks - def bind_emails(self, user_id, threepidCreds): - """Links emails with a user ID and informs an identity server. - - Used only by c/s api v1 - """ - - # Now we have a matrix ID, bind it to the threepids we were given - for c in threepidCreds: - # XXX: This should be a deferred list, shouldn't it? - yield self.identity_handler.bind_threepid(c, user_id) - def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None): # don't allow people to register the server notices mxid if self._server_notices_mxid is not None: @@ -464,44 +392,7 @@ class RegistrationHandler(BaseHandler): return str(id) @defer.inlineCallbacks - def _validate_captcha(self, ip_addr, private_key, challenge, response): - """Validates the captcha provided. - - Used only by c/s api v1 - - Returns: - dict: Containing 'valid'(bool) and 'error_url'(str) if invalid. - - """ - response = yield self._submit_captcha(ip_addr, private_key, challenge, response) - # parse Google's response. Lovely format.. - lines = response.split("\n") - json = { - "valid": lines[0] == "true", - "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" 
- + "error=%s" % lines[1], - } - return json - - @defer.inlineCallbacks - def _submit_captcha(self, ip_addr, private_key, challenge, response): - """ - Used only by c/s api v1 - """ - data = yield self.captcha_client.post_urlencoded_get_raw( - "http://www.recaptcha.net:80/recaptcha/api/verify", - args={ - "privatekey": private_key, - "remoteip": ip_addr, - "challenge": challenge, - "response": response, - }, - ) - return data - - @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): - room_id = None room_member_handler = self.hs.get_room_member_handler() if RoomID.is_valid(room_identifier): room_id = room_identifier @@ -622,7 +513,7 @@ class RegistrationHandler(BaseHandler): initial_display_name=initial_display_name, is_guest=is_guest, ) - return (r["device_id"], r["access_token"]) + return r["device_id"], r["access_token"] valid_until_ms = None if self.session_lifetime is not None: @@ -648,9 +539,7 @@ class RegistrationHandler(BaseHandler): return (device_id, access_token) @defer.inlineCallbacks - def post_registration_actions( - self, user_id, auth_result, access_token, bind_email, bind_msisdn - ): + def post_registration_actions(self, user_id, auth_result, access_token): """A user has completed registration Args: @@ -659,18 +548,10 @@ class RegistrationHandler(BaseHandler): registered user. access_token (str|None): The access token of the newly logged in device, or None if `inhibit_login` enabled. - bind_email (bool): Whether to bind the email with the identity - server. - bind_msisdn (bool): Whether to bind the msisdn with the identity - server. """ if self.hs.config.worker_app: yield self._post_registration_client( - user_id=user_id, - auth_result=auth_result, - access_token=access_token, - bind_email=bind_email, - bind_msisdn=bind_msisdn, + user_id=user_id, auth_result=auth_result, access_token=access_token ) return @@ -683,13 +564,11 @@ class RegistrationHandler(BaseHandler): ): yield self.store.upsert_monthly_active_user(user_id) - yield self._register_email_threepid( - user_id, threepid, access_token, bind_email - ) + yield self._register_email_threepid(user_id, threepid, access_token) if auth_result and LoginType.MSISDN in auth_result: threepid = auth_result[LoginType.MSISDN] - yield self._register_msisdn_threepid(user_id, threepid, bind_msisdn) + yield self._register_msisdn_threepid(user_id, threepid) if auth_result and LoginType.TERMS in auth_result: yield self._on_user_consented(user_id, self.hs.config.user_consent_version) @@ -708,14 +587,12 @@ class RegistrationHandler(BaseHandler): yield self.post_consent_actions(user_id) @defer.inlineCallbacks - def _register_email_threepid(self, user_id, threepid, token, bind_email): + def _register_email_threepid(self, user_id, threepid, token): """Add an email address as a 3pid identifier Also adds an email pusher for the email address, if configured in the HS config - Also optionally binds emails to the given user_id on the identity server - Must be called on master. Args: @@ -723,8 +600,6 @@ class RegistrationHandler(BaseHandler): threepid (object): m.login.email.identity auth response token (str|None): access_token for the user, or None if not logged in. 
- bind_email (bool): true if the client requested the email to be - bound at the identity server Returns: defer.Deferred: """ @@ -766,29 +641,15 @@ class RegistrationHandler(BaseHandler): data={}, ) - if bind_email: - logger.info("bind_email specified: binding") - logger.debug("Binding emails %s to %s" % (threepid, user_id)) - yield self.identity_handler.bind_threepid( - threepid["threepid_creds"], user_id - ) - else: - logger.info("bind_email not specified: not binding email") - @defer.inlineCallbacks - def _register_msisdn_threepid(self, user_id, threepid, bind_msisdn): + def _register_msisdn_threepid(self, user_id, threepid): """Add a phone number as a 3pid identifier - Also optionally binds msisdn to the given user_id on the identity server - Must be called on master. Args: user_id (str): id of user threepid (object): m.login.msisdn auth response - token (str): access_token for the user - bind_email (bool): true if the client requested the email to be - bound at the identity server Returns: defer.Deferred: """ @@ -804,12 +665,3 @@ class RegistrationHandler(BaseHandler): yield self._auth_handler.add_threepid( user_id, threepid["medium"], threepid["address"], threepid["validated_at"] ) - - if bind_msisdn: - logger.info("bind_msisdn specified: binding") - logger.debug("Binding msisdn %s to %s", threepid, user_id) - yield self.identity_handler.bind_threepid( - threepid["threepid_creds"], user_id - ) - else: - logger.info("bind_msisdn not specified: not binding msisdn") diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 5caa90c3b7..970be3c846 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -560,6 +560,18 @@ class RoomCreationHandler(BaseHandler): yield self.event_creation_handler.assert_accepted_privacy_policy(requester) + power_level_content_override = config.get("power_level_content_override") + if ( + power_level_content_override + and "users" in power_level_content_override + and user_id not in power_level_content_override["users"] + ): + raise SynapseError( + 400, + "Not a valid power_level_content_override: 'users' did not contain %s" + % (user_id,), + ) + invite_3pid_list = config.get("invite_3pid", []) visibility = config.get("visibility", None) @@ -567,8 +579,8 @@ class RoomCreationHandler(BaseHandler): room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public) + directory_handler = self.hs.get_handlers().directory_handler if room_alias: - directory_handler = self.hs.get_handlers().directory_handler yield directory_handler.create_association( requester=requester, room_id=room_id, @@ -604,7 +616,7 @@ class RoomCreationHandler(BaseHandler): initial_state=initial_state, creation_content=creation_content, room_alias=room_alias, - power_level_content_override=config.get("power_level_content_override"), + power_level_content_override=power_level_content_override, creator_join_profile=creator_join_profile, ) @@ -653,6 +665,7 @@ class RoomCreationHandler(BaseHandler): for invite_3pid in invite_3pid_list: id_server = invite_3pid["id_server"] + id_access_token = invite_3pid.get("id_access_token") # optional address = invite_3pid["address"] medium = invite_3pid["medium"] yield self.hs.get_room_member_handler().do_3pid_invite( @@ -663,6 +676,7 @@ class RoomCreationHandler(BaseHandler): id_server, requester, txn_id=None, + id_access_token=id_access_token, ) result = {"room_id": room_id} @@ -840,7 +854,6 @@ class RoomContextHandler(object): ) if not event: return None - return filtered = yield (filter_evts([event])) if not 
filtered: diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index e9094ad02b..a7e55f00e5 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -25,6 +25,7 @@ from unpaddedbase64 import decode_base64, encode_base64 from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules +from synapse.api.errors import Codes, HttpResponseException from synapse.types import ThirdPartyInstanceID from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.descriptors import cachedInlineCallbacks @@ -485,7 +486,33 @@ class RoomListHandler(BaseHandler): return {"chunk": [], "total_room_count_estimate": 0} if search_filter: - # We currently don't support searching across federation, so we have + # Searching across federation is defined in MSC2197. + # However, the remote homeserver may or may not actually support it. + # So we first try an MSC2197 remote-filtered search, then fall back + # to a locally-filtered search if we must. + + try: + res = yield self._get_remote_list_cached( + server_name, + limit=limit, + since_token=since_token, + include_all_networks=include_all_networks, + third_party_instance_id=third_party_instance_id, + search_filter=search_filter, + ) + return res + except HttpResponseException as hre: + syn_err = hre.to_synapse_error() + if hre.code in (404, 405) or syn_err.errcode in ( + Codes.UNRECOGNIZED, + Codes.NOT_FOUND, + ): + logger.debug("Falling back to locally-filtered /publicRooms") + else: + raise # Not an error that should trigger a fallback. + + # if we reach this point, then we fall back to the situation where + # we currently don't support searching across federation, so we have # to do it manually without pagination limit = None since_token = None diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 249a6d9c5d..35450feb6f 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -29,9 +29,11 @@ from twisted.internet import defer from synapse import types from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError +from synapse.handlers.identity import LookupAlgorithm, create_id_access_token_header from synapse.types import RoomID, UserID from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room, user_left_room +from synapse.util.hash import sha256_and_url_safe_base64 from ._base import BaseHandler @@ -100,7 +102,7 @@ class RoomMemberHandler(object): raise NotImplementedError() @abc.abstractmethod - def _remote_reject_invite(self, remote_room_hosts, room_id, target): + def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): """Attempt to reject an invite for a room this server is not in. If we fail to do so we locally mark the invite as rejected. @@ -510,9 +512,7 @@ class RoomMemberHandler(object): return res @defer.inlineCallbacks - def send_membership_event( - self, requester, event, context, remote_room_hosts=None, ratelimit=True - ): + def send_membership_event(self, requester, event, context, ratelimit=True): """ Change the membership status of a user in a room. @@ -522,16 +522,10 @@ class RoomMemberHandler(object): act as the sender, will be skipped. event (SynapseEvent): The membership event. context: The context of the event. - is_guest (bool): Whether the sender is a guest. 
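# A rough sketch of the MSC2197 fallback above: try a remote-filtered search
# first and only filter locally when the remote homeserver clearly lacks support.
# RemoteSearchError is a hypothetical stand-in for the HttpResponseException /
# SynapseError pair used in the diff.
class RemoteSearchError(Exception):
    def __init__(self, code, errcode):
        super().__init__(code, errcode)
        self.code = code
        self.errcode = errcode

def search_public_rooms_sketch(remote_filtered_search, local_filtered_search):
    try:
        return remote_filtered_search()
    except RemoteSearchError as e:
        if e.code in (404, 405) or e.errcode in ("M_UNRECOGNIZED", "M_NOT_FOUND"):
            return local_filtered_search()
        raise  # anything else should not trigger a fallback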
- room_hosts ([str]): Homeservers which are likely to already be in - the room, and could be danced with in order to join this - homeserver for the first time. ratelimit (bool): Whether to rate limit this request. Raises: SynapseError if there was a problem changing the membership. """ - remote_room_hosts = remote_room_hosts or [] - target_user = UserID.from_string(event.state_key) room_id = event.room_id @@ -634,7 +628,7 @@ class RoomMemberHandler(object): servers.remove(room_alias.domain) servers.insert(0, room_alias.domain) - return (RoomID.from_string(room_id), servers) + return RoomID.from_string(room_id), servers @defer.inlineCallbacks def _get_inviter(self, user_id, room_id): @@ -646,7 +640,15 @@ class RoomMemberHandler(object): @defer.inlineCallbacks def do_3pid_invite( - self, room_id, inviter, medium, address, id_server, requester, txn_id + self, + room_id, + inviter, + medium, + address, + id_server, + requester, + txn_id, + id_access_token=None, ): if self.config.block_non_admin_invites: is_requester_admin = yield self.auth.is_server_admin(requester.user) @@ -669,7 +671,12 @@ class RoomMemberHandler(object): Codes.FORBIDDEN, ) - invitee = yield self._lookup_3pid(id_server, medium, address) + if not self._enable_lookup: + raise SynapseError( + 403, "Looking up third-party identifiers is denied from this server" + ) + + invitee = yield self._lookup_3pid(id_server, medium, address, id_access_token) if invitee: yield self.update_membership( @@ -677,11 +684,18 @@ class RoomMemberHandler(object): ) else: yield self._make_and_store_3pid_invite( - requester, id_server, medium, address, room_id, inviter, txn_id=txn_id + requester, + id_server, + medium, + address, + room_id, + inviter, + txn_id=txn_id, + id_access_token=id_access_token, ) @defer.inlineCallbacks - def _lookup_3pid(self, id_server, medium, address): + def _lookup_3pid(self, id_server, medium, address, id_access_token=None): """Looks up a 3pid in the passed identity server. Args: @@ -689,14 +703,48 @@ class RoomMemberHandler(object): of the identity server to use. medium (str): The type of the third party identifier (e.g. "email"). address (str): The third party identifier (e.g. "foo@example.com"). + id_access_token (str|None): The access token to authenticate to the identity + server with + + Returns: + str|None: the matrix ID of the 3pid, or None if it is not recognized. + """ + if id_access_token is not None: + try: + results = yield self._lookup_3pid_v2( + id_server, id_access_token, medium, address + ) + return results + + except Exception as e: + # Catch HttpResponseExcept for a non-200 response code + # Check if this identity server does not know about v2 lookups + if isinstance(e, HttpResponseException) and e.code == 404: + # This is an old identity server that does not yet support v2 lookups + logger.warning( + "Attempted v2 lookup on v1 identity server %s. Falling " + "back to v1", + id_server, + ) + else: + logger.warning("Error when looking up hashing details: %s", e) + return None + + return (yield self._lookup_3pid_v1(id_server, medium, address)) + + @defer.inlineCallbacks + def _lookup_3pid_v1(self, id_server, medium, address): + """Looks up a 3pid in the passed identity server using v1 lookup. + + Args: + id_server (str): The server name (including port, if required) + of the identity server to use. + medium (str): The type of the third party identifier (e.g. "email"). + address (str): The third party identifier (e.g. "foo@example.com"). 
Returns: str: the matrix ID of the 3pid, or None if it is not recognized. """ - if not self._enable_lookup: - raise SynapseError( - 403, "Looking up third-party identifiers is denied from this server" - ) try: data = yield self.simple_http_client.get_json( "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server), @@ -710,9 +758,116 @@ class RoomMemberHandler(object): return data["mxid"] except IOError as e: - logger.warn("Error from identity server lookup: %s" % (e,)) + logger.warning("Error from v1 identity server lookup: %s" % (e,)) + + return None + + @defer.inlineCallbacks + def _lookup_3pid_v2(self, id_server, id_access_token, medium, address): + """Looks up a 3pid in the passed identity server using v2 lookup. + + Args: + id_server (str): The server name (including port, if required) + of the identity server to use. + id_access_token (str): The access token to authenticate to the identity server with + medium (str): The type of the third party identifier (e.g. "email"). + address (str): The third party identifier (e.g. "foo@example.com"). + + Returns: + Deferred[str|None]: the matrix ID of the 3pid, or None if it is not recognised. + """ + # Check what hashing details are supported by this identity server + hash_details = yield self.simple_http_client.get_json( + "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), + {"access_token": id_access_token}, + ) + + if not isinstance(hash_details, dict): + logger.warning( + "Got non-dict object when checking hash details of %s%s: %s", + id_server_scheme, + id_server, + hash_details, + ) + raise SynapseError( + 400, + "Non-dict object from %s%s during v2 hash_details request: %s" + % (id_server_scheme, id_server, hash_details), + ) + + # Extract information from hash_details + supported_lookup_algorithms = hash_details.get("algorithms") + lookup_pepper = hash_details.get("lookup_pepper") + if ( + not supported_lookup_algorithms + or not isinstance(supported_lookup_algorithms, list) + or not lookup_pepper + or not isinstance(lookup_pepper, str) + ): + raise SynapseError( + 400, + "Invalid hash details received from identity server %s%s: %s" + % (id_server_scheme, id_server, hash_details), + ) + + # Check if any of the supported lookup algorithms are present + if LookupAlgorithm.SHA256 in supported_lookup_algorithms: + # Perform a hashed lookup + lookup_algorithm = LookupAlgorithm.SHA256 + + # Hash address, medium and the pepper with sha256 + to_hash = "%s %s %s" % (address, medium, lookup_pepper) + lookup_value = sha256_and_url_safe_base64(to_hash) + + elif LookupAlgorithm.NONE in supported_lookup_algorithms: + # Perform a non-hashed lookup + lookup_algorithm = LookupAlgorithm.NONE + + # Combine together plaintext address and medium + lookup_value = "%s %s" % (address, medium) + + else: + logger.warning( + "None of the provided lookup algorithms of %s are supported: %s", + id_server, + supported_lookup_algorithms, + ) + raise SynapseError( + 400, + "Provided identity server does not support any v2 lookup " + "algorithms that this homeserver supports.", + ) + + # Authenticate with identity server given the access token from the client + headers = {"Authorization": create_id_access_token_header(id_access_token)} + + try: + lookup_results = yield self.simple_http_client.post_json_get_json( + "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server), + { + "addresses": [lookup_value], + "algorithm": lookup_algorithm, + "pepper": lookup_pepper, + }, + headers=headers, + ) + except Exception as e: + 
logger.warning("Error when performing a v2 3pid lookup: %s", e) + raise SynapseError( + 500, "Unknown error occurred during identity server lookup" + ) + + # Check for a mapping from what we looked up to an MXID + if "mappings" not in lookup_results or not isinstance( + lookup_results["mappings"], dict + ): + logger.warning("No results from 3pid lookup") return None + # Return the MXID if it's available, or None otherwise + mxid = lookup_results["mappings"].get(lookup_value) + return mxid + @defer.inlineCallbacks def _verify_any_signature(self, data, server_hostname): if server_hostname not in data["signatures"]: @@ -737,7 +892,15 @@ class RoomMemberHandler(object): @defer.inlineCallbacks def _make_and_store_3pid_invite( - self, requester, id_server, medium, address, room_id, user, txn_id + self, + requester, + id_server, + medium, + address, + room_id, + user, + txn_id, + id_access_token=None, ): room_state = yield self.state_handler.get_current_state(room_id) @@ -786,6 +949,7 @@ class RoomMemberHandler(object): room_name=room_name, inviter_display_name=inviter_display_name, inviter_avatar_url=inviter_avatar_url, + id_access_token=id_access_token, ) ) @@ -823,6 +987,7 @@ class RoomMemberHandler(object): room_name, inviter_display_name, inviter_avatar_url, + id_access_token=None, ): """ Asks an identity server for a third party invite. @@ -842,6 +1007,8 @@ class RoomMemberHandler(object): inviter_display_name (str): The current display name of the inviter. inviter_avatar_url (str): The URL of the inviter's avatar. + id_access_token (str|None): The access token to authenticate to the identity + server with Returns: A deferred tuple containing: @@ -852,12 +1019,6 @@ class RoomMemberHandler(object): display_name (str): A user-friendly name to represent the invited user. """ - - is_url = "%s%s/_matrix/identity/api/v1/store-invite" % ( - id_server_scheme, - id_server, - ) - invite_config = { "medium": medium, "address": address, @@ -871,22 +1032,66 @@ class RoomMemberHandler(object): "sender_avatar_url": inviter_avatar_url, } - try: - data = yield self.simple_http_client.post_json_get_json( - is_url, invite_config - ) - except HttpResponseException as e: - # Some identity servers may only support application/x-www-form-urlencoded - # types. 
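# A standalone approximation of the v2 lookup-value construction above:
# SHA-256 over "<address> <medium> <pepper>", then unpadded url-safe base64.
# The real helper is synapse.util.hash.sha256_and_url_safe_base64; the function
# below is an assumed re-creation of it for illustration only.
import base64
import hashlib

def lookup_value_sketch(address, medium, pepper, algorithms):
    if "sha256" in algorithms:
        to_hash = "%s %s %s" % (address, medium, pepper)
        digest = hashlib.sha256(to_hash.encode("utf8")).digest()
        return base64.urlsafe_b64encode(digest).decode("ascii").rstrip("=")
    if "none" in algorithms:
        # Plaintext fallback: the identity server does no hashing at all.
        return "%s %s" % (address, medium)
    raise ValueError("identity server supports no lookup algorithm we understand")

# e.g. lookup_value_sketch("foo@example.com", "email", "matrixrocks", ["sha256"])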
This is especially true with old instances of Sydent, see - # https://github.com/matrix-org/sydent/pull/170 - logger.info( - "Failed to POST %s with JSON, falling back to urlencoded form: %s", - is_url, - e, + # Add the identity service access token to the JSON body and use the v2 + # Identity Service endpoints if id_access_token is present + data = None + base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server) + + if id_access_token: + key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % ( + id_server_scheme, + id_server, ) - data = yield self.simple_http_client.post_urlencoded_get_json( - is_url, invite_config + + # Attempt a v2 lookup + url = base_url + "/v2/store-invite" + try: + data = yield self.simple_http_client.post_json_get_json( + url, + invite_config, + {"Authorization": create_id_access_token_header(id_access_token)}, + ) + except HttpResponseException as e: + if e.code != 404: + logger.info("Failed to POST %s with JSON: %s", url, e) + raise e + + if data is None: + key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % ( + id_server_scheme, + id_server, ) + url = base_url + "/api/v1/store-invite" + + try: + data = yield self.simple_http_client.post_json_get_json( + url, invite_config + ) + except HttpResponseException as e: + logger.warning( + "Error trying to call /store-invite on %s%s: %s", + id_server_scheme, + id_server, + e, + ) + + if data is None: + # Some identity servers may only support application/x-www-form-urlencoded + # types. This is especially true with old instances of Sydent, see + # https://github.com/matrix-org/sydent/pull/170 + try: + data = yield self.simple_http_client.post_urlencoded_get_json( + url, invite_config + ) + except HttpResponseException as e: + logger.warning( + "Error calling /store-invite on %s%s with fallback " + "encoding: %s", + id_server_scheme, + id_server, + e, + ) + raise e # TODO: Check for success token = data["token"] @@ -894,8 +1099,7 @@ class RoomMemberHandler(object): if "public_key" in data: fallback_public_key = { "public_key": data["public_key"], - "key_validity_url": "%s%s/_matrix/identity/api/v1/pubkey/isvalid" - % (id_server_scheme, id_server), + "key_validity_url": key_validity_url, } else: fallback_public_key = public_keys[0] @@ -903,7 +1107,7 @@ class RoomMemberHandler(object): if not public_keys: public_keys.append(fallback_public_key) display_name = data["display_name"] - return (token, public_keys, fallback_public_key, display_name) + return token, public_keys, fallback_public_key, display_name @defer.inlineCallbacks def _is_host_in_room(self, current_state_ids): @@ -962,9 +1166,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): ) if complexity: - if complexity["v1"] > max_complexity: - return True - return False + return complexity["v1"] > max_complexity return None @defer.inlineCallbacks @@ -980,10 +1182,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): max_complexity = self.hs.config.limit_remote_rooms.complexity complexity = yield self.store.get_room_complexity(room_id) - if complexity["v1"] > max_complexity: - return True - - return False + return complexity["v1"] > max_complexity @defer.inlineCallbacks def _remote_join(self, requester, remote_room_hosts, room_id, user, content): @@ -1062,7 +1261,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): # The 'except' clause is very broad, but we need to # capture everything from DNS failures upwards # - logger.warn("Failed to reject invite: %s", e) + logger.warning("Failed to reject invite: %s", e) yield 
self.store.locally_reject_invite(target.to_string(), room_id) return {} diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 4449da6669..cbac7c347a 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -14,15 +14,14 @@ # limitations under the License. import logging +from collections import Counter from twisted.internet import defer -from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.api.constants import EventTypes, Membership from synapse.handlers.state_deltas import StateDeltasHandler from synapse.metrics import event_processing_positions from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import UserID -from synapse.util.metrics import Measure logger = logging.getLogger(__name__) @@ -62,11 +61,10 @@ class StatsHandler(StateDeltasHandler): def notify_new_event(self): """Called when there may be more deltas to process """ - if not self.hs.config.stats_enabled: + if not self.hs.config.stats_enabled or self._is_processing: return - if self._is_processing: - return + self._is_processing = True @defer.inlineCallbacks def process(): @@ -75,39 +73,78 @@ class StatsHandler(StateDeltasHandler): finally: self._is_processing = False - self._is_processing = True run_as_background_process("stats.notify_new_event", process) @defer.inlineCallbacks def _unsafe_process(self): # If self.pos is None then means we haven't fetched it from DB if self.pos is None: - self.pos = yield self.store.get_stats_stream_pos() - - # If still None then the initial background update hasn't happened yet - if self.pos is None: - return None + self.pos = yield self.store.get_stats_positions() # Loop round handling deltas until we're up to date + while True: - with Measure(self.clock, "stats_delta"): - deltas = yield self.store.get_current_state_deltas(self.pos) - if not deltas: - return + # Be sure to read the max stream_ordering *before* checking if there are any outstanding + # deltas, since there is otherwise a chance that we could miss updates which arrive + # after we check the deltas. + room_max_stream_ordering = yield self.store.get_room_max_stream_ordering() + if self.pos == room_max_stream_ordering: + break + + deltas = yield self.store.get_current_state_deltas(self.pos) + + if deltas: + logger.debug("Handling %d state deltas", len(deltas)) + room_deltas, user_deltas = yield self._handle_deltas(deltas) + + max_pos = deltas[-1]["stream_id"] + else: + room_deltas = {} + user_deltas = {} + max_pos = room_max_stream_ordering + + # Then count deltas for total_events and total_event_bytes. + room_count, user_count = yield self.store.get_changes_room_total_events_and_bytes( + self.pos, max_pos + ) - logger.info("Handling %d state deltas", len(deltas)) - yield self._handle_deltas(deltas) + for room_id, fields in room_count.items(): + room_deltas.setdefault(room_id, {}).update(fields) - self.pos = deltas[-1]["stream_id"] - yield self.store.update_stats_stream_pos(self.pos) + for user_id, fields in user_count.items(): + user_deltas.setdefault(user_id, {}).update(fields) - event_processing_positions.labels("stats").set(self.pos) + logger.debug("room_deltas: %s", room_deltas) + logger.debug("user_deltas: %s", user_deltas) + + # Always call this so that we update the stats position. 
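# A generic, synchronous sketch of the loop structure above. The helper callables
# are placeholders; the important point is reading the high-water mark *before*
# fetching deltas, so updates that land in between are not missed.
def process_until_caught_up(get_max_pos, get_deltas, handle, pos):
    while True:
        max_pos = get_max_pos()
        if pos == max_pos:
            break
        deltas = get_deltas(pos)
        if deltas:
            handle(deltas)
            pos = deltas[-1]["stream_id"]
        else:
            # Nothing to process in this range; jump straight to the mark we read.
            pos = max_pos
    return pos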
+ yield self.store.bulk_update_stats_delta( + self.clock.time_msec(), + updates={"room": room_deltas, "user": user_deltas}, + stream_id=max_pos, + ) + + logger.debug("Handled room stats to %s -> %s", self.pos, max_pos) + + event_processing_positions.labels("stats").set(max_pos) + + self.pos = max_pos @defer.inlineCallbacks def _handle_deltas(self, deltas): + """Called with the state deltas to process + + Returns: + Deferred[tuple[dict[str, Counter], dict[str, counter]]] + Resovles to two dicts, the room deltas and the user deltas, + mapping from room/user ID to changes in the various fields. """ - Called with the state deltas to process - """ + + room_to_stats_deltas = {} + user_to_stats_deltas = {} + + room_to_state_updates = {} + for delta in deltas: typ = delta["type"] state_key = delta["state_key"] @@ -115,11 +152,10 @@ class StatsHandler(StateDeltasHandler): event_id = delta["event_id"] stream_id = delta["stream_id"] prev_event_id = delta["prev_event_id"] - stream_pos = delta["stream_id"] - logger.debug("Handling: %r %r, %s", typ, state_key, event_id) + logger.debug("Handling: %r, %r %r, %s", room_id, typ, state_key, event_id) - token = yield self.store.get_earliest_token_for_room_stats(room_id) + token = yield self.store.get_earliest_token_for_stats("room", room_id) # If the earliest token to begin from is larger than our current # stream ID, skip processing this delta. @@ -131,203 +167,132 @@ class StatsHandler(StateDeltasHandler): continue if event_id is None and prev_event_id is None: - # Errr... + logger.error( + "event ID is None and so is the previous event ID. stream_id: %s", + stream_id, + ) continue event_content = {} + sender = None if event_id is not None: event = yield self.store.get_event(event_id, allow_none=True) if event: event_content = event.content or {} + sender = event.sender + + # All the values in this dict are deltas (RELATIVE changes) + room_stats_delta = room_to_stats_deltas.setdefault(room_id, Counter()) - # We use stream_pos here rather than fetch by event_id as event_id - # may be None - now = yield self.store.get_received_ts_by_stream_pos(stream_pos) + room_state = room_to_state_updates.setdefault(room_id, {}) - # quantise time to the nearest bucket - now = (now // 1000 // self.stats_bucket_size) * self.stats_bucket_size + if prev_event_id is None: + # this state event doesn't overwrite another, + # so it is a new effective/current state event + room_stats_delta["current_state_events"] += 1 if typ == EventTypes.Member: # we could use _get_key_change here but it's a bit inefficient # given we're not testing for a specific result; might as well # just grab the prev_membership and membership strings and # compare them. - prev_event_content = {} + # We take None rather than leave as a previous membership + # in the absence of a previous event because we do not want to + # reduce the leave count when a new-to-the-room user joins. 
+ prev_membership = None if prev_event_id is not None: prev_event = yield self.store.get_event( prev_event_id, allow_none=True ) if prev_event: prev_event_content = prev_event.content + prev_membership = prev_event_content.get( + "membership", Membership.LEAVE + ) membership = event_content.get("membership", Membership.LEAVE) - prev_membership = prev_event_content.get("membership", Membership.LEAVE) - - if prev_membership == membership: - continue - if prev_membership == Membership.JOIN: - yield self.store.update_stats_delta( - now, "room", room_id, "joined_members", -1 - ) + if prev_membership is None: + logger.debug("No previous membership for this user.") + elif membership == prev_membership: + pass # noop + elif prev_membership == Membership.JOIN: + room_stats_delta["joined_members"] -= 1 elif prev_membership == Membership.INVITE: - yield self.store.update_stats_delta( - now, "room", room_id, "invited_members", -1 - ) + room_stats_delta["invited_members"] -= 1 elif prev_membership == Membership.LEAVE: - yield self.store.update_stats_delta( - now, "room", room_id, "left_members", -1 - ) + room_stats_delta["left_members"] -= 1 elif prev_membership == Membership.BAN: - yield self.store.update_stats_delta( - now, "room", room_id, "banned_members", -1 - ) + room_stats_delta["banned_members"] -= 1 else: - err = "%s is not a valid prev_membership" % (repr(prev_membership),) - logger.error(err) - raise ValueError(err) + raise ValueError( + "%r is not a valid prev_membership" % (prev_membership,) + ) + if membership == prev_membership: + pass # noop if membership == Membership.JOIN: - yield self.store.update_stats_delta( - now, "room", room_id, "joined_members", +1 - ) + room_stats_delta["joined_members"] += 1 elif membership == Membership.INVITE: - yield self.store.update_stats_delta( - now, "room", room_id, "invited_members", +1 - ) + room_stats_delta["invited_members"] += 1 + + if sender and self.is_mine_id(sender): + user_to_stats_deltas.setdefault(sender, Counter())[ + "invites_sent" + ] += 1 + elif membership == Membership.LEAVE: - yield self.store.update_stats_delta( - now, "room", room_id, "left_members", +1 - ) + room_stats_delta["left_members"] += 1 elif membership == Membership.BAN: - yield self.store.update_stats_delta( - now, "room", room_id, "banned_members", +1 - ) + room_stats_delta["banned_members"] += 1 else: - err = "%s is not a valid membership" % (repr(membership),) - logger.error(err) - raise ValueError(err) + raise ValueError("%r is not a valid membership" % (membership,)) user_id = state_key if self.is_mine_id(user_id): - # update user_stats as it's one of our users - public = yield self._is_public_room(room_id) - - if membership == Membership.LEAVE: - yield self.store.update_stats_delta( - now, - "user", - user_id, - "public_rooms" if public else "private_rooms", - -1, - ) - elif membership == Membership.JOIN: - yield self.store.update_stats_delta( - now, - "user", - user_id, - "public_rooms" if public else "private_rooms", - +1, - ) + # this accounts for transitions like leave → ban and so on. + has_changed_joinedness = (prev_membership == Membership.JOIN) != ( + membership == Membership.JOIN + ) - elif typ == EventTypes.Create: - # Newly created room. Add it with all blank portions. 
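# A minimal illustration of the Counter-based bookkeeping above, covering a
# single leave -> join transition; the field names match the diff, the rest is a
# contrived example.
from collections import Counter

room_stats_delta = Counter()
prev_membership, membership = "leave", "join"

if prev_membership == "leave":
    room_stats_delta["left_members"] -= 1    # undo the effect of the old state
if membership == "join":
    room_stats_delta["joined_members"] += 1  # apply the effect of the new state

print(dict(room_stats_delta))  # {'left_members': -1, 'joined_members': 1}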
- yield self.store.update_room_state( - room_id, - { - "join_rules": None, - "history_visibility": None, - "encryption": None, - "name": None, - "topic": None, - "avatar": None, - "canonical_alias": None, - }, - ) + if has_changed_joinedness: + delta = +1 if membership == Membership.JOIN else -1 - elif typ == EventTypes.JoinRules: - yield self.store.update_room_state( - room_id, {"join_rules": event_content.get("join_rule")} - ) + user_to_stats_deltas.setdefault(user_id, Counter())[ + "joined_rooms" + ] += delta - is_public = yield self._get_key_change( - prev_event_id, event_id, "join_rule", JoinRules.PUBLIC - ) - if is_public is not None: - yield self.update_public_room_stats(now, room_id, is_public) + room_stats_delta["local_users_in_room"] += delta - elif typ == EventTypes.RoomHistoryVisibility: - yield self.store.update_room_state( - room_id, - {"history_visibility": event_content.get("history_visibility")}, + elif typ == EventTypes.Create: + room_state["is_federatable"] = ( + event_content.get("m.federate", True) is True ) - - is_public = yield self._get_key_change( - prev_event_id, event_id, "history_visibility", "world_readable" + if sender and self.is_mine_id(sender): + user_to_stats_deltas.setdefault(sender, Counter())[ + "rooms_created" + ] += 1 + elif typ == EventTypes.JoinRules: + room_state["join_rules"] = event_content.get("join_rule") + elif typ == EventTypes.RoomHistoryVisibility: + room_state["history_visibility"] = event_content.get( + "history_visibility" ) - if is_public is not None: - yield self.update_public_room_stats(now, room_id, is_public) - elif typ == EventTypes.Encryption: - yield self.store.update_room_state( - room_id, {"encryption": event_content.get("algorithm")} - ) + room_state["encryption"] = event_content.get("algorithm") elif typ == EventTypes.Name: - yield self.store.update_room_state( - room_id, {"name": event_content.get("name")} - ) + room_state["name"] = event_content.get("name") elif typ == EventTypes.Topic: - yield self.store.update_room_state( - room_id, {"topic": event_content.get("topic")} - ) + room_state["topic"] = event_content.get("topic") elif typ == EventTypes.RoomAvatar: - yield self.store.update_room_state( - room_id, {"avatar": event_content.get("url")} - ) + room_state["avatar"] = event_content.get("url") elif typ == EventTypes.CanonicalAlias: - yield self.store.update_room_state( - room_id, {"canonical_alias": event_content.get("alias")} - ) - - @defer.inlineCallbacks - def update_public_room_stats(self, ts, room_id, is_public): - """ - Increment/decrement a user's number of public rooms when a room they are - in changes to/from public visibility. 
+ room_state["canonical_alias"] = event_content.get("alias") + elif typ == EventTypes.GuestAccess: + room_state["guest_access"] = event_content.get("guest_access") - Args: - ts (int): Timestamp in seconds - room_id (str) - is_public (bool) - """ - # For now, blindly iterate over all local users in the room so that - # we can handle the whole problem of copying buckets over as needed - user_ids = yield self.store.get_users_in_room(room_id) - - for user_id in user_ids: - if self.hs.is_mine(UserID.from_string(user_id)): - yield self.store.update_stats_delta( - ts, "user", user_id, "public_rooms", +1 if is_public else -1 - ) - yield self.store.update_stats_delta( - ts, "user", user_id, "private_rooms", -1 if is_public else +1 - ) + for room_id, state in room_to_state_updates.items(): + yield self.store.update_room_state(room_id, state) - @defer.inlineCallbacks - def _is_public_room(self, room_id): - join_rules = yield self.state.get_current_state(room_id, EventTypes.JoinRules) - history_visibility = yield self.state.get_current_state( - room_id, EventTypes.RoomHistoryVisibility - ) - - if (join_rules and join_rules.content.get("join_rule") == JoinRules.PUBLIC) or ( - ( - history_visibility - and history_visibility.content.get("history_visibility") - == "world_readable" - ) - ): - return True - else: - return False + return room_to_stats_deltas, user_to_stats_deltas diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 98da2318a0..19bca6717f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -378,7 +378,7 @@ class SyncHandler(object): event_copy = {k: v for (k, v) in iteritems(event) if k != "room_id"} ephemeral_by_room.setdefault(room_id, []).append(event_copy) - return (now_token, ephemeral_by_room) + return now_token, ephemeral_by_room @defer.inlineCallbacks def _load_filtered_recents( @@ -578,7 +578,6 @@ class SyncHandler(object): if not last_events: return None - return last_event = last_events[-1] state_ids = yield self.store.get_state_ids_for_event( @@ -786,9 +785,8 @@ class SyncHandler(object): batch.events[0].event_id, state_filter=state_filter ) else: - # Its not clear how we get here, but empirically we do - # (#5407). Logging has been added elsewhere to try and - # figure out where this state comes from. + # We can get here if the user has ignored the senders of all + # the recent events. state_at_timeline_start = yield self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) @@ -1333,7 +1331,7 @@ class SyncHandler(object): ) if not tags_by_room: logger.debug("no-oping sync") - return ([], [], [], []) + return [], [], [], [] ignored_account_data = yield self.store.get_global_account_data_by_type_for_user( "m.ignored_user_list", user_id=user_id @@ -1643,7 +1641,7 @@ class SyncHandler(object): ) room_entries.append(entry) - return (room_entries, invited, newly_joined_rooms, newly_left_rooms) + return room_entries, invited, newly_joined_rooms, newly_left_rooms @defer.inlineCallbacks def _get_all_rooms(self, sync_result_builder, ignored_users): @@ -1717,7 +1715,7 @@ class SyncHandler(object): ) ) - return (room_entries, invited, []) + return room_entries, invited, [] @defer.inlineCallbacks def _generate_room_entry( @@ -1771,20 +1769,9 @@ class SyncHandler(object): newly_joined_room=newly_joined, ) - if not batch and batch.limited: - # This resulted in #5407, which is weird, so lets log! We do it - # here as we have the maximum amount of information. 
- user_id = sync_result_builder.sync_config.user.to_string() - logger.info( - "Issue #5407: Found limited batch with no events. user %s, room %s," - " sync_config %s, newly_joined %s, events %s, batch %s.", - user_id, - room_id, - sync_config, - newly_joined, - events, - batch, - ) + # Note: `batch` can be both empty and limited here in the case where + # `_load_filtered_recents` can't find any events the user should see + # (e.g. due to having ignored the sender of the last 50 events). if newly_joined: # debug for https://github.com/matrix-org/synapse/issues/4422 diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index f882330293..ca8ae9fb5b 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -319,4 +319,4 @@ class TypingNotificationEventSource(object): return self.get_typing_handler()._latest_room_serial def get_pagination_rows(self, user, pagination_config, key): - return ([], pagination_config.from_key) + return [], pagination_config.from_key diff --git a/synapse/http/client.py b/synapse/http/client.py index 0ac20ebefc..51765ae3c0 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -35,7 +35,7 @@ from twisted.internet.interfaces import ( ) from twisted.python.failure import Failure from twisted.web._newclient import ResponseDone -from twisted.web.client import Agent, HTTPConnectionPool, PartialDownloadError, readBody +from twisted.web.client import Agent, HTTPConnectionPool, readBody from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers @@ -46,6 +46,7 @@ from synapse.http import ( redact_uri, ) from synapse.logging.context import make_deferred_yieldable +from synapse.logging.opentracing import set_tag, start_active_span, tags from synapse.util.async_helpers import timeout_deferred from synapse.util.caches import CACHE_SIZE_FACTOR @@ -269,42 +270,56 @@ class SimpleHttpClient(object): # log request but strip `access_token` (AS requests for example include this) logger.info("Sending request %s %s", method, redact_uri(uri)) - try: - body_producer = None - if data is not None: - body_producer = QuieterFileBodyProducer(BytesIO(data)) - - request_deferred = treq.request( - method, - uri, - agent=self.agent, - data=body_producer, - headers=headers, - **self._extra_treq_args - ) - request_deferred = timeout_deferred( - request_deferred, - 60, - self.hs.get_reactor(), - cancelled_to_request_timed_out_error, - ) - response = yield make_deferred_yieldable(request_deferred) - - incoming_responses_counter.labels(method, response.code).inc() - logger.info( - "Received response to %s %s: %s", method, redact_uri(uri), response.code - ) - return response - except Exception as e: - incoming_responses_counter.labels(method, "ERR").inc() - logger.info( - "Error sending request to %s %s: %s %s", - method, - redact_uri(uri), - type(e).__name__, - e.args[0], - ) - raise + with start_active_span( + "outgoing-client-request", + tags={ + tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT, + tags.HTTP_METHOD: method, + tags.HTTP_URL: uri, + }, + finish_on_close=True, + ): + try: + body_producer = None + if data is not None: + body_producer = QuieterFileBodyProducer(BytesIO(data)) + + request_deferred = treq.request( + method, + uri, + agent=self.agent, + data=body_producer, + headers=headers, + **self._extra_treq_args + ) + request_deferred = timeout_deferred( + request_deferred, + 60, + self.hs.get_reactor(), + cancelled_to_request_timed_out_error, + ) + response = yield make_deferred_yieldable(request_deferred) + + 
incoming_responses_counter.labels(method, response.code).inc() + logger.info( + "Received response to %s %s: %s", + method, + redact_uri(uri), + response.code, + ) + return response + except Exception as e: + incoming_responses_counter.labels(method, "ERR").inc() + logger.info( + "Error sending request to %s %s: %s %s", + method, + redact_uri(uri), + type(e).__name__, + e.args[0], + ) + set_tag(tags.ERROR, True) + set_tag("error_reason", e.args[0]) + raise @defer.inlineCallbacks def post_urlencoded_get_json(self, uri, args={}, headers=None): @@ -599,38 +614,6 @@ def _readBodyToFile(response, stream, max_size): return d -class CaptchaServerHttpClient(SimpleHttpClient): - """ - Separate HTTP client for talking to google's captcha servers - Only slightly special because accepts partial download responses - - used only by c/s api v1 - """ - - @defer.inlineCallbacks - def post_urlencoded_get_raw(self, url, args={}): - query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True) - - response = yield self.request( - "POST", - url, - data=query_bytes, - headers=Headers( - { - b"Content-Type": [b"application/x-www-form-urlencoded"], - b"User-Agent": [self.user_agent], - } - ), - ) - - try: - body = yield make_deferred_yieldable(readBody(response)) - return body - except PartialDownloadError as e: - # twisted dislikes google's response, no content length. - return e.response - - def encode_urlencode_args(args): return {k: encode_urlencode_arg(v) for k, v in args.items()} diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 71a15f434d..647d26dc56 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -14,21 +14,21 @@ # limitations under the License. import logging +import urllib -import attr -from netaddr import IPAddress +from netaddr import AddrFormatError, IPAddress from zope.interface import implementer from twisted.internet import defer from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS from twisted.internet.interfaces import IStreamClientEndpoint -from twisted.web.client import URI, Agent, HTTPConnectionPool +from twisted.web.client import Agent, HTTPConnectionPool from twisted.web.http_headers import Headers -from twisted.web.iweb import IAgent +from twisted.web.iweb import IAgent, IAgentEndpointFactory -from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list +from synapse.http.federation.srv_resolver import Server, SrvResolver from synapse.http.federation.well_known_resolver import WellKnownResolver -from synapse.logging.context import make_deferred_yieldable +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.util import Clock logger = logging.getLogger(__name__) @@ -36,8 +36,9 @@ logger = logging.getLogger(__name__) @implementer(IAgent) class MatrixFederationAgent(object): - """An Agent-like thing which provides a `request` method which will look up a matrix - server and send an HTTP request to it. + """An Agent-like thing which provides a `request` method which correctly + handles resolving matrix server names when using matrix://. Handles standard + https URIs as normal. Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.) @@ -51,9 +52,9 @@ class MatrixFederationAgent(object): SRVResolver impl to use for looking up SRV records. None to use a default implementation. 
- _well_known_cache (TTLCache|None): - TTLCache impl for storing cached well-known lookups. None to use a default - implementation. + _well_known_resolver (WellKnownResolver|None): + WellKnownResolver to use to perform well-known lookups. None to use a + default implementation. """ def __init__( @@ -61,49 +62,49 @@ class MatrixFederationAgent(object): reactor, tls_client_options_factory, _srv_resolver=None, - _well_known_cache=None, + _well_known_resolver=None, ): self._reactor = reactor self._clock = Clock(reactor) - - self._tls_client_options_factory = tls_client_options_factory - if _srv_resolver is None: - _srv_resolver = SrvResolver() - self._srv_resolver = _srv_resolver - self._pool = HTTPConnectionPool(reactor) self._pool.retryAutomatically = False self._pool.maxPersistentPerHost = 5 self._pool.cachedConnectionTimeout = 2 * 60 - self._well_known_resolver = WellKnownResolver( + self._agent = Agent.usingEndpointFactory( self._reactor, - agent=Agent( - self._reactor, - pool=self._pool, - contextFactory=tls_client_options_factory, + MatrixHostnameEndpointFactory( + reactor, tls_client_options_factory, _srv_resolver ), - well_known_cache=_well_known_cache, + pool=self._pool, ) + if _well_known_resolver is None: + _well_known_resolver = WellKnownResolver( + self._reactor, + agent=Agent( + self._reactor, + pool=self._pool, + contextFactory=tls_client_options_factory, + ), + ) + + self._well_known_resolver = _well_known_resolver + @defer.inlineCallbacks def request(self, method, uri, headers=None, bodyProducer=None): """ Args: method (bytes): HTTP method: GET/POST/etc - uri (bytes): Absolute URI to be retrieved - headers (twisted.web.http_headers.Headers|None): HTTP headers to send with the request, or None to send no extra headers. - bodyProducer (twisted.web.iweb.IBodyProducer|None): An object which can generate bytes to make up the body of this request (for example, the properly encoded contents of a file for a file upload). Or None if the request is to have no body. - Returns: Deferred[twisted.web.iweb.IResponse]: fires when the header of the response has been received (regardless of the @@ -111,210 +112,207 @@ class MatrixFederationAgent(object): response from being received (including problems that prevent the request from being sent). """ - parsed_uri = URI.fromBytes(uri, defaultPort=-1) - res = yield self._route_matrix_uri(parsed_uri) + # We use urlparse as that will set `port` to None if there is no + # explicit port. + parsed_uri = urllib.parse.urlparse(uri) - # set up the TLS connection params + # If this is a matrix:// URI check if the server has delegated matrix + # traffic using well-known delegation. # - # XXX disabling TLS is really only supported here for the benefit of the - # unit tests. We should make the UTs cope with TLS rather than having to make - # the code support the unit tests. - if self._tls_client_options_factory is None: - tls_options = None - else: - tls_options = self._tls_client_options_factory.get_options( - res.tls_server_name.decode("ascii") + # We have to do this here and not in the endpoint as we need to rewrite + # the host header with the delegated server name. 
+ delegated_server = None + if ( + parsed_uri.scheme == b"matrix" + and not _is_ip_literal(parsed_uri.hostname) + and not parsed_uri.port + ): + well_known_result = yield self._well_known_resolver.get_well_known( + parsed_uri.hostname + ) + delegated_server = well_known_result.delegated_server + + if delegated_server: + # Ok, the server has delegated matrix traffic to somewhere else, so + # lets rewrite the URL to replace the server with the delegated + # server name. + uri = urllib.parse.urlunparse( + ( + parsed_uri.scheme, + delegated_server, + parsed_uri.path, + parsed_uri.params, + parsed_uri.query, + parsed_uri.fragment, + ) ) + parsed_uri = urllib.parse.urlparse(uri) - # make sure that the Host header is set correctly + # We need to make sure the host header is set to the netloc of the + # server. if headers is None: headers = Headers() else: headers = headers.copy() if not headers.hasHeader(b"host"): - headers.addRawHeader(b"host", res.host_header) + headers.addRawHeader(b"host", parsed_uri.netloc) - class EndpointFactory(object): - @staticmethod - def endpointForURI(_uri): - ep = LoggingHostnameEndpoint( - self._reactor, res.target_host, res.target_port - ) - if tls_options is not None: - ep = wrapClientTLS(tls_options, ep) - return ep - - agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool) res = yield make_deferred_yieldable( - agent.request(method, uri, headers, bodyProducer) + self._agent.request(method, uri, headers, bodyProducer) ) + return res - @defer.inlineCallbacks - def _route_matrix_uri(self, parsed_uri, lookup_well_known=True): - """Helper for `request`: determine the routing for a Matrix URI - Args: - parsed_uri (twisted.web.client.URI): uri to route. Note that it should be - parsed with URI.fromBytes(uri, defaultPort=-1) to set the `port` to -1 - if there is no explicit port given. +@implementer(IAgentEndpointFactory) +class MatrixHostnameEndpointFactory(object): + """Factory for MatrixHostnameEndpoint for parsing to an Agent. + """ - lookup_well_known (bool): True if we should look up the .well-known file if - there is no SRV record. + def __init__(self, reactor, tls_client_options_factory, srv_resolver): + self._reactor = reactor + self._tls_client_options_factory = tls_client_options_factory - Returns: - Deferred[_RoutingResult] - """ - # check for an IP literal - try: - ip_address = IPAddress(parsed_uri.host.decode("ascii")) - except Exception: - # not an IP address - ip_address = None - - if ip_address: - port = parsed_uri.port - if port == -1: - port = 8448 - return _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=parsed_uri.host, - target_port=port, - ) + if srv_resolver is None: + srv_resolver = SrvResolver() - if parsed_uri.port != -1: - # there is an explicit port - return _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=parsed_uri.host, - target_port=parsed_uri.port, - ) + self._srv_resolver = srv_resolver - if lookup_well_known: - # try a .well-known lookup - well_known_result = yield self._well_known_resolver.get_well_known( - parsed_uri.host - ) - well_known_server = well_known_result.delegated_server - - if well_known_server: - # if we found a .well-known, start again, but don't do another - # .well-known lookup. - - # parse the server name in the .well-known response into host/port. - # (This code is lifted from twisted.web.client.URI.fromBytes). 
- if b":" in well_known_server: - well_known_host, well_known_port = well_known_server.rsplit(b":", 1) - try: - well_known_port = int(well_known_port) - except ValueError: - # the part after the colon could not be parsed as an int - # - we assume it is an IPv6 literal with no port (the closing - # ']' stops it being parsed as an int) - well_known_host, well_known_port = well_known_server, -1 - else: - well_known_host, well_known_port = well_known_server, -1 - - new_uri = URI( - scheme=parsed_uri.scheme, - netloc=well_known_server, - host=well_known_host, - port=well_known_port, - path=parsed_uri.path, - params=parsed_uri.params, - query=parsed_uri.query, - fragment=parsed_uri.fragment, - ) + def endpointForURI(self, parsed_uri): + return MatrixHostnameEndpoint( + self._reactor, + self._tls_client_options_factory, + self._srv_resolver, + parsed_uri, + ) - res = yield self._route_matrix_uri(new_uri, lookup_well_known=False) - return res - - # try a SRV lookup - service_name = b"_matrix._tcp.%s" % (parsed_uri.host,) - server_list = yield self._srv_resolver.resolve_service(service_name) - - if not server_list: - target_host = parsed_uri.host - port = 8448 - logger.debug( - "No SRV record for %s, using %s:%i", - parsed_uri.host.decode("ascii"), - target_host.decode("ascii"), - port, - ) + +@implementer(IStreamClientEndpoint) +class MatrixHostnameEndpoint(object): + """An endpoint that resolves matrix:// URLs using Matrix server name + resolution (i.e. via SRV). Does not check for well-known delegation. + + Args: + reactor (IReactor) + tls_client_options_factory (ClientTLSOptionsFactory|None): + factory to use for fetching client tls options, or none to disable TLS. + srv_resolver (SrvResolver): The SRV resolver to use + parsed_uri (twisted.web.client.URI): The parsed URI that we're wanting + to connect to. + """ + + def __init__(self, reactor, tls_client_options_factory, srv_resolver, parsed_uri): + self._reactor = reactor + + self._parsed_uri = parsed_uri + + # set up the TLS connection params + # + # XXX disabling TLS is really only supported here for the benefit of the + # unit tests. We should make the UTs cope with TLS rather than having to make + # the code support the unit tests. 
+ + if tls_client_options_factory is None: + self._tls_options = None else: - target_host, port = pick_server_from_list(server_list) - logger.debug( - "Picked %s:%i from SRV records for %s", - target_host.decode("ascii"), - port, - parsed_uri.host.decode("ascii"), + self._tls_options = tls_client_options_factory.get_options( + self._parsed_uri.host ) - return _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=target_host, - target_port=port, - ) + self._srv_resolver = srv_resolver + def connect(self, protocol_factory): + """Implements IStreamClientEndpoint interface + """ -@implementer(IStreamClientEndpoint) -class LoggingHostnameEndpoint(object): - """A wrapper for HostnameEndpint which logs when it connects""" + return run_in_background(self._do_connect, protocol_factory) - def __init__(self, reactor, host, port, *args, **kwargs): - self.host = host - self.port = port - self.ep = HostnameEndpoint(reactor, host, port, *args, **kwargs) + @defer.inlineCallbacks + def _do_connect(self, protocol_factory): + first_exception = None + + server_list = yield self._resolve_server() + + for server in server_list: + host = server.host + port = server.port + + try: + logger.info("Connecting to %s:%i", host.decode("ascii"), port) + endpoint = HostnameEndpoint(self._reactor, host, port) + if self._tls_options: + endpoint = wrapClientTLS(self._tls_options, endpoint) + result = yield make_deferred_yieldable( + endpoint.connect(protocol_factory) + ) - def connect(self, protocol_factory): - logger.info("Connecting to %s:%i", self.host.decode("ascii"), self.port) - return self.ep.connect(protocol_factory) + return result + except Exception as e: + logger.info( + "Failed to connect to %s:%i: %s", host.decode("ascii"), port, e + ) + if not first_exception: + first_exception = e + # We return the first failure because that's probably the most interesting. + if first_exception: + raise first_exception -@attr.s -class _RoutingResult(object): - """The result returned by `_route_matrix_uri`. + # This shouldn't happen as we should always have at least one host/port + # to try and if that doesn't work then we'll have an exception. + raise Exception("Failed to resolve server %r" % (self._parsed_uri.netloc,)) - Contains the parameters needed to direct a federation connection to a particular - server. + @defer.inlineCallbacks + def _resolve_server(self): + """Resolves the server name to a list of hosts and ports to attempt to + connect to. - Where a SRV record points to several servers, this object contains a single server - chosen from the list. - """ + Returns: + Deferred[list[Server]] + """ - host_header = attr.ib() - """ - The value we should assign to the Host header (host:port from the matrix - URI, or .well-known). + if self._parsed_uri.scheme != b"matrix": + return [Server(host=self._parsed_uri.host, port=self._parsed_uri.port)] - :type: bytes - """ + # Note: We don't do well-known lookup as that needs to have happened + # before now, due to needing to rewrite the Host header of the HTTP + # request. 
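The connect path above follows a simple try-each-candidate pattern; a minimal synchronous sketch of the same idea (hypothetical helper names, no Twisted) looks like:

    def connect_with_fallback(candidates, connect):
        """Try each (host, port) in order; re-raise the first failure only if all fail."""
        first_exception = None
        for host, port in candidates:
            try:
                return connect(host, port)
            except Exception as e:
                if first_exception is None:
                    first_exception = e
        if first_exception is not None:
            raise first_exception
        raise Exception("no candidates to try")

Keeping the first exception rather than the last means the failure that ends up in the traceback is the one from the highest-preference server, which is usually the most interesting.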
- tls_server_name = attr.ib() - """ - The server name we should set in the SNI (typically host, without port, from the - matrix URI or .well-known) + # We reparse the URI so that defaultPort is -1 rather than 80 + parsed_uri = urllib.parse.urlparse(self._parsed_uri.toBytes()) - :type: bytes - """ + host = parsed_uri.hostname + port = parsed_uri.port - target_host = attr.ib() - """ - The hostname (or IP literal) we should route the TCP connection to (the target of the - SRV record, or the hostname from the URL/.well-known) + # If there is an explicit port or the host is an IP address we bypass + # SRV lookups and just use the given host/port. + if port or _is_ip_literal(host): + return [Server(host, port or 8448)] - :type: bytes - """ + server_list = yield self._srv_resolver.resolve_service(b"_matrix._tcp." + host) + + if server_list: + return server_list + + # No SRV records, so we fallback to host and 8448 + return [Server(host, 8448)] - target_port = attr.ib() - """ - The port we should route the TCP connection to (the target of the SRV record, or - the port from the URL/.well-known, or 8448) - :type: int +def _is_ip_literal(host): + """Test if the given host name is either an IPv4 or IPv6 literal. + + Args: + host (bytes) + + Returns: + bool """ + + host = host.decode("ascii") + + try: + IPAddress(host) + return True + except AddrFormatError: + return False diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index b32188766d..3fe4ffb9e5 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -32,7 +32,7 @@ logger = logging.getLogger(__name__) SERVER_CACHE = {} -@attr.s +@attr.s(slots=True, frozen=True) class Server(object): """ Our record of an individual server which can be tried to reach a destination. @@ -53,34 +53,47 @@ class Server(object): expires = attr.ib(default=0) -def pick_server_from_list(server_list): - """Randomly choose a server from the server list +def _sort_server_list(server_list): + """Given a list of SRV records sort them into priority order and shuffle + each priority with the given weight. + """ + priority_map = {} - Args: - server_list (list[Server]): list of candidate servers + for server in server_list: + priority_map.setdefault(server.priority, []).append(server) - Returns: - Tuple[bytes, int]: (host, port) pair for the chosen server - """ - if not server_list: - raise RuntimeError("pick_server_from_list called with empty list") + results = [] + for priority in sorted(priority_map): + servers = priority_map[priority] + + # This algorithms roughly follows the algorithm described in RFC2782, + # changed to remove an off-by-one error. + # + # N.B. Weights can be zero, which means that they should be picked + # rarely. + + total_weight = sum(s.weight for s in servers) + + # Total weight can become zero if there are only zero weight servers + # left, which we handle by just shuffling and appending to the results. + while servers and total_weight: + target_weight = random.randint(1, total_weight) - # TODO: currently we only use the lowest-priority servers. 
We should maintain a - # cache of servers known to be "down" and filter them out + for s in servers: + target_weight -= s.weight - min_priority = min(s.priority for s in server_list) - eligible_servers = list(s for s in server_list if s.priority == min_priority) - total_weight = sum(s.weight for s in eligible_servers) - target_weight = random.randint(0, total_weight) + if target_weight <= 0: + break - for s in eligible_servers: - target_weight -= s.weight + results.append(s) + servers.remove(s) + total_weight -= s.weight - if target_weight <= 0: - return s.host, s.port + if servers: + random.shuffle(servers) + results.extend(servers) - # this should be impossible. - raise RuntimeError("pick_server_from_list got to end of eligible server list.") + return results class SrvResolver(object): @@ -120,7 +133,7 @@ class SrvResolver(object): if cache_entry: if all(s.expires > now for s in cache_entry): servers = list(cache_entry) - return servers + return _sort_server_list(servers) try: answers, _, _ = yield make_deferred_yieldable( @@ -169,4 +182,4 @@ class SrvResolver(object): ) self._cache[service_name] = list(servers) - return servers + return _sort_server_list(servers) diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index d2866ff67d..7ddfad286d 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -32,22 +32,40 @@ from synapse.util.metrics import Measure # period to cache .well-known results for by default WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600 -# jitter to add to the .well-known default cache ttl -WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60 +# jitter factor to add to the .well-known default cache ttls +WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 0.1 # period to cache failure to fetch .well-known for WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600 +# period to cache failure to fetch .well-known if there has recently been a +# valid well-known for that domain. +WELL_KNOWN_DOWN_CACHE_PERIOD = 2 * 60 + +# period to remember there was a valid well-known after valid record expires +WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID = 2 * 3600 + # cap for .well-known cache period WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600 # lower bound for .well-known cache period WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60 +# Attempt to refetch a cached well-known N% of the TTL before it expires. +# e.g. if set to 0.2 and we have a cached entry with a TTL of 5mins, then +# we'll start trying to refetch 1 minute before it expires. +WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2 + +# Number of times we retry fetching a well-known for a domain we know recently +# had a valid entry. +WELL_KNOWN_RETRY_ATTEMPTS = 3 + + logger = logging.getLogger(__name__) _well_known_cache = TTLCache("well-known") +_had_valid_well_known_cache = TTLCache("had-valid-well-known") @attr.s(slots=True, frozen=True) @@ -59,14 +77,20 @@ class WellKnownResolver(object): """Handles well-known lookups for matrix servers. 
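_sort_server_list above replaces the old single pick with a full weighted ordering; a standalone approximation of the per-priority weighted draw (hosts and weights invented) is:

    import random

    def weighted_order(servers):
        """servers: list of (host, weight); returns hosts in weighted pick order."""
        servers = list(servers)
        ordered = []
        while servers:
            total_weight = sum(weight for _, weight in servers)
            if total_weight == 0:
                # Only zero-weight servers left: shuffle and append the rest.
                random.shuffle(servers)
                ordered.extend(host for host, _ in servers)
                break
            target_weight = random.randint(1, total_weight)
            for i, (host, weight) in enumerate(servers):
                target_weight -= weight
                if target_weight <= 0:
                    ordered.append(host)
                    del servers[i]
                    break
        return ordered

    weighted_order([("fast", 60), ("medium", 30), ("slow", 10)])
    # "fast" comes first about 60% of the time

Priorities are still honoured strictly (lower priority values first); the weighting only shuffles servers within the same priority band.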
""" - def __init__(self, reactor, agent, well_known_cache=None): + def __init__( + self, reactor, agent, well_known_cache=None, had_well_known_cache=None + ): self._reactor = reactor self._clock = Clock(reactor) if well_known_cache is None: well_known_cache = _well_known_cache + if had_well_known_cache is None: + had_well_known_cache = _had_valid_well_known_cache + self._well_known_cache = well_known_cache + self._had_valid_well_known_cache = had_well_known_cache self._well_known_agent = RedirectAgent(agent) @defer.inlineCallbacks @@ -80,59 +104,86 @@ class WellKnownResolver(object): Deferred[WellKnownLookupResult]: The result of the lookup """ try: - result = self._well_known_cache[server_name] + prev_result, expiry, ttl = self._well_known_cache.get_with_expiry( + server_name + ) + + now = self._clock.time() + if now < expiry - WELL_KNOWN_GRACE_PERIOD_FACTOR * ttl: + return WellKnownLookupResult(delegated_server=prev_result) except KeyError: - # TODO: should we linearise so that we don't end up doing two .well-known - # requests for the same server in parallel? + prev_result = None + + # TODO: should we linearise so that we don't end up doing two .well-known + # requests for the same server in parallel? + try: with Measure(self._clock, "get_well_known"): - result, cache_period = yield self._do_get_well_known(server_name) + result, cache_period = yield self._fetch_well_known(server_name) + + except _FetchWellKnownFailure as e: + if prev_result and e.temporary: + # This is a temporary failure and we have a still valid cached + # result, so lets return that. Hopefully the next time we ask + # the remote will be back up again. + return WellKnownLookupResult(delegated_server=prev_result) + + result = None + + if self._had_valid_well_known_cache.get(server_name, False): + # We have recently seen a valid well-known record for this + # server, so we cache the lack of well-known for a shorter time. + cache_period = WELL_KNOWN_DOWN_CACHE_PERIOD + else: + cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD + + # add some randomness to the TTL to avoid a stampeding herd + cache_period *= random.uniform( + 1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER, + 1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER, + ) - if cache_period > 0: - self._well_known_cache.set(server_name, result, cache_period) + if cache_period > 0: + self._well_known_cache.set(server_name, result, cache_period) return WellKnownLookupResult(delegated_server=result) @defer.inlineCallbacks - def _do_get_well_known(self, server_name): + def _fetch_well_known(self, server_name): """Actually fetch and parse a .well-known, without checking the cache Args: server_name (bytes): name of the server, from the requested url + Raises: + _FetchWellKnownFailure if we fail to lookup a result + Returns: - Deferred[Tuple[bytes|None|object],int]: - result, cache period, where result is one of: - - the new server name from the .well-known (as a `bytes`) - - None if there was no .well-known file. - - INVALID_WELL_KNOWN if the .well-known was invalid + Deferred[Tuple[bytes,int]]: The lookup result and cache period. """ - uri = b"https://%s/.well-known/matrix/server" % (server_name,) - uri_str = uri.decode("ascii") - logger.info("Fetching %s", uri_str) + + had_valid_well_known = self._had_valid_well_known_cache.get(server_name, False) + + # We do this in two steps to differentiate between possibly transient + # errors (e.g. can't connect to host, 503 response) and more permenant + # errors (such as getting a 404 response). 
+ response, body = yield self._make_well_known_request( + server_name, retry=had_valid_well_known + ) + try: - response = yield make_deferred_yieldable( - self._well_known_agent.request(b"GET", uri) - ) - body = yield make_deferred_yieldable(readBody(response)) if response.code != 200: raise Exception("Non-200 response %s" % (response.code,)) parsed_body = json.loads(body.decode("utf-8")) logger.info("Response from .well-known: %s", parsed_body) - if not isinstance(parsed_body, dict): - raise Exception("not a dict") - if "m.server" not in parsed_body: - raise Exception("Missing key 'm.server'") - except Exception as e: - logger.info("Error fetching %s: %s", uri_str, e) - - # add some randomness to the TTL to avoid a stampeding herd every hour - # after startup - cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD - cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) - return (None, cache_period) - result = parsed_body["m.server"].encode("ascii") + result = parsed_body["m.server"].encode("ascii") + except defer.CancelledError: + # Bail if we've been cancelled + raise + except Exception as e: + logger.info("Error parsing well-known for %s: %s", server_name, e) + raise _FetchWellKnownFailure(temporary=False) cache_period = _cache_period_from_headers( response.headers, time_now=self._reactor.seconds @@ -141,12 +192,68 @@ class WellKnownResolver(object): cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD # add some randomness to the TTL to avoid a stampeding herd every 24 hours # after startup - cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) + cache_period *= random.uniform( + 1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER, + 1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER, + ) else: cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD) cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD) - return (result, cache_period) + # We got a success, mark as such in the cache + self._had_valid_well_known_cache.set( + server_name, + bool(result), + cache_period + WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID, + ) + + return result, cache_period + + @defer.inlineCallbacks + def _make_well_known_request(self, server_name, retry): + """Make the well known request. + + This will retry the request if requested and it fails (with unable + to connect or receives a 5xx error). + + Args: + server_name (bytes) + retry (bool): Whether to retry the request if it fails. + + Returns: + Deferred[tuple[IResponse, bytes]] Returns the response object and + body. Response may be a non-200 response. + """ + uri = b"https://%s/.well-known/matrix/server" % (server_name,) + uri_str = uri.decode("ascii") + + i = 0 + while True: + i += 1 + + logger.info("Fetching %s", uri_str) + try: + response = yield make_deferred_yieldable( + self._well_known_agent.request(b"GET", uri) + ) + body = yield make_deferred_yieldable(readBody(response)) + + if 500 <= response.code < 600: + raise Exception("Non-200 response %s" % (response.code,)) + + return response, body + except defer.CancelledError: + # Bail if we've been cancelled + raise + except Exception as e: + if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS: + logger.info("Error fetching %s: %s", uri_str, e) + raise _FetchWellKnownFailure(temporary=True) + + logger.info("Error fetching %s: %s. 
Retrying", uri_str, e) + + # Sleep briefly in the hopes that they come back up + yield self._clock.sleep(0.5) def _cache_period_from_headers(headers, time_now=time.time): @@ -185,3 +292,10 @@ def _parse_cache_control(headers): v = splits[1] if len(splits) > 1 else None cache_controls[k] = v return cache_controls + + +@attr.s() +class _FetchWellKnownFailure(Exception): + # True if we didn't get a non-5xx HTTP response, i.e. this may or may not be + # a temporary failure. + temporary = attr.ib() diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index d07d356464..3f7c93ffcb 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -36,7 +36,6 @@ from twisted.internet.task import _EPSILON, Cooperator from twisted.web._newclient import ResponseDone from twisted.web.http_headers import Headers -import synapse.logging.opentracing as opentracing import synapse.metrics import synapse.util.retryutils from synapse.api.errors import ( @@ -50,6 +49,12 @@ from synapse.http import QuieterFileBodyProducer from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.logging.context import make_deferred_yieldable +from synapse.logging.opentracing import ( + inject_active_span_byte_dict, + set_tag, + start_active_span, + tags, +) from synapse.util.async_helpers import timeout_deferred from synapse.util.metrics import Measure @@ -340,21 +345,20 @@ class MatrixFederationHttpClient(object): else: query_bytes = b"" - # Retreive current span - scope = opentracing.start_active_span( + scope = start_active_span( "outgoing-federation-request", tags={ - opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT, - opentracing.tags.PEER_ADDRESS: request.destination, - opentracing.tags.HTTP_METHOD: request.method, - opentracing.tags.HTTP_URL: request.path, + tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT, + tags.PEER_ADDRESS: request.destination, + tags.HTTP_METHOD: request.method, + tags.HTTP_URL: request.path, }, finish_on_close=True, ) # Inject the span into the headers headers_dict = {} - opentracing.inject_active_span_byte_dict(headers_dict, request.destination) + inject_active_span_byte_dict(headers_dict, request.destination) headers_dict[b"User-Agent"] = [self.version_string_bytes] @@ -436,9 +440,7 @@ class MatrixFederationHttpClient(object): response.phrase.decode("ascii", errors="replace"), ) - opentracing.set_tag( - opentracing.tags.HTTP_STATUS_CODE, response.code - ) + set_tag(tags.HTTP_STATUS_CODE, response.code) if 200 <= response.code < 300: pass diff --git a/synapse/http/server.py b/synapse/http/server.py index e6f351ba3b..cb9158fe1b 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -40,6 +40,7 @@ from synapse.api.errors import ( UnrecognizedRequestError, ) from synapse.logging.context import preserve_fn +from synapse.logging.opentracing import trace_servlet from synapse.util.caches import intern_dict logger = logging.getLogger(__name__) @@ -257,7 +258,9 @@ class JsonResource(HttpServer, resource.Resource): self.path_regexs = {} self.hs = hs - def register_paths(self, method, path_patterns, callback, servlet_classname): + def register_paths( + self, method, path_patterns, callback, servlet_classname, trace=True + ): """ Registers a request handler against a regular expression. 
Later request URLs are checked against these regular expressions in order to identify an appropriate @@ -273,8 +276,16 @@ class JsonResource(HttpServer, resource.Resource): servlet_classname (str): The name of the handler to be used in prometheus and opentracing logs. + + trace (bool): Whether we should start a span to trace the servlet. """ method = method.encode("utf-8") # method is bytes on py3 + + if trace: + # We don't extract the context from the servlet because we can't + # trust the sender + callback = trace_servlet(servlet_classname)(callback) + for path_pattern in path_patterns: logger.debug("Registering for %s %s", method, path_pattern.pattern) self.path_regexs.setdefault(method, []).append( diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index fd07bf7b8e..274c1a6a87 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -20,7 +20,6 @@ import logging from canonicaljson import json from synapse.api.errors import Codes, SynapseError -from synapse.logging.opentracing import trace_servlet logger = logging.getLogger(__name__) @@ -298,10 +297,7 @@ class RestServlet(object): servlet_classname = self.__class__.__name__ method_handler = getattr(self, "on_%s" % (method,)) http_server.register_paths( - method, - patterns, - trace_servlet(servlet_classname, method_handler), - servlet_classname, + method, patterns, method_handler, servlet_classname ) else: diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py new file mode 100644 index 0000000000..3220e985a9 --- /dev/null +++ b/synapse/logging/_structured.py @@ -0,0 +1,374 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os.path +import sys +import typing +import warnings +from typing import List + +import attr +from constantly import NamedConstant, Names, ValueConstant, Values +from zope.interface import implementer + +from twisted.logger import ( + FileLogObserver, + FilteringLogObserver, + ILogObserver, + LogBeginner, + Logger, + LogLevel, + LogLevelFilterPredicate, + LogPublisher, + eventAsText, + jsonFileLogObserver, +) + +from synapse.config._base import ConfigError +from synapse.logging._terse_json import ( + TerseJSONToConsoleLogObserver, + TerseJSONToTCPLogObserver, +) +from synapse.logging.context import LoggingContext + + +def stdlib_log_level_to_twisted(level: str) -> LogLevel: + """ + Convert a stdlib log level to Twisted's log level. + """ + lvl = level.lower().replace("warning", "warn") + return LogLevel.levelWithName(lvl) + + +@attr.s +@implementer(ILogObserver) +class LogContextObserver(object): + """ + An ILogObserver which adds Synapse-specific log context information. + + Attributes: + observer (ILogObserver): The target parent observer. + """ + + observer = attr.ib() + + def __call__(self, event: dict) -> None: + """ + Consume a log event and emit it to the parent observer after filtering + and adding log context information. 
+ + Args: + event (dict) + """ + # Filter out some useless events that Twisted outputs + if "log_text" in event: + if event["log_text"].startswith("DNSDatagramProtocol starting on "): + return + + if event["log_text"].startswith("(UDP Port "): + return + + if event["log_text"].startswith("Timing out client") or event[ + "log_format" + ].startswith("Timing out client"): + return + + context = LoggingContext.current_context() + + # Copy the context information to the log event. + if context is not None: + context.copy_to_twisted_log_entry(event) + else: + # If there's no logging context, not even the root one, we might be + # starting up or it might be from non-Synapse code. Log it as if it + # came from the root logger. + event["request"] = None + event["scope"] = None + + self.observer(event) + + +class PythonStdlibToTwistedLogger(logging.Handler): + """ + Transform a Python stdlib log message into a Twisted one. + """ + + def __init__(self, observer, *args, **kwargs): + """ + Args: + observer (ILogObserver): A Twisted logging observer. + *args, **kwargs: Args/kwargs to be passed to logging.Handler. + """ + self.observer = observer + super().__init__(*args, **kwargs) + + def emit(self, record: logging.LogRecord) -> None: + """ + Emit a record to Twisted's observer. + + Args: + record (logging.LogRecord) + """ + + self.observer( + { + "log_time": record.created, + "log_text": record.getMessage(), + "log_format": "{log_text}", + "log_namespace": record.name, + "log_level": stdlib_log_level_to_twisted(record.levelname), + } + ) + + +def SynapseFileLogObserver(outFile: typing.IO[str]) -> FileLogObserver: + """ + A log observer that formats events like the traditional log formatter and + sends them to `outFile`. + + Args: + outFile (file object): The file object to write to. + """ + + def formatEvent(_event: dict) -> str: + event = dict(_event) + event["log_level"] = event["log_level"].name.upper() + event["log_format"] = "- {log_namespace} - {log_level} - {request} - " + ( + event.get("log_format", "{log_text}") or "{log_text}" + ) + return eventAsText(event, includeSystem=False) + "\n" + + return FileLogObserver(outFile, formatEvent) + + +class DrainType(Names): + CONSOLE = NamedConstant() + CONSOLE_JSON = NamedConstant() + CONSOLE_JSON_TERSE = NamedConstant() + FILE = NamedConstant() + FILE_JSON = NamedConstant() + NETWORK_JSON_TERSE = NamedConstant() + + +class OutputPipeType(Values): + stdout = ValueConstant(sys.__stdout__) + stderr = ValueConstant(sys.__stderr__) + + +@attr.s +class DrainConfiguration(object): + name = attr.ib() + type = attr.ib() + location = attr.ib() + options = attr.ib(default=None) + + +@attr.s +class NetworkJSONTerseOptions(object): + maximum_buffer = attr.ib(type=int) + + +DEFAULT_LOGGERS = {"synapse": {"level": "INFO"}} + + +def parse_drain_configs( + drains: dict +) -> typing.Generator[DrainConfiguration, None, None]: + """ + Parse the drain configurations. + + Args: + drains (dict): A list of drain configurations. + + Yields: + DrainConfiguration instances. + + Raises: + ConfigError: If any of the drain configuration items are invalid. + """ + for name, config in drains.items(): + if "type" not in config: + raise ConfigError("Logging drains require a 'type' key.") + + try: + logging_type = DrainType.lookupByName(config["type"].upper()) + except ValueError: + raise ConfigError( + "%s is not a known logging drain type." 
% (config["type"],) + ) + + if logging_type in [ + DrainType.CONSOLE, + DrainType.CONSOLE_JSON, + DrainType.CONSOLE_JSON_TERSE, + ]: + location = config.get("location") + if location is None or location not in ["stdout", "stderr"]: + raise ConfigError( + ( + "The %s drain needs the 'location' key set to " + "either 'stdout' or 'stderr'." + ) + % (logging_type,) + ) + + pipe = OutputPipeType.lookupByName(location).value + + yield DrainConfiguration(name=name, type=logging_type, location=pipe) + + elif logging_type in [DrainType.FILE, DrainType.FILE_JSON]: + if "location" not in config: + raise ConfigError( + "The %s drain needs the 'location' key set." % (logging_type,) + ) + + location = config.get("location") + if os.path.abspath(location) != location: + raise ConfigError( + "File paths need to be absolute, '%s' is a relative path" + % (location,) + ) + yield DrainConfiguration(name=name, type=logging_type, location=location) + + elif logging_type in [DrainType.NETWORK_JSON_TERSE]: + host = config.get("host") + port = config.get("port") + maximum_buffer = config.get("maximum_buffer", 1000) + yield DrainConfiguration( + name=name, + type=logging_type, + location=(host, port), + options=NetworkJSONTerseOptions(maximum_buffer=maximum_buffer), + ) + + else: + raise ConfigError( + "The %s drain type is currently not implemented." + % (config["type"].upper(),) + ) + + +def setup_structured_logging( + hs, + config, + log_config: dict, + logBeginner: LogBeginner, + redirect_stdlib_logging: bool = True, +) -> LogPublisher: + """ + Set up Twisted's structured logging system. + + Args: + hs: The homeserver to use. + config (HomeserverConfig): The configuration of the Synapse homeserver. + log_config (dict): The log configuration to use. + """ + if config.no_redirect_stdio: + raise ConfigError( + "no_redirect_stdio cannot be defined using structured logging." 
+ ) + + logger = Logger() + + if "drains" not in log_config: + raise ConfigError("The logging configuration requires a list of drains.") + + observers = [] # type: List[ILogObserver] + + for observer in parse_drain_configs(log_config["drains"]): + # Pipe drains + if observer.type == DrainType.CONSOLE: + logger.debug( + "Starting up the {name} console logger drain", name=observer.name + ) + observers.append(SynapseFileLogObserver(observer.location)) + elif observer.type == DrainType.CONSOLE_JSON: + logger.debug( + "Starting up the {name} JSON console logger drain", name=observer.name + ) + observers.append(jsonFileLogObserver(observer.location)) + elif observer.type == DrainType.CONSOLE_JSON_TERSE: + logger.debug( + "Starting up the {name} terse JSON console logger drain", + name=observer.name, + ) + observers.append( + TerseJSONToConsoleLogObserver(observer.location, metadata={}) + ) + + # File drains + elif observer.type == DrainType.FILE: + logger.debug("Starting up the {name} file logger drain", name=observer.name) + log_file = open(observer.location, "at", buffering=1, encoding="utf8") + observers.append(SynapseFileLogObserver(log_file)) + elif observer.type == DrainType.FILE_JSON: + logger.debug( + "Starting up the {name} JSON file logger drain", name=observer.name + ) + log_file = open(observer.location, "at", buffering=1, encoding="utf8") + observers.append(jsonFileLogObserver(log_file)) + + elif observer.type == DrainType.NETWORK_JSON_TERSE: + metadata = {"server_name": hs.config.server_name} + log_observer = TerseJSONToTCPLogObserver( + hs=hs, + host=observer.location[0], + port=observer.location[1], + metadata=metadata, + maximum_buffer=observer.options.maximum_buffer, + ) + log_observer.start() + observers.append(log_observer) + else: + # We should never get here, but, just in case, throw an error. + raise ConfigError("%s drain type cannot be configured" % (observer.type,)) + + publisher = LogPublisher(*observers) + log_filter = LogLevelFilterPredicate() + + for namespace, namespace_config in log_config.get( + "loggers", DEFAULT_LOGGERS + ).items(): + # Set the log level for twisted.logger.Logger namespaces + log_filter.setLogLevelForNamespace( + namespace, + stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")), + ) + + # Also set the log levels for the stdlib logger namespaces, to prevent + # them getting to PythonStdlibToTwistedLogger and having to be formatted + if "level" in namespace_config: + logging.getLogger(namespace).setLevel(namespace_config.get("level")) + + f = FilteringLogObserver(publisher, [log_filter]) + lco = LogContextObserver(f) + + if redirect_stdlib_logging: + stuff_into_twisted = PythonStdlibToTwistedLogger(lco) + stdliblogger = logging.getLogger() + stdliblogger.addHandler(stuff_into_twisted) + + # Always redirect standard I/O, otherwise other logging outputs might miss + # it. + logBeginner.beginLoggingTo([lco], redirectStandardIO=True) + + return publisher + + +def reload_structured_logging(*args, log_config=None) -> None: + warnings.warn( + "Currently the structured logging system can not be reloaded, doing nothing" + ) diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py new file mode 100644 index 0000000000..0ebbde06f2 --- /dev/null +++ b/synapse/logging/_terse_json.py @@ -0,0 +1,280 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
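For reference, a hypothetical log_config of the shape that parse_drain_configs and setup_structured_logging above expect (key names taken from this change, values invented for illustration):

    log_config = {
        "drains": {
            "console": {"type": "console", "location": "stdout"},
            "file": {
                "type": "file_json",
                "location": "/var/log/synapse/homeserver.log",
            },
            "remote": {
                "type": "network_json_terse",
                "host": "127.0.0.1",
                "port": 9000,
                "maximum_buffer": 1000,
            },
        },
        "loggers": {"synapse": {"level": "INFO"}},
    }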
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Log formatters that output terse JSON. +""" + +import sys +from collections import deque +from ipaddress import IPv4Address, IPv6Address, ip_address +from math import floor +from typing import IO + +import attr +from simplejson import dumps +from zope.interface import implementer + +from twisted.application.internet import ClientService +from twisted.internet.endpoints import ( + HostnameEndpoint, + TCP4ClientEndpoint, + TCP6ClientEndpoint, +) +from twisted.internet.protocol import Factory, Protocol +from twisted.logger import FileLogObserver, ILogObserver, Logger +from twisted.python.failure import Failure + + +def flatten_event(event: dict, metadata: dict, include_time: bool = False): + """ + Flatten a Twisted logging event to an dictionary capable of being sent + as a log event to a logging aggregation system. + + The format is vastly simplified and is not designed to be a "human readable + string" in the sense that traditional logs are. Instead, the structure is + optimised for searchability and filtering, with human-understandable log + keys. + + Args: + event (dict): The Twisted logging event we are flattening. + metadata (dict): Additional data to include with each log message. This + can be information like the server name. Since the target log + consumer does not know who we are other than by host IP, this + allows us to forward through static information. + include_time (bool): Should we include the `time` key? If False, the + event time is stripped from the event. + """ + new_event = {} + + # If it's a failure, make the new event's log_failure be the traceback text. + if "log_failure" in event: + new_event["log_failure"] = event["log_failure"].getTraceback() + + # If it's a warning, copy over a string representation of the warning. + if "warning" in event: + new_event["warning"] = str(event["warning"]) + + # Stdlib logging events have "log_text" as their human-readable portion, + # Twisted ones have "log_format". For now, include the log_format, so that + # context only given in the log format (e.g. what is being logged) is + # available. + if "log_text" in event: + new_event["log"] = event["log_text"] + else: + new_event["log"] = event["log_format"] + + # We want to include the timestamp when forwarding over the network, but + # exclude it when we are writing to stdout. This is because the log ingester + # (e.g. logstash, fluentd) can add its own timestamp. + if include_time: + new_event["time"] = round(event["log_time"], 2) + + # Convert the log level to a textual representation. + new_event["level"] = event["log_level"].name.upper() + + # Ignore these keys, and do not transfer them over to the new log object. + # They are either useless (isError), transferred manually above (log_time, + # log_level, etc), or contain Python objects which are not useful for output + # (log_logger, log_source). 
+ keys_to_delete = [ + "isError", + "log_failure", + "log_format", + "log_level", + "log_logger", + "log_source", + "log_system", + "log_time", + "log_text", + "observer", + "warning", + ] + + # If it's from the Twisted legacy logger (twisted.python.log), it adds some + # more keys we want to purge. + if event.get("log_namespace") == "log_legacy": + keys_to_delete.extend(["message", "system", "time"]) + + # Rather than modify the dictionary in place, construct a new one with only + # the content we want. The original event should be considered 'frozen'. + for key in event.keys(): + + if key in keys_to_delete: + continue + + if isinstance(event[key], (str, int, bool, float)) or event[key] is None: + # If it's a plain type, include it as is. + new_event[key] = event[key] + else: + # If it's not one of those basic types, write out a string + # representation. This should probably be a warning in development, + # so that we are sure we are only outputting useful data. + new_event[key] = str(event[key]) + + # Add the metadata information to the event (e.g. the server_name). + new_event.update(metadata) + + return new_event + + +def TerseJSONToConsoleLogObserver(outFile: IO[str], metadata: dict) -> FileLogObserver: + """ + A log observer that formats events to a flattened JSON representation. + + Args: + outFile: The file object to write to. + metadata: Metadata to be added to each log object. + """ + + def formatEvent(_event: dict) -> str: + flattened = flatten_event(_event, metadata) + return dumps(flattened, ensure_ascii=False, separators=(",", ":")) + "\n" + + return FileLogObserver(outFile, formatEvent) + + +@attr.s +@implementer(ILogObserver) +class TerseJSONToTCPLogObserver(object): + """ + An IObserver that writes JSON logs to a TCP target. + + Args: + hs (HomeServer): The Homeserver that is being logged for. + host: The host of the logging target. + port: The logging target's port. + metadata: Metadata to be added to each log entry. + """ + + hs = attr.ib() + host = attr.ib(type=str) + port = attr.ib(type=int) + metadata = attr.ib(type=dict) + maximum_buffer = attr.ib(type=int) + _buffer = attr.ib(default=attr.Factory(deque), type=deque) + _writer = attr.ib(default=None) + _logger = attr.ib(default=attr.Factory(Logger)) + + def start(self) -> None: + + # Connect without DNS lookups if it's a direct IP. + try: + ip = ip_address(self.host) + if isinstance(ip, IPv4Address): + endpoint = TCP4ClientEndpoint( + self.hs.get_reactor(), self.host, self.port + ) + elif isinstance(ip, IPv6Address): + endpoint = TCP6ClientEndpoint( + self.hs.get_reactor(), self.host, self.port + ) + except ValueError: + endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port) + + factory = Factory.forProtocol(Protocol) + self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor()) + self._service.startService() + + def _write_loop(self) -> None: + """ + Implement the write loop. 
+ """ + if self._writer: + return + + self._writer = self._service.whenConnected() + + @self._writer.addBoth + def writer(r): + if isinstance(r, Failure): + r.printTraceback(file=sys.__stderr__) + self._writer = None + self.hs.get_reactor().callLater(1, self._write_loop) + return + + try: + for event in self._buffer: + r.transport.write( + dumps(event, ensure_ascii=False, separators=(",", ":")).encode( + "utf8" + ) + ) + r.transport.write(b"\n") + self._buffer.clear() + except Exception as e: + sys.__stderr__.write("Failed writing out logs with %s\n" % (str(e),)) + + self._writer = False + self.hs.get_reactor().callLater(1, self._write_loop) + + def _handle_pressure(self) -> None: + """ + Handle backpressure by shedding events. + + The buffer will, in this order, until the buffer is below the maximum: + - Shed DEBUG events + - Shed INFO events + - Shed the middle 50% of the events. + """ + if len(self._buffer) <= self.maximum_buffer: + return + + # Strip out DEBUGs + self._buffer = deque( + filter(lambda event: event["level"] != "DEBUG", self._buffer) + ) + + if len(self._buffer) <= self.maximum_buffer: + return + + # Strip out INFOs + self._buffer = deque( + filter(lambda event: event["level"] != "INFO", self._buffer) + ) + + if len(self._buffer) <= self.maximum_buffer: + return + + # Cut the middle entries out + buffer_split = floor(self.maximum_buffer / 2) + + old_buffer = self._buffer + self._buffer = deque() + + for i in range(buffer_split): + self._buffer.append(old_buffer.popleft()) + + end_buffer = [] + for i in range(buffer_split): + end_buffer.append(old_buffer.pop()) + + self._buffer.extend(reversed(end_buffer)) + + def __call__(self, event: dict) -> None: + flattened = flatten_event(event, self.metadata, include_time=True) + self._buffer.append(flattened) + + # Handle backpressure, if it exists. + try: + self._handle_pressure() + except Exception: + # If handling backpressure fails,clear the buffer and log the + # exception. + self._buffer.clear() + self._logger.failure("Failed clearing backpressure") + + # Try and write immediately. + self._write_loop() diff --git a/synapse/logging/context.py b/synapse/logging/context.py index b456c31f70..63379bfb93 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -25,6 +25,7 @@ See doc/log_contexts.rst for details on how this works. import logging import threading import types +from typing import Any, List from twisted.internet import defer, threads @@ -194,7 +195,7 @@ class LoggingContext(object): class Sentinel(object): """Sentinel to represent the root context""" - __slots__ = [] + __slots__ = [] # type: List[Any] def __str__(self): return "sentinel" @@ -202,6 +203,10 @@ class LoggingContext(object): def copy_to(self, record): pass + def copy_to_twisted_log_entry(self, record): + record["request"] = None + record["scope"] = None + def start(self): pass @@ -330,6 +335,13 @@ class LoggingContext(object): # we also track the current scope: record.scope = self.scope + def copy_to_twisted_log_entry(self, record): + """ + Copy logging fields from this context to a Twisted log record. 
+ """ + record["request"] = self.request + record["scope"] = self.scope + def start(self): if get_thread_id() != self.main_thread: logger.warning("Started logcontext %s on different thread", self) diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index d2c209c471..308a27213b 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -43,6 +43,9 @@ OpenTracing to be easily disabled in Synapse and thereby have OpenTracing as an optional dependency. This does however limit the number of modifiable spans at any point in the code to one. From here out references to `opentracing` in the code snippets refer to the Synapses module. +Most methods provided in the module have a direct correlation to those provided +by opentracing. Refer to docs there for a more in-depth documentation on some of +the args and methods. Tracing ------- @@ -68,52 +71,62 @@ set a tag on the current active span. Tracing functions ----------------- -Functions can be easily traced using decorators. There is a decorator for -'normal' function and for functions which are actually deferreds. The name of +Functions can be easily traced using decorators. The name of the function becomes the operation name for the span. .. code-block:: python - from synapse.logging.opentracing import trace, trace_deferred + from synapse.logging.opentracing import trace - # Start a span using 'normal_function' as the operation name + # Start a span using 'interesting_function' as the operation name @trace - def normal_function(*args, **kwargs): + def interesting_function(*args, **kwargs): # Does all kinds of cool and expected things return something_usual_and_useful - # Start a span using 'deferred_function' as the operation name - @trace_deferred - @defer.inlineCallbacks - def deferred_function(*args, **kwargs): - # We start - yield we_wait - # we finish - return something_usual_and_useful -Operation names can be explicitly set for functions by using -``trace_using_operation_name`` and -``trace_deferred_using_operation_name`` +Operation names can be explicitly set for a function by passing the +operation name to ``trace`` .. code-block:: python - from synapse.logging.opentracing import ( - trace_using_operation_name, - trace_deferred_using_operation_name - ) + from synapse.logging.opentracing import trace - @trace_using_operation_name("A *much* better operation name") - def normal_function(*args, **kwargs): + @trace(opname="a_better_operation_name") + def interesting_badly_named_function(*args, **kwargs): # Does all kinds of cool and expected things return something_usual_and_useful - @trace_deferred_using_operation_name("Another exciting operation name!") - @defer.inlineCallbacks - def deferred_function(*args, **kwargs): - # We start - yield we_wait - # we finish - return something_usual_and_useful +Setting Tags +------------ + +To set a tag on the active span do + +.. code-block:: python + + from synapse.logging.opentracing import set_tag + + set_tag(tag_name, tag_value) + +There's a convenient decorator to tag all the args of the method. It uses +inspection in order to use the formal parameter names prefixed with 'ARG_' as +tag names. It uses kwarg names as tag names without the prefix. + +.. 
code-block:: python + + from synapse.logging.opentracing import tag_args + + @tag_args + def set_fates(clotho, lachesis, atropos, father="Zues", mother="Themis"): + pass + + set_fates("the story", "the end", "the act") + # This will have the following tags + # - ARG_clotho: "the story" + # - ARG_lachesis: "the end" + # - ARG_atropos: "the act" + # - father: "Zues" + # - mother: "Themis" Contexts and carriers --------------------- @@ -136,6 +149,9 @@ unchartered waters will require the enforcement of the whitelist. ``logging/opentracing.py`` has a ``whitelisted_homeserver`` method which takes in a destination and compares it to the whitelist. +Most injection methods take a 'destination' arg. The context will only be injected +if the destination matches the whitelist or the destination is None. + ======= Gotchas ======= @@ -161,16 +177,54 @@ from twisted.internet import defer from synapse.config import ConfigError +# Helper class + + +class _DummyTagNames(object): + """wrapper of opentracings tags. We need to have them if we + want to reference them without opentracing around. Clearly they + should never actually show up in a trace. `set_tags` overwrites + these with the correct ones.""" + + INVALID_TAG = "invalid-tag" + COMPONENT = INVALID_TAG + DATABASE_INSTANCE = INVALID_TAG + DATABASE_STATEMENT = INVALID_TAG + DATABASE_TYPE = INVALID_TAG + DATABASE_USER = INVALID_TAG + ERROR = INVALID_TAG + HTTP_METHOD = INVALID_TAG + HTTP_STATUS_CODE = INVALID_TAG + HTTP_URL = INVALID_TAG + MESSAGE_BUS_DESTINATION = INVALID_TAG + PEER_ADDRESS = INVALID_TAG + PEER_HOSTNAME = INVALID_TAG + PEER_HOST_IPV4 = INVALID_TAG + PEER_HOST_IPV6 = INVALID_TAG + PEER_PORT = INVALID_TAG + PEER_SERVICE = INVALID_TAG + SAMPLING_PRIORITY = INVALID_TAG + SERVICE = INVALID_TAG + SPAN_KIND = INVALID_TAG + SPAN_KIND_CONSUMER = INVALID_TAG + SPAN_KIND_PRODUCER = INVALID_TAG + SPAN_KIND_RPC_CLIENT = INVALID_TAG + SPAN_KIND_RPC_SERVER = INVALID_TAG + + try: import opentracing + + tags = opentracing.tags except ImportError: opentracing = None + tags = _DummyTagNames try: from jaeger_client import Config as JaegerConfig from synapse.logging.scopecontextmanager import LogContextScopeManager except ImportError: - JaegerConfig = None - LogContextScopeManager = None + JaegerConfig = None # type: ignore + LogContextScopeManager = None # type: ignore logger = logging.getLogger(__name__) @@ -185,8 +239,7 @@ _homeserver_whitelist = None def only_if_tracing(func): - """Executes the function only if we're tracing. Otherwise return. - Assumes the function wrapped may return None""" + """Executes the function only if we're tracing. Otherwise returns None.""" @wraps(func) def _only_if_tracing_inner(*args, **kwargs): @@ -198,6 +251,41 @@ def only_if_tracing(func): return _only_if_tracing_inner +def ensure_active_span(message, ret=None): + """Executes the operation only if opentracing is enabled and there is an active span. + If there is no active span it logs message at the error level. + + Args: + message (str): Message which fills in "There was no active span when trying to %s" + in the error log if there is no active span and opentracing is enabled. + ret (object): return value if opentracing is None or there is no active span. + + Returns (object): The result of the func or ret if opentracing is disabled or there + was no active span. 
+ """ + + def ensure_active_span_inner_1(func): + @wraps(func) + def ensure_active_span_inner_2(*args, **kwargs): + if not opentracing: + return ret + + if not opentracing.tracer.active_span: + logger.error( + "There was no active span when trying to %s." + " Did you forget to start one or did a context slip?", + message, + ) + + return ret + + return func(*args, **kwargs) + + return ensure_active_span_inner_2 + + return ensure_active_span_inner_1 + + @contextlib.contextmanager def _noop_context_manager(*args, **kwargs): """Does exactly what it says on the tin""" @@ -239,10 +327,6 @@ def init_tracer(config): scope_manager=LogContextScopeManager(config), ).initialize_tracer() - # Set up tags to be opentracing's tags - global tags - tags = opentracing.tags - # Whitelisting @@ -269,7 +353,7 @@ def whitelisted_homeserver(destination): Args: destination (str) """ - _homeserver_whitelist + if _homeserver_whitelist: return _homeserver_whitelist.match(destination) return False @@ -299,30 +383,28 @@ def start_active_span( if opentracing is None: return _noop_context_manager() - else: - # We need to enter the scope here for the logcontext to become active - return opentracing.tracer.start_active_span( - operation_name, - child_of=child_of, - references=references, - tags=tags, - start_time=start_time, - ignore_active_span=ignore_active_span, - finish_on_close=finish_on_close, - ) + return opentracing.tracer.start_active_span( + operation_name, + child_of=child_of, + references=references, + tags=tags, + start_time=start_time, + ignore_active_span=ignore_active_span, + finish_on_close=finish_on_close, + ) def start_active_span_follows_from(operation_name, contexts): if opentracing is None: return _noop_context_manager() - else: - references = [opentracing.follows_from(context) for context in contexts] - scope = start_active_span(operation_name, references=references) - return scope + + references = [opentracing.follows_from(context) for context in contexts] + scope = start_active_span(operation_name, references=references) + return scope -def start_active_span_from_context( - headers, +def start_active_span_from_request( + request, operation_name, references=None, tags=None, @@ -331,9 +413,9 @@ def start_active_span_from_context( finish_on_close=True, ): """ - Extracts a span context from Twisted Headers. + Extracts a span context from a Twisted Request. 
args: - headers (twisted.web.http_headers.Headers) + headers (twisted.web.http.Request) For the other args see opentracing.tracer @@ -347,7 +429,9 @@ def start_active_span_from_context( if opentracing is None: return _noop_context_manager() - header_dict = {k.decode(): v[0].decode() for k, v in headers.getAllRawHeaders()} + header_dict = { + k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders() + } context = opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict) return opentracing.tracer.start_active_span( @@ -413,19 +497,19 @@ def start_active_span_from_edu( # Opentracing setters for tags, logs, etc -@only_if_tracing +@ensure_active_span("set a tag") def set_tag(key, value): """Sets a tag on the active span""" opentracing.tracer.active_span.set_tag(key, value) -@only_if_tracing +@ensure_active_span("log") def log_kv(key_values, timestamp=None): """Log to the active span""" opentracing.tracer.active_span.log_kv(key_values, timestamp) -@only_if_tracing +@ensure_active_span("set the traces operation name") def set_operation_name(operation_name): """Sets the operation name of the active span""" opentracing.tracer.active_span.set_operation_name(operation_name) @@ -434,13 +518,18 @@ def set_operation_name(operation_name): # Injection and extraction -@only_if_tracing -def inject_active_span_twisted_headers(headers, destination): +@ensure_active_span("inject the span into a header") +def inject_active_span_twisted_headers(headers, destination, check_destination=True): """ Injects a span context into twisted headers in-place Args: headers (twisted.web.http_headers.Headers) + destination (str): address of entity receiving the span context. If check_destination + is true the context will only be injected if the destination matches the + opentracing whitelist + check_destination (bool): If false, destination will be ignored and the context + will always be injected. span (opentracing.Span) Returns: @@ -454,7 +543,7 @@ def inject_active_span_twisted_headers(headers, destination): https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py """ - if not whitelisted_homeserver(destination): + if check_destination and not whitelisted_homeserver(destination): return span = opentracing.tracer.active_span @@ -465,14 +554,19 @@ def inject_active_span_twisted_headers(headers, destination): headers.addRawHeaders(key, value) -@only_if_tracing -def inject_active_span_byte_dict(headers, destination): +@ensure_active_span("inject the span into a byte dict") +def inject_active_span_byte_dict(headers, destination, check_destination=True): """ Injects a span context into a dict where the headers are encoded as byte strings Args: headers (dict) + destination (str): address of entity receiving the span context. If check_destination + is true the context will only be injected if the destination matches the + opentracing whitelist + check_destination (bool): If false, destination will be ignored and the context + will always be injected. 
span (opentracing.Span) Returns: @@ -485,7 +579,7 @@ def inject_active_span_byte_dict(headers, destination): here: https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py """ - if not whitelisted_homeserver(destination): + if check_destination and not whitelisted_homeserver(destination): return span = opentracing.tracer.active_span @@ -497,16 +591,18 @@ def inject_active_span_byte_dict(headers, destination): headers[key.encode()] = [value.encode()] -@only_if_tracing -def inject_active_span_text_map(carrier, destination=None): +@ensure_active_span("inject the span into a text map") +def inject_active_span_text_map(carrier, destination, check_destination=True): """ Injects a span context into a dict Args: carrier (dict) - destination (str): the name of the remote server. The span context - will only be injected if the destination matches the homeserver_whitelist - or destination is None. + destination (str): address of entity receiving the span context. If check_destination + is true the context will only be injected if the destination matches the + opentracing whitelist + check_destination (bool): If false, destination will be ignored and the context + will always be injected. Returns: In-place modification of carrier @@ -519,7 +615,7 @@ def inject_active_span_text_map(carrier, destination=None): https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py """ - if destination and not whitelisted_homeserver(destination): + if check_destination and not whitelisted_homeserver(destination): return opentracing.tracer.inject( @@ -527,6 +623,31 @@ def inject_active_span_text_map(carrier, destination=None): ) +@ensure_active_span("get the active span context as a dict", ret={}) +def get_active_span_text_map(destination=None): + """ + Gets a span context as a dict. This can be used instead of manually + injecting a span into an empty carrier. + + Args: + destination (str): the name of the remote server. + + Returns: + dict: the active span's context if opentracing is enabled, otherwise empty. + """ + + if destination and not whitelisted_homeserver(destination): + return {} + + carrier = {} + opentracing.tracer.inject( + opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier + ) + + return carrier + + +@ensure_active_span("get the span context as a string.", ret={}) def active_span_context_as_string(): """ Returns: @@ -566,70 +687,30 @@ def extract_text_map(carrier): # Tracing decorators -def trace(func): +def trace(func=None, opname=None): """ Decorator to trace a function. - Sets the operation name to that of the function's. + Sets the operation name to that of the function's or that given + as operation_name. See the module's doc string for usage + examples. 
""" - if opentracing is None: - return func - - @wraps(func) - def _trace_inner(self, *args, **kwargs): - if opentracing is None: - return func(self, *args, **kwargs) - scope = start_active_span(func.__name__) - scope.__enter__() - - try: - result = func(self, *args, **kwargs) - if isinstance(result, defer.Deferred): - - def call_back(result): - scope.__exit__(None, None, None) - return result - - def err_back(result): - scope.span.set_tag(tags.ERROR, True) - scope.__exit__(None, None, None) - return result - - result.addCallbacks(call_back, err_back) - - else: - scope.__exit__(None, None, None) - - return result - - except Exception as e: - scope.__exit__(type(e), None, e.__traceback__) - raise - - return _trace_inner - - -def trace_using_operation_name(operation_name): - """Decorator to trace a function. Explicitely sets the operation_name.""" - - def trace(func): - """ - Decorator to trace a function. - Sets the operation name to that of the function's. - """ + def decorator(func): if opentracing is None: return func + _opname = opname if opname else func.__name__ + @wraps(func) - def _trace_inner(self, *args, **kwargs): + def _trace_inner(*args, **kwargs): if opentracing is None: - return func(self, *args, **kwargs) + return func(*args, **kwargs) - scope = start_active_span(operation_name) + scope = start_active_span(_opname) scope.__enter__() try: - result = func(self, *args, **kwargs) + result = func(*args, **kwargs) if isinstance(result, defer.Deferred): def call_back(result): @@ -642,6 +723,7 @@ def trace_using_operation_name(operation_name): return result result.addCallbacks(call_back, err_back) + else: scope.__exit__(None, None, None) @@ -653,7 +735,10 @@ def trace_using_operation_name(operation_name): return _trace_inner - return trace + if func: + return decorator(func) + else: + return decorator def tag_args(func): @@ -665,76 +750,54 @@ def tag_args(func): return func @wraps(func) - def _tag_args_inner(self, *args, **kwargs): + def _tag_args_inner(*args, **kwargs): argspec = inspect.getargspec(func) for i, arg in enumerate(argspec.args[1:]): set_tag("ARG_" + arg, args[i]) set_tag("args", args[len(argspec.args) :]) set_tag("kwargs", kwargs) - return func(self, *args, **kwargs) + return func(*args, **kwargs) return _tag_args_inner -def trace_servlet(servlet_name, func): +def trace_servlet(servlet_name, extract_context=False): """Decorator which traces a serlet. It starts a span with some servlet specific - tags such as the servlet_name and request information""" - if not opentracing: - return func + tags such as the servlet_name and request information - @wraps(func) - @defer.inlineCallbacks - def _trace_servlet_inner(request, *args, **kwargs): - with start_active_span( - "incoming-client-request", - tags={ + Args: + servlet_name (str): The name to be used for the span's operation_name + extract_context (bool): Whether to attempt to extract the opentracing + context from the request the servlet is handling. 
+ + """ + + def _trace_servlet_inner_1(func): + if not opentracing: + return func + + @wraps(func) + @defer.inlineCallbacks + def _trace_servlet_inner(request, *args, **kwargs): + request_tags = { "request_id": request.get_request_id(), tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, tags.HTTP_METHOD: request.get_method(), tags.HTTP_URL: request.get_redacted_uri(), tags.PEER_HOST_IPV6: request.getClientIP(), - "servlet_name": servlet_name, - }, - ): - result = yield defer.maybeDeferred(func, request, *args, **kwargs) - return result - - return _trace_servlet_inner - - -# Helper class + } + if extract_context: + scope = start_active_span_from_request( + request, servlet_name, tags=request_tags + ) + else: + scope = start_active_span(servlet_name, tags=request_tags) -class _DummyTagNames(object): - """wrapper of opentracings tags. We need to have them if we - want to reference them without opentracing around. Clearly they - should never actually show up in a trace. `set_tags` overwrites - these with the correct ones.""" - - INVALID_TAG = "invalid-tag" - COMPONENT = INVALID_TAG - DATABASE_INSTANCE = INVALID_TAG - DATABASE_STATEMENT = INVALID_TAG - DATABASE_TYPE = INVALID_TAG - DATABASE_USER = INVALID_TAG - ERROR = INVALID_TAG - HTTP_METHOD = INVALID_TAG - HTTP_STATUS_CODE = INVALID_TAG - HTTP_URL = INVALID_TAG - MESSAGE_BUS_DESTINATION = INVALID_TAG - PEER_ADDRESS = INVALID_TAG - PEER_HOSTNAME = INVALID_TAG - PEER_HOST_IPV4 = INVALID_TAG - PEER_HOST_IPV6 = INVALID_TAG - PEER_PORT = INVALID_TAG - PEER_SERVICE = INVALID_TAG - SAMPLING_PRIORITY = INVALID_TAG - SERVICE = INVALID_TAG - SPAN_KIND = INVALID_TAG - SPAN_KIND_CONSUMER = INVALID_TAG - SPAN_KIND_PRODUCER = INVALID_TAG - SPAN_KIND_RPC_CLIENT = INVALID_TAG - SPAN_KIND_RPC_SERVER = INVALID_TAG + with scope: + result = yield defer.maybeDeferred(func, request, *args, **kwargs) + return result + return _trace_servlet_inner -tags = _DummyTagNames + return _trace_servlet_inner_1 diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 488280b4a6..bec3b13397 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -20,6 +20,7 @@ import os import platform import threading import time +from typing import Dict, Union import six @@ -29,20 +30,20 @@ from prometheus_client.core import REGISTRY, GaugeMetricFamily, HistogramMetricF from twisted.internet import reactor +import synapse from synapse.metrics._exposition import ( MetricsResource, generate_latest, start_http_server, ) +from synapse.util.versionstring import get_version_string logger = logging.getLogger(__name__) METRICS_PREFIX = "/_synapse/metrics" running_on_pypy = platform.python_implementation() == "PyPy" -all_metrics = [] -all_collectors = [] -all_gauges = {} +all_gauges = {} # type: Dict[str, Union[LaterGauge, InFlightGauge, BucketCollector]] HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat") @@ -385,6 +386,16 @@ event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name" # finished being processed. event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"]) +# Build info of the running server. 
+build_info = Gauge( + "synapse_build_info", "Build information", ["pythonversion", "version", "osversion"] +) +build_info.labels( + " ".join([platform.python_implementation(), platform.python_version()]), + get_version_string(synapse), + " ".join([platform.system(), platform.release()]), +).set(1) + last_ticked = time.time() diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py index 1933ecd3e3..74d9c3ecd3 100644 --- a/synapse/metrics/_exposition.py +++ b/synapse/metrics/_exposition.py @@ -36,7 +36,9 @@ from twisted.web.resource import Resource try: from prometheus_client.samples import Sample except ImportError: - Sample = namedtuple("Sample", ["name", "labels", "value", "timestamp", "exemplar"]) + Sample = namedtuple( + "Sample", ["name", "labels", "value", "timestamp", "exemplar"] + ) # type: ignore CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8") diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 41147d4292..735b882363 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -101,7 +101,7 @@ class ModuleApi(object): ) user_id = yield self.register_user(localpart, displayname, emails) _, access_token = yield self.register_device(user_id) - return (user_id, access_token) + return user_id, access_token def register_user(self, localpart, displayname=None, emails=[]): """Registers a new user with given localpart and optional displayname, emails. diff --git a/synapse/notifier.py b/synapse/notifier.py index bd80c801b6..4e091314e6 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -472,11 +472,11 @@ class Notifier(object): joined_room_ids = yield self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids: - return ([explicit_room_id], True) + return [explicit_room_id], True if (yield self._is_world_readable(explicit_room_id)): - return ([explicit_room_id], False) + return [explicit_room_id], False raise AuthError(403, "Non-joined access not allowed") - return (joined_room_ids, True) + return joined_room_ids, True @defer.inlineCallbacks def _is_world_readable(self, room_id): diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index c831975635..22491f3700 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -134,7 +134,7 @@ class BulkPushRuleEvaluator(object): pl_event = auth_events.get(POWER_KEY) - return (pl_event.content if pl_event else {}, sender_level) + return pl_event.content if pl_event else {}, sender_level @defer.inlineCallbacks def action_for_event_by_user(self, event, context): diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index bd5d53af91..6299587808 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -22,6 +22,7 @@ from prometheus_client import Counter from twisted.internet import defer from twisted.internet.error import AlreadyCalled, AlreadyCancelled +from synapse.logging import opentracing from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push import PusherConfigException @@ -194,7 +195,17 @@ class HttpPusher(object): ) for push_action in unprocessed: - processed = yield self._process_one(push_action) + with opentracing.start_active_span( + "http-push", + tags={ + "authenticated_entity": self.user_id, + "event_id": push_action["event_id"], + "app_id": self.app_id, + "app_display_name": self.app_display_name, + }, + ): + processed = 
yield self._process_one(push_action) + if processed: http_push_processed_counter.inc() self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 4245ce26f3..3dfd527849 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -131,14 +131,11 @@ class Mailer(object): email_address (str): Email address we're sending the password reset to token (str): Unique token generated by the server to verify - password reset email was received + the email was received client_secret (str): Unique token generated by the client to group together multiple email sending attempts sid (str): The generated session ID """ - if email.utils.parseaddr(email_address)[1] == "": - raise RuntimeError("Invalid 'to' email address") - link = ( self.hs.config.public_baseurl + "_matrix/client/unstable/password_reset/email/submit_token" @@ -149,7 +146,34 @@ class Mailer(object): yield self.send_email( email_address, - "[%s] Password Reset Email" % self.hs.config.server_name, + "[%s] Password Reset" % self.hs.config.server_name, + template_vars, + ) + + @defer.inlineCallbacks + def send_registration_mail(self, email_address, token, client_secret, sid): + """Send an email with a registration confirmation link to a user + + Args: + email_address (str): Email address we're sending the registration + link to + token (str): Unique token generated by the server to verify + the email was received + client_secret (str): Unique token generated by the client to + group together multiple email sending attempts + sid (str): The generated session ID + """ + link = ( + self.hs.config.public_baseurl + + "_matrix/client/unstable/registration/email/submit_token" + "?token=%s&client_secret=%s&sid=%s" % (token, client_secret, sid) + ) + + template_vars = {"link": link} + + yield self.send_email( + email_address, + "[%s] Register your Email Address" % self.hs.config.server_name, template_vars, ) @@ -605,25 +629,50 @@ def format_ts_filter(value, format): return time.strftime(format, time.localtime(value / 1000)) -def load_jinja2_templates(config, template_html_name, template_text_name): - """Load the jinja2 email templates from disk +def load_jinja2_templates( + template_dir, + template_filenames, + apply_format_ts_filter=False, + apply_mxc_to_http_filter=False, + public_baseurl=None, +): + """Loads and returns one or more jinja2 templates and applies optional filters + + Args: + template_dir (str): The directory where templates are stored + template_filenames (list[str]): A list of template filenames + apply_format_ts_filter (bool): Whether to apply a template filter that formats + timestamps + apply_mxc_to_http_filter (bool): Whether to apply a template filter that converts + mxc urls to http urls + public_baseurl (str|None): The public baseurl of the server. 
Required for + apply_mxc_to_http_filter to be enabled Returns: - (template_html, template_text) + A list of jinja2 templates corresponding to the given list of filenames, + with order preserved """ - logger.info("loading email templates from '%s'", config.email_template_dir) - loader = jinja2.FileSystemLoader(config.email_template_dir) + logger.info( + "loading email templates %s from '%s'", template_filenames, template_dir + ) + loader = jinja2.FileSystemLoader(template_dir) env = jinja2.Environment(loader=loader) - env.filters["format_ts"] = format_ts_filter - env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config) - template_html = env.get_template(template_html_name) - template_text = env.get_template(template_text_name) + if apply_format_ts_filter: + env.filters["format_ts"] = format_ts_filter + + if apply_mxc_to_http_filter and public_baseurl: + env.filters["mxc_to_http"] = _create_mxc_to_http_filter(public_baseurl) + + templates = [] + for template_filename in template_filenames: + template = env.get_template(template_filename) + templates.append(template) - return template_html, template_text + return templates -def _create_mxc_to_http_filter(config): +def _create_mxc_to_http_filter(public_baseurl): def mxc_to_http_filter(value, width, height, resize_method="crop"): if value[0:6] != "mxc://": return "" @@ -636,7 +685,7 @@ def _create_mxc_to_http_filter(config): params = {"width": width, "height": height, "method": resize_method} return "%s_matrix/media/v1/thumbnail/%s?%s%s" % ( - config.public_baseurl, + public_baseurl, serverAndMediaId, urllib.parse.urlencode(params), fragment or "", diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py index a9c64a9c54..f277aeb131 100644 --- a/synapse/push/pusher.py +++ b/synapse/push/pusher.py @@ -35,6 +35,7 @@ except Exception: class PusherFactory(object): def __init__(self, hs): self.hs = hs + self.config = hs.config self.pusher_types = {"http": HttpPusher} @@ -42,12 +43,16 @@ class PusherFactory(object): if hs.config.email_enable_notifs: self.mailers = {} # app_name -> Mailer - templates = load_jinja2_templates( - config=hs.config, - template_html_name=hs.config.email_notif_template_html, - template_text_name=hs.config.email_notif_template_text, + self.notif_template_html, self.notif_template_text = load_jinja2_templates( + self.config.email_template_dir, + [ + self.config.email_notif_template_html, + self.config.email_notif_template_text, + ], + apply_format_ts_filter=True, + apply_mxc_to_http_filter=True, + public_baseurl=self.config.public_baseurl, ) - self.notif_template_html, self.notif_template_text = templates self.pusher_types["email"] = self._create_email_pusher @@ -78,6 +83,6 @@ class PusherFactory(object): if "data" in pusherdict and "brand" in pusherdict["data"]: app_name = pusherdict["data"]["brand"] else: - app_name = self.hs.config.email_app_name + app_name = self.config.email_app_name return app_name diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index c6465c0386..0bd563edc7 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -15,6 +15,7 @@ # limitations under the License. import logging +from typing import Set from pkg_resources import ( DistributionNotFound, @@ -47,9 +48,9 @@ REQUIREMENTS = [ "idna>=2.5", # validating SSL certs for IP addresses requires service_identity 18.1. 
"service_identity>=18.1.0", - # our logcontext handling relies on the ability to cancel inlineCallbacks - # (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7. - "Twisted>=18.7.0", + # Twisted 18.9 introduces some logger improvements that the structured + # logger utilises + "Twisted>=18.9.0", "treq>=15.1", # Twisted has required pyopenssl 16.0 since about Twisted 16.6. "pyopenssl>=16.0.0", @@ -97,7 +98,7 @@ CONDITIONAL_REQUIREMENTS = { "jwt": ["pyjwt>=1.6.4"], } -ALL_OPTIONAL_REQUIREMENTS = set() +ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str] for name, optional_deps in CONDITIONAL_REQUIREMENTS.items(): # Exclude systemd as it's a system-based requirement. @@ -147,7 +148,13 @@ def check_requirements(for_feature=None): ) except DistributionNotFound: deps_needed.append(dependency) - errors.append("Needed %s but it was not installed" % (dependency,)) + if for_feature: + errors.append( + "Needed %s for the '%s' feature but it was not installed" + % (dependency, for_feature) + ) + else: + errors.append("Needed %s but it was not installed" % (dependency,)) if not for_feature: # Check the optional dependencies are up to date. We allow them to not be @@ -168,8 +175,8 @@ def check_requirements(for_feature=None): pass if deps_needed: - for e in errors: - logging.error(e) + for err in errors: + logging.error(err) raise DependencyException(deps_needed) diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 2e0594e581..03560c1f0e 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -28,6 +28,11 @@ from synapse.api.errors import ( RequestSendFailed, SynapseError, ) +from synapse.logging.opentracing import ( + inject_active_span_byte_dict, + trace, + trace_servlet, +) from synapse.util.caches.response_cache import ResponseCache from synapse.util.stringutils import random_string @@ -128,6 +133,7 @@ class ReplicationEndpoint(object): client = hs.get_simple_http_client() + @trace(opname="outgoing_replication_request") @defer.inlineCallbacks def send_request(**kwargs): data = yield cls._serialize_payload(**kwargs) @@ -165,8 +171,10 @@ class ReplicationEndpoint(object): # have a good idea that the request has either succeeded or failed on # the master, and so whether we should clean up or not. while True: + headers = {} + inject_active_span_byte_dict(headers, None, check_destination=False) try: - result = yield request_func(uri, data) + result = yield request_func(uri, data, headers=headers) break except CodeMessageException as e: if e.code != 504 or not cls.RETRY_ON_TIMEOUT: @@ -205,7 +213,12 @@ class ReplicationEndpoint(object): args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args) pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args)) - http_server.register_paths(method, [pattern], handler, self.__class__.__name__) + handler = trace_servlet(self.__class__.__name__, extract_context=True)(handler) + # We don't let register paths trace this servlet using the default tracing + # options because we wish to extract the context explicitly. + http_server.register_paths( + method, [pattern], handler, self.__class__.__name__, trace=False + ) def _cached_handler(self, request, txn_id, **kwargs): """Called on new incoming requests when caching is enabled. 
Checks diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index fed4f08820..2f16955954 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -113,7 +113,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): event_and_contexts, backfilled ) - return (200, {}) + return 200, {} class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): @@ -156,7 +156,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): result = yield self.registry.on_edu(edu_type, origin, edu_content) - return (200, result) + return 200, result class ReplicationGetQueryRestServlet(ReplicationEndpoint): @@ -204,7 +204,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint): result = yield self.registry.on_query(query_type, args) - return (200, result) + return 200, result class ReplicationCleanRoomRestServlet(ReplicationEndpoint): @@ -238,7 +238,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint): def _handle_request(self, request, room_id): yield self.store.clean_room_for_join(room_id) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index f17d3a2da4..786f5232b2 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -64,7 +64,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint): user_id, device_id, initial_display_name, is_guest ) - return (200, {"device_id": device_id, "access_token": access_token}) + return 200, {"device_id": device_id, "access_token": access_token} def register_servlets(hs, http_server): diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 4217335d88..b9ce3477ad 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -83,7 +83,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint): remote_room_hosts, room_id, user_id, event_content ) - return (200, {}) + return 200, {} class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): @@ -153,7 +153,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): yield self.store.locally_reject_invite(user_id, room_id) ret = {} - return (200, ret) + return 200, ret class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): @@ -202,7 +202,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): else: raise Exception("Unrecognized change: %r", change) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 3341320a87..38260256cf 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -90,7 +90,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint): address=content["address"], ) - return (200, {}) + return 200, {} class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): @@ -106,7 +106,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): self.registration_handler = hs.get_registration_handler() @staticmethod - def _serialize_payload(user_id, auth_result, access_token, bind_email, bind_msisdn): + def _serialize_payload(user_id, auth_result, access_token): """ Args: user_id (str): The user ID that consented @@ -114,17 +114,8 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): registered user. 
access_token (str|None): The access token of the newly logged in device, or None if `inhibit_login` enabled. - bind_email (bool): Whether to bind the email with the identity - server - bind_msisdn (bool): Whether to bind the msisdn with the identity - server """ - return { - "auth_result": auth_result, - "access_token": access_token, - "bind_email": bind_email, - "bind_msisdn": bind_msisdn, - } + return {"auth_result": auth_result, "access_token": access_token} @defer.inlineCallbacks def _handle_request(self, request, user_id): @@ -132,18 +123,12 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): auth_result = content["auth_result"] access_token = content["access_token"] - bind_email = content["bind_email"] - bind_msisdn = content["bind_msisdn"] yield self.registration_handler.post_registration_actions( - user_id=user_id, - auth_result=auth_result, - access_token=access_token, - bind_email=bind_email, - bind_msisdn=bind_msisdn, + user_id=user_id, auth_result=auth_result, access_token=access_token ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index eff7bd7305..adb9b2f7f4 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -117,7 +117,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): requester, event, context, ratelimit=ratelimit, extra_users=extra_users ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index c10b85d2ff..f03111c259 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -158,7 +158,7 @@ class Stream(object): updates, current_token = yield self.get_updates_since(self.last_token) self.last_token = current_token - return (updates, current_token) + return updates, current_token @defer.inlineCallbacks def get_updates_since(self, from_token): @@ -172,14 +172,14 @@ class Stream(object): sent over the replication steam. """ if from_token in ("NOW", "now"): - return ([], self.upto_token) + return [], self.upto_token current_token = self.upto_token from_token = int(from_token) if from_token == current_token: - return ([], current_token) + return [], current_token if self._LIMITED: rows = yield self.update_function( @@ -198,7 +198,7 @@ class Stream(object): if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND: raise Exception("stream %s has fallen behind" % (self.NAME)) - return (updates, current_token) + return updates, current_token def current_token(self): """Gets the current token of the underlying streams. Should be provided diff --git a/synapse/res/templates/password_reset.html b/synapse/res/templates/password_reset.html index 4fa7b36734..a197bf872c 100644 --- a/synapse/res/templates/password_reset.html +++ b/synapse/res/templates/password_reset.html @@ -4,6 +4,6 @@ <a href="{{ link }}">{{ link }}</a> - <p>If this was not you, please disregard this email and contact your server administrator. Thank you.</p> + <p>If this was not you, <strong>do not</strong> click the link above and instead contact your server administrator. 
Thank you.</p> </body> </html> diff --git a/synapse/res/templates/password_reset.txt b/synapse/res/templates/password_reset.txt index f0deff59a7..6aa6527560 100644 --- a/synapse/res/templates/password_reset.txt +++ b/synapse/res/templates/password_reset.txt @@ -3,5 +3,5 @@ was you, please click the link below to confirm resetting your password: {{ link }} -If this was not you, please disregard this email and contact your server -administrator. Thank you. +If this was not you, DO NOT click the link above and instead contact your +server administrator. Thank you. diff --git a/synapse/res/templates/password_reset_failure.html b/synapse/res/templates/password_reset_failure.html index 0b132cf8db..9e3c4446e3 100644 --- a/synapse/res/templates/password_reset_failure.html +++ b/synapse/res/templates/password_reset_failure.html @@ -1,6 +1,8 @@ <html> <head></head> <body> -<p>{{ failure_reason }}. Your password has not been reset.</p> +<p>The request failed for the following reason: {{ failure_reason }}.</p> + +<p>Your password has not been reset.</p> </body> </html> diff --git a/synapse/res/templates/registration.html b/synapse/res/templates/registration.html new file mode 100644 index 0000000000..16730a527f --- /dev/null +++ b/synapse/res/templates/registration.html @@ -0,0 +1,11 @@ +<html> +<body> + <p>You have asked us to register this email with a new Matrix account. If this was you, please click the link below to confirm your email address:</p> + + <a href="{{ link }}">Verify Your Email Address</a> + + <p>If this was not you, you can safely disregard this email.</p> + + <p>Thank you.</p> +</body> +</html> diff --git a/synapse/res/templates/registration.txt b/synapse/res/templates/registration.txt new file mode 100644 index 0000000000..cb4f16a90c --- /dev/null +++ b/synapse/res/templates/registration.txt @@ -0,0 +1,10 @@ +Hello there, + +You have asked us to register this email with a new Matrix account. If this +was you, please click the link below to confirm your email address: + +{{ link }} + +If this was not you, you can safely disregard this email. + +Thank you. diff --git a/synapse/res/templates/registration_failure.html b/synapse/res/templates/registration_failure.html new file mode 100644 index 0000000000..2833d79c37 --- /dev/null +++ b/synapse/res/templates/registration_failure.html @@ -0,0 +1,6 @@ +<html> +<head></head> +<body> +<p>Validation failed for the following reason: {{ failure_reason }}.</p> +</body> +</html> diff --git a/synapse/res/templates/registration_success.html b/synapse/res/templates/registration_success.html new file mode 100644 index 0000000000..fbd6e4018f --- /dev/null +++ b/synapse/res/templates/registration_success.html @@ -0,0 +1,6 @@ +<html> +<head></head> +<body> +<p>Your email has now been validated, please return to your client. 
You may now close this window.</p> +</body> +</html> diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 1d20b96d03..4a1fc2ec2b 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -73,7 +73,7 @@ class ClientRestResource(JsonResource): @staticmethod def register_servlets(client_resource, hs): - versions.register_servlets(client_resource) + versions.register_servlets(hs, client_resource) # Deprecated in r0 initial_sync.register_servlets(hs, client_resource) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 5720cab425..81b6bd8816 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -41,8 +41,10 @@ from synapse.rest.admin._base import ( assert_user_is_admin, historical_admin_path_patterns, ) -from synapse.rest.admin.media import register_servlets_for_media_repo +from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo +from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet +from synapse.rest.admin.users import UserAdminServlet from synapse.types import UserID, create_requester from synapse.util.versionstring import get_version_string @@ -50,7 +52,7 @@ logger = logging.getLogger(__name__) class UsersRestServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)") + PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)$") def __init__(self, hs): self.hs = hs @@ -67,7 +69,7 @@ class UsersRestServlet(RestServlet): ret = yield self.handlers.admin_handler.get_users() - return (200, ret) + return 200, ret class VersionServlet(RestServlet): @@ -118,7 +120,7 @@ class UserRegisterServlet(RestServlet): nonce = self.hs.get_secrets().token_hex(64) self.nonces[nonce] = int(self.reactor.seconds()) - return (200, {"nonce": nonce}) + return 200, {"nonce": nonce} @defer.inlineCallbacks def on_POST(self, request): @@ -210,7 +212,7 @@ class UserRegisterServlet(RestServlet): ) result = yield register._create_registration_details(user_id, body) - return (200, result) + return 200, result class WhoisRestServlet(RestServlet): @@ -235,7 +237,7 @@ class WhoisRestServlet(RestServlet): ret = yield self.handlers.admin_handler.get_whois(target_user) - return (200, ret) + return 200, ret class PurgeHistoryRestServlet(RestServlet): @@ -320,7 +322,7 @@ class PurgeHistoryRestServlet(RestServlet): room_id, token, delete_local_events=delete_local_events ) - return (200, {"purge_id": purge_id}) + return 200, {"purge_id": purge_id} class PurgeHistoryStatusRestServlet(RestServlet): @@ -345,7 +347,7 @@ class PurgeHistoryStatusRestServlet(RestServlet): if purge_status is None: raise NotFoundError("purge id '%s' not found" % purge_id) - return (200, purge_status.asdict()) + return 200, purge_status.asdict() class DeactivateAccountRestServlet(RestServlet): @@ -377,7 +379,7 @@ class DeactivateAccountRestServlet(RestServlet): else: id_server_unbind_result = "no-support" - return (200, {"id_server_unbind_result": id_server_unbind_result}) + return 200, {"id_server_unbind_result": id_server_unbind_result} class ShutdownRoomRestServlet(RestServlet): @@ -547,7 +549,7 @@ class ResetPasswordRestServlet(RestServlet): yield self._set_password_handler.set_password( target_user_id, new_password, requester ) - return (200, {}) + return 200, {} class GetUsersPaginatedRestServlet(RestServlet): @@ -589,7 +591,7 @@ class GetUsersPaginatedRestServlet(RestServlet): logger.info("limit: %s, 
start: %s", limit, start) ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit) - return (200, ret) + return 200, ret @defer.inlineCallbacks def on_POST(self, request, target_user_id): @@ -617,7 +619,7 @@ class GetUsersPaginatedRestServlet(RestServlet): logger.info("limit: %s, start: %s", limit, start) ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit) - return (200, ret) + return 200, ret class SearchUsersRestServlet(RestServlet): @@ -660,7 +662,7 @@ class SearchUsersRestServlet(RestServlet): logger.info("term: %s ", term) ret = yield self.handlers.admin_handler.search_users(term) - return (200, ret) + return 200, ret class DeleteGroupAdminRestServlet(RestServlet): @@ -683,7 +685,7 @@ class DeleteGroupAdminRestServlet(RestServlet): raise SynapseError(400, "Can only delete local groups") yield self.group_server.delete_group(group_id, requester.user.to_string()) - return (200, {}) + return 200, {} class AccountValidityRenewServlet(RestServlet): @@ -714,7 +716,7 @@ class AccountValidityRenewServlet(RestServlet): ) res = {"expiration_ts": expiration_ts} - return (200, res) + return 200, res ######################################################################################## @@ -738,8 +740,10 @@ def register_servlets(hs, http_server): Register all the admin servlets. """ register_servlets_for_client_rest_resource(hs, http_server) + PurgeRoomServlet(hs).register(http_server) SendServerNoticeServlet(hs).register(http_server) VersionServlet(hs).register(http_server) + UserAdminServlet(hs).register(http_server) def register_servlets_for_client_rest_resource(hs, http_server): @@ -757,9 +761,12 @@ def register_servlets_for_client_rest_resource(hs, http_server): DeleteGroupAdminRestServlet(hs).register(http_server) AccountValidityRenewServlet(hs).register(http_server) - # Load the media repo ones if we're using them. + # Load the media repo ones if we're using them. Otherwise load the servlets which + # don't need a media repo (typically readonly admin APIs). if hs.config.can_load_media_repo: register_servlets_for_media_repo(hs, http_server) + else: + ListMediaInRoom(hs).register(http_server) # don't add more things here: new servlets should only be exposed on # /_synapse/admin so should not go here. Instead register them in AdminRestResource. 
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 824df919f2..ed7086d09c 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -49,7 +49,7 @@ class QuarantineMediaInRoom(RestServlet): room_id, requester.user.to_string() ) - return (200, {"num_quarantined": num_quarantined}) + return 200, {"num_quarantined": num_quarantined} class ListMediaInRoom(RestServlet): @@ -60,6 +60,7 @@ class ListMediaInRoom(RestServlet): def __init__(self, hs): self.store = hs.get_datastore() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -70,7 +71,7 @@ class ListMediaInRoom(RestServlet): local_mxcs, remote_mxcs = yield self.store.get_media_mxcs_in_room(room_id) - return (200, {"local": local_mxcs, "remote": remote_mxcs}) + return 200, {"local": local_mxcs, "remote": remote_mxcs} class PurgeMediaCacheRestServlet(RestServlet): @@ -89,7 +90,7 @@ class PurgeMediaCacheRestServlet(RestServlet): ret = yield self.media_repository.delete_old_remote_media(before_ts) - return (200, ret) + return 200, ret def register_servlets_for_media_repo(hs, http_server): diff --git a/synapse/rest/admin/purge_room_servlet.py b/synapse/rest/admin/purge_room_servlet.py new file mode 100644 index 0000000000..f474066542 --- /dev/null +++ b/synapse/rest/admin/purge_room_servlet.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import re + +from synapse.http.servlet import ( + RestServlet, + assert_params_in_dict, + parse_json_object_from_request, +) +from synapse.rest.admin import assert_requester_is_admin + + +class PurgeRoomServlet(RestServlet): + """Servlet which will remove all trace of a room from the database + + POST /_synapse/admin/v1/purge_room + { + "room_id": "!room:id" + } + + returns: + + {} + """ + + PATTERNS = (re.compile("^/_synapse/admin/v1/purge_room$"),) + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + self.hs = hs + self.auth = hs.get_auth() + self.pagination_handler = hs.get_pagination_handler() + + async def on_POST(self, request): + await assert_requester_is_admin(self.auth, request) + + body = parse_json_object_from_request(request) + assert_params_in_dict(body, ("room_id",)) + + await self.pagination_handler.purge_room(body["room_id"]) + + return 200, {} diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index 656526fea5..ae2cbe2e0a 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -92,7 +92,7 @@ class SendServerNoticeServlet(RestServlet): event_content=body["content"], ) - return (200, {"event_id": event.event_id}) + return 200, {"event_id": event.event_id} def on_PUT(self, request, txn_id): return self.txns.fetch_or_execute_request( diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py new file mode 100644 index 0000000000..9720a3bab0 --- /dev/null +++ b/synapse/rest/admin/users.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import re + +from twisted.internet import defer + +from synapse.api.errors import SynapseError +from synapse.http.servlet import ( + RestServlet, + assert_params_in_dict, + parse_json_object_from_request, +) +from synapse.rest.admin import assert_requester_is_admin, assert_user_is_admin +from synapse.types import UserID + + +class UserAdminServlet(RestServlet): + """ + Get or set whether or not a user is a server administrator. + + Note that only local users can be server administrators, and that an + administrator may not demote themselves. + + Only server administrators can use this API. 
+ + Examples: + * Get + GET /_synapse/admin/v1/users/@nonadmin:example.com/admin + response on success: + { + "admin": false + } + * Set + PUT /_synapse/admin/v1/users/@reivilibre:librepush.net/admin + request body: + { + "admin": true + } + response on success: + {} + """ + + PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P<user_id>@[^/]*)/admin$"),) + + def __init__(self, hs): + self.hs = hs + self.auth = hs.get_auth() + self.handlers = hs.get_handlers() + + @defer.inlineCallbacks + def on_GET(self, request, user_id): + yield assert_requester_is_admin(self.auth, request) + + target_user = UserID.from_string(user_id) + + if not self.hs.is_mine(target_user): + raise SynapseError(400, "Only local users can be admins of this homeserver") + + is_admin = yield self.handlers.admin_handler.get_user_server_admin(target_user) + is_admin = bool(is_admin) + + return 200, {"admin": is_admin} + + @defer.inlineCallbacks + def on_PUT(self, request, user_id): + requester = yield self.auth.get_user_by_req(request) + yield assert_user_is_admin(self.auth, requester.user) + auth_user = requester.user + + target_user = UserID.from_string(user_id) + + body = parse_json_object_from_request(request) + + assert_params_in_dict(body, ["admin"]) + + if not self.hs.is_mine(target_user): + raise SynapseError(400, "Only local users can be admins of this homeserver") + + set_admin_to = bool(body["admin"]) + + if target_user == auth_user and not set_admin_to: + raise SynapseError(400, "You may not demote yourself.") + + yield self.handlers.admin_handler.set_user_server_admin( + target_user, set_admin_to + ) + + return 200, {} diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 4284738021..4ea3666874 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -54,7 +54,7 @@ class ClientDirectoryServer(RestServlet): dir_handler = self.handlers.directory_handler res = yield dir_handler.get_association(room_alias) - return (200, res) + return 200, res @defer.inlineCallbacks def on_PUT(self, request, room_alias): @@ -87,7 +87,7 @@ class ClientDirectoryServer(RestServlet): requester, room_alias, room_id, servers ) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_DELETE(self, request, room_alias): @@ -102,7 +102,7 @@ class ClientDirectoryServer(RestServlet): service.url, room_alias.to_string(), ) - return (200, {}) + return 200, {} except InvalidClientCredentialsError: # fallback to default user behaviour if they aren't an AS pass @@ -118,7 +118,7 @@ class ClientDirectoryServer(RestServlet): "User %s deleted alias %s", user.to_string(), room_alias.to_string() ) - return (200, {}) + return 200, {} class ClientDirectoryListServer(RestServlet): @@ -136,7 +136,7 @@ class ClientDirectoryListServer(RestServlet): if room is None: raise NotFoundError("Unknown room") - return (200, {"visibility": "public" if room["is_public"] else "private"}) + return 200, {"visibility": "public" if room["is_public"] else "private"} @defer.inlineCallbacks def on_PUT(self, request, room_id): @@ -149,7 +149,7 @@ class ClientDirectoryListServer(RestServlet): requester, room_id, visibility ) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_DELETE(self, request, room_id): @@ -159,7 +159,7 @@ class ClientDirectoryListServer(RestServlet): requester, room_id, "private" ) - return (200, {}) + return 200, {} class ClientAppserviceDirectoryListServer(RestServlet): @@ -193,4 +193,4 @@ class ClientAppserviceDirectoryListServer(RestServlet): 
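Both new admin servlets registered in admin/__init__.py above (PurgeRoomServlet and UserAdminServlet) are plain HTTP endpoints and can be exercised directly. A minimal usage sketch, assuming a homeserver at http://localhost:8008 and an admin user's access token in ADMIN_TOKEN (both placeholders, not part of this patch):

import requests

BASE = "http://localhost:8008"                      # placeholder homeserver URL
HEADERS = {"Authorization": "Bearer ADMIN_TOKEN"}   # placeholder admin access token

# Remove all trace of a room from the database (per the PurgeRoomServlet docstring)
r = requests.post(
    BASE + "/_synapse/admin/v1/purge_room",
    headers=HEADERS,
    json={"room_id": "!someroom:example.com"},
)
print(r.status_code, r.json())  # expect 200, {}

# Check whether a local user is a server admin
r = requests.get(
    BASE + "/_synapse/admin/v1/users/@alice:example.com/admin", headers=HEADERS
)
print(r.json())  # e.g. {"admin": false}, per the UserAdminServlet docstring

# Promote them; note the servlet rejects an admin demoting themselves with a 400
r = requests.put(
    BASE + "/_synapse/admin/v1/users/@alice:example.com/admin",
    headers=HEADERS,
    json={"admin": True},
)
print(r.status_code, r.json())  # expect 200, {}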
requester.app_service.id, network_id, room_id, visibility ) - return (200, {}) + return 200, {} diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 53ebed2203..6651b4cf07 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -67,10 +67,10 @@ class EventStreamRestServlet(RestServlet): is_guest=is_guest, ) - return (200, chunk) + return 200, chunk def on_OPTIONS(self, request): - return (200, {}) + return 200, {} # TODO: Unit test gets, with and without auth, with different kinds of events. @@ -91,9 +91,9 @@ class EventRestServlet(RestServlet): time_now = self.clock.time_msec() if event: event = yield self._event_serializer.serialize_event(event, time_now) - return (200, event) + return 200, event else: - return (404, "Event not found.") + return 404, "Event not found." def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 70b8478e90..2da3cd7511 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -42,7 +42,7 @@ class InitialSyncRestServlet(RestServlet): include_archived=include_archived, ) - return (200, content) + return 200, content def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index eeaa72b205..9cddbc752a 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -122,10 +122,10 @@ class LoginRestServlet(RestServlet): ({"type": t} for t in self.auth_handler.get_supported_login_types()) ) - return (200, {"flows": flows}) + return 200, {"flows": flows} def on_OPTIONS(self, request): - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_POST(self, request): @@ -153,7 +153,7 @@ class LoginRestServlet(RestServlet): well_known_data = self._well_known_builder.get_well_known() if well_known_data: result["well_known"] = well_known_data - return (200, result) + return 200, result @defer.inlineCallbacks def _do_other_login(self, login_submission): diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 2769f3a189..4785a34d75 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -33,7 +33,7 @@ class LogoutRestServlet(RestServlet): self._device_handler = hs.get_device_handler() def on_OPTIONS(self, request): - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_POST(self, request): @@ -49,7 +49,7 @@ class LogoutRestServlet(RestServlet): requester.user.to_string(), requester.device_id ) - return (200, {}) + return 200, {} class LogoutAllRestServlet(RestServlet): @@ -62,7 +62,7 @@ class LogoutAllRestServlet(RestServlet): self._device_handler = hs.get_device_handler() def on_OPTIONS(self, request): - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_POST(self, request): @@ -75,7 +75,7 @@ class LogoutAllRestServlet(RestServlet): # .. and then delete any access tokens which weren't associated with # devices. 
yield self._auth_handler.delete_access_tokens_for_user(user_id) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 1eb1068c98..0153525cef 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -56,7 +56,7 @@ class PresenceStatusRestServlet(RestServlet): state = yield self.presence_handler.get_state(target_user=user) state = format_user_presence_state(state, self.clock.time_msec()) - return (200, state) + return 200, state @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -88,10 +88,10 @@ class PresenceStatusRestServlet(RestServlet): if self.hs.config.use_presence: yield self.presence_handler.set_state(user, state) - return (200, {}) + return 200, {} def on_OPTIONS(self, request): - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index 2657ae45bb..bbce2e2b71 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -48,7 +48,7 @@ class ProfileDisplaynameRestServlet(RestServlet): if displayname is not None: ret["displayname"] = displayname - return (200, ret) + return 200, ret @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -61,14 +61,14 @@ class ProfileDisplaynameRestServlet(RestServlet): try: new_name = content["displayname"] except Exception: - return (400, "Unable to parse name") + return 400, "Unable to parse name" yield self.profile_handler.set_displayname(user, requester, new_name, is_admin) - return (200, {}) + return 200, {} def on_OPTIONS(self, request, user_id): - return (200, {}) + return 200, {} class ProfileAvatarURLRestServlet(RestServlet): @@ -98,7 +98,7 @@ class ProfileAvatarURLRestServlet(RestServlet): if avatar_url is not None: ret["avatar_url"] = avatar_url - return (200, ret) + return 200, ret @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -110,14 +110,14 @@ class ProfileAvatarURLRestServlet(RestServlet): try: new_name = content["avatar_url"] except Exception: - return (400, "Unable to parse name") + return 400, "Unable to parse name" yield self.profile_handler.set_avatar_url(user, requester, new_name, is_admin) - return (200, {}) + return 200, {} def on_OPTIONS(self, request, user_id): - return (200, {}) + return 200, {} class ProfileRestServlet(RestServlet): @@ -150,7 +150,7 @@ class ProfileRestServlet(RestServlet): if avatar_url is not None: ret["avatar_url"] = avatar_url - return (200, ret) + return 200, ret def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index c3ae8b98a8..9f8c3d09e3 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -69,7 +69,7 @@ class PushRuleRestServlet(RestServlet): if "attr" in spec: yield self.set_rule_attr(user_id, spec, content) self.notify_user(user_id) - return (200, {}) + return 200, {} if spec["rule_id"].startswith("."): # Rule ids starting with '.' are reserved for server default rules. 
@@ -106,7 +106,7 @@ class PushRuleRestServlet(RestServlet): except RuleNotFoundException as e: raise SynapseError(400, str(e)) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_DELETE(self, request, path): @@ -123,7 +123,7 @@ class PushRuleRestServlet(RestServlet): try: yield self.store.delete_push_rule(user_id, namespaced_rule_id) self.notify_user(user_id) - return (200, {}) + return 200, {} except StoreError as e: if e.code == 404: raise NotFoundError() @@ -151,10 +151,10 @@ class PushRuleRestServlet(RestServlet): ) if path[0] == "": - return (200, rules) + return 200, rules elif path[0] == "global": result = _filter_ruleset_with_path(rules["global"], path[1:]) - return (200, result) + return 200, result else: raise UnrecognizedRequestError() diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index ebc3dec516..41660682d9 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -62,7 +62,7 @@ class PushersRestServlet(RestServlet): if k not in allowed_keys: del p[k] - return (200, {"pushers": pushers}) + return 200, {"pushers": pushers} def on_OPTIONS(self, _): return 200, {} @@ -94,7 +94,7 @@ class PushersSetRestServlet(RestServlet): yield self.pusher_pool.remove_pusher( content["app_id"], content["pushkey"], user_id=user.to_string() ) - return (200, {}) + return 200, {} assert_params_in_dict( content, @@ -143,7 +143,7 @@ class PushersSetRestServlet(RestServlet): self.notifier.on_new_replication_data() - return (200, {}) + return 200, {} def on_OPTIONS(self, _): return 200, {} diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 4b2344e696..a6a7b3b57e 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -91,14 +91,14 @@ class RoomCreateRestServlet(TransactionRestServlet): requester, self.get_room_config(request) ) - return (200, info) + return 200, info def get_room_config(self, request): user_supplied_config = parse_json_object_from_request(request) return user_supplied_config def on_OPTIONS(self, request): - return (200, {}) + return 200, {} # TODO: Needs unit testing for generic events @@ -173,9 +173,9 @@ class RoomStateEventRestServlet(TransactionRestServlet): if format == "event": event = format_event_for_client_v2(data.get_dict()) - return (200, event) + return 200, event elif format == "content": - return (200, data.get_dict()["content"]) + return 200, data.get_dict()["content"] @defer.inlineCallbacks def on_PUT(self, request, room_id, event_type, state_key, txn_id=None): @@ -210,7 +210,7 @@ class RoomStateEventRestServlet(TransactionRestServlet): ret = {} if event: ret = {"event_id": event.event_id} - return (200, ret) + return 200, ret # TODO: Needs unit testing for generic events + feedback @@ -244,10 +244,10 @@ class RoomSendEventRestServlet(TransactionRestServlet): requester, event_dict, txn_id=txn_id ) - return (200, {"event_id": event.event_id}) + return 200, {"event_id": event.event_id} def on_GET(self, request, room_id, event_type, txn_id): - return (200, "Not implemented") + return 200, "Not implemented" def on_PUT(self, request, room_id, event_type, txn_id): return self.txns.fetch_or_execute_request( @@ -307,7 +307,7 @@ class JoinRoomAliasServlet(TransactionRestServlet): third_party_signed=content.get("third_party_signed", None), ) - return (200, {"room_id": room_id}) + return 200, {"room_id": room_id} def on_PUT(self, request, room_identifier, txn_id): return self.txns.fetch_or_execute_request( @@ -360,7 +360,7 @@ class 
PublicRoomListRestServlet(TransactionRestServlet): limit=limit, since_token=since_token ) - return (200, data) + return 200, data @defer.inlineCallbacks def on_POST(self, request): @@ -405,7 +405,7 @@ class PublicRoomListRestServlet(TransactionRestServlet): network_tuple=network_tuple, ) - return (200, data) + return 200, data # TODO: Needs unit testing @@ -456,7 +456,7 @@ class RoomMemberListRestServlet(RestServlet): continue chunk.append(event) - return (200, {"chunk": chunk}) + return 200, {"chunk": chunk} # deprecated in favour of /members?membership=join? @@ -477,7 +477,7 @@ class JoinedRoomMemberListRestServlet(RestServlet): requester, room_id ) - return (200, {"joined": users_with_profile}) + return 200, {"joined": users_with_profile} # TODO: Needs better unit testing @@ -510,7 +510,7 @@ class RoomMessageListRestServlet(RestServlet): event_filter=event_filter, ) - return (200, msgs) + return 200, msgs # TODO: Needs unit testing @@ -531,7 +531,7 @@ class RoomStateRestServlet(RestServlet): user_id=requester.user.to_string(), is_guest=requester.is_guest, ) - return (200, events) + return 200, events # TODO: Needs unit testing @@ -550,7 +550,7 @@ class RoomInitialSyncRestServlet(RestServlet): content = yield self.initial_sync_handler.room_initial_sync( room_id=room_id, requester=requester, pagin_config=pagination_config ) - return (200, content) + return 200, content class RoomEventServlet(RestServlet): @@ -581,7 +581,7 @@ class RoomEventServlet(RestServlet): time_now = self.clock.time_msec() if event: event = yield self._event_serializer.serialize_event(event, time_now) - return (200, event) + return 200, event return SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) @@ -633,7 +633,7 @@ class RoomEventContextServlet(RestServlet): results["state"], time_now ) - return (200, results) + return 200, results class RoomForgetRestServlet(TransactionRestServlet): @@ -652,7 +652,7 @@ class RoomForgetRestServlet(TransactionRestServlet): yield self.room_member_handler.forget(user=requester.user, room_id=room_id) - return (200, {}) + return 200, {} def on_PUT(self, request, room_id, txn_id): return self.txns.fetch_or_execute_request( @@ -701,9 +701,9 @@ class RoomMembershipRestServlet(TransactionRestServlet): content["id_server"], requester, txn_id, + content.get("id_access_token"), ) - return (200, {}) - return + return 200, {} target = requester.user if membership_action in ["invite", "ban", "unban", "kick"]: @@ -729,7 +729,7 @@ class RoomMembershipRestServlet(TransactionRestServlet): if membership_action == "join": return_value["room_id"] = room_id - return (200, return_value) + return 200, return_value def _has_3pid_invite_keys(self, content): for key in {"id_server", "medium", "address"}: @@ -771,7 +771,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): txn_id=txn_id, ) - return (200, {"event_id": event.event_id}) + return 200, {"event_id": event.event_id} def on_PUT(self, request, room_id, event_id, txn_id): return self.txns.fetch_or_execute_request( @@ -816,7 +816,7 @@ class RoomTypingRestServlet(RestServlet): target_user=target_user, auth_user=requester.user, room_id=room_id ) - return (200, {}) + return 200, {} class SearchRestServlet(RestServlet): @@ -838,7 +838,7 @@ class SearchRestServlet(RestServlet): requester.user, content, batch ) - return (200, results) + return 200, results class JoinedRoomsRestServlet(RestServlet): @@ -854,7 +854,7 @@ class JoinedRoomsRestServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=True) 
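One behavioural change in room.py above, beyond the return-style cleanup, is in RoomMembershipRestServlet: for third-party-ID invites it now also forwards content.get("id_access_token") to the handler, alongside the id_server/medium/address fields already checked by _has_3pid_invite_keys. A sketch of the invite body a client would send under this scheme (values are placeholders; the exact identity-server semantics are defined by the Matrix spec rather than this patch):

third_party_invite = {
    "id_server": "vector.im",          # identity server performing the 3PID lookup (placeholder)
    "id_access_token": "ident_token",  # access token for that identity server (newly forwarded here)
    "medium": "email",
    "address": "friend@example.com",
}
# A client would POST this as the body of the room invite request;
# the servlet reads each field out of `content` as shown in the hunk above.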
room_ids = yield self.store.get_rooms_for_user(requester.user.to_string()) - return (200, {"joined_rooms": list(room_ids)}) + return 200, {"joined_rooms": list(room_ids)} def register_txn_path(servlet, regex_string, http_server, with_get=False): diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 497cddf8b8..2afdbb89e5 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -60,7 +60,7 @@ class VoipRestServlet(RestServlet): password = turnPassword else: - return (200, {}) + return 200, {} return ( 200, @@ -73,7 +73,7 @@ class VoipRestServlet(RestServlet): ) def on_OPTIONS(self, request): - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py index e3d59ac3ac..8250ae0ae1 100644 --- a/synapse/rest/client/v2_alpha/_base.py +++ b/synapse/rest/client/v2_alpha/_base.py @@ -37,6 +37,7 @@ def client_patterns(path_regex, releases=(0,), unstable=True, v1=False): SRE_Pattern """ patterns = [] + if unstable: unstable_prefix = CLIENT_API_PREFIX + "/unstable" patterns.append(re.compile("^" + unstable_prefix + path_regex)) @@ -46,6 +47,7 @@ def client_patterns(path_regex, releases=(0,), unstable=True, v1=False): for release in releases: new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,) patterns.append(re.compile("^" + new_prefix + path_regex)) + return patterns diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 7ac456812a..2ea515d2f6 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -18,12 +18,11 @@ import logging from six.moves import http_client -import jinja2 - from twisted.internet import defer from synapse.api.constants import LoginType from synapse.api.errors import Codes, SynapseError, ThreepidValidationError +from synapse.config.emailconfig import ThreepidBehaviour from synapse.http.server import finish_request from synapse.http.servlet import ( RestServlet, @@ -31,8 +30,8 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.push.mailer import Mailer, load_jinja2_templates from synapse.util.msisdn import phone_number_to_msisdn -from synapse.util.stringutils import random_string from synapse.util.threepids import check_3pid_allowed from ._base import client_patterns, interactive_auth_handler @@ -50,25 +49,28 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): self.config = hs.config self.identity_handler = hs.get_handlers().identity_handler - if self.config.email_password_reset_behaviour == "local": - from synapse.push.mailer import Mailer, load_jinja2_templates - - templates = load_jinja2_templates( - config=hs.config, - template_html_name=hs.config.email_password_reset_template_html, - template_text_name=hs.config.email_password_reset_template_text, + if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + template_html, template_text = load_jinja2_templates( + self.config.email_template_dir, + [ + self.config.email_password_reset_template_html, + self.config.email_password_reset_template_text, + ], + apply_format_ts_filter=True, + apply_mxc_to_http_filter=True, + public_baseurl=self.config.public_baseurl, ) self.mailer = Mailer( hs=self.hs, app_name=self.config.email_app_name, - template_html=templates[0], - template_text=templates[1], + template_html=template_html, + template_text=template_text, ) @defer.inlineCallbacks def on_POST(self, request): - if 
self.config.email_password_reset_behaviour == "off": - if self.config.password_resets_were_disabled_due_to_email_config: + if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.local_threepid_handling_disabled_due_to_email_config: logger.warn( "User password resets have been disabled due to lack of email config" ) @@ -93,99 +95,45 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): Codes.THREEPID_DENIED, ) - existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( + existing_user_id = yield self.hs.get_datastore().get_user_id_by_threepid( "email", email ) - if existingUid is None: + if existing_user_id is None: raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) - if self.config.email_password_reset_behaviour == "remote": - if "id_server" not in body: - raise SynapseError(400, "Missing 'id_server' param in body") + if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + # Have the configured identity server handle the request + if not self.hs.config.account_threepid_delegate_email: + logger.warn( + "No upstream email account_threepid_delegate configured on the server to " + "handle this request" + ) + raise SynapseError( + 400, "Password reset by email is not supported on this homeserver" + ) - # Have the identity server handle the password reset flow ret = yield self.identity_handler.requestEmailToken( - body["id_server"], email, client_secret, send_attempt, next_link + self.hs.config.account_threepid_delegate_email, + email, + client_secret, + send_attempt, + next_link, ) else: # Send password reset emails from Synapse - sid = yield self.send_password_reset( - email, client_secret, send_attempt, next_link + sid = yield self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_password_reset_mail, + next_link, ) # Wrap the session id in a JSON object ret = {"sid": sid} - return (200, ret) - - @defer.inlineCallbacks - def send_password_reset(self, email, client_secret, send_attempt, next_link=None): - """Send a password reset email - - Args: - email (str): The user's email address - client_secret (str): The provided client secret - send_attempt (int): Which send attempt this is - - Returns: - The new session_id upon success - - Raises: - SynapseError is an error occurred when sending the email - """ - # Check that this email/client_secret/send_attempt combo is new or - # greater than what we've seen previously - session = yield self.datastore.get_threepid_validation_session( - "email", client_secret, address=email, validated=False - ) - - # Check to see if a session already exists and that it is not yet - # marked as validated - if session and session.get("validated_at") is None: - session_id = session["session_id"] - last_send_attempt = session["last_send_attempt"] - - # Check that the send_attempt is higher than previous attempts - if send_attempt <= last_send_attempt: - # If not, just return a success without sending an email - return session_id - else: - # An non-validated session does not exist yet. 
- # Generate a session id - session_id = random_string(16) - - # Generate a new validation token - token = random_string(32) - - # Send the mail with the link containing the token, client_secret - # and session_id - try: - yield self.mailer.send_password_reset_mail( - email, token, client_secret, session_id - ) - except Exception: - logger.exception("Error sending a password reset email to %s", email) - raise SynapseError( - 500, "An error was encountered when sending the password reset email" - ) - - token_expires = ( - self.hs.clock.time_msec() + self.config.email_validation_token_lifetime - ) - - yield self.datastore.start_or_continue_validation_session( - "email", - email, - session_id, - client_secret, - send_attempt, - next_link, - token, - token_expires, - ) - - return session_id + return 200, ret class MsisdnPasswordRequestTokenRestServlet(RestServlet): @@ -202,11 +150,15 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): body = parse_json_object_from_request(request) assert_params_in_dict( - body, - ["id_server", "client_secret", "country", "phone_number", "send_attempt"], + body, ["client_secret", "country", "phone_number", "send_attempt"] ) + client_secret = body["client_secret"] + country = body["country"] + phone_number = body["phone_number"] + send_attempt = body["send_attempt"] + next_link = body.get("next_link") # Optional param - msisdn = phone_number_to_msisdn(body["country"], body["phone_number"]) + msisdn = phone_number_to_msisdn(country, phone_number) if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( @@ -215,13 +167,33 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): Codes.THREEPID_DENIED, ) - existingUid = yield self.datastore.get_user_id_by_threepid("msisdn", msisdn) + existing_user_id = yield self.datastore.get_user_id_by_threepid( + "msisdn", msisdn + ) - if existingUid is None: + if existing_user_id is None: raise SynapseError(400, "MSISDN not found", Codes.THREEPID_NOT_FOUND) - ret = yield self.identity_handler.requestMsisdnToken(**body) - return (200, ret) + if not self.hs.config.account_threepid_delegate_msisdn: + logger.warn( + "No upstream msisdn account_threepid_delegate configured on the server to " + "handle this request" + ) + raise SynapseError( + 400, + "Password reset by phone number is not supported on this homeserver", + ) + + ret = yield self.identity_handler.requestMsisdnToken( + self.hs.config.account_threepid_delegate_msisdn, + country, + phone_number, + client_secret, + send_attempt, + next_link, + ) + + return 200, ret class PasswordResetSubmitTokenServlet(RestServlet): @@ -241,31 +213,32 @@ class PasswordResetSubmitTokenServlet(RestServlet): self.auth = hs.get_auth() self.config = hs.config self.clock = hs.get_clock() - self.datastore = hs.get_datastore() + self.store = hs.get_datastore() @defer.inlineCallbacks def on_GET(self, request, medium): + # We currently only handle threepid token submissions for email if medium != "email": raise SynapseError( 400, "This medium is currently not supported for password resets" ) - if self.config.email_password_reset_behaviour == "off": - if self.config.password_resets_were_disabled_due_to_email_config: + if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.local_threepid_handling_disabled_due_to_email_config: logger.warn( - "User password resets have been disabled due to lack of email config" + "Password reset emails have been disabled due to lack of an email config" ) raise SynapseError( - 400, "Email-based password resets have 
been disabled on this server" + 400, "Email-based password resets are disabled on this server" ) - sid = parse_string(request, "sid") - client_secret = parse_string(request, "client_secret") - token = parse_string(request, "token") + sid = parse_string(request, "sid", required=True) + client_secret = parse_string(request, "client_secret", required=True) + token = parse_string(request, "token", required=True) - # Attempt to validate a 3PID sesssion + # Attempt to validate a 3PID session try: # Mark the session as valid - next_link = yield self.datastore.validate_threepid_session( + next_link = yield self.store.validate_threepid_session( sid, client_secret, token, self.clock.time_msec() ) @@ -282,38 +255,22 @@ class PasswordResetSubmitTokenServlet(RestServlet): return None # Otherwise show the success template - html = self.config.email_password_reset_success_html_content + html = self.config.email_password_reset_template_success_html request.setResponseCode(200) except ThreepidValidationError as e: + request.setResponseCode(e.code) + # Show a failure page with a reason - html = self.load_jinja2_template( + html_template, = load_jinja2_templates( self.config.email_template_dir, - self.config.email_password_reset_failure_template, - template_vars={"failure_reason": e.msg}, + [self.config.email_password_reset_template_failure_html], ) - request.setResponseCode(e.code) + + template_vars = {"failure_reason": e.msg} + html = html_template.render(**template_vars) request.write(html.encode("utf-8")) finish_request(request) - return None - - def load_jinja2_template(self, template_dir, template_filename, template_vars): - """Loads a jinja2 template with variables to insert - - Args: - template_dir (str): The directory where templates are stored - template_filename (str): The name of the template in the template_dir - template_vars (Dict): Dictionary of keys in the template - alongside their values to insert - - Returns: - str containing the contents of the rendered template - """ - loader = jinja2.FileSystemLoader(template_dir) - env = jinja2.Environment(loader=loader) - - template = env.get_template(template_filename) - return template.render(**template_vars) @defer.inlineCallbacks def on_POST(self, request, medium): @@ -325,12 +282,12 @@ class PasswordResetSubmitTokenServlet(RestServlet): body = parse_json_object_from_request(request) assert_params_in_dict(body, ["sid", "client_secret", "token"]) - valid, _ = yield self.datastore.validate_threepid_validation_token( + valid, _ = yield self.store.validate_threepid_session( body["sid"], body["client_secret"], body["token"], self.clock.time_msec() ) response_code = 200 if valid else 400 - return (response_code, {"success": valid}) + return response_code, {"success": valid} class PasswordRestServlet(RestServlet): @@ -371,7 +328,6 @@ class PasswordRestServlet(RestServlet): [[LoginType.EMAIL_IDENTITY], [LoginType.MSISDN]], body, self.hs.get_ip_from_request(request), - password_servlet=True, ) if LoginType.EMAIL_IDENTITY in result: @@ -399,7 +355,7 @@ class PasswordRestServlet(RestServlet): yield self._set_password_handler.set_password(user_id, new_password, requester) - return (200, {}) + return 200, {} def on_OPTIONS(self, _): return 200, {} @@ -434,7 +390,7 @@ class DeactivateAccountRestServlet(RestServlet): yield self._deactivate_account_handler.deactivate_account( requester.user.to_string(), erase ) - return (200, {}) + return 200, {} yield self.auth_handler.validate_user_via_ui_auth( requester, body, self.hs.get_ip_from_request(request) @@ -447,17 
+403,18 @@ class DeactivateAccountRestServlet(RestServlet): else: id_server_unbind_result = "no-support" - return (200, {"id_server_unbind_result": id_server_unbind_result}) + return 200, {"id_server_unbind_result": id_server_unbind_result} class EmailThreepidRequestTokenRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/email/requestToken$") def __init__(self, hs): - self.hs = hs super(EmailThreepidRequestTokenRestServlet, self).__init__() + self.hs = hs + self.config = hs.config self.identity_handler = hs.get_handlers().identity_handler - self.datastore = self.hs.get_datastore() + self.store = self.hs.get_datastore() @defer.inlineCallbacks def on_POST(self, request): @@ -465,23 +422,30 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): assert_params_in_dict( body, ["id_server", "client_secret", "email", "send_attempt"] ) + id_server = "https://" + body["id_server"] # Assume https + client_secret = body["client_secret"] + email = body["email"] + send_attempt = body["send_attempt"] + next_link = body.get("next_link") # Optional param - if not check_3pid_allowed(self.hs, "email", body["email"]): + if not check_3pid_allowed(self.hs, "email", email): raise SynapseError( 403, "Your email domain is not authorized on this server", Codes.THREEPID_DENIED, ) - existingUid = yield self.datastore.get_user_id_by_threepid( + existing_user_id = yield self.store.get_user_id_by_threepid( "email", body["email"] ) - if existingUid is not None: + if existing_user_id is not None: raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - ret = yield self.identity_handler.requestEmailToken(**body) - return (200, ret) + ret = yield self.identity_handler.requestEmailToken( + id_server, email, client_secret, send_attempt, next_link + ) + return 200, ret class MsisdnThreepidRequestTokenRestServlet(RestServlet): @@ -490,8 +454,8 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): def __init__(self, hs): self.hs = hs super(MsisdnThreepidRequestTokenRestServlet, self).__init__() + self.store = self.hs.get_datastore() self.identity_handler = hs.get_handlers().identity_handler - self.datastore = self.hs.get_datastore() @defer.inlineCallbacks def on_POST(self, request): @@ -500,8 +464,14 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): body, ["id_server", "client_secret", "country", "phone_number", "send_attempt"], ) + id_server = "https://" + body["id_server"] # Assume https + client_secret = body["client_secret"] + country = body["country"] + phone_number = body["phone_number"] + send_attempt = body["send_attempt"] + next_link = body.get("next_link") # Optional param - msisdn = phone_number_to_msisdn(body["country"], body["phone_number"]) + msisdn = phone_number_to_msisdn(country, phone_number) if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( @@ -510,13 +480,15 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): Codes.THREEPID_DENIED, ) - existingUid = yield self.datastore.get_user_id_by_threepid("msisdn", msisdn) + existing_user_id = yield self.store.get_user_id_by_threepid("msisdn", msisdn) - if existingUid is not None: + if existing_user_id is not None: raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) - ret = yield self.identity_handler.requestMsisdnToken(**body) - return (200, ret) + ret = yield self.identity_handler.requestMsisdnToken( + id_server, country, phone_number, client_secret, send_attempt, next_link + ) + return 200, ret class ThreepidRestServlet(RestServlet): @@ -536,21 +508,23 @@ 
class ThreepidRestServlet(RestServlet): threepids = yield self.datastore.user_get_threepids(requester.user.to_string()) - return (200, {"threepids": threepids}) + return 200, {"threepids": threepids} @defer.inlineCallbacks def on_POST(self, request): body = parse_json_object_from_request(request) - threePidCreds = body.get("threePidCreds") - threePidCreds = body.get("three_pid_creds", threePidCreds) - if threePidCreds is None: - raise SynapseError(400, "Missing param", Codes.MISSING_PARAM) + threepid_creds = body.get("threePidCreds") or body.get("three_pid_creds") + if threepid_creds is None: + raise SynapseError( + 400, "Missing param three_pid_creds", Codes.MISSING_PARAM + ) requester = yield self.auth.get_user_by_req(request) user_id = requester.user.to_string() - threepid = yield self.identity_handler.threepid_from_creds(threePidCreds) + # Specify None as the identity server to retrieve it from the request body instead + threepid = yield self.identity_handler.threepid_from_creds(None, threepid_creds) if not threepid: raise SynapseError(400, "Failed to auth 3pid", Codes.THREEPID_AUTH_FAILED) @@ -566,9 +540,41 @@ class ThreepidRestServlet(RestServlet): if "bind" in body and body["bind"]: logger.debug("Binding threepid %s to %s", threepid, user_id) - yield self.identity_handler.bind_threepid(threePidCreds, user_id) + yield self.identity_handler.bind_threepid(threepid_creds, user_id) + + return 200, {} + + +class ThreepidUnbindRestServlet(RestServlet): + PATTERNS = client_patterns("/account/3pid/unbind$") + + def __init__(self, hs): + super(ThreepidUnbindRestServlet, self).__init__() + self.hs = hs + self.identity_handler = hs.get_handlers().identity_handler + self.auth = hs.get_auth() + self.datastore = self.hs.get_datastore() + + @defer.inlineCallbacks + def on_POST(self, request): + """Unbind the given 3pid from a specific identity server, or identity servers that are + known to have this 3pid bound + """ + requester = yield self.auth.get_user_by_req(request) + body = parse_json_object_from_request(request) + assert_params_in_dict(body, ["medium", "address"]) + + medium = body.get("medium") + address = body.get("address") + id_server = body.get("id_server") - return (200, {}) + # Attempt to unbind the threepid from an identity server. 
If id_server is None, try to + # unbind from all identity servers this threepid has been added to in the past + result = yield self.identity_handler.try_unbind_threepid( + requester.user.to_string(), + {"address": address, "medium": medium, "id_server": id_server}, + ) + return 200, {"id_server_unbind_result": "success" if result else "no-support"} class ThreepidDeleteRestServlet(RestServlet): @@ -603,7 +609,7 @@ class ThreepidDeleteRestServlet(RestServlet): else: id_server_unbind_result = "no-support" - return (200, {"id_server_unbind_result": id_server_unbind_result}) + return 200, {"id_server_unbind_result": id_server_unbind_result} class WhoamiRestServlet(RestServlet): @@ -617,7 +623,7 @@ class WhoamiRestServlet(RestServlet): def on_GET(self, request): requester = yield self.auth.get_user_by_req(request) - return (200, {"user_id": requester.user.to_string()}) + return 200, {"user_id": requester.user.to_string()} def register_servlets(hs, http_server): @@ -629,5 +635,6 @@ def register_servlets(hs, http_server): EmailThreepidRequestTokenRestServlet(hs).register(http_server) MsisdnThreepidRequestTokenRestServlet(hs).register(http_server) ThreepidRestServlet(hs).register(http_server) + ThreepidUnbindRestServlet(hs).register(http_server) ThreepidDeleteRestServlet(hs).register(http_server) WhoamiRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index 98f2f6f4b5..f0db204ffa 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -55,7 +55,7 @@ class AccountDataServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_GET(self, request, user_id, account_data_type): @@ -70,7 +70,7 @@ class AccountDataServlet(RestServlet): if event is None: raise NotFoundError("Account data not found") - return (200, event) + return 200, event class RoomAccountDataServlet(RestServlet): @@ -112,7 +112,7 @@ class RoomAccountDataServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_GET(self, request, user_id, room_id, account_data_type): @@ -127,7 +127,7 @@ class RoomAccountDataServlet(RestServlet): if event is None: raise NotFoundError("Room account data not found") - return (200, event) + return 200, event def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index a4fa45fe11..acd58af193 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -58,7 +58,7 @@ class CapabilitiesRestServlet(RestServlet): "m.change_password": {"enabled": change_password}, } } - return (200, response) + return 200, response def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 9adf76cc0c..26d0235208 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -48,7 +48,7 @@ class DevicesRestServlet(RestServlet): devices = yield self.device_handler.get_devices_by_user( requester.user.to_string() ) - return (200, {"devices": devices}) + return 200, {"devices": devices} class DeleteDevicesRestServlet(RestServlet): @@ -91,7 +91,7 @@ class DeleteDevicesRestServlet(RestServlet): yield self.device_handler.delete_devices( 
requester.user.to_string(), body["devices"] ) - return (200, {}) + return 200, {} class DeviceRestServlet(RestServlet): @@ -114,7 +114,7 @@ class DeviceRestServlet(RestServlet): device = yield self.device_handler.get_device( requester.user.to_string(), device_id ) - return (200, device) + return 200, device @interactive_auth_handler @defer.inlineCallbacks @@ -137,7 +137,7 @@ class DeviceRestServlet(RestServlet): ) yield self.device_handler.delete_device(requester.user.to_string(), device_id) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_PUT(self, request, device_id): @@ -147,7 +147,7 @@ class DeviceRestServlet(RestServlet): yield self.device_handler.update_device( requester.user.to_string(), device_id, body ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 22be0ee3c5..c6ddf24c8d 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -56,7 +56,7 @@ class GetFilterRestServlet(RestServlet): user_localpart=target_user.localpart, filter_id=filter_id ) - return (200, filter.get_filter_json()) + return 200, filter.get_filter_json() except (KeyError, StoreError): raise SynapseError(400, "No such filter", errcode=Codes.NOT_FOUND) @@ -89,7 +89,7 @@ class CreateFilterRestServlet(RestServlet): user_localpart=target_user.localpart, user_filter=content ) - return (200, {"filter_id": str(filter_id)}) + return 200, {"filter_id": str(filter_id)} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index e629c4256d..999a0fa80c 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -47,7 +47,7 @@ class GroupServlet(RestServlet): group_id, requester_user_id ) - return (200, group_description) + return 200, group_description @defer.inlineCallbacks def on_POST(self, request, group_id): @@ -59,7 +59,7 @@ class GroupServlet(RestServlet): group_id, requester_user_id, content ) - return (200, {}) + return 200, {} class GroupSummaryServlet(RestServlet): @@ -83,7 +83,7 @@ class GroupSummaryServlet(RestServlet): group_id, requester_user_id ) - return (200, get_group_summary) + return 200, get_group_summary class GroupSummaryRoomsCatServlet(RestServlet): @@ -120,7 +120,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): content=content, ) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id, room_id): @@ -131,7 +131,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): group_id, requester_user_id, room_id=room_id, category_id=category_id ) - return (200, resp) + return 200, resp class GroupCategoryServlet(RestServlet): @@ -157,7 +157,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id ) - return (200, category) + return 200, category @defer.inlineCallbacks def on_PUT(self, request, group_id, category_id): @@ -169,7 +169,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id, content=content ) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id): @@ -180,7 +180,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id ) - return (200, resp) + return 200, resp class GroupCategoriesServlet(RestServlet): @@ -204,7 +204,7 @@ class 
GroupCategoriesServlet(RestServlet): group_id, requester_user_id ) - return (200, category) + return 200, category class GroupRoleServlet(RestServlet): @@ -228,7 +228,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id ) - return (200, category) + return 200, category @defer.inlineCallbacks def on_PUT(self, request, group_id, role_id): @@ -240,7 +240,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id, content=content ) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_DELETE(self, request, group_id, role_id): @@ -251,7 +251,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id ) - return (200, resp) + return 200, resp class GroupRolesServlet(RestServlet): @@ -275,7 +275,7 @@ class GroupRolesServlet(RestServlet): group_id, requester_user_id ) - return (200, category) + return 200, category class GroupSummaryUsersRoleServlet(RestServlet): @@ -312,7 +312,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): content=content, ) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_DELETE(self, request, group_id, role_id, user_id): @@ -323,7 +323,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): group_id, requester_user_id, user_id=user_id, role_id=role_id ) - return (200, resp) + return 200, resp class GroupRoomServlet(RestServlet): @@ -347,7 +347,7 @@ class GroupRoomServlet(RestServlet): group_id, requester_user_id ) - return (200, result) + return 200, result class GroupUsersServlet(RestServlet): @@ -371,7 +371,7 @@ class GroupUsersServlet(RestServlet): group_id, requester_user_id ) - return (200, result) + return 200, result class GroupInvitedUsersServlet(RestServlet): @@ -395,7 +395,7 @@ class GroupInvitedUsersServlet(RestServlet): group_id, requester_user_id ) - return (200, result) + return 200, result class GroupSettingJoinPolicyServlet(RestServlet): @@ -420,7 +420,7 @@ class GroupSettingJoinPolicyServlet(RestServlet): group_id, requester_user_id, content ) - return (200, result) + return 200, result class GroupCreateServlet(RestServlet): @@ -450,7 +450,7 @@ class GroupCreateServlet(RestServlet): group_id, requester_user_id, content ) - return (200, result) + return 200, result class GroupAdminRoomsServlet(RestServlet): @@ -477,7 +477,7 @@ class GroupAdminRoomsServlet(RestServlet): group_id, requester_user_id, room_id, content ) - return (200, result) + return 200, result @defer.inlineCallbacks def on_DELETE(self, request, group_id, room_id): @@ -488,7 +488,7 @@ class GroupAdminRoomsServlet(RestServlet): group_id, requester_user_id, room_id ) - return (200, result) + return 200, result class GroupAdminRoomsConfigServlet(RestServlet): @@ -516,7 +516,7 @@ class GroupAdminRoomsConfigServlet(RestServlet): group_id, requester_user_id, room_id, config_key, content ) - return (200, result) + return 200, result class GroupAdminUsersInviteServlet(RestServlet): @@ -546,7 +546,7 @@ class GroupAdminUsersInviteServlet(RestServlet): group_id, user_id, requester_user_id, config ) - return (200, result) + return 200, result class GroupAdminUsersKickServlet(RestServlet): @@ -573,7 +573,7 @@ class GroupAdminUsersKickServlet(RestServlet): group_id, user_id, requester_user_id, content ) - return (200, result) + return 200, result class GroupSelfLeaveServlet(RestServlet): @@ -598,7 +598,7 @@ class GroupSelfLeaveServlet(RestServlet): group_id, requester_user_id, requester_user_id, content ) - return (200, result) + return 200, result class 
GroupSelfJoinServlet(RestServlet): @@ -623,7 +623,7 @@ class GroupSelfJoinServlet(RestServlet): group_id, requester_user_id, content ) - return (200, result) + return 200, result class GroupSelfAcceptInviteServlet(RestServlet): @@ -648,7 +648,7 @@ class GroupSelfAcceptInviteServlet(RestServlet): group_id, requester_user_id, content ) - return (200, result) + return 200, result class GroupSelfUpdatePublicityServlet(RestServlet): @@ -672,7 +672,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet): publicise = content["publicise"] yield self.store.update_group_publicity(group_id, requester_user_id, publicise) - return (200, {}) + return 200, {} class PublicisedGroupsForUserServlet(RestServlet): @@ -694,7 +694,7 @@ class PublicisedGroupsForUserServlet(RestServlet): result = yield self.groups_handler.get_publicised_groups_for_user(user_id) - return (200, result) + return 200, result class PublicisedGroupsForUsersServlet(RestServlet): @@ -719,7 +719,7 @@ class PublicisedGroupsForUsersServlet(RestServlet): result = yield self.groups_handler.bulk_get_publicised_groups(user_ids) - return (200, result) + return 200, result class GroupsForUserServlet(RestServlet): @@ -741,7 +741,7 @@ class GroupsForUserServlet(RestServlet): result = yield self.groups_handler.get_joined_groups(requester_user_id) - return (200, result) + return 200, result def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 6008adec7c..2e680134a0 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -24,6 +24,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.types import StreamToken from ._base import client_patterns @@ -68,6 +69,7 @@ class KeyUploadServlet(RestServlet): self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() + @trace(opname="upload_keys") @defer.inlineCallbacks def on_POST(self, request, device_id): requester = yield self.auth.get_user_by_req(request, allow_guest=True) @@ -78,6 +80,14 @@ class KeyUploadServlet(RestServlet): # passing the device_id here is deprecated; however, we allow it # for now for compatibility with older clients. if requester.device_id is not None and device_id != requester.device_id: + set_tag("error", True) + log_kv( + { + "message": "Client uploading keys for a different device", + "logged_in_id": requester.device_id, + "key_being_uploaded": device_id, + } + ) logger.warning( "Client uploading keys for a different device " "(logged in as %s, uploading for %s)", @@ -95,7 +105,7 @@ class KeyUploadServlet(RestServlet): result = yield self.e2e_keys_handler.upload_keys_for_user( user_id, device_id, body ) - return (200, result) + return 200, result class KeyQueryServlet(RestServlet): @@ -149,7 +159,7 @@ class KeyQueryServlet(RestServlet): timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) result = yield self.e2e_keys_handler.query_devices(body, timeout) - return (200, result) + return 200, result class KeyChangesServlet(RestServlet): @@ -178,10 +188,11 @@ class KeyChangesServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=True) from_token_string = parse_string(request, "from") + set_tag("from", from_token_string) # We want to enforce they do pass us one, but we ignore it and return # changes after the "to" as well as before. 
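The key-management servlets in keys.py above are instrumented with Synapse's opentracing helpers: trace wraps the handler in a span, while set_tag and log_kv attach structured data to the currently active span (falling back to no-ops when tracing is not configured). A minimal sketch of the pattern with an invented handler, using only the helpers imported in the diff:

from synapse.logging.opentracing import log_kv, set_tag, trace

@trace(opname="example_operation")
def handle_key_upload(logged_in_device_id, requested_device_id):
    if logged_in_device_id != requested_device_id:
        # Flag the span as errored and record why, mirroring the servlet above
        set_tag("error", True)
        log_kv(
            {
                "message": "Client uploading keys for a different device",
                "logged_in_id": logged_in_device_id,
                "key_being_uploaded": requested_device_id,
            }
        )
    # ... continue handling the upload ...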
- parse_string(request, "to") + set_tag("to", parse_string(request, "to")) from_token = StreamToken.from_string(from_token_string) @@ -189,7 +200,7 @@ class KeyChangesServlet(RestServlet): results = yield self.device_handler.get_user_ids_changed(user_id, from_token) - return (200, results) + return 200, results class OneTimeKeyServlet(RestServlet): @@ -224,7 +235,7 @@ class OneTimeKeyServlet(RestServlet): timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) result = yield self.e2e_keys_handler.claim_one_time_keys(body, timeout) - return (200, result) + return 200, result def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index d034863a3c..10c1ad5b07 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -88,7 +88,7 @@ class NotificationsServlet(RestServlet): returned_push_actions.append(returned_pa) next_token = str(pa["stream_ordering"]) - return (200, {"notifications": returned_push_actions, "next_token": next_token}) + return 200, {"notifications": returned_push_actions, "next_token": next_token} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index d93d6a9f24..b3bf8567e1 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -59,7 +59,7 @@ class ReadMarkerRestServlet(RestServlet): event_id=read_marker_event_id, ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index 98a97b7059..0dab03d227 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -52,7 +52,7 @@ class ReceiptRestServlet(RestServlet): room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 05ea1459e3..5c7a5f3579 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -16,7 +16,6 @@ import hmac import logging -from hashlib import sha1 from six import string_types @@ -29,16 +28,20 @@ from synapse.api.errors import ( Codes, LimitExceededError, SynapseError, + ThreepidValidationError, UnrecognizedRequestError, ) +from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.config.server import is_threepid_reserved +from synapse.http.server import finish_request from synapse.http.servlet import ( RestServlet, assert_params_in_dict, parse_json_object_from_request, parse_string, ) +from synapse.push.mailer import load_jinja2_templates from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.threepids import check_3pid_allowed @@ -71,31 +74,93 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): super(EmailRegisterRequestTokenRestServlet, self).__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler + self.config = hs.config + + if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + from synapse.push.mailer import Mailer, load_jinja2_templates + + template_html, 
template_text = load_jinja2_templates( + self.config.email_template_dir, + [ + self.config.email_registration_template_html, + self.config.email_registration_template_text, + ], + apply_format_ts_filter=True, + apply_mxc_to_http_filter=True, + public_baseurl=self.config.public_baseurl, + ) + self.mailer = Mailer( + hs=self.hs, + app_name=self.config.email_app_name, + template_html=template_html, + template_text=template_text, + ) @defer.inlineCallbacks def on_POST(self, request): + if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.hs.config.local_threepid_handling_disabled_due_to_email_config: + logger.warn( + "Email registration has been disabled due to lack of email config" + ) + raise SynapseError( + 400, "Email-based registration has been disabled on this server" + ) body = parse_json_object_from_request(request) - assert_params_in_dict( - body, ["id_server", "client_secret", "email", "send_attempt"] - ) + assert_params_in_dict(body, ["client_secret", "email", "send_attempt"]) + + # Extract params from body + client_secret = body["client_secret"] + email = body["email"] + send_attempt = body["send_attempt"] + next_link = body.get("next_link") # Optional param - if not check_3pid_allowed(self.hs, "email", body["email"]): + if not check_3pid_allowed(self.hs, "email", email): raise SynapseError( 403, "Your email domain is not authorized to register on this server", Codes.THREEPID_DENIED, ) - existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( + existing_user_id = yield self.hs.get_datastore().get_user_id_by_threepid( "email", body["email"] ) - if existingUid is not None: + if existing_user_id is not None: raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - ret = yield self.identity_handler.requestEmailToken(**body) - return (200, ret) + if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + if not self.hs.config.account_threepid_delegate_email: + logger.warn( + "No upstream email account_threepid_delegate configured on the server to " + "handle this request" + ) + raise SynapseError( + 400, "Registration by email is not supported on this homeserver" + ) + + ret = yield self.identity_handler.requestEmailToken( + self.hs.config.account_threepid_delegate_email, + email, + client_secret, + send_attempt, + next_link, + ) + else: + # Send registration emails from Synapse + sid = yield self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_registration_mail, + next_link, + ) + + # Wrap the session id in a JSON object + ret = {"sid": sid} + + return 200, ret class MsisdnRegisterRequestTokenRestServlet(RestServlet): @@ -115,11 +180,15 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): body = parse_json_object_from_request(request) assert_params_in_dict( - body, - ["id_server", "client_secret", "country", "phone_number", "send_attempt"], + body, ["client_secret", "country", "phone_number", "send_attempt"] ) + client_secret = body["client_secret"] + country = body["country"] + phone_number = body["phone_number"] + send_attempt = body["send_attempt"] + next_link = body.get("next_link") # Optional param - msisdn = phone_number_to_msisdn(body["country"], body["phone_number"]) + msisdn = phone_number_to_msisdn(country, phone_number) if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( @@ -128,17 +197,112 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): Codes.THREEPID_DENIED, ) - existingUid = yield 
self.hs.get_datastore().get_user_id_by_threepid( + existing_user_id = yield self.hs.get_datastore().get_user_id_by_threepid( "msisdn", msisdn ) - if existingUid is not None: + if existing_user_id is not None: raise SynapseError( 400, "Phone number is already in use", Codes.THREEPID_IN_USE ) - ret = yield self.identity_handler.requestMsisdnToken(**body) - return (200, ret) + if not self.hs.config.account_threepid_delegate_msisdn: + logger.warn( + "No upstream msisdn account_threepid_delegate configured on the server to " + "handle this request" + ) + raise SynapseError( + 400, "Registration by phone number is not supported on this homeserver" + ) + + ret = yield self.identity_handler.requestMsisdnToken( + self.hs.config.account_threepid_delegate_msisdn, + country, + phone_number, + client_secret, + send_attempt, + next_link, + ) + + return 200, ret + + +class RegistrationSubmitTokenServlet(RestServlet): + """Handles registration 3PID validation token submission""" + + PATTERNS = client_patterns( + "/registration/(?P<medium>[^/]*)/submit_token$", releases=(), unstable=True + ) + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(RegistrationSubmitTokenServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() + self.config = hs.config + self.clock = hs.get_clock() + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def on_GET(self, request, medium): + if medium != "email": + raise SynapseError( + 400, "This medium is currently not supported for registration" + ) + if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.local_threepid_handling_disabled_due_to_email_config: + logger.warn( + "User registration via email has been disabled due to lack of email config" + ) + raise SynapseError( + 400, "Email-based registration is disabled on this server" + ) + + sid = parse_string(request, "sid", required=True) + client_secret = parse_string(request, "client_secret", required=True) + token = parse_string(request, "token", required=True) + + # Attempt to validate a 3PID session + try: + # Mark the session as valid + next_link = yield self.store.validate_threepid_session( + sid, client_secret, token, self.clock.time_msec() + ) + + # Perform a 302 redirect if next_link is set + if next_link: + if next_link.startswith("file:///"): + logger.warn( + "Not redirecting to next_link as it is a local file: address" + ) + else: + request.setResponseCode(302) + request.setHeader("Location", next_link) + finish_request(request) + return None + + # Otherwise show the success template + html = self.config.email_registration_template_success_html_content + + request.setResponseCode(200) + except ThreepidValidationError as e: + # Show a failure page with a reason + request.setResponseCode(e.code) + + # Show a failure page with a reason + html_template, = load_jinja2_templates( + self.config.email_template_dir, + [self.config.email_registration_template_failure_html], + ) + + template_vars = {"failure_reason": e.msg} + html = html_template.render(**template_vars) + + request.write(html.encode("utf-8")) + finish_request(request) class UsernameAvailabilityRestServlet(RestServlet): @@ -178,7 +342,7 @@ class UsernameAvailabilityRestServlet(RestServlet): yield self.registration_handler.check_username(username) - return (200, {"available": True}) + return 200, {"available": True} class RegisterRestServlet(RestServlet): @@ -231,7 +395,6 @@ class RegisterRestServlet(RestServlet): if kind == b"guest": ret = yield 
self._do_guest_registration(body, address=client_addr) return ret - return elif kind != b"user": raise UnrecognizedRequestError( "Do not understand membership kind: %s" % (kind,) @@ -239,14 +402,12 @@ class RegisterRestServlet(RestServlet): # we do basic sanity checks here because the auth layer will store these # in sessions. Pull out the username/password provided to us. - desired_password = None if "password" in body: if ( not isinstance(body["password"], string_types) or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") - desired_password = body["password"] desired_username = None if "username" in body: @@ -261,8 +422,8 @@ class RegisterRestServlet(RestServlet): if self.auth.has_access_token(request): appservice = yield self.auth.get_appservice_by_req(request) - # fork off as soon as possible for ASes and shared secret auth which - # have completely different registration flows to normal users + # fork off as soon as possible for ASes which have completely + # different registration flows to normal users # == Application Service Registration == if appservice: @@ -282,11 +443,10 @@ class RegisterRestServlet(RestServlet): result = yield self._do_appservice_registration( desired_username, access_token, body ) - return (200, result) # we throw for non 200 responses - return + return 200, result # we throw for non 200 responses - # for either shared secret or regular registration, downcase the - # provided username before attempting to register it. This should mean + # for regular registration, downcase the provided username before + # attempting to register it. This should mean # that people who try to register with upper-case in their usernames # don't get a nasty surprise. (Note that we treat username # case-insenstively in login, so they are free to carry on imagining @@ -294,16 +454,6 @@ class RegisterRestServlet(RestServlet): if desired_username is not None: desired_username = desired_username.lower() - # == Shared Secret Registration == (e.g. create new user scripts) - if "mac" in body: - # FIXME: Should we really be determining if this is shared secret - # auth based purely on the 'mac' key? 
- result = yield self._do_shared_secret_registration( - desired_username, desired_password, body - ) - return (200, result) # we throw for non 200 responses - return - # == Normal User Registration == (everyone else) if not self.hs.config.enable_registration: raise SynapseError(403, "Registration has been disabled") @@ -453,11 +603,11 @@ class RegisterRestServlet(RestServlet): medium = auth_result[login_type]["medium"] address = auth_result[login_type]["address"] - existingUid = yield self.store.get_user_id_by_threepid( + existing_user_id = yield self.store.get_user_id_by_threepid( medium, address ) - if existingUid is not None: + if existing_user_id is not None: raise SynapseError( 400, "%s is already in use" % medium, @@ -496,11 +646,9 @@ class RegisterRestServlet(RestServlet): user_id=registered_user_id, auth_result=auth_result, access_token=return_dict.get("access_token"), - bind_email=params.get("bind_email"), - bind_msisdn=params.get("bind_msisdn"), ) - return (200, return_dict) + return 200, return_dict def on_OPTIONS(self, _): return 200, {} @@ -513,42 +661,6 @@ class RegisterRestServlet(RestServlet): return (yield self._create_registration_details(user_id, body)) @defer.inlineCallbacks - def _do_shared_secret_registration(self, username, password, body): - if not self.hs.config.registration_shared_secret: - raise SynapseError(400, "Shared secret registration is not enabled") - if not username: - raise SynapseError( - 400, "username must be specified", errcode=Codes.BAD_JSON - ) - - # use the username from the original request rather than the - # downcased one in `username` for the mac calculation - user = body["username"].encode("utf-8") - - # str() because otherwise hmac complains that 'unicode' does not - # have the buffer interface - got_mac = str(body["mac"]) - - # FIXME this is different to the /v1/register endpoint, which - # includes the password and admin flag in the hashed text. Why are - # these different? 
- want_mac = hmac.new( - key=self.hs.config.registration_shared_secret.encode(), - msg=user, - digestmod=sha1, - ).hexdigest() - - if not compare_digest(want_mac, got_mac): - raise SynapseError(403, "HMAC incorrect") - - user_id = yield self.registration_handler.register_user( - localpart=username, password=password - ) - - result = yield self._create_registration_details(user_id, body) - return result - - @defer.inlineCallbacks def _create_registration_details(self, user_id, params): """Complete registration of newly-registered user @@ -603,4 +715,5 @@ def register_servlets(hs, http_server): EmailRegisterRequestTokenRestServlet(hs).register(http_server) MsisdnRegisterRequestTokenRestServlet(hs).register(http_server) UsernameAvailabilityRestServlet(hs).register(http_server) + RegistrationSubmitTokenServlet(hs).register(http_server) RegisterRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 1538b247e5..040b37c504 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -118,7 +118,7 @@ class RelationSendServlet(RestServlet): requester, event_dict=event_dict, txn_id=txn_id ) - return (200, {"event_id": event.event_id}) + return 200, {"event_id": event.event_id} class RelationPaginationServlet(RestServlet): @@ -198,7 +198,7 @@ class RelationPaginationServlet(RestServlet): return_value["chunk"] = events return_value["original_event"] = original_event - return (200, return_value) + return 200, return_value class RelationAggregationPaginationServlet(RestServlet): @@ -270,7 +270,7 @@ class RelationAggregationPaginationServlet(RestServlet): to_token=to_token, ) - return (200, pagination_chunk.to_dict()) + return 200, pagination_chunk.to_dict() class RelationAggregationGroupPaginationServlet(RestServlet): @@ -356,7 +356,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): return_value = result.to_dict() return_value["chunk"] = events - return (200, return_value) + return 200, return_value def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index 3fdd4584a3..e7449864cd 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -72,7 +72,7 @@ class ReportEventRestServlet(RestServlet): received_ts=self.clock.time_msec(), ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 10dec96208..df4f44cd36 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -135,7 +135,7 @@ class RoomKeysServlet(RestServlet): body = {"rooms": {room_id: body}} yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_GET(self, request, room_id, session_id): @@ -218,7 +218,7 @@ class RoomKeysServlet(RestServlet): else: room_keys = room_keys["rooms"][room_id] - return (200, room_keys) + return 200, room_keys @defer.inlineCallbacks def on_DELETE(self, request, room_id, session_id): @@ -242,7 +242,7 @@ class RoomKeysServlet(RestServlet): yield self.e2e_room_keys_handler.delete_room_keys( user_id, version, room_id, session_id ) - return (200, {}) + return 200, {} class RoomKeysNewVersionServlet(RestServlet): @@ -293,7 +293,7 @@ class RoomKeysNewVersionServlet(RestServlet): info 
= parse_json_object_from_request(request) new_version = yield self.e2e_room_keys_handler.create_version(user_id, info) - return (200, {"version": new_version}) + return 200, {"version": new_version} # we deliberately don't have a PUT /version, as these things really should # be immutable to avoid people footgunning @@ -338,7 +338,7 @@ class RoomKeysVersionServlet(RestServlet): except SynapseError as e: if e.code == 404: raise SynapseError(404, "No backup found", Codes.NOT_FOUND) - return (200, info) + return 200, info @defer.inlineCallbacks def on_DELETE(self, request, version): @@ -358,7 +358,7 @@ class RoomKeysVersionServlet(RestServlet): user_id = requester.user.to_string() yield self.e2e_room_keys_handler.delete_version(user_id, version) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_PUT(self, request, version): @@ -392,7 +392,7 @@ class RoomKeysVersionServlet(RestServlet): ) yield self.e2e_room_keys_handler.update_version(user_id, version, info) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index 14ba61a63e..d2c3316eb7 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -80,7 +80,7 @@ class RoomUpgradeRestServlet(RestServlet): ret = {"replacement_room": new_room_id} - return (200, ret) + return 200, ret def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index 2613648d82..d90e52ed1a 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -19,6 +19,7 @@ from twisted.internet import defer from synapse.http import servlet from synapse.http.servlet import parse_json_object_from_request +from synapse.logging.opentracing import set_tag, trace from synapse.rest.client.transactions import HttpTransactionCache from ._base import client_patterns @@ -42,7 +43,10 @@ class SendToDeviceRestServlet(servlet.RestServlet): self.txns = HttpTransactionCache(hs) self.device_message_handler = hs.get_device_message_handler() + @trace(opname="sendToDevice") def on_PUT(self, request, message_type, txn_id): + set_tag("message_type", message_type) + set_tag("txn_id", txn_id) return self.txns.fetch_or_execute_request( request, self._put, request, message_type, txn_id ) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 7b32dd2212..c98c5a3802 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -174,7 +174,7 @@ class SyncRestServlet(RestServlet): time_now, sync_result, requester.access_token_id, filter ) - return (200, response_content) + return 200, response_content @defer.inlineCallbacks def encode_response(self, time_now, sync_result, access_token_id, filter): diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index d173544355..3b555669a0 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -45,7 +45,7 @@ class TagListServlet(RestServlet): tags = yield self.store.get_tags_for_room(user_id, room_id) - return (200, {"tags": tags}) + return 200, {"tags": tags} class TagServlet(RestServlet): @@ -76,7 +76,7 @@ class TagServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - return (200, {}) + return 200, 
{} @defer.inlineCallbacks def on_DELETE(self, request, user_id, room_id, tag): @@ -88,7 +88,7 @@ class TagServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index 158e686b01..2e8d672471 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -40,7 +40,7 @@ class ThirdPartyProtocolsServlet(RestServlet): yield self.auth.get_user_by_req(request, allow_guest=True) protocols = yield self.appservice_handler.get_3pe_protocols() - return (200, protocols) + return 200, protocols class ThirdPartyProtocolServlet(RestServlet): @@ -60,9 +60,9 @@ class ThirdPartyProtocolServlet(RestServlet): only_protocol=protocol ) if protocol in protocols: - return (200, protocols[protocol]) + return 200, protocols[protocol] else: - return (404, {"error": "Unknown protocol"}) + return 404, {"error": "Unknown protocol"} class ThirdPartyUserServlet(RestServlet): @@ -85,7 +85,7 @@ class ThirdPartyUserServlet(RestServlet): ThirdPartyEntityKind.USER, protocol, fields ) - return (200, results) + return 200, results class ThirdPartyLocationServlet(RestServlet): @@ -108,7 +108,7 @@ class ThirdPartyLocationServlet(RestServlet): ThirdPartyEntityKind.LOCATION, protocol, fields ) - return (200, results) + return 200, results def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index 7ab2b80e46..2863affbab 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -60,7 +60,7 @@ class UserDirectorySearchRestServlet(RestServlet): user_id = requester.user.to_string() if not self.hs.config.user_directory_search_enabled: - return (200, {"limited": False, "results": []}) + return 200, {"limited": False, "results": []} body = parse_json_object_from_request(request) @@ -76,7 +76,7 @@ class UserDirectorySearchRestServlet(RestServlet): user_id, search_term, limit ) - return (200, results) + return 200, results def register_servlets(hs, http_server): diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 0e09191632..0058b6b459 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -24,6 +24,10 @@ logger = logging.getLogger(__name__) class VersionsRestServlet(RestServlet): PATTERNS = [re.compile("^/_matrix/client/versions$")] + def __init__(self, hs): + super(VersionsRestServlet, self).__init__() + self.config = hs.config + def on_GET(self, request): return ( 200, @@ -49,5 +53,5 @@ class VersionsRestServlet(RestServlet): ) -def register_servlets(http_server): - VersionsRestServlet().register(http_server) +def register_servlets(hs, http_server): + VersionsRestServlet(hs).register(http_server) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 031a316693..55580bc59e 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -13,7 +13,9 @@ # limitations under the License. 
import logging -from io import BytesIO + +from canonicaljson import encode_canonical_json, json +from signedjson.sign import sign_json from twisted.internet import defer @@ -95,6 +97,7 @@ class RemoteKey(DirectServeResource): self.store = hs.get_datastore() self.clock = hs.get_clock() self.federation_domain_whitelist = hs.config.federation_domain_whitelist + self.config = hs.config @wrap_json_request_handler async def _async_render_GET(self, request): @@ -214,15 +217,14 @@ class RemoteKey(DirectServeResource): yield self.fetcher.get_keys(cache_misses) yield self.query_keys(request, query, query_remote_on_cache_miss=False) else: - result_io = BytesIO() - result_io.write(b'{"server_keys":') - sep = b"[" - for json_bytes in json_results: - result_io.write(sep) - result_io.write(json_bytes) - sep = b"," - if sep == b"[": - result_io.write(sep) - result_io.write(b"]}") - - respond_with_json_bytes(request, 200, result_io.getvalue()) + signed_keys = [] + for key_json in json_results: + key_json = json.loads(key_json) + for signing_key in self.config.key_server_signing_keys: + key_json = sign_json(key_json, self.config.server_name, signing_key) + + signed_keys.append(key_json) + + results = {"server_keys": signed_keys} + + respond_with_json_bytes(request, 200, encode_canonical_json(results)) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index cf5759e9a6..b972e152a9 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -318,14 +318,14 @@ class MediaRepository(object): responder = yield self.media_storage.fetch_media(file_info) if responder: - return (responder, media_info) + return responder, media_info # Failed to find the file anywhere, lets download it. media_info = yield self._download_remote_file(server_name, media_id, file_id) responder = yield self.media_storage.fetch_media(file_info) - return (responder, media_info) + return responder, media_info @defer.inlineCallbacks def _download_remote_file(self, server_name, media_id, file_id): @@ -526,7 +526,7 @@ class MediaRepository(object): try: file_info = FileInfo( server_name=server_name, - file_id=media_id, + file_id=file_id, thumbnail=True, thumbnail_width=t_width, thumbnail_height=t_height, diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index bd40891a7f..7a56cd4b6c 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -183,7 +183,6 @@ class PreviewUrlResource(DirectServeResource): if isinstance(og, six.text_type): og = og.encode("utf8") return og - return media_info = yield self._download_url(url, user) diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 90d8e6bffe..c995d7e043 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -78,9 +78,9 @@ class Thumbnailer(object): """ if max_width * self.height < max_height * self.width: - return (max_width, (max_width * self.height) // self.width) + return max_width, (max_width * self.height) // self.width else: - return ((max_height * self.width) // self.height, max_height) + return (max_height * self.width) // self.height, max_height def scale(self, width, height, output_type): """Rescales the image to the given dimensions. 
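For reference, a minimal standalone sketch of the aspect-ratio arithmetic that the Thumbnailer hunk above leaves unchanged (only the tuple parentheses are touched); the function name and sample dimensions below are illustrative, not part of the patch:

def fit_within(width, height, max_width, max_height):
    # Same rule as the Thumbnailer hunk: shrink to whichever bound is hit
    # first while preserving the aspect ratio (integer division).
    if max_width * height < max_height * width:
        return max_width, (max_width * height) // width
    return (max_height * width) // height, max_height

# A 1920x1080 image bounded to 640x480 is width-limited -> 640x360,
# while a 1080x1920 image under the same bound is height-limited -> 270x480.
assert fit_within(1920, 1080, 640, 480) == (640, 360)
assert fit_within(1080, 1920, 640, 480) == (270, 480)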
diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index 5e8fda4b65..20177b44e7 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -34,7 +34,7 @@ class WellKnownBuilder(object): self._config = hs.config def get_well_known(self): - # if we don't have a public_base_url, we can't help much here. + # if we don't have a public_baseurl, we can't help much here. if self._config.public_baseurl is None: return None diff --git a/synapse/server.py b/synapse/server.py index 9e28dba2b1..1fcc7375d3 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -221,6 +221,7 @@ class HomeServer(object): self.clock = Clock(reactor) self.distributor = Distributor() self.ratelimiter = Ratelimiter() + self.admin_redaction_ratelimiter = Ratelimiter() self.registration_ratelimiter = Ratelimiter() self.datastore = None @@ -279,6 +280,9 @@ class HomeServer(object): def get_registration_ratelimiter(self): return self.registration_ratelimiter + def get_admin_redaction_ratelimiter(self): + return self.admin_redaction_ratelimiter + def build_federation_client(self): return FederationClient(self) diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 729c097e6d..81c4aff496 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -193,4 +193,4 @@ class ResourceLimitsServerNotices(object): if event_id in referenced_events: referenced_events.remove(event.event_id) - return (currently_blocked, referenced_events) + return currently_blocked, referenced_events diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index a0d34f16ea..2b0f4c79ee 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -136,7 +136,6 @@ class StateHandler(object): if event_id: event = yield self.store.get_event(event_id, allow_none=True) return event - return state_map = yield self.store.get_events( list(state.values()), get_prev_content=False diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js index e02663f50e..276c271bbe 100644 --- a/synapse/static/client/login/js/login.js +++ b/synapse/static/client/login/js/login.js @@ -62,7 +62,7 @@ var show_login = function() { $("#sso_flow").show(); } - if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas) { + if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas && !matrixLogin.serverAcceptsSso) { $("#no_login_types").show(); } }; diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 489ce82fae..abe16334ec 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -1395,14 +1395,22 @@ class SQLBaseStore(object): """ txn.call_after(self._invalidate_state_caches, room_id, members_changed) - # We need to be careful that the size of the `members_changed` list - # isn't so large that it causes problems sending over replication, so we - # send them in chunks. - # Max line length is 16K, and max user ID length is 255, so 50 should - # be safe. - for chunk in batch_iter(members_changed, 50): - keys = itertools.chain([room_id], chunk) - self._send_invalidation_to_replication(txn, _CURRENT_STATE_CACHE_NAME, keys) + if members_changed: + # We need to be careful that the size of the `members_changed` list + # isn't so large that it causes problems sending over replication, so we + # send them in chunks. + # Max line length is 16K, and max user ID length is 255, so 50 should + # be safe. 
+ for chunk in batch_iter(members_changed, 50): + keys = itertools.chain([room_id], chunk) + self._send_invalidation_to_replication( + txn, _CURRENT_STATE_CACHE_NAME, keys + ) + else: + # if no members changed, we still need to invalidate the other caches. + self._send_invalidation_to_replication( + txn, _CURRENT_STATE_CACHE_NAME, [room_id] + ) def _invalidate_state_caches(self, room_id, members_changed): """Invalidates caches that are based on the current state, but does diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 9fa5b4f3d6..6afbfc0d74 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -90,7 +90,7 @@ class AccountDataWorkerStore(SQLBaseStore): room_data = by_room.setdefault(row["room_id"], {}) room_data[row["account_data_type"]] = json.loads(row["content"]) - return (global_account_data, by_room) + return global_account_data, by_room return self.runInteraction( "get_account_data_for_user", get_account_data_for_user_txn @@ -205,7 +205,7 @@ class AccountDataWorkerStore(SQLBaseStore): ) txn.execute(sql, (last_room_id, current_id, limit)) room_results = txn.fetchall() - return (global_results, room_results) + return global_results, room_results return self.runInteraction( "get_all_updated_account_data_txn", get_updated_account_data_txn @@ -244,13 +244,13 @@ class AccountDataWorkerStore(SQLBaseStore): room_account_data = account_data_by_room.setdefault(row[0], {}) room_account_data[row[1]] = json.loads(row[2]) - return (global_account_data, account_data_by_room) + return global_account_data, account_data_by_room changed = self._account_data_stream_cache.has_entity_changed( user_id, int(stream_id) ) if not changed: - return ({}, {}) + return {}, {} return self.runInteraction( "get_updated_account_data_for_user", get_updated_account_data_for_user_txn diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 05d9c05c3f..435b2acd4d 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -165,7 +165,6 @@ class ApplicationServiceTransactionWorkerStore( ) if result: return result.get("state") - return return None def set_appservice_state(self, service, state): @@ -358,7 +357,7 @@ class ApplicationServiceTransactionWorkerStore( events = yield self.get_events_as_list(event_ids) - return (upper_bound, events) + return upper_bound, events class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStore): diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index 79bb0ea46d..6b7458304e 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -19,6 +19,7 @@ from canonicaljson import json from twisted.internet import defer +from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.storage._base import SQLBaseStore from synapse.storage.background_updates import BackgroundUpdateStore from synapse.util.caches.expiringcache import ExpiringCache @@ -66,12 +67,13 @@ class DeviceInboxWorkerStore(SQLBaseStore): messages.append(json.loads(row[1])) if len(messages) < limit: stream_pos = current_stream_id - return (messages, stream_pos) + return messages, stream_pos return self.runInteraction( "get_new_messages_for_device", get_new_messages_for_device_txn ) + @trace @defer.inlineCallbacks def delete_messages_for_device(self, user_id, device_id, up_to_stream_id): """ @@ -87,11 +89,15 @@ class DeviceInboxWorkerStore(SQLBaseStore): last_deleted_stream_id = self._last_device_delete_cache.get( (user_id, device_id), 
None ) + + set_tag("last_deleted_stream_id", last_deleted_stream_id) + if last_deleted_stream_id: has_changed = self._device_inbox_stream_cache.has_entity_changed( user_id, last_deleted_stream_id ) if not has_changed: + log_kv({"message": "No changes in cache since last check"}) return 0 def delete_messages_for_device_txn(txn): @@ -107,6 +113,10 @@ class DeviceInboxWorkerStore(SQLBaseStore): "delete_messages_for_device", delete_messages_for_device_txn ) + log_kv( + {"message": "deleted {} messages for device".format(count), "count": count} + ) + # Update the cache, ensuring that we only ever increase the value last_deleted_stream_id = self._last_device_delete_cache.get( (user_id, device_id), 0 @@ -117,6 +127,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): return count + @trace def get_new_device_msgs_for_remote( self, destination, last_stream_id, current_stream_id, limit ): @@ -132,16 +143,23 @@ class DeviceInboxWorkerStore(SQLBaseStore): in the stream the messages got to. """ + set_tag("destination", destination) + set_tag("last_stream_id", last_stream_id) + set_tag("current_stream_id", current_stream_id) + set_tag("limit", limit) + has_changed = self._device_federation_outbox_stream_cache.has_entity_changed( destination, last_stream_id ) if not has_changed or last_stream_id == current_stream_id: + log_kv({"message": "No new messages in stream"}) return defer.succeed(([], current_stream_id)) if limit <= 0: # This can happen if we run out of room for EDUs in the transaction. return defer.succeed(([], last_stream_id)) + @trace def get_new_messages_for_remote_destination_txn(txn): sql = ( "SELECT stream_id, messages_json FROM device_federation_outbox" @@ -156,14 +174,16 @@ class DeviceInboxWorkerStore(SQLBaseStore): stream_pos = row[0] messages.append(json.loads(row[1])) if len(messages) < limit: + log_kv({"message": "Set stream position to current position"}) stream_pos = current_stream_id - return (messages, stream_pos) + return messages, stream_pos return self.runInteraction( "get_new_device_msgs_for_remote", get_new_messages_for_remote_destination_txn, ) + @trace def delete_device_msgs_for_remote(self, destination, up_to_stream_id): """Used to delete messages when the remote destination acknowledges their receipt. 
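The deviceinbox hunks above mostly add instrumentation. As a rough sketch of the idiom they follow with the helpers imported from synapse.logging.opentracing, something like the function below could be written inside Synapse; it is hypothetical and only illustrates the pattern, it is not code from the patch:

from synapse.logging.opentracing import log_kv, set_tag, trace

@trace
def delete_old_messages(user_id, device_id, up_to_stream_id):
    # Tag the active span with the parameters that identify this unit of work.
    set_tag("user_id", user_id)
    set_tag("up_to_stream_id", up_to_stream_id)
    count = 0  # ... a real implementation would delete device_inbox rows here ...
    # Attach a structured log entry to the span for later inspection.
    log_kv({"message": "deleted {} messages for device".format(count), "count": count})
    return count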
@@ -214,6 +234,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore): expiry_ms=30 * 60 * 1000, ) + @trace @defer.inlineCallbacks def add_messages_to_device_inbox( self, local_messages_by_user_then_device, remote_messages_by_destination diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 8f72d92895..79a58df591 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -21,6 +21,12 @@ from canonicaljson import json from twisted.internet import defer from synapse.api.errors import StoreError +from synapse.logging.opentracing import ( + get_active_span_text_map, + set_tag, + trace, + whitelisted_homeserver, +) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import Cache, SQLBaseStore, db_to_json from synapse.storage.background_updates import BackgroundUpdateStore @@ -73,6 +79,7 @@ class DeviceWorkerStore(SQLBaseStore): return {d["device_id"]: d for d in devices} + @trace @defer.inlineCallbacks def get_devices_by_remote(self, destination, from_stream_id, limit): """Get stream of updates to send to remote servers @@ -88,7 +95,7 @@ class DeviceWorkerStore(SQLBaseStore): destination, int(from_stream_id) ) if not has_changed: - return (now_stream_id, []) + return now_stream_id, [] # We retrieve n+1 devices from the list of outbound pokes where n is # our outbound device update limit. We then check if the very last @@ -111,7 +118,7 @@ class DeviceWorkerStore(SQLBaseStore): # Return an empty list if there are no updates if not updates: - return (now_stream_id, []) + return now_stream_id, [] # if we have exceeded the limit, we need to exclude any results with the # same stream_id as the last row. @@ -127,8 +134,15 @@ class DeviceWorkerStore(SQLBaseStore): # (user_id, device_id) entries into a map, with the value being # the max stream_id across each set of duplicate entries # - # maps (user_id, device_id) -> stream_id + # maps (user_id, device_id) -> (stream_id, opentracing_context) # as long as their stream_id does not match that of the last row + # + # opentracing_context contains the opentracing metadata for the request + # that created the poke + # + # The most recent request's opentracing_context is used as the + # context which created the Edu. + query_map = {} for update in updates: if stream_id_cutoff is not None and update[2] >= stream_id_cutoff: @@ -136,7 +150,14 @@ class DeviceWorkerStore(SQLBaseStore): break key = (update[0], update[1]) - query_map[key] = max(query_map.get(key, 0), update[2]) + + update_context = update[3] + update_stream_id = update[2] + + previous_update_stream_id, _ = query_map.get(key, (0, None)) + + if update_stream_id > previous_update_stream_id: + query_map[key] = (update_stream_id, update_context) # If we didn't find any updates with a stream_id lower than the cutoff, it # means that there are more than limit updates all of which have the same @@ -147,13 +168,13 @@ class DeviceWorkerStore(SQLBaseStore): # skip that stream_id and return an empty list, and continue with the next # stream_id next time. 
if not query_map: - return (stream_id_cutoff, []) + return stream_id_cutoff, [] results = yield self._get_device_update_edus_by_remote( destination, from_stream_id, query_map ) - return (now_stream_id, results) + return now_stream_id, results def _get_devices_by_remote_txn( self, txn, destination, from_stream_id, now_stream_id, limit @@ -171,7 +192,7 @@ class DeviceWorkerStore(SQLBaseStore): List: List of device updates """ sql = """ - SELECT user_id, device_id, stream_id FROM device_lists_outbound_pokes + SELECT user_id, device_id, stream_id, opentracing_context FROM device_lists_outbound_pokes WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ? ORDER BY stream_id LIMIT ? @@ -187,8 +208,9 @@ Args: destination (str): The host the device updates are intended for from_stream_id (int): The minimum stream_id to filter updates by, exclusive - query_map (Dict[(str, str): int]): Dictionary mapping - user_id/device_id to update stream_id + query_map (Dict[(str, str): (int, str|None)]): Dictionary mapping + user_id/device_id to update stream_id and the relevant json-encoded + opentracing context Returns: List[Dict]: List of objects representing a device update EDU @@ -210,12 +232,13 @@ destination, user_id, from_stream_id ) for device_id, device in iteritems(user_devices): - stream_id = query_map[(user_id, device_id)] + stream_id, opentracing_context = query_map[(user_id, device_id)] result = { "user_id": user_id, "device_id": device_id, "prev_id": [prev_id] if prev_id else [], "stream_id": stream_id, + "org.matrix.opentracing_context": opentracing_context, } prev_id = stream_id @@ -299,6 +322,7 @@ def get_device_stream_token(self): return self._device_list_id_gen.get_current_token() + @trace @defer.inlineCallbacks def get_user_devices_from_cache(self, query_list): """Get the devices (and keys if any) for remote users from the cache.
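A small self-contained illustration of the dedup step in the get_devices_by_remote hunk above: for each (user_id, device_id) only the row with the highest stream_id is kept, and that row's opentracing context is the one attached to the outgoing device-list EDU. The sample rows are invented for the example:

# (user_id, device_id, stream_id, opentracing_context) rows, as returned by
# the SELECT over device_lists_outbound_pokes shown above.
updates = [
    ("@alice:example.org", "DEVICE1", 10, "{}"),
    ("@alice:example.org", "DEVICE1", 12, '{"span": "abc"}'),
    ("@bob:example.org", "DEVICE2", 11, "{}"),
]

query_map = {}
for user_id, device_id, stream_id, context in updates:
    prev_stream_id, _ = query_map.get((user_id, device_id), (0, None))
    if stream_id > prev_stream_id:
        # The most recent poke wins, and its tracing context travels with the EDU.
        query_map[(user_id, device_id)] = (stream_id, context)

assert query_map[("@alice:example.org", "DEVICE1")] == (12, '{"span": "abc"}')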
@@ -330,7 +354,10 @@ class DeviceWorkerStore(SQLBaseStore): else: results[user_id] = yield self._get_cached_devices_for_user(user_id) - return (user_ids_not_in_cache, results) + set_tag("in_cache", results) + set_tag("not_in_cache", user_ids_not_in_cache) + + return user_ids_not_in_cache, results @cachedInlineCallbacks(num_args=2, tree=True) def _get_cached_user_device(self, user_id, device_id): @@ -814,6 +841,8 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): ], ) + context = get_active_span_text_map() + self._simple_insert_many_txn( txn, table="device_lists_outbound_pokes", @@ -825,6 +854,9 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): "device_id": device_id, "sent": False, "ts": now, + "opentracing_context": json.dumps(context) + if whitelisted_homeserver(destination) + else "{}", } for destination in hosts for device_id in device_ids diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index e966a73f3d..eed7757ed5 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -47,7 +47,6 @@ class DirectoryWorkerStore(SQLBaseStore): if not room_id: return None - return servers = yield self._simple_select_onecol( "room_alias_servers", @@ -58,7 +57,6 @@ class DirectoryWorkerStore(SQLBaseStore): if not servers: return None - return return RoomAliasMapping(room_id, room_alias.to_string(), servers) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index 99128f2df7..be2fe2bab6 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -18,6 +18,7 @@ import json from twisted.internet import defer from synapse.api.errors import StoreError +from synapse.logging.opentracing import log_kv, trace from ._base import SQLBaseStore @@ -82,11 +83,11 @@ class EndToEndRoomKeyStore(SQLBaseStore): table="e2e_room_keys", keyvalues={ "user_id": user_id, + "version": version, "room_id": room_id, "session_id": session_id, }, values={ - "version": version, "first_message_index": room_key["first_message_index"], "forwarded_count": room_key["forwarded_count"], "is_verified": room_key["is_verified"], @@ -94,7 +95,16 @@ class EndToEndRoomKeyStore(SQLBaseStore): }, lock=False, ) + log_kv( + { + "message": "Set room key", + "room_id": room_id, + "session_id": session_id, + "room_key": room_key, + } + ) + @trace @defer.inlineCallbacks def get_e2e_room_keys(self, user_id, version, room_id=None, session_id=None): """Bulk get the E2E room keys for a given backup, optionally filtered to a given @@ -153,6 +163,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): return sessions + @trace @defer.inlineCallbacks def delete_e2e_room_keys(self, user_id, version, room_id=None, session_id=None): """Bulk delete the E2E room keys for a given backup, optionally filtered to a given @@ -236,6 +247,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): "get_e2e_room_keys_version_info", _get_e2e_room_keys_version_info_txn ) + @trace def create_e2e_room_keys_version(self, user_id, info): """Atomically creates a new version of this user's e2e_room_keys store with the given version info. 
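The e2e_room_keys hunk above moves "version" from the upserted values into the upsert key. A toy model of upsert semantics (this is not Synapse's _simple_upsert, just an assumption-level sketch) shows the effect: the same room/session uploaded to two backup versions now occupies two rows instead of the second upload overwriting the first:

def toy_upsert(table, keyvalues, values):
    # Toy stand-in for an upsert: the row is keyed on keyvalues only.
    table[tuple(sorted(keyvalues.items()))] = values

table = {}
toy_upsert(
    table,
    {"user_id": "@u:hs", "version": "1", "room_id": "!room:hs", "session_id": "sess"},
    {"session_data": "first backup"},
)
toy_upsert(
    table,
    {"user_id": "@u:hs", "version": "2", "room_id": "!room:hs", "session_id": "sess"},
    {"session_data": "second backup"},
)
# With "version" outside the key, the second call would have clobbered the first.
assert len(table) == 2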
@@ -276,6 +288,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): "create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn ) + @trace def update_e2e_room_keys_version(self, user_id, version, info): """Update a given backup version @@ -292,6 +305,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="update_e2e_room_keys_version", ) + @trace def delete_e2e_room_keys_version(self, user_id, version=None): """Delete a given backup version of the user's room keys. Doesn't delete their actual key data. diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 1e07474e70..33e3a84933 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -18,12 +18,14 @@ from canonicaljson import encode_canonical_json from twisted.internet import defer +from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.util.caches.descriptors import cached from ._base import SQLBaseStore, db_to_json class EndToEndKeyWorkerStore(SQLBaseStore): + @trace @defer.inlineCallbacks def get_e2e_device_keys( self, query_list, include_all_devices=False, include_deleted_devices=False @@ -40,6 +42,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): Dict mapping from user-id to dict mapping from device_id to dict containing "key_json", "device_display_name". """ + set_tag("query_list", query_list) if not query_list: return {} @@ -57,9 +60,13 @@ class EndToEndKeyWorkerStore(SQLBaseStore): return results + @trace def _get_e2e_device_keys_txn( self, txn, query_list, include_all_devices=False, include_deleted_devices=False ): + set_tag("include_all_devices", include_all_devices) + set_tag("include_deleted_devices", include_deleted_devices) + query_clauses = [] query_params = [] @@ -104,6 +111,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): for user_id, device_id in deleted_devices: result.setdefault(user_id, {})[device_id] = None + log_kv(result) return result @defer.inlineCallbacks @@ -129,8 +137,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore): keyvalues={"user_id": user_id, "device_id": device_id}, desc="add_e2e_one_time_keys_check", ) - - return {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows} + result = {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows} + log_kv({"message": "Fetched one time keys for user", "one_time_keys": result}) + return result @defer.inlineCallbacks def add_e2e_one_time_keys(self, user_id, device_id, time_now, new_keys): @@ -146,6 +155,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore): """ def _add_e2e_one_time_keys(txn): + set_tag("user_id", user_id) + set_tag("device_id", device_id) + set_tag("new_keys", new_keys) # We are protected from race between lookup and insertion due to # a unique constraint. 
If there is a race of two calls to # `add_e2e_one_time_keys` then they'll conflict and we will only @@ -202,6 +214,11 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): """ def _set_e2e_device_keys_txn(txn): + set_tag("user_id", user_id) + set_tag("device_id", device_id) + set_tag("time_now", time_now) + set_tag("device_keys", device_keys) + old_key_json = self._simple_select_one_onecol_txn( txn, table="e2e_device_keys_json", @@ -215,6 +232,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): new_key_json = encode_canonical_json(device_keys).decode("utf-8") if old_key_json == new_key_json: + log_kv({"Message": "Device key already stored."}) return False self._simple_upsert_txn( @@ -223,7 +241,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): keyvalues={"user_id": user_id, "device_id": device_id}, values={"ts_added_ms": time_now, "key_json": new_key_json}, ) - + log_kv({"message": "Device keys stored."}) return True return self.runInteraction("set_e2e_device_keys", _set_e2e_device_keys_txn) @@ -231,6 +249,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): def claim_e2e_one_time_keys(self, query_list): """Take a list of one time keys out of the database""" + @trace def _claim_e2e_one_time_keys(txn): sql = ( "SELECT key_id, key_json FROM e2e_one_time_keys_json" @@ -252,7 +271,13 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): " AND key_id = ?" ) for user_id, device_id, algorithm, key_id in delete: + log_kv( + { + "message": "Executing claim e2e_one_time_keys transaction on database." + } + ) txn.execute(sql, (user_id, device_id, algorithm, key_id)) + log_kv({"message": "finished executing and invalidating cache"}) self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) @@ -262,6 +287,13 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): def delete_e2e_keys_by_device(self, user_id, device_id): def delete_e2e_keys_by_device_txn(txn): + log_kv( + { + "message": "Deleting keys for device", + "device_id": device_id, + "user_id": user_id, + } + ) self._simple_delete_txn( txn, table="e2e_device_keys_json", diff --git a/synapse/storage/events.py b/synapse/storage/events.py index ac876287fc..ddf7ab6479 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -23,7 +23,7 @@ from functools import wraps from six import iteritems, text_type from six.moves import range -from canonicaljson import json +from canonicaljson import encode_canonical_json, json from prometheus_client import Counter, Histogram from twisted.internet import defer @@ -33,6 +33,7 @@ from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 +from synapse.events.utils import prune_event_dict from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.logging.utils import log_function from synapse.metrics import BucketCollector @@ -262,6 +263,14 @@ class EventsStore( hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000) + def _censor_redactions(): + return run_as_background_process( + "_censor_redactions", self._censor_redactions + ) + + if self.hs.config.redaction_retention_period is not None: + hs.get_clock().looping_call(_censor_redactions, 5 * 60 * 1000) + @defer.inlineCallbacks def _read_forward_extremities(self): def fetch(txn): @@ -810,7 +819,7 @@ class EventsStore( # If they old and new groups 
are the same then we don't need to do # anything. if old_state_groups == new_state_groups: - return (None, None) + return None, None if len(new_state_groups) == 1 and len(old_state_groups) == 1: # If we're going from one state group to another, lets check if @@ -827,7 +836,7 @@ class EventsStore( # the current state in memory then lets also return that, # but it doesn't matter if we don't. new_state = state_groups_map.get(new_state_group) - return (new_state, delta_ids) + return new_state, delta_ids # Now that we have calculated new_state_groups we need to get # their state IDs so we can resolve to a single state set. @@ -839,7 +848,7 @@ class EventsStore( if len(new_state_groups) == 1: # If there is only one state group, then we know what the current # state is. - return (state_groups_map[new_state_groups.pop()], None) + return state_groups_map[new_state_groups.pop()], None # Ok, we need to defer to the state handler to resolve our state sets. @@ -868,7 +877,7 @@ class EventsStore( state_res_store=StateResolutionStore(self), ) - return (res.state, None) + return res.state, None @defer.inlineCallbacks def _calculate_state_delta(self, room_id, current_state): @@ -891,7 +900,7 @@ class EventsStore( if ev_id != existing_state.get(key) } - return (to_delete, to_insert) + return to_delete, to_insert @log_function def _persist_events_txn( @@ -1302,15 +1311,11 @@ class EventsStore( "event_reference_hashes", "event_search", "event_to_state_groups", - "guest_access", - "history_visibility", "local_invites", - "room_names", "state_events", "rejections", "redactions", "room_memberships", - "topics", ): txn.executemany( "DELETE FROM %s WHERE event_id = ?" % (table,), @@ -1454,10 +1459,10 @@ class EventsStore( for event, _ in events_and_contexts: if event.type == EventTypes.Name: - # Insert into the room_names and event_search tables. + # Insert into the event_search table. self._store_room_name_txn(txn, event) elif event.type == EventTypes.Topic: - # Insert into the topics table and event_search table. + # Insert into the event_search table. self._store_room_topic_txn(txn, event) elif event.type == EventTypes.Message: # Insert into the event_search table. @@ -1465,12 +1470,6 @@ class EventsStore( elif event.type == EventTypes.Redaction: # Insert into the redactions table. self._store_redaction(txn, event) - elif event.type == EventTypes.RoomHistoryVisibility: - # Insert into the event_search table. - self._store_history_visibility_txn(txn, event) - elif event.type == EventTypes.GuestAccess: - # Insert into the event_search table. - self._store_guest_access_txn(txn, event) self._handle_event_relations(txn, event) @@ -1559,6 +1558,98 @@ class EventsStore( ) @defer.inlineCallbacks + def _censor_redactions(self): + """Censors all redactions older than the configured period that haven't + been censored yet. + + By censor we mean update the event_json table with the redacted event. + + Returns: + Deferred + """ + + if self.hs.config.redaction_retention_period is None: + return + + max_pos = yield self.find_first_stream_ordering_after_ts( + self._clock.time_msec() - self.hs.config.redaction_retention_period + ) + + # We fetch all redactions that: + # 1. point to an event we have, + # 2. has a stream ordering from before the cut off, and + # 3. we haven't yet censored. + # + # This is limited to 100 events to ensure that we don't try and do too + # much at once. We'll get called again so this should eventually catch + # up. 
+ # + # We use the range [-max_pos, max_pos] to handle backfilled events, + # which are given negative stream ordering. + sql = """ + SELECT redact_event.event_id, redacts FROM redactions + INNER JOIN events AS redact_event USING (event_id) + INNER JOIN events AS original_event ON ( + redact_event.room_id = original_event.room_id + AND redacts = original_event.event_id + ) + WHERE NOT have_censored + AND ? <= redact_event.stream_ordering AND redact_event.stream_ordering <= ? + ORDER BY redact_event.stream_ordering ASC + LIMIT ? + """ + + rows = yield self._execute( + "_censor_redactions_fetch", None, sql, -max_pos, max_pos, 100 + ) + + updates = [] + + for redaction_id, event_id in rows: + redaction_event = yield self.get_event(redaction_id, allow_none=True) + original_event = yield self.get_event( + event_id, allow_rejected=True, allow_none=True + ) + + # The SQL above ensures that we have both the redaction and + # original event, so if the `get_event` calls return None it + # means that the redaction wasn't allowed. Either way we know that + # the result won't change so we mark the fact that we've checked. + if ( + redaction_event + and original_event + and original_event.internal_metadata.is_redacted() + ): + # Redaction was allowed + pruned_json = encode_canonical_json( + prune_event_dict(original_event.get_dict()) + ) + else: + # Redaction wasn't allowed + pruned_json = None + + updates.append((redaction_id, event_id, pruned_json)) + + def _update_censor_txn(txn): + for redaction_id, event_id, pruned_json in updates: + if pruned_json: + self._simple_update_one_txn( + txn, + table="event_json", + keyvalues={"event_id": event_id}, + updatevalues={"json": pruned_json}, + ) + + self._simple_update_one_txn( + txn, + table="redactions", + keyvalues={"event_id": redaction_id}, + updatevalues={"have_censored": True}, + ) + + yield self.runInteraction("_update_censor_txn", _update_censor_txn) + + @defer.inlineCallbacks def count_daily_messages(self): """ Returns an estimate of the number of messages sent in the last day. @@ -2191,6 +2282,144 @@ class EventsStore( return to_delete, to_dedelta + def purge_room(self, room_id): + """Deletes all record of a room + + Args: + room_id (str): + """ + + return self.runInteraction("purge_room", self._purge_room_txn, room_id) + + def _purge_room_txn(self, txn, room_id): + # first we have to delete the state groups states + logger.info("[purge] removing %s from state_groups_state", room_id) + + txn.execute( + """ + DELETE FROM state_groups_state WHERE state_group IN ( + SELECT state_group FROM events JOIN event_to_state_groups USING(event_id) + WHERE events.room_id=? + ) + """, + (room_id,), + ) + + # ... and the state group edges + logger.info("[purge] removing %s from state_group_edges", room_id) + + txn.execute( + """ + DELETE FROM state_group_edges WHERE state_group IN ( + SELECT state_group FROM events JOIN event_to_state_groups USING(event_id) + WHERE events.room_id=? + ) + """, + (room_id,), + ) + + # ... and the state groups + logger.info("[purge] removing %s from state_groups", room_id) + + txn.execute( + """ + DELETE FROM state_groups WHERE id IN ( + SELECT state_group FROM events JOIN event_to_state_groups USING(event_id) + WHERE events.room_id=? 
+ ) + """, + (room_id,), + ) + + # and then tables which lack an index on room_id but have one on event_id + for table in ( + "event_auth", + "event_edges", + "event_push_actions_staging", + "event_reference_hashes", + "event_relations", + "event_to_state_groups", + "redactions", + "rejections", + "state_events", + ): + logger.info("[purge] removing %s from %s", room_id, table) + + txn.execute( + """ + DELETE FROM %s WHERE event_id IN ( + SELECT event_id FROM events WHERE room_id=? + ) + """ + % (table,), + (room_id,), + ) + + # and finally, the tables with an index on room_id (or no useful index) + for table in ( + "current_state_events", + "event_backward_extremities", + "event_forward_extremities", + "event_json", + "event_push_actions", + "event_search", + "events", + "group_rooms", + "public_room_list_stream", + "receipts_graph", + "receipts_linearized", + "room_aliases", + "room_depth", + "room_memberships", + "room_stats_state", + "room_stats_current", + "room_stats_historical", + "room_stats_earliest_token", + "rooms", + "stream_ordering_to_exterm", + "topics", + "users_in_public_rooms", + "users_who_share_private_rooms", + # no useful index, but let's clear them anyway + "appservice_room_list", + "e2e_room_keys", + "event_push_summary", + "pusher_throttle", + "group_summary_rooms", + "local_invites", + "room_account_data", + "room_tags", + ): + logger.info("[purge] removing %s from %s", room_id, table) + txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,)) + + # Other tables we do NOT need to clear out: + # + # - blocked_rooms + # This is important, to make sure that we don't accidentally rejoin a blocked + # room after it was purged + # + # - user_directory + # This has a room_id column, but it is unused + # + + # Other tables that we might want to consider clearing out include: + # + # - event_reports + # Given that these are intended for abuse management my initial + # inclination is to leave them in place. + # + # - current_state_delta_stream + # - ex_outlier_stream + # - room_tags_revisions + # The problem with these is that they are largeish and there is no room_id + # index on them. In any case we should be clearing out 'stream' tables + # periodically anyway (#5888) + + # TODO: we could probably usefully do a bunch of cache invalidation here + + logger.info("[purge] done") + @defer.inlineCallbacks def is_event_after(self, event_id1, event_id2): """Returns True if event_id1 is after event_id2 in the stream diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index d20eacda59..e96eed8a6d 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -238,6 +238,13 @@ def _upgrade_existing_database( logger.debug("applied_delta_files: %s", applied_delta_files) + if isinstance(database_engine, PostgresEngine): + specific_engine_extension = ".postgres" + else: + specific_engine_extension = ".sqlite" + + specific_engine_extensions = (".sqlite", ".postgres") + for v in range(start_ver, SCHEMA_VERSION + 1): logger.info("Upgrading schema to v%d", v) @@ -274,15 +281,22 @@ def _upgrade_existing_database( # Sometimes .pyc files turn up anyway even though we've # disabled their generation; e.g. from distribution package # installers. 
Silently skip it - pass + continue elif ext == ".sql": # A plain old .sql file, just read and execute it logger.info("Applying schema %s", relative_path) executescript(cur, absolute_path) + elif ext == specific_engine_extension and root_name.endswith(".sql"): + # A .sql file specific to our engine; just read and execute it + logger.info("Applying engine-specific schema %s", relative_path) + executescript(cur, absolute_path) + elif ext in specific_engine_extensions and root_name.endswith(".sql"): + # A .sql file for a different engine; skip it. + continue else: # Not a valid delta file. - logger.warn( - "Found directory entry that did not end in .py or" " .sql: %s", + logger.warning( + "Found directory entry that did not end in .py or .sql: %s", relative_path, ) continue @@ -290,7 +304,7 @@ def _upgrade_existing_database( # Mark as done. cur.execute( database_engine.convert_param_style( - "INSERT INTO applied_schema_deltas (version, file)" " VALUES (?,?)" + "INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)" ), (v, relative_path), ) @@ -298,7 +312,7 @@ def _upgrade_existing_database( cur.execute("DELETE FROM schema_version") cur.execute( database_engine.convert_param_style( - "INSERT INTO schema_version (version, upgraded)" " VALUES (?,?)" + "INSERT INTO schema_version (version, upgraded) VALUES (?,?)" ), (v, True), ) diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 1a0f2d5768..5db6f2d84a 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -90,7 +90,7 @@ class PresenceStore(SQLBaseStore): presence_states, ) - return (stream_orderings[-1], self._presence_id_gen.get_current_token()) + return stream_orderings[-1], self._presence_id_gen.get_current_token() def _update_presence_txn(self, txn, stream_orderings, presence_states): for stream_id, state in zip(stream_orderings, presence_states): diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index 8a5d8e9b18..912c1df6be 100644 --- a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -35,7 +35,6 @@ class ProfileWorkerStore(SQLBaseStore): if e.code == 404: # no match return ProfileInfo(None, None) - return else: raise diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index b431d24b8a..3e0e834a62 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -133,7 +133,7 @@ class PusherWorkerStore(SQLBaseStore): txn.execute(sql, (last_id, current_id, limit)) deleted = txn.fetchall() - return (updated, deleted) + return updated, deleted return self.runInteraction( "get_all_updated_pushers", get_all_updated_pushers_txn diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 6aa6d98ebb..290ddb30e8 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -478,7 +478,7 @@ class ReceiptsStore(ReceiptsWorkerStore): max_persisted_id = self._receipts_id_gen.get_current_token() - return (stream_id, max_persisted_id) + return stream_id, max_persisted_id def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids, data): return self.runInteraction( diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 1e3c2148f6..cc1b1b73c9 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -57,6 +57,7 @@ class RegistrationWorkerStore(SQLBaseStore): "consent_server_notice_sent", "appservice_id", "creation_ts", + "user_type", ], allow_none=True, desc="get_user_by_id", @@ -273,6 +274,14 @@ class RegistrationWorkerStore(SQLBaseStore): 
@defer.inlineCallbacks def is_server_admin(self, user): + """Determines if a user is an admin of this homeserver. + + Args: + user (UserID): user ID of the user to test + + Returns (bool): + true iff the user is a server admin, false otherwise. + """ res = yield self._simple_select_one_onecol( table="users", keyvalues={"name": user.to_string()}, @@ -283,6 +292,21 @@ class RegistrationWorkerStore(SQLBaseStore): return res if res else False + def set_server_admin(self, user, admin): + """Sets whether a user is an admin of this homeserver. + + Args: + user (UserID): user ID of the user to test + admin (bool): true iff the user is to be a server admin, + false otherwise. + """ + return self._simple_update_one( + table="users", + keyvalues={"name": user.to_string()}, + updatevalues={"admin": 1 if admin else 0}, + desc="set_server_admin", + ) + def _query_for_auth(self, txn, token): sql = ( "SELECT users.name, users.is_guest, access_tokens.id as token_id," @@ -300,6 +324,19 @@ class RegistrationWorkerStore(SQLBaseStore): return None @cachedInlineCallbacks() + def is_real_user(self, user_id): + """Determines if the user is a real user, ie does not have a 'user_type'. + + Args: + user_id (str): user id to test + + Returns: + Deferred[bool]: True if user 'user_type' is null or empty string + """ + res = yield self.runInteraction("is_real_user", self.is_real_user_txn, user_id) + return res + + @cachedInlineCallbacks() def is_support_user(self, user_id): """Determines if the user is of type UserTypes.SUPPORT @@ -314,6 +351,16 @@ class RegistrationWorkerStore(SQLBaseStore): ) return res + def is_real_user_txn(self, txn, user_id): + res = self._simple_select_one_onecol_txn( + txn=txn, + table="users", + keyvalues={"name": user_id}, + retcol="user_type", + allow_none=True, + ) + return res is None + def is_support_user_txn(self, txn, user_id): res = self._simple_select_one_onecol_txn( txn=txn, @@ -419,6 +466,20 @@ class RegistrationWorkerStore(SQLBaseStore): return ret @defer.inlineCallbacks + def count_real_users(self): + """Counts all users without a special user_type registered on the homeserver.""" + + def _count_users(txn): + txn.execute("SELECT COUNT(*) AS users FROM users where user_type is null") + rows = self.cursor_to_dict(txn) + if rows: + return rows[0]["users"] + return 0 + + ret = yield self.runInteraction("count_real_users", _count_users) + return ret + + @defer.inlineCallbacks def find_next_generated_user_id_localpart(self): """ Gets the localpart of the next generated user ID. @@ -611,6 +672,85 @@ class RegistrationWorkerStore(SQLBaseStore): # Convert the integer into a boolean. return res == 1 + def get_threepid_validation_session( + self, medium, client_secret, address=None, sid=None, validated=True + ): + """Gets a session_id and last_send_attempt (if available) for a + client_secret/medium/(address|session_id) combo + + Args: + medium (str|None): The medium of the 3PID + address (str|None): The address of the 3PID + sid (str|None): The ID of the validation session + client_secret (str|None): A unique string provided by the client to + help identify this validation attempt + validated (bool|None): Whether sessions should be filtered by + whether they have been validated already or not. None to + perform no filtering + + Returns: + deferred {str, int}|None: A dict containing the + latest session_id and send_attempt count for this 3PID. 
+ Otherwise None if there hasn't been a previous attempt + """ + keyvalues = {"medium": medium, "client_secret": client_secret} + if address: + keyvalues["address"] = address + if sid: + keyvalues["session_id"] = sid + + assert address or sid + + def get_threepid_validation_session_txn(txn): + sql = """ + SELECT address, session_id, medium, client_secret, + last_send_attempt, validated_at + FROM threepid_validation_session WHERE %s + """ % ( + " AND ".join("%s = ?" % k for k in iterkeys(keyvalues)), + ) + + if validated is not None: + sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL") + + sql += " LIMIT 1" + + txn.execute(sql, list(keyvalues.values())) + rows = self.cursor_to_dict(txn) + if not rows: + return None + + return rows[0] + + return self.runInteraction( + "get_threepid_validation_session", get_threepid_validation_session_txn + ) + + def delete_threepid_session(self, session_id): + """Removes a threepid validation session from the database. This can + be done after validation has been performed and whatever action was + waiting on it has been carried out + + Args: + session_id (str): The ID of the session to delete + """ + + def delete_threepid_session_txn(txn): + self._simple_delete_txn( + txn, + table="threepid_validation_token", + keyvalues={"session_id": session_id}, + ) + self._simple_delete_txn( + txn, + table="threepid_validation_session", + keyvalues={"session_id": session_id}, + ) + + return self.runInteraction( + "delete_threepid_session", delete_threepid_session_txn + ) + class RegistrationStore( RegistrationWorkerStore, background_updates.BackgroundUpdateStore @@ -866,6 +1006,17 @@ class RegistrationStore( (user_id_obj.localpart, create_profile_with_displayname), ) + if self.hs.config.stats_enabled: + # we create a new completed user statistics row + + # we don't strictly need current_token since this user really can't + # have any state deltas before now (as it is a new user), but still, + # we include it for completeness. + current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn) + self._update_stats_delta_txn( + txn, now, "user", user_id, {}, complete_with_stream_id=current_token + ) + self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) txn.call_after(self.is_guest.invalidate, (user_id,)) @@ -1088,60 +1239,6 @@ class RegistrationStore( return 1 - def get_threepid_validation_session( - self, medium, client_secret, address=None, sid=None, validated=True - ): - """Gets a session_id and last_send_attempt (if available) for a - client_secret/medium/(address|session_id) combo - - Args: - medium (str|None): The medium of the 3PID - address (str|None): The address of the 3PID - sid (str|None): The ID of the validation session - client_secret (str|None): A unique string provided by the client to - help identify this validation attempt - validated (bool|None): Whether sessions should be filtered by - whether they have been validated already or not. None to - perform no filtering - - Returns: - deferred {str, int}|None: A dict containing the - latest session_id and send_attempt count for this 3PID. 
- Otherwise None if there hasn't been a previous attempt - """ - keyvalues = {"medium": medium, "client_secret": client_secret} - if address: - keyvalues["address"] = address - if sid: - keyvalues["session_id"] = sid - - assert address or sid - - def get_threepid_validation_session_txn(txn): - sql = """ - SELECT address, session_id, medium, client_secret, - last_send_attempt, validated_at - FROM threepid_validation_session WHERE %s - """ % ( - " AND ".join("%s = ?" % k for k in iterkeys(keyvalues)), - ) - - if validated is not None: - sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL") - - sql += " LIMIT 1" - - txn.execute(sql, list(keyvalues.values())) - rows = self.cursor_to_dict(txn) - if not rows: - return None - - return rows[0] - - return self.runInteraction( - "get_threepid_validation_session", get_threepid_validation_session_txn - ) - def validate_threepid_session(self, session_id, client_secret, token, current_ts): """Attempt to validate a threepid session using a token @@ -1157,6 +1254,7 @@ class RegistrationStore( deferred str|None: A str representing a link to redirect the user to if there is one. """ + # Insert everything into a transaction in order to run atomically def validate_threepid_session_txn(txn): row = self._simple_select_one_txn( @@ -1328,31 +1426,6 @@ class RegistrationStore( self.clock.time_msec(), ) - def delete_threepid_session(self, session_id): - """Removes a threepid validation session from the database. This can - be done after validation has been performed and whatever action was - waiting on it has been carried out - - Args: - session_id (str): The ID of the session to delete - """ - - def delete_threepid_session_txn(txn): - self._simple_delete_txn( - txn, - table="threepid_validation_token", - keyvalues={"session_id": session_id}, - ) - self._simple_delete_txn( - txn, - table="threepid_validation_session", - keyvalues={"session_id": session_id}, - ) - - return self.runInteraction( - "delete_threepid_session", delete_threepid_session_txn - ) - def set_user_deactivated_status_txn(self, txn, user_id, deactivated): self._simple_update_one_txn( txn=txn, diff --git a/synapse/storage/room.py b/synapse/storage/room.py index bc606292b8..08e13f3a3b 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -386,32 +386,12 @@ class RoomStore(RoomWorkerStore, SearchStore): def _store_room_topic_txn(self, txn, event): if hasattr(event, "content") and "topic" in event.content: - self._simple_insert_txn( - txn, - "topics", - { - "event_id": event.event_id, - "room_id": event.room_id, - "topic": event.content["topic"], - }, - ) - self.store_event_search_txn( txn, event, "content.topic", event.content["topic"] ) def _store_room_name_txn(self, txn, event): if hasattr(event, "content") and "name" in event.content: - self._simple_insert_txn( - txn, - "room_names", - { - "event_id": event.event_id, - "room_id": event.room_id, - "name": event.content["name"], - }, - ) - self.store_event_search_txn( txn, event, "content.name", event.content["name"] ) @@ -422,21 +402,6 @@ class RoomStore(RoomWorkerStore, SearchStore): txn, event, "content.body", event.content["body"] ) - def _store_history_visibility_txn(self, txn, event): - self._store_content_index_txn(txn, event, "history_visibility") - - def _store_guest_access_txn(self, txn, event): - self._store_content_index_txn(txn, event, "guest_access") - - def _store_content_index_txn(self, txn, event, key): - if hasattr(event, "content") and key in event.content: - sql = ( - "INSERT INTO %(key)s" - " 
(event_id, room_id, %(key)s)" - " VALUES (?, ?, ?)" % {"key": key} - ) - txn.execute(sql, (event.event_id, event.room_id, event.content[key])) - def add_event_report( self, room_id, event_id, user_id, reason, content, received_ts ): diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index eecb276465..4df8ebdacd 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -24,8 +24,10 @@ from canonicaljson import json from twisted.internet import defer from synapse.api.constants import EventTypes, Membership +from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import LoggingTransaction +from synapse.storage.engines import Sqlite3Engine from synapse.storage.events_worker import EventsWorkerStore from synapse.types import get_domain_from_id from synapse.util.async_helpers import Linearizer @@ -74,6 +76,63 @@ class RoomMemberWorkerStore(EventsWorkerStore): self._check_safe_current_state_events_membership_updated_txn(txn) txn.close() + if self.hs.config.metrics_flags.known_servers: + self._known_servers_count = 1 + self.hs.get_clock().looping_call( + run_as_background_process, + 60 * 1000, + "_count_known_servers", + self._count_known_servers, + ) + self.hs.get_clock().call_later( + 1000, + run_as_background_process, + "_count_known_servers", + self._count_known_servers, + ) + LaterGauge( + "synapse_federation_known_servers", + "", + [], + lambda: self._known_servers_count, + ) + + @defer.inlineCallbacks + def _count_known_servers(self): + """ + Count the servers that this server knows about. + + The statistic is stored on the class for the + `synapse_federation_known_servers` LaterGauge to collect. + """ + + def _transact(txn): + if isinstance(self.database_engine, Sqlite3Engine): + query = """ + SELECT COUNT(DISTINCT substr(out.user_id, pos+1)) + FROM ( + SELECT rm.user_id as user_id, instr(rm.user_id, ':') + AS pos FROM room_memberships as rm + INNER JOIN current_state_events as c ON rm.event_id = c.event_id + WHERE c.type = 'm.room.member' + ) as out + """ + else: + query = """ + SELECT COUNT(DISTINCT split_part(state_key, ':', 2)) + FROM current_state_events + WHERE type = 'm.room.member' AND membership = 'join'; + """ + txn.execute(query) + return list(txn)[0][0] + + count = yield self.runInteraction("get_known_servers", _transact) + + # We always know about ourselves, even if we have nothing in + # room_memberships (for example, the server is new). + self._known_servers_count = max([count, 1]) + return self._known_servers_count + def _check_safe_current_state_events_membership_updated_txn(self, txn): """Checks if it is safe to assume the new current_state_events membership column is up to date @@ -112,29 +171,31 @@ class RoomMemberWorkerStore(EventsWorkerStore): @cached(max_entries=100000, iterable=True) def get_users_in_room(self, room_id): - def f(txn): - # If we can assume current_state_events.membership is up to date - # then we can avoid a join, which is a Very Good Thing given how - # frequently this function gets called. - if self._current_state_events_membership_up_to_date: - sql = """ - SELECT state_key FROM current_state_events - WHERE type = 'm.room.member' AND room_id = ? AND membership = ? - """ - else: - sql = """ - SELECT state_key FROM room_memberships as m - INNER JOIN current_state_events as c - ON m.event_id = c.event_id - AND m.room_id = c.room_id - AND m.user_id = c.state_key - WHERE c.type = 'm.room.member' AND c.room_id = ? 
AND m.membership = ? - """ + return self.runInteraction( + "get_users_in_room", self.get_users_in_room_txn, room_id + ) - txn.execute(sql, (room_id, Membership.JOIN)) - return [to_ascii(r[0]) for r in txn] + def get_users_in_room_txn(self, txn, room_id): + # If we can assume current_state_events.membership is up to date + # then we can avoid a join, which is a Very Good Thing given how + # frequently this function gets called. + if self._current_state_events_membership_up_to_date: + sql = """ + SELECT state_key FROM current_state_events + WHERE type = 'm.room.member' AND room_id = ? AND membership = ? + """ + else: + sql = """ + SELECT state_key FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? + """ - return self.runInteraction("get_users_in_room", f) + txn.execute(sql, (room_id, Membership.JOIN)) + return [to_ascii(r[0]) for r in txn] @cached(max_entries=100000) def get_room_summary(self, room_id): diff --git a/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql b/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql new file mode 100644 index 0000000000..41807eb1e7 --- /dev/null +++ b/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql @@ -0,0 +1,20 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Opentracing context data for inclusion in the device_list_update EDUs, as a + * json-encoded dictionary. NULL if opentracing is disabled (or not enabled for this destination). + */ +ALTER TABLE device_lists_outbound_pokes ADD opentracing_context TEXT; diff --git a/synapse/storage/schema/delta/56/destinations_failure_ts.sql b/synapse/storage/schema/delta/56/destinations_failure_ts.sql new file mode 100644 index 0000000000..f00889290b --- /dev/null +++ b/synapse/storage/schema/delta/56/destinations_failure_ts.sql @@ -0,0 +1,25 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Record the timestamp when a given server started failing + */ +ALTER TABLE destinations ADD failure_ts BIGINT; + +/* as a rough approximation, we assume that the server started failing at + * retry_interval before the last retry + */ +UPDATE destinations SET failure_ts = retry_last_ts - retry_interval + WHERE retry_last_ts > 0; diff --git a/synapse/storage/schema/delta/56/drop_unused_event_tables.sql b/synapse/storage/schema/delta/56/drop_unused_event_tables.sql new file mode 100644 index 0000000000..9f09922c67 --- /dev/null +++ b/synapse/storage/schema/delta/56/drop_unused_event_tables.sql @@ -0,0 +1,20 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- these tables are never used. +DROP TABLE IF EXISTS room_names; +DROP TABLE IF EXISTS topics; +DROP TABLE IF EXISTS history_visibility; +DROP TABLE IF EXISTS guest_access; diff --git a/synapse/storage/schema/delta/56/fix_room_keys_index.sql b/synapse/storage/schema/delta/56/fix_room_keys_index.sql new file mode 100644 index 0000000000..014cb3b538 --- /dev/null +++ b/synapse/storage/schema/delta/56/fix_room_keys_index.sql @@ -0,0 +1,18 @@ +/* Copyright 2019 Matrix.org Foundation CIC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- version is supposed to be part of the room keys index +CREATE UNIQUE INDEX e2e_room_keys_with_version_idx ON e2e_room_keys(user_id, version, room_id, session_id); +DROP INDEX IF EXISTS e2e_room_keys_idx; diff --git a/synapse/storage/schema/delta/56/redaction_censor.sql b/synapse/storage/schema/delta/56/redaction_censor.sql new file mode 100644 index 0000000000..fe51b02309 --- /dev/null +++ b/synapse/storage/schema/delta/56/redaction_censor.sql @@ -0,0 +1,17 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +ALTER TABLE redactions ADD COLUMN have_censored BOOL NOT NULL DEFAULT false; +CREATE INDEX redactions_have_censored ON redactions(event_id) WHERE not have_censored; diff --git a/synapse/storage/schema/delta/56/stats_separated.sql b/synapse/storage/schema/delta/56/stats_separated.sql new file mode 100644 index 0000000000..163529c071 --- /dev/null +++ b/synapse/storage/schema/delta/56/stats_separated.sql @@ -0,0 +1,152 @@ +/* Copyright 2018 New Vector Ltd + * Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +----- First clean up from previous versions of room stats. + +-- First remove old stats stuff +DROP TABLE IF EXISTS room_stats; +DROP TABLE IF EXISTS room_state; +DROP TABLE IF EXISTS room_stats_state; +DROP TABLE IF EXISTS user_stats; +DROP TABLE IF EXISTS room_stats_earliest_tokens; +DROP TABLE IF EXISTS _temp_populate_stats_position; +DROP TABLE IF EXISTS _temp_populate_stats_rooms; +DROP TABLE IF EXISTS stats_stream_pos; + +-- Unschedule old background updates if they're still scheduled +DELETE FROM background_updates WHERE update_name IN ( + 'populate_stats_createtables', + 'populate_stats_process_rooms', + 'populate_stats_process_users', + 'populate_stats_cleanup' +); + +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_stats_process_rooms', '{}', ''); + +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_stats_process_users', '{}', 'populate_stats_process_rooms'); + +----- Create tables for our version of room stats. + +-- single-row table to track position of incremental updates +DROP TABLE IF EXISTS stats_incremental_position; +CREATE TABLE stats_incremental_position ( + Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row. + stream_id BIGINT NOT NULL, + CHECK (Lock='X') +); + +-- insert a null row and make sure it is the only one. +INSERT INTO stats_incremental_position ( + stream_id +) SELECT COALESCE(MAX(stream_ordering), 0) from events; + +-- represents PRESENT room statistics for a room +-- only holds absolute fields +DROP TABLE IF EXISTS room_stats_current; +CREATE TABLE room_stats_current ( + room_id TEXT NOT NULL PRIMARY KEY, + + -- These are absolute counts + current_state_events INT NOT NULL, + joined_members INT NOT NULL, + invited_members INT NOT NULL, + left_members INT NOT NULL, + banned_members INT NOT NULL, + + local_users_in_room INT NOT NULL, + + -- The maximum delta stream position that this row takes into account. + completed_delta_stream_id BIGINT NOT NULL +); + + +-- represents HISTORICAL room statistics for a room +DROP TABLE IF EXISTS room_stats_historical; +CREATE TABLE room_stats_historical ( + room_id TEXT NOT NULL, + -- These stats cover the time from (end_ts - bucket_size)...end_ts (in ms). + -- Note that end_ts is quantised. 
+ end_ts BIGINT NOT NULL, + bucket_size BIGINT NOT NULL, + + -- These stats are absolute counts + current_state_events BIGINT NOT NULL, + joined_members BIGINT NOT NULL, + invited_members BIGINT NOT NULL, + left_members BIGINT NOT NULL, + banned_members BIGINT NOT NULL, + local_users_in_room BIGINT NOT NULL, + + -- These stats are per time slice + total_events BIGINT NOT NULL, + total_event_bytes BIGINT NOT NULL, + + PRIMARY KEY (room_id, end_ts) +); + +-- We use this index to speed up deletion of ancient room stats. +CREATE INDEX room_stats_historical_end_ts ON room_stats_historical (end_ts); + +-- represents PRESENT statistics for a user +-- only holds absolute fields +DROP TABLE IF EXISTS user_stats_current; +CREATE TABLE user_stats_current ( + user_id TEXT NOT NULL PRIMARY KEY, + + joined_rooms BIGINT NOT NULL, + + -- The maximum delta stream position that this row takes into account. + completed_delta_stream_id BIGINT NOT NULL +); + +-- represents HISTORICAL statistics for a user +DROP TABLE IF EXISTS user_stats_historical; +CREATE TABLE user_stats_historical ( + user_id TEXT NOT NULL, + end_ts BIGINT NOT NULL, + bucket_size BIGINT NOT NULL, + + joined_rooms BIGINT NOT NULL, + + invites_sent BIGINT NOT NULL, + rooms_created BIGINT NOT NULL, + total_events BIGINT NOT NULL, + total_event_bytes BIGINT NOT NULL, + + PRIMARY KEY (user_id, end_ts) +); + +-- We use this index to speed up deletion of ancient user stats. +CREATE INDEX user_stats_historical_end_ts ON user_stats_historical (end_ts); + + +CREATE TABLE room_stats_state ( + room_id TEXT NOT NULL, + name TEXT, + canonical_alias TEXT, + join_rules TEXT, + history_visibility TEXT, + encryption TEXT, + avatar TEXT, + guest_access TEXT, + is_federatable BOOLEAN, + topic TEXT +); + +CREATE UNIQUE INDEX room_stats_state_room ON room_stats_state(room_id); diff --git a/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql b/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql new file mode 100644 index 0000000000..149f8be8b6 --- /dev/null +++ b/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql @@ -0,0 +1,17 @@ +/* Copyright 2019 Matrix.org Foundation CIC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- this was apparently forgotten when the table was created back in delta 53. +CREATE INDEX users_in_public_rooms_r_idx ON users_in_public_rooms(room_id); diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index e13efed417..09190d684e 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2018, 2019 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,17 +15,22 @@ # limitations under the License. 
import logging +from itertools import chain from twisted.internet import defer +from twisted.internet.defer import DeferredLock from synapse.api.constants import EventTypes, Membership -from synapse.storage.prepare_database import get_statements +from synapse.storage import PostgresEngine from synapse.storage.state_deltas import StateDeltasStore from synapse.util.caches.descriptors import cached logger = logging.getLogger(__name__) # these fields track absolutes (e.g. total number of rooms on the server) +# You can think of these as Prometheus Gauges. +# You can draw these stats on a line graph. +# Example: number of users in a room ABSOLUTE_STATS_FIELDS = { "room": ( "current_state_events", @@ -32,14 +38,23 @@ ABSOLUTE_STATS_FIELDS = { "invited_members", "left_members", "banned_members", - "state_events", + "local_users_in_room", ), - "user": ("public_rooms", "private_rooms"), + "user": ("joined_rooms",), } -TYPE_TO_ROOM = {"room": ("room_stats", "room_id"), "user": ("user_stats", "user_id")} +# these fields are per-timeslice and so should be reset to 0 upon a new slice +# You can draw these stats on a histogram. +# Example: number of events sent locally during a time slice +PER_SLICE_FIELDS = { + "room": ("total_events", "total_event_bytes"), + "user": ("invites_sent", "rooms_created", "total_events", "total_event_bytes"), +} + +TYPE_TO_TABLE = {"room": ("room_stats", "room_id"), "user": ("user_stats", "user_id")} -TEMP_TABLE = "_temp_populate_stats" +# these are the tables (& ID columns) which contain our actual subjects +TYPE_TO_ORIGIN_TABLE = {"room": ("rooms", "room_id"), "user": ("users", "name")} class StatsStore(StateDeltasStore): @@ -51,136 +66,102 @@ class StatsStore(StateDeltasStore): self.stats_enabled = hs.config.stats_enabled self.stats_bucket_size = hs.config.stats_bucket_size - self.register_background_update_handler( - "populate_stats_createtables", self._populate_stats_createtables - ) + self.stats_delta_processing_lock = DeferredLock() + self.register_background_update_handler( "populate_stats_process_rooms", self._populate_stats_process_rooms ) self.register_background_update_handler( - "populate_stats_cleanup", self._populate_stats_cleanup + "populate_stats_process_users", self._populate_stats_process_users ) + # we no longer need to perform clean-up, but we will give ourselves + # the potential to reintroduce it in the future – so documentation + # will still encourage the use of this no-op handler. + self.register_noop_background_update("populate_stats_cleanup") + self.register_noop_background_update("populate_stats_prepare") - @defer.inlineCallbacks - def _populate_stats_createtables(self, progress, batch_size): - - if not self.stats_enabled: - yield self._end_background_update("populate_stats_createtables") - return 1 - - # Get all the rooms that we want to process. - def _make_staging_area(txn): - # Create the temporary tables - stmts = get_statements( - """ - -- We just recreate the table, we'll be reinserting the - -- correct entries again later anyway. 
- DROP TABLE IF EXISTS {temp}_rooms; - - CREATE TABLE IF NOT EXISTS {temp}_rooms( - room_id TEXT NOT NULL, - events BIGINT NOT NULL - ); - - CREATE INDEX {temp}_rooms_events - ON {temp}_rooms(events); - CREATE INDEX {temp}_rooms_id - ON {temp}_rooms(room_id); - """.format( - temp=TEMP_TABLE - ).splitlines() - ) - - for statement in stmts: - txn.execute(statement) - - sql = ( - "CREATE TABLE IF NOT EXISTS " - + TEMP_TABLE - + "_position(position TEXT NOT NULL)" - ) - txn.execute(sql) - - # Get rooms we want to process from the database, only adding - # those that we haven't (i.e. those not in room_stats_earliest_token) - sql = """ - INSERT INTO %s_rooms (room_id, events) - SELECT c.room_id, count(*) FROM current_state_events AS c - LEFT JOIN room_stats_earliest_token AS t USING (room_id) - WHERE t.room_id IS NULL - GROUP BY c.room_id - """ % ( - TEMP_TABLE, - ) - txn.execute(sql) + def quantise_stats_time(self, ts): + """ + Quantises a timestamp to be a multiple of the bucket size. - new_pos = yield self.get_max_stream_id_in_current_state_deltas() - yield self.runInteraction("populate_stats_temp_build", _make_staging_area) - yield self._simple_insert(TEMP_TABLE + "_position", {"position": new_pos}) - self.get_earliest_token_for_room_stats.invalidate_all() + Args: + ts (int): the timestamp to quantise, in milliseconds since the Unix + Epoch - yield self._end_background_update("populate_stats_createtables") - return 1 + Returns: + int: a timestamp which + - is divisible by the bucket size; + - is no later than `ts`; and + - is the largest such timestamp. + """ + return (ts // self.stats_bucket_size) * self.stats_bucket_size @defer.inlineCallbacks - def _populate_stats_cleanup(self, progress, batch_size): + def _populate_stats_process_users(self, progress, batch_size): """ - Update the user directory stream position, then clean up the old tables. + This is a background update which regenerates statistics for users. """ if not self.stats_enabled: - yield self._end_background_update("populate_stats_cleanup") + yield self._end_background_update("populate_stats_process_users") return 1 - position = yield self._simple_select_one_onecol( - TEMP_TABLE + "_position", None, "position" + last_user_id = progress.get("last_user_id", "") + + def _get_next_batch(txn): + sql = """ + SELECT DISTINCT name FROM users + WHERE name > ? + ORDER BY name ASC + LIMIT ? + """ + txn.execute(sql, (last_user_id, batch_size)) + return [r for r, in txn] + + users_to_work_on = yield self.runInteraction( + "_populate_stats_process_users", _get_next_batch ) - yield self.update_stats_stream_pos(position) - def _delete_staging_area(txn): - txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_rooms") - txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_position") + # No more rooms -- complete the transaction. 
+ if not users_to_work_on: + yield self._end_background_update("populate_stats_process_users") + return 1 - yield self.runInteraction("populate_stats_cleanup", _delete_staging_area) + for user_id in users_to_work_on: + yield self._calculate_and_set_initial_state_for_user(user_id) + progress["last_user_id"] = user_id - yield self._end_background_update("populate_stats_cleanup") - return 1 + yield self.runInteraction( + "populate_stats_process_users", + self._background_update_progress_txn, + "populate_stats_process_users", + progress, + ) + + return len(users_to_work_on) @defer.inlineCallbacks def _populate_stats_process_rooms(self, progress, batch_size): - + """ + This is a background update which regenerates statistics for rooms. + """ if not self.stats_enabled: yield self._end_background_update("populate_stats_process_rooms") return 1 - # If we don't have progress filed, delete everything. - if not progress: - yield self.delete_all_stats() + last_room_id = progress.get("last_room_id", "") def _get_next_batch(txn): - # Only fetch 250 rooms, so we don't fetch too many at once, even - # if those 250 rooms have less than batch_size state events. sql = """ - SELECT room_id, events FROM %s_rooms - ORDER BY events DESC - LIMIT 250 - """ % ( - TEMP_TABLE, - ) - txn.execute(sql) - rooms_to_work_on = txn.fetchall() - - if not rooms_to_work_on: - return None - - # Get how many are left to process, so we can give status on how - # far we are in processing - txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms") - progress["remaining"] = txn.fetchone()[0] - - return rooms_to_work_on + SELECT DISTINCT room_id FROM current_state_events + WHERE room_id > ? + ORDER BY room_id ASC + LIMIT ? + """ + txn.execute(sql, (last_room_id, batch_size)) + return [r for r, in txn] rooms_to_work_on = yield self.runInteraction( - "populate_stats_temp_read", _get_next_batch + "populate_stats_rooms_get_batch", _get_next_batch ) # No more rooms -- complete the transaction. 
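Both `_populate_stats_process_users` and `_populate_stats_process_rooms` above walk their tables with keyset pagination: each run selects the next `batch_size` names or room IDs strictly greater than the `last_user_id` / `last_room_id` stored in the background-update progress dict, rather than paging with an OFFSET. The following is a minimal, self-contained sketch of that pattern, assuming an in-memory SQLite table; the table contents and helper names are illustrative only and are not part of this commit.

    # Illustrative sketch of the keyset-pagination loop used by these
    # background updates (table contents and helper names are hypothetical,
    # not code from this commit).
    import sqlite3

    def process_users_in_batches(conn, batch_size=2):
        last_user_id = ""          # analogous to progress["last_user_id"]
        while True:
            rows = conn.execute(
                "SELECT DISTINCT name FROM users"
                " WHERE name > ? ORDER BY name ASC LIMIT ?",
                (last_user_id, batch_size),
            ).fetchall()
            if not rows:           # no more users: the update is complete
                break
            for (user_id,) in rows:
                # stand-in for the per-user stats recalculation
                print("recalculating stats for", user_id)
                last_user_id = user_id   # persisted as the resume point

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE users (name TEXT PRIMARY KEY)")
    conn.executemany(
        "INSERT INTO users VALUES (?)", [("@a:hs",), ("@b:hs",), ("@c:hs",)]
    )
    process_users_in_batches(conn)

Because each batch returns at most `batch_size` rows and the resume point is written back to the progress dict, the update yields control regularly and can pick up where it left off after a restart.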
@@ -188,154 +169,28 @@ class StatsStore(StateDeltasStore): yield self._end_background_update("populate_stats_process_rooms") return 1 - logger.info( - "Processing the next %d rooms of %d remaining", - len(rooms_to_work_on), - progress["remaining"], - ) - - # Number of state events we've processed by going through each room - processed_event_count = 0 - - for room_id, event_count in rooms_to_work_on: - - current_state_ids = yield self.get_current_state_ids(room_id) - - join_rules_id = current_state_ids.get((EventTypes.JoinRules, "")) - history_visibility_id = current_state_ids.get( - (EventTypes.RoomHistoryVisibility, "") - ) - encryption_id = current_state_ids.get((EventTypes.RoomEncryption, "")) - name_id = current_state_ids.get((EventTypes.Name, "")) - topic_id = current_state_ids.get((EventTypes.Topic, "")) - avatar_id = current_state_ids.get((EventTypes.RoomAvatar, "")) - canonical_alias_id = current_state_ids.get((EventTypes.CanonicalAlias, "")) - - event_ids = [ - join_rules_id, - history_visibility_id, - encryption_id, - name_id, - topic_id, - avatar_id, - canonical_alias_id, - ] - - state_events = yield self.get_events( - [ev for ev in event_ids if ev is not None] - ) - - def _get_or_none(event_id, arg): - event = state_events.get(event_id) - if event: - return event.content.get(arg) - return None - - yield self.update_room_state( - room_id, - { - "join_rules": _get_or_none(join_rules_id, "join_rule"), - "history_visibility": _get_or_none( - history_visibility_id, "history_visibility" - ), - "encryption": _get_or_none(encryption_id, "algorithm"), - "name": _get_or_none(name_id, "name"), - "topic": _get_or_none(topic_id, "topic"), - "avatar": _get_or_none(avatar_id, "url"), - "canonical_alias": _get_or_none(canonical_alias_id, "alias"), - }, - ) - - now = self.hs.get_reactor().seconds() - - # quantise time to the nearest bucket - now = (now // self.stats_bucket_size) * self.stats_bucket_size - - def _fetch_data(txn): - - # Get the current token of the room - current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn) - - current_state_events = len(current_state_ids) - - membership_counts = self._get_user_counts_in_room_txn(txn, room_id) - - total_state_events = self._get_total_state_event_counts_txn( - txn, room_id - ) - - self._update_stats_txn( - txn, - "room", - room_id, - now, - { - "bucket_size": self.stats_bucket_size, - "current_state_events": current_state_events, - "joined_members": membership_counts.get(Membership.JOIN, 0), - "invited_members": membership_counts.get(Membership.INVITE, 0), - "left_members": membership_counts.get(Membership.LEAVE, 0), - "banned_members": membership_counts.get(Membership.BAN, 0), - "state_events": total_state_events, - }, - ) - self._simple_insert_txn( - txn, - "room_stats_earliest_token", - {"room_id": room_id, "token": current_token}, - ) - - # We've finished a room. Delete it from the table. - self._simple_delete_one_txn( - txn, TEMP_TABLE + "_rooms", {"room_id": room_id} - ) + for room_id in rooms_to_work_on: + yield self._calculate_and_set_initial_state_for_room(room_id) + progress["last_room_id"] = room_id - yield self.runInteraction("update_room_stats", _fetch_data) - - # Update the remaining counter. - progress["remaining"] -= 1 - yield self.runInteraction( - "populate_stats", - self._background_update_progress_txn, - "populate_stats_process_rooms", - progress, - ) - - processed_event_count += event_count - - if processed_event_count > batch_size: - # Don't process any more rooms, we've hit our batch size. 
- return processed_event_count + yield self.runInteraction( + "_populate_stats_process_rooms", + self._background_update_progress_txn, + "populate_stats_process_rooms", + progress, + ) - return processed_event_count + return len(rooms_to_work_on) - def delete_all_stats(self): + def get_stats_positions(self): """ - Delete all statistics records. + Returns the stats processor positions. """ - - def _delete_all_stats_txn(txn): - txn.execute("DELETE FROM room_state") - txn.execute("DELETE FROM room_stats") - txn.execute("DELETE FROM room_stats_earliest_token") - txn.execute("DELETE FROM user_stats") - - return self.runInteraction("delete_all_stats", _delete_all_stats_txn) - - def get_stats_stream_pos(self): return self._simple_select_one_onecol( - table="stats_stream_pos", + table="stats_incremental_position", keyvalues={}, retcol="stream_id", - desc="stats_stream_pos", - ) - - def update_stats_stream_pos(self, stream_id): - return self._simple_update_one( - table="stats_stream_pos", - keyvalues={}, - updatevalues={"stream_id": stream_id}, - desc="update_stats_stream_pos", + desc="stats_incremental_position", ) def update_room_state(self, room_id, fields): @@ -361,42 +216,87 @@ class StatsStore(StateDeltasStore): fields[col] = None return self._simple_upsert( - table="room_state", + table="room_stats_state", keyvalues={"room_id": room_id}, values=fields, desc="update_room_state", ) - def get_deltas_for_room(self, room_id, start, size=100): + def get_statistics_for_subject(self, stats_type, stats_id, start, size=100): """ - Get statistics deltas for a given room. + Get statistics for a given subject. Args: - room_id (str) + stats_type (str): The type of subject + stats_id (str): The ID of the subject (e.g. room_id or user_id) start (int): Pagination start. Number of entries, not timestamp. size (int): How many entries to return. Returns: Deferred[list[dict]], where the dict has the keys of - ABSOLUTE_STATS_FIELDS["room"] and "ts". + ABSOLUTE_STATS_FIELDS[stats_type], and "bucket_size" and "end_ts". """ - return self._simple_select_list_paginate( - "room_stats", - {"room_id": room_id}, - "ts", + return self.runInteraction( + "get_statistics_for_subject", + self._get_statistics_for_subject_txn, + stats_type, + stats_id, start, size, - retcols=(list(ABSOLUTE_STATS_FIELDS["room"]) + ["ts"]), + ) + + def _get_statistics_for_subject_txn( + self, txn, stats_type, stats_id, start, size=100 + ): + """ + Transaction-bound version of L{get_statistics_for_subject}. + """ + + table, id_col = TYPE_TO_TABLE[stats_type] + selected_columns = list( + ABSOLUTE_STATS_FIELDS[stats_type] + PER_SLICE_FIELDS[stats_type] + ) + + slice_list = self._simple_select_list_paginate_txn( + txn, + table + "_historical", + {id_col: stats_id}, + "end_ts", + start, + size, + retcols=selected_columns + ["bucket_size", "end_ts"], order_direction="DESC", ) - def get_all_room_state(self): - return self._simple_select_list( - "room_state", None, retcols=("name", "topic", "canonical_alias") + return slice_list + + def get_room_stats_state(self, room_id): + """ + Returns the current room_stats_state for a room. + + Args: + room_id (str): The ID of the room to return state for. 
+ + Returns (dict): + Dictionary containing these keys: + "name", "topic", "canonical_alias", "avatar", "join_rules", + "history_visibility" + """ + return self._simple_select_one( + "room_stats_state", + {"room_id": room_id}, + retcols=( + "name", + "topic", + "canonical_alias", + "avatar", + "join_rules", + "history_visibility", + ), ) @cached() - def get_earliest_token_for_room_stats(self, room_id): + def get_earliest_token_for_stats(self, stats_type, id): """ Fetch the "earliest token". This is used by the room stats delta processor to ignore deltas that have been processed between the @@ -406,79 +306,573 @@ class StatsStore(StateDeltasStore): Returns: Deferred[int] """ + table, id_col = TYPE_TO_TABLE[stats_type] + return self._simple_select_one_onecol( - "room_stats_earliest_token", - {"room_id": room_id}, - retcol="token", + "%s_current" % (table,), + keyvalues={id_col: id}, + retcol="completed_delta_stream_id", allow_none=True, ) - def update_stats(self, stats_type, stats_id, ts, fields): - table, id_col = TYPE_TO_ROOM[stats_type] - return self._simple_upsert( - table=table, - keyvalues={id_col: stats_id, "ts": ts}, - values=fields, - desc="update_stats", + def bulk_update_stats_delta(self, ts, updates, stream_id): + """Bulk update stats tables for a given stream_id and updates the stats + incremental position. + + Args: + ts (int): Current timestamp in ms + updates(dict[str, dict[str, dict[str, Counter]]]): The updates to + commit as a mapping stats_type -> stats_id -> field -> delta. + stream_id (int): Current position. + + Returns: + Deferred + """ + + def _bulk_update_stats_delta_txn(txn): + for stats_type, stats_updates in updates.items(): + for stats_id, fields in stats_updates.items(): + self._update_stats_delta_txn( + txn, + ts=ts, + stats_type=stats_type, + stats_id=stats_id, + fields=fields, + complete_with_stream_id=stream_id, + ) + + self._simple_update_one_txn( + txn, + table="stats_incremental_position", + keyvalues={}, + updatevalues={"stream_id": stream_id}, + ) + + return self.runInteraction( + "bulk_update_stats_delta", _bulk_update_stats_delta_txn ) - def _update_stats_txn(self, txn, stats_type, stats_id, ts, fields): - table, id_col = TYPE_TO_ROOM[stats_type] - return self._simple_upsert_txn( - txn, table=table, keyvalues={id_col: stats_id, "ts": ts}, values=fields + def update_stats_delta( + self, + ts, + stats_type, + stats_id, + fields, + complete_with_stream_id, + absolute_field_overrides=None, + ): + """ + Updates the statistics for a subject, with a delta (difference/relative + change). + + Args: + ts (int): timestamp of the change + stats_type (str): "room" or "user" – the kind of subject + stats_id (str): the subject's ID (room ID or user ID) + fields (dict[str, int]): Deltas of stats values. + complete_with_stream_id (int, optional): + If supplied, converts an incomplete row into a complete row, + with the supplied stream_id marked as the stream_id where the + row was completed. + absolute_field_overrides (dict[str, int]): Current stats values + (i.e. not deltas) of absolute fields. + Does not work with per-slice fields. + """ + + return self.runInteraction( + "update_stats_delta", + self._update_stats_delta_txn, + ts, + stats_type, + stats_id, + fields, + complete_with_stream_id=complete_with_stream_id, + absolute_field_overrides=absolute_field_overrides, ) - def update_stats_delta(self, ts, stats_type, stats_id, field, value): - def _update_stats_delta(txn): - table, id_col = TYPE_TO_ROOM[stats_type] - - sql = ( - "SELECT * FROM %s" - " WHERE %s=? 
and ts=(" - " SELECT MAX(ts) FROM %s" - " WHERE %s=?" - ")" - ) % (table, id_col, table, id_col) - txn.execute(sql, (stats_id, stats_id)) - rows = self.cursor_to_dict(txn) - if len(rows) == 0: - # silently skip as we don't have anything to apply a delta to yet. - # this tries to minimise any race between the initial sync and - # subsequent deltas arriving. - return - - current_ts = ts - latest_ts = rows[0]["ts"] - if current_ts < latest_ts: - # This one is in the past, but we're just encountering it now. - # Mark it as part of the current bucket. - current_ts = latest_ts - elif ts != latest_ts: - # we have to copy our absolute counters over to the new entry. - values = { - key: rows[0][key] for key in ABSOLUTE_STATS_FIELDS[stats_type] - } - values[id_col] = stats_id - values["ts"] = ts - values["bucket_size"] = self.stats_bucket_size - - self._simple_insert_txn(txn, table=table, values=values) - - # actually update the new value - if stats_type in ABSOLUTE_STATS_FIELDS[stats_type]: - self._simple_update_txn( - txn, - table=table, - keyvalues={id_col: stats_id, "ts": current_ts}, - updatevalues={field: value}, + def _update_stats_delta_txn( + self, + txn, + ts, + stats_type, + stats_id, + fields, + complete_with_stream_id, + absolute_field_overrides=None, + ): + if absolute_field_overrides is None: + absolute_field_overrides = {} + + table, id_col = TYPE_TO_TABLE[stats_type] + + quantised_ts = self.quantise_stats_time(int(ts)) + end_ts = quantised_ts + self.stats_bucket_size + + # Lets be paranoid and check that all the given field names are known + abs_field_names = ABSOLUTE_STATS_FIELDS[stats_type] + slice_field_names = PER_SLICE_FIELDS[stats_type] + for field in chain(fields.keys(), absolute_field_overrides.keys()): + if field not in abs_field_names and field not in slice_field_names: + # guard against potential SQL injection dodginess + raise ValueError( + "%s is not a recognised field" + " for stats type %s" % (field, stats_type) ) + + # Per slice fields do not get added to the _current table + + # This calculates the deltas (`field = field + ?` values) + # for absolute fields, + # * defaulting to 0 if not specified + # (required for the INSERT part of upserting to work) + # * omitting overrides specified in `absolute_field_overrides` + deltas_of_absolute_fields = { + key: fields.get(key, 0) + for key in abs_field_names + if key not in absolute_field_overrides + } + + # Keep the delta stream ID field up to date + absolute_field_overrides = absolute_field_overrides.copy() + absolute_field_overrides["completed_delta_stream_id"] = complete_with_stream_id + + # first upsert the `_current` table + self._upsert_with_additive_relatives_txn( + txn=txn, + table=table + "_current", + keyvalues={id_col: stats_id}, + absolutes=absolute_field_overrides, + additive_relatives=deltas_of_absolute_fields, + ) + + per_slice_additive_relatives = { + key: fields.get(key, 0) for key in slice_field_names + } + self._upsert_copy_from_table_with_additive_relatives_txn( + txn=txn, + into_table=table + "_historical", + keyvalues={id_col: stats_id}, + extra_dst_insvalues={"bucket_size": self.stats_bucket_size}, + extra_dst_keyvalues={"end_ts": end_ts}, + additive_relatives=per_slice_additive_relatives, + src_table=table + "_current", + copy_columns=abs_field_names, + ) + + def _upsert_with_additive_relatives_txn( + self, txn, table, keyvalues, absolutes, additive_relatives + ): + """Used to update values in the stats tables. + + This is basically a slightly convoluted upsert that *adds* to any + existing rows. 
+ + Args: + txn + table (str): Table name + keyvalues (dict[str, any]): Row-identifying key values + absolutes (dict[str, any]): Absolute (set) fields + additive_relatives (dict[str, int]): Fields that will be added onto + if existing row present. + """ + if self.database_engine.can_native_upsert: + absolute_updates = [ + "%(field)s = EXCLUDED.%(field)s" % {"field": field} + for field in absolutes.keys() + ] + + relative_updates = [ + "%(field)s = EXCLUDED.%(field)s + %(table)s.%(field)s" + % {"table": table, "field": field} + for field in additive_relatives.keys() + ] + + insert_cols = [] + qargs = [] + + for (key, val) in chain( + keyvalues.items(), absolutes.items(), additive_relatives.items() + ): + insert_cols.append(key) + qargs.append(val) + + sql = """ + INSERT INTO %(table)s (%(insert_cols_cs)s) + VALUES (%(insert_vals_qs)s) + ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s + """ % { + "table": table, + "insert_cols_cs": ", ".join(insert_cols), + "insert_vals_qs": ", ".join( + ["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives)) + ), + "key_columns": ", ".join(keyvalues), + "updates": ", ".join(chain(absolute_updates, relative_updates)), + } + + txn.execute(sql, qargs) + else: + self.database_engine.lock_table(txn, table) + retcols = list(chain(absolutes.keys(), additive_relatives.keys())) + current_row = self._simple_select_one_txn( + txn, table, keyvalues, retcols, allow_none=True + ) + if current_row is None: + merged_dict = {**keyvalues, **absolutes, **additive_relatives} + self._simple_insert_txn(txn, table, merged_dict) else: - sql = ("UPDATE %s SET %s=%s+? WHERE %s=? AND ts=?") % ( - table, - field, - field, - id_col, + for (key, val) in additive_relatives.items(): + current_row[key] += val + current_row.update(absolutes) + self._simple_update_one_txn(txn, table, keyvalues, current_row) + + def _upsert_copy_from_table_with_additive_relatives_txn( + self, + txn, + into_table, + keyvalues, + extra_dst_keyvalues, + extra_dst_insvalues, + additive_relatives, + src_table, + copy_columns, + ): + """Updates the historic stats table with latest updates. + + This involves copying "absolute" fields from the `_current` table, and + adding relative fields to any existing values. + + Args: + txn: Transaction + into_table (str): The destination table to UPSERT the row into + keyvalues (dict[str, any]): Row-identifying key values + extra_dst_keyvalues (dict[str, any]): Additional keyvalues + for `into_table`. + extra_dst_insvalues (dict[str, any]): Additional values to insert + on new row creation for `into_table`. + additive_relatives (dict[str, any]): Fields that will be added onto + if existing row present. (Must be disjoint from copy_columns.) + src_table (str): The source table to copy from + copy_columns (iterable[str]): The list of columns to copy + """ + if self.database_engine.can_native_upsert: + ins_columns = chain( + keyvalues, + copy_columns, + additive_relatives, + extra_dst_keyvalues, + extra_dst_insvalues, + ) + sel_exprs = chain( + keyvalues, + copy_columns, + ( + "?" + for _ in chain( + additive_relatives, extra_dst_keyvalues, extra_dst_insvalues + ) + ), + ) + keyvalues_where = ("%s = ?" 
% f for f in keyvalues) + + sets_cc = ("%s = EXCLUDED.%s" % (f, f) for f in copy_columns) + sets_ar = ( + "%s = EXCLUDED.%s + %s.%s" % (f, f, into_table, f) + for f in additive_relatives + ) + + sql = """ + INSERT INTO %(into_table)s (%(ins_columns)s) + SELECT %(sel_exprs)s + FROM %(src_table)s + WHERE %(keyvalues_where)s + ON CONFLICT (%(keyvalues)s) + DO UPDATE SET %(sets)s + """ % { + "into_table": into_table, + "ins_columns": ", ".join(ins_columns), + "sel_exprs": ", ".join(sel_exprs), + "keyvalues_where": " AND ".join(keyvalues_where), + "src_table": src_table, + "keyvalues": ", ".join( + chain(keyvalues.keys(), extra_dst_keyvalues.keys()) + ), + "sets": ", ".join(chain(sets_cc, sets_ar)), + } + + qargs = list( + chain( + additive_relatives.values(), + extra_dst_keyvalues.values(), + extra_dst_insvalues.values(), + keyvalues.values(), ) - txn.execute(sql, (value, stats_id, current_ts)) + ) + txn.execute(sql, qargs) + else: + self.database_engine.lock_table(txn, into_table) + src_row = self._simple_select_one_txn( + txn, src_table, keyvalues, copy_columns + ) + all_dest_keyvalues = {**keyvalues, **extra_dst_keyvalues} + dest_current_row = self._simple_select_one_txn( + txn, + into_table, + keyvalues=all_dest_keyvalues, + retcols=list(chain(additive_relatives.keys(), copy_columns)), + allow_none=True, + ) + + if dest_current_row is None: + merged_dict = { + **keyvalues, + **extra_dst_keyvalues, + **extra_dst_insvalues, + **src_row, + **additive_relatives, + } + self._simple_insert_txn(txn, into_table, merged_dict) + else: + for (key, val) in additive_relatives.items(): + src_row[key] = dest_current_row[key] + val + self._simple_update_txn(txn, into_table, all_dest_keyvalues, src_row) + + def get_changes_room_total_events_and_bytes(self, min_pos, max_pos): + """Fetches the counts of events in the given range of stream IDs. + + Args: + min_pos (int) + max_pos (int) + + Returns: + Deferred[dict[str, dict[str, int]]]: Mapping of room ID to field + changes. + """ + + return self.runInteraction( + "stats_incremental_total_events_and_bytes", + self.get_changes_room_total_events_and_bytes_txn, + min_pos, + max_pos, + ) - return self.runInteraction("update_stats_delta", _update_stats_delta) + def get_changes_room_total_events_and_bytes_txn(self, txn, low_pos, high_pos): + """Gets the total_events and total_event_bytes counts for rooms and + senders, in a range of stream_orderings (including backfilled events). + + Args: + txn + low_pos (int): Low stream ordering + high_pos (int): High stream ordering + + Returns: + tuple[dict[str, dict[str, int]], dict[str, dict[str, int]]]: The + room and user deltas for total_events/total_event_bytes in the + format of `stats_id` -> fields + """ + + if low_pos >= high_pos: + # nothing to do here. + return {}, {} + + if isinstance(self.database_engine, PostgresEngine): + new_bytes_expression = "OCTET_LENGTH(json)" + else: + new_bytes_expression = "LENGTH(CAST(json AS BLOB))" + + sql = """ + SELECT events.room_id, COUNT(*) AS new_events, SUM(%s) AS new_bytes + FROM events INNER JOIN event_json USING (event_id) + WHERE (? < stream_ordering AND stream_ordering <= ?) + OR (? <= stream_ordering AND stream_ordering <= ?) 
+ GROUP BY events.room_id + """ % ( + new_bytes_expression, + ) + + txn.execute(sql, (low_pos, high_pos, -high_pos, -low_pos)) + + room_deltas = { + room_id: {"total_events": new_events, "total_event_bytes": new_bytes} + for room_id, new_events, new_bytes in txn + } + + sql = """ + SELECT events.sender, COUNT(*) AS new_events, SUM(%s) AS new_bytes + FROM events INNER JOIN event_json USING (event_id) + WHERE (? < stream_ordering AND stream_ordering <= ?) + OR (? <= stream_ordering AND stream_ordering <= ?) + GROUP BY events.sender + """ % ( + new_bytes_expression, + ) + + txn.execute(sql, (low_pos, high_pos, -high_pos, -low_pos)) + + user_deltas = { + user_id: {"total_events": new_events, "total_event_bytes": new_bytes} + for user_id, new_events, new_bytes in txn + if self.hs.is_mine_id(user_id) + } + + return room_deltas, user_deltas + + @defer.inlineCallbacks + def _calculate_and_set_initial_state_for_room(self, room_id): + """Calculate and insert an entry into room_stats_current. + + Args: + room_id (str) + + Returns: + Deferred[tuple[dict, dict, int]]: A tuple of room state, membership + counts and stream position. + """ + + def _fetch_current_state_stats(txn): + pos = self.get_room_max_stream_ordering() + + rows = self._simple_select_many_txn( + txn, + table="current_state_events", + column="type", + iterable=[ + EventTypes.Create, + EventTypes.JoinRules, + EventTypes.RoomHistoryVisibility, + EventTypes.Encryption, + EventTypes.Name, + EventTypes.Topic, + EventTypes.RoomAvatar, + EventTypes.CanonicalAlias, + ], + keyvalues={"room_id": room_id, "state_key": ""}, + retcols=["event_id"], + ) + + event_ids = [row["event_id"] for row in rows] + + txn.execute( + """ + SELECT membership, count(*) FROM current_state_events + WHERE room_id = ? AND type = 'm.room.member' + GROUP BY membership + """, + (room_id,), + ) + membership_counts = {membership: cnt for membership, cnt in txn} + + txn.execute( + """ + SELECT COALESCE(count(*), 0) FROM current_state_events + WHERE room_id = ? 
+ """, + (room_id,), + ) + + current_state_events_count, = txn.fetchone() + + users_in_room = self.get_users_in_room_txn(txn, room_id) + + return ( + event_ids, + membership_counts, + current_state_events_count, + users_in_room, + pos, + ) + + ( + event_ids, + membership_counts, + current_state_events_count, + users_in_room, + pos, + ) = yield self.runInteraction( + "get_initial_state_for_room", _fetch_current_state_stats + ) + + state_event_map = yield self.get_events(event_ids, get_prev_content=False) + + room_state = { + "join_rules": None, + "history_visibility": None, + "encryption": None, + "name": None, + "topic": None, + "avatar": None, + "canonical_alias": None, + "is_federatable": True, + } + + for event in state_event_map.values(): + if event.type == EventTypes.JoinRules: + room_state["join_rules"] = event.content.get("join_rule") + elif event.type == EventTypes.RoomHistoryVisibility: + room_state["history_visibility"] = event.content.get( + "history_visibility" + ) + elif event.type == EventTypes.Encryption: + room_state["encryption"] = event.content.get("algorithm") + elif event.type == EventTypes.Name: + room_state["name"] = event.content.get("name") + elif event.type == EventTypes.Topic: + room_state["topic"] = event.content.get("topic") + elif event.type == EventTypes.RoomAvatar: + room_state["avatar"] = event.content.get("url") + elif event.type == EventTypes.CanonicalAlias: + room_state["canonical_alias"] = event.content.get("alias") + elif event.type == EventTypes.Create: + room_state["is_federatable"] = ( + event.content.get("m.federate", True) is True + ) + + yield self.update_room_state(room_id, room_state) + + local_users_in_room = [u for u in users_in_room if self.hs.is_mine_id(u)] + + yield self.update_stats_delta( + ts=self.clock.time_msec(), + stats_type="room", + stats_id=room_id, + fields={}, + complete_with_stream_id=pos, + absolute_field_overrides={ + "current_state_events": current_state_events_count, + "joined_members": membership_counts.get(Membership.JOIN, 0), + "invited_members": membership_counts.get(Membership.INVITE, 0), + "left_members": membership_counts.get(Membership.LEAVE, 0), + "banned_members": membership_counts.get(Membership.BAN, 0), + "local_users_in_room": len(local_users_in_room), + }, + ) + + @defer.inlineCallbacks + def _calculate_and_set_initial_state_for_user(self, user_id): + def _calculate_and_set_initial_state_for_user_txn(txn): + pos = self._get_max_stream_id_in_current_state_deltas_txn(txn) + + txn.execute( + """ + SELECT COUNT(distinct room_id) FROM current_state_events + WHERE type = 'm.room.member' AND state_key = ? + AND membership = 'join' + """, + (user_id,), + ) + count, = txn.fetchone() + return count, pos + + joined_rooms, pos = yield self.runInteraction( + "calculate_and_set_initial_state_for_user", + _calculate_and_set_initial_state_for_user_txn, + ) + + yield self.update_stats_delta( + ts=self.clock.time_msec(), + stats_type="user", + stats_id=user_id, + fields={}, + complete_with_stream_id=pos, + absolute_field_overrides={"joined_rooms": joined_rooms}, + ) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 856c2ee8d8..490454f19a 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -364,7 +364,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): the chunk of events returned. 
""" if from_key == to_key: - return ([], from_key) + return [], from_key from_id = RoomStreamToken.parse_stream_token(from_key).stream to_id = RoomStreamToken.parse_stream_token(to_key).stream @@ -374,7 +374,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) if not has_changed: - return ([], from_key) + return [], from_key def f(txn): sql = ( @@ -407,7 +407,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # get. key = from_key - return (ret, key) + return ret, key @defer.inlineCallbacks def get_membership_changes_for_user(self, user_id, from_key, to_key): @@ -496,7 +496,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): """ # Allow a zero limit here, and no-op. if limit == 0: - return ([], end_token) + return [], end_token end_token = RoomStreamToken.parse(end_token) @@ -511,7 +511,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # We want to return the results in ascending order. rows.reverse() - return (rows, token) + return rows, token def get_room_event_after_stream_ordering(self, room_id, stream_ordering): """Gets details of the first event in a room at or after a stream ordering @@ -783,7 +783,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): events = yield self.get_events_as_list(event_ids) - return (upper_bound, events) + return upper_bound, events def get_federation_out_pos(self, typ): return self._simple_select_one_onecol( diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index b3c3bf55bc..289c117396 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -165,7 +165,7 @@ class TransactionStore(SQLBaseStore): txn, table="destinations", keyvalues={"destination": destination}, - retcols=("destination", "retry_last_ts", "retry_interval"), + retcols=("destination", "failure_ts", "retry_last_ts", "retry_interval"), allow_none=True, ) @@ -174,12 +174,15 @@ class TransactionStore(SQLBaseStore): else: return None - def set_destination_retry_timings(self, destination, retry_last_ts, retry_interval): + def set_destination_retry_timings( + self, destination, failure_ts, retry_last_ts, retry_interval + ): """Sets the current retry timings for a given destination. Both timings should be zero if retrying is no longer occuring. Args: destination (str) + failure_ts (int|None) - when the server started failing (ms since epoch) retry_last_ts (int) - time of last retry attempt in unix epoch ms retry_interval (int) - how long until next retry in ms """ @@ -189,12 +192,13 @@ class TransactionStore(SQLBaseStore): "set_destination_retry_timings", self._set_destination_retry_timings, destination, + failure_ts, retry_last_ts, retry_interval, ) def _set_destination_retry_timings( - self, txn, destination, retry_last_ts, retry_interval + self, txn, destination, failure_ts, retry_last_ts, retry_interval ): if self.database_engine.can_native_upsert: @@ -202,9 +206,12 @@ class TransactionStore(SQLBaseStore): # resetting it) or greater than the existing retry interval. sql = """ - INSERT INTO destinations (destination, retry_last_ts, retry_interval) - VALUES (?, ?, ?) + INSERT INTO destinations ( + destination, failure_ts, retry_last_ts, retry_interval + ) + VALUES (?, ?, ?, ?) 
ON CONFLICT (destination) DO UPDATE SET + failure_ts = EXCLUDED.failure_ts, retry_last_ts = EXCLUDED.retry_last_ts, retry_interval = EXCLUDED.retry_interval WHERE @@ -212,7 +219,7 @@ class TransactionStore(SQLBaseStore): OR destinations.retry_interval < EXCLUDED.retry_interval """ - txn.execute(sql, (destination, retry_last_ts, retry_interval)) + txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval)) return @@ -225,7 +232,7 @@ class TransactionStore(SQLBaseStore): txn, table="destinations", keyvalues={"destination": destination}, - retcols=("retry_last_ts", "retry_interval"), + retcols=("failure_ts", "retry_last_ts", "retry_interval"), allow_none=True, ) @@ -235,6 +242,7 @@ class TransactionStore(SQLBaseStore): table="destinations", values={ "destination": destination, + "failure_ts": failure_ts, "retry_last_ts": retry_last_ts, "retry_interval": retry_interval, }, @@ -245,31 +253,12 @@ class TransactionStore(SQLBaseStore): "destinations", keyvalues={"destination": destination}, updatevalues={ + "failure_ts": failure_ts, "retry_last_ts": retry_last_ts, "retry_interval": retry_interval, }, ) - def get_destinations_needing_retry(self): - """Get all destinations which are due a retry for sending a transaction. - - Returns: - list: A list of dicts - """ - - return self.runInteraction( - "get_destinations_needing_retry", self._get_destinations_needing_retry - ) - - def _get_destinations_needing_retry(self, txn): - query = ( - "SELECT * FROM destinations" - " WHERE retry_last_ts > 0 and retry_next_ts < ?" - ) - - txn.execute(query, (self._clock.time_msec(),)) - return self.cursor_to_dict(txn) - def _start_cleanup_transactions(self): return run_as_background_process( "cleanup_transactions", self._cleanup_transactions diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index f1c8d99419..cbb0a4810a 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -195,6 +195,6 @@ class ChainedIdGenerator(object): with self._lock: if self._unfinished_ids: stream_id, chained_id = self._unfinished_ids[0] - return (stream_id - 1, chained_id) + return stream_id - 1, chained_id - return (self._current_max, self.chained_generator.get_current_token()) + return self._current_max, self.chained_generator.get_current_token() diff --git a/synapse/streams/config.py b/synapse/streams/config.py index f7f5906a99..02994ab2a5 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -37,7 +37,7 @@ class SourcePaginationConfig(object): self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None def __repr__(self): - return ("StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)") % ( + return "StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)" % ( self.from_key, self.to_key, self.direction, diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py index 2af8ca43b1..99646c7cf0 100644 --- a/synapse/util/caches/ttlcache.py +++ b/synapse/util/caches/ttlcache.py @@ -55,7 +55,7 @@ class TTLCache(object): if e != SENTINEL: self._expiry_list.remove(e) - entry = _CacheEntry(expiry_time=expiry, key=key, value=value) + entry = _CacheEntry(expiry_time=expiry, ttl=ttl, key=key, value=value) self._data[key] = entry self._expiry_list.add(entry) @@ -87,7 +87,8 @@ class TTLCache(object): key: key to look up Returns: - Tuple[Any, float]: the value from the cache, and the expiry time + Tuple[Any, float, float]: the value from the cache, the expiry time + and the TTL Raises: KeyError 
if the entry is not found
@@ -99,7 +100,7 @@ class TTLCache(object):
            self._metrics.inc_misses()
            raise
        self._metrics.inc_hits()
-        return e.value, e.expiry_time
+        return e.value, e.expiry_time, e.ttl

    def pop(self, key, default=SENTINEL):
        """Remove a value from the cache
@@ -158,5 +159,6 @@ class _CacheEntry(object):
    # expiry_time is the first attribute, so that entries are sorted by expiry.
    expiry_time = attr.ib()
+    ttl = attr.ib()
    key = attr.ib()
    value = attr.ib()
diff --git a/synapse/util/hash.py b/synapse/util/hash.py
new file mode 100644
index 0000000000..359168704e
--- /dev/null
+++ b/synapse/util/hash.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+
+import unpaddedbase64
+
+
+def sha256_and_url_safe_base64(input_text):
+    """SHA256 hash an input string, encode the digest as url-safe base64, and
+    return
+
+    :param input_text: string to hash
+    :type input_text: str
+
+    :returns a sha256 hashed and url-safe base64 encoded digest
+    :rtype: str
+    """
+    digest = hashlib.sha256(input_text.encode()).digest()
+    return unpaddedbase64.encode_base64(digest, urlsafe=True)
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 0862b5ca5a..a5f2fbef5c 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -22,6 +22,15 @@ from synapse.api.errors import CodeMessageException
 logger = logging.getLogger(__name__)

+# the initial backoff, after the first transaction fails
+MIN_RETRY_INTERVAL = 10 * 60 * 1000
+
+# how much we multiply the backoff by after each subsequent fail
+RETRY_MULTIPLIER = 5
+
+# a cap on the backoff. (Essentially none)
+MAX_RETRY_INTERVAL = 2 ** 63
+

 class NotRetryingDestination(Exception):
     def __init__(self, retry_last_ts, retry_interval, destination):
@@ -71,11 +80,13 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs)
        # We aren't ready to retry that destination.
raise
    """
+    failure_ts = None
    retry_last_ts, retry_interval = (0, 0)

    retry_timings = yield store.get_destination_retry_timings(destination)

    if retry_timings:
+        failure_ts = retry_timings["failure_ts"]
        retry_last_ts, retry_interval = (
            retry_timings["retry_last_ts"],
            retry_timings["retry_interval"],
@@ -99,6 +110,7 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs)
        destination,
        clock,
        store,
+        failure_ts,
        retry_interval,
        backoff_on_failure=backoff_on_failure,
        **kwargs
@@ -111,10 +123,8 @@ class RetryDestinationLimiter(object):
        destination,
        clock,
        store,
+        failure_ts,
        retry_interval,
-        min_retry_interval=10 * 60 * 1000,
-        max_retry_interval=24 * 60 * 60 * 1000,
-        multiplier_retry_interval=5,
        backoff_on_404=False,
        backoff_on_failure=True,
    ):
@@ -127,15 +137,11 @@ class RetryDestinationLimiter(object):
            destination (str)
            clock (Clock)
            store (DataStore)
+            failure_ts (int|None): when this destination started failing (in ms since
+                the epoch), or None if the last request was successful
            retry_interval (int): The next retry interval taken from the database
                in milliseconds, or zero if the last request was successful.
-            min_retry_interval (int): The minimum retry interval to use after
-                a failed request, in milliseconds.
-            max_retry_interval (int): The maximum retry interval to use after
-                a failed request, in milliseconds.
-            multiplier_retry_interval (int): The multiplier to use to increase
-                the retry interval after a failed request.
            backoff_on_404 (bool): Back off if we get a 404

            backoff_on_failure (bool): set to False if we should not increase the
@@ -145,10 +151,8 @@ class RetryDestinationLimiter(object):
        self.store = store
        self.destination = destination

+        self.failure_ts = failure_ts
        self.retry_interval = retry_interval
-        self.min_retry_interval = min_retry_interval
-        self.max_retry_interval = max_retry_interval
-        self.multiplier_retry_interval = multiplier_retry_interval
        self.backoff_on_404 = backoff_on_404
        self.backoff_on_failure = backoff_on_failure

@@ -189,6 +193,7 @@ class RetryDestinationLimiter(object):
            logger.debug(
                "Connection to %s was successful; clearing backoff", self.destination
            )
+            self.failure_ts = None
            retry_last_ts = 0
            self.retry_interval = 0
        elif not self.backoff_on_failure:
@@ -196,13 +201,14 @@ class RetryDestinationLimiter(object):
        else:
            # We couldn't connect.
            if self.retry_interval:
-                self.retry_interval *= self.multiplier_retry_interval
-                self.retry_interval *= int(random.uniform(0.8, 1.4))
+                self.retry_interval = int(
+                    self.retry_interval * RETRY_MULTIPLIER * random.uniform(0.8, 1.4)
+                )

-                if self.retry_interval >= self.max_retry_interval:
-                    self.retry_interval = self.max_retry_interval
+                if self.retry_interval >= MAX_RETRY_INTERVAL:
+                    self.retry_interval = MAX_RETRY_INTERVAL
            else:
-                self.retry_interval = self.min_retry_interval
+                self.retry_interval = MIN_RETRY_INTERVAL

            logger.info(
                "Connection to %s was unsuccessful (%s(%s)); backoff now %i",
@@ -213,11 +219,17 @@ class RetryDestinationLimiter(object):
            )
            retry_last_ts = int(self.clock.time_msec())

+            if self.failure_ts is None:
+                self.failure_ts = retry_last_ts
+
        @defer.inlineCallbacks
        def store_retry_timings():
            try:
                yield self.store.set_destination_retry_timings(
-                    self.destination, retry_last_ts, self.retry_interval
+                    self.destination,
+                    self.failure_ts,
+                    retry_last_ts,
+                    self.retry_interval,
                )
            except Exception:
                logger.exception("Failed to store destination_retry_timings")
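The retryutils.py hunks above replace the per-instance min/max/multiplier arguments with the module-level MIN_RETRY_INTERVAL, RETRY_MULTIPLIER and MAX_RETRY_INTERVAL constants. A minimal, self-contained sketch of the backoff computation this yields, assuming only those constants (the standalone helper name is illustrative, not part of the patch):

import random

MIN_RETRY_INTERVAL = 10 * 60 * 1000  # initial backoff: 10 minutes, in ms
RETRY_MULTIPLIER = 5  # growth factor after each subsequent failure
MAX_RETRY_INTERVAL = 2 ** 63  # cap on the backoff (effectively none)


def next_retry_interval(current_interval):
    """Compute the next backoff interval (ms) after a failed request."""
    if current_interval:
        # Grow the previous interval with ~0.8-1.4 jitter, clamped to the cap.
        return min(
            int(current_interval * RETRY_MULTIPLIER * random.uniform(0.8, 1.4)),
            MAX_RETRY_INTERVAL,
        )
    # First failure: start from the minimum interval.
    return MIN_RETRY_INTERVAL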
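Earlier in the diff, the stats store gains upsert helpers that distinguish "absolute" columns (overwritten on conflict) from "additive relative" columns (added to the value already stored, via EXCLUDED). A runnable sketch of that SQL-building pattern for engines with native upsert support; the table and column names in the example call are illustrative, not necessarily the exact stats schema:

from itertools import chain


def build_additive_upsert(table, keyvalues, absolutes, additive_relatives):
    """Build INSERT ... ON CONFLICT SQL where `absolutes` replace the stored
    value and `additive_relatives` are added onto whatever is already there."""
    absolute_updates = ["%s = EXCLUDED.%s" % (f, f) for f in absolutes]
    relative_updates = [
        "%s = EXCLUDED.%s + %s.%s" % (f, f, table, f) for f in additive_relatives
    ]
    insert_cols = list(chain(keyvalues, absolutes, additive_relatives))
    qargs = list(
        chain(keyvalues.values(), absolutes.values(), additive_relatives.values())
    )
    sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO UPDATE SET %s" % (
        table,
        ", ".join(insert_cols),
        ", ".join("?" for _ in insert_cols),
        ", ".join(keyvalues),
        ", ".join(absolute_updates + relative_updates),
    )
    return sql, qargs


sql, args = build_additive_upsert(
    "room_stats_current",
    keyvalues={"room_id": "!abc:example.com"},
    absolutes={"current_state_events": 42},
    additive_relatives={"total_events": 5},
)
# sql is roughly:
#   INSERT INTO room_stats_current (room_id, current_state_events, total_events)
#   VALUES (?, ?, ?)
#   ON CONFLICT (room_id) DO UPDATE SET
#       current_state_events = EXCLUDED.current_state_events,
#       total_events = EXCLUDED.total_events + room_stats_current.total_events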
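The new synapse/util/hash.py helper added above is small enough to exercise directly; a usage sketch (the example input is arbitrary):

import hashlib

import unpaddedbase64


def sha256_and_url_safe_base64(input_text):
    # Same logic as the new helper above: SHA-256, then unpadded URL-safe base64.
    digest = hashlib.sha256(input_text.encode()).digest()
    return unpaddedbase64.encode_base64(digest, urlsafe=True)


print(sha256_and_url_safe_base64("@alice:example.com"))
# A 32-byte SHA-256 digest encodes to a 43-character string with no '+', '/' or '=' characters.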