From 05ee048f2c9ce0bb8a7d2430b21ca3682ef5858b Mon Sep 17 00:00:00 2001 From: BBBSnowball Date: Thu, 1 Oct 2020 19:54:35 +0200 Subject: Add config option for always using "userinfo endpoint" for OIDC (#7658) This allows for connecting to certain IdPs, e.g. GitLab. --- tests/handlers/test_oidc.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index d5087e58be..b6f436c016 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -286,9 +286,15 @@ class OidcHandlerTestCase(HomeserverTestCase): h._validate_metadata, ) - # Tests for configs that the userinfo endpoint + # Tests for configs that require the userinfo endpoint self.assertFalse(h._uses_userinfo) - h._scopes = [] # do not request the openid scope + self.assertEqual(h._user_profile_method, "auto") + h._user_profile_method = "userinfo_endpoint" + self.assertTrue(h._uses_userinfo) + + # Revert the profile method and do not request the "openid" scope. + h._user_profile_method = "auto" + h._scopes = [] self.assertTrue(h._uses_userinfo) self.assertRaisesRegex(ValueError, "userinfo_endpoint", h._validate_metadata) -- cgit 1.5.1 From 6c5d5e507e629cf57ae8c1034879e8ffaef33e9f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Oct 2020 09:57:12 +0100 Subject: Add unit test for event persister sharding (#8433) --- changelog.d/8433.misc | 1 + mypy.ini | 3 + stubs/txredisapi.pyi | 20 +- synapse/replication/tcp/handler.py | 6 +- synapse/replication/tcp/redis.py | 40 +++- tests/replication/_base.py | 224 ++++++++++++++++++++-- tests/replication/test_sharded_event_persister.py | 102 ++++++++++ tests/unittest.py | 2 +- 8 files changed, 371 insertions(+), 27 deletions(-) create mode 100644 changelog.d/8433.misc create mode 100644 tests/replication/test_sharded_event_persister.py (limited to 'tests') diff --git a/changelog.d/8433.misc b/changelog.d/8433.misc new file mode 100644 index 0000000000..05f8b5bbf4 --- /dev/null +++ b/changelog.d/8433.misc @@ -0,0 +1 @@ +Add unit test for event persister sharding. diff --git a/mypy.ini b/mypy.ini index c283f15b21..e84ad04e41 100644 --- a/mypy.ini +++ b/mypy.ini @@ -143,3 +143,6 @@ ignore_missing_imports = True [mypy-nacl.*] ignore_missing_imports = True + +[mypy-hiredis] +ignore_missing_imports = True diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi index c66413f003..522244bb57 100644 --- a/stubs/txredisapi.pyi +++ b/stubs/txredisapi.pyi @@ -16,7 +16,7 @@ """Contains *incomplete* type hints for txredisapi. """ -from typing import List, Optional, Union +from typing import List, Optional, Union, Type class RedisProtocol: def publish(self, channel: str, message: bytes): ... @@ -42,3 +42,21 @@ def lazyConnection( class SubscriberFactory: def buildProtocol(self, addr): ... + +class ConnectionHandler: ... + +class RedisFactory: + continueTrying: bool + handler: RedisProtocol + def __init__( + self, + uuid: str, + dbid: Optional[int], + poolsize: int, + isLazy: bool = False, + handler: Type = ConnectionHandler, + charset: str = "utf-8", + password: Optional[str] = None, + replyTimeout: Optional[int] = None, + convertNumbers: Optional[int] = True, + ): ... diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index b323841f73..e92da7b263 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -251,10 +251,9 @@ class ReplicationCommandHandler: using TCP. 
""" if hs.config.redis.redis_enabled: - import txredisapi - from synapse.replication.tcp.redis import ( RedisDirectTcpReplicationClientFactory, + lazyConnection, ) logger.info( @@ -271,7 +270,8 @@ class ReplicationCommandHandler: # connection after SUBSCRIBE is called). # First create the connection for sending commands. - outbound_redis_connection = txredisapi.lazyConnection( + outbound_redis_connection = lazyConnection( + reactor=hs.get_reactor(), host=hs.config.redis_host, port=hs.config.redis_port, password=hs.config.redis.redis_password, diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index f225e533de..de19705c1f 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -15,7 +15,7 @@ import logging from inspect import isawaitable -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import txredisapi @@ -228,3 +228,41 @@ class RedisDirectTcpReplicationClientFactory(txredisapi.SubscriberFactory): p.password = self.password return p + + +def lazyConnection( + reactor, + host: str = "localhost", + port: int = 6379, + dbid: Optional[int] = None, + reconnect: bool = True, + charset: str = "utf-8", + password: Optional[str] = None, + connectTimeout: Optional[int] = None, + replyTimeout: Optional[int] = None, + convertNumbers: bool = True, +) -> txredisapi.RedisProtocol: + """Equivalent to `txredisapi.lazyConnection`, except allows specifying a + reactor. + """ + + isLazy = True + poolsize = 1 + + uuid = "%s:%d" % (host, port) + factory = txredisapi.RedisFactory( + uuid, + dbid, + poolsize, + isLazy, + txredisapi.ConnectionHandler, + charset, + password, + replyTimeout, + convertNumbers, + ) + factory.continueTrying = reconnect + for x in range(poolsize): + reactor.connectTCP(host, port, factory, connectTimeout) + + return factory.handler diff --git a/tests/replication/_base.py b/tests/replication/_base.py index ae60874ec3..81ea985b9f 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -12,13 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging from typing import Any, Callable, List, Optional, Tuple import attr +import hiredis from twisted.internet.interfaces import IConsumer, IPullProducer, IReactorTime +from twisted.internet.protocol import Protocol from twisted.internet.task import LoopingCall from twisted.web.http import HTTPChannel @@ -27,7 +28,7 @@ from synapse.app.generic_worker import ( GenericWorkerServer, ) from synapse.http.server import JsonResource -from synapse.http.site import SynapseRequest +from synapse.http.site import SynapseRequest, SynapseSite from synapse.replication.http import ReplicationRestResource, streams from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol @@ -197,19 +198,37 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): self.server_factory = ReplicationStreamProtocolFactory(self.hs) self.streamer = self.hs.get_replication_streamer() + # Fake in memory Redis server that servers can connect to. 
+ self._redis_server = FakeRedisPubSubServer() + store = self.hs.get_datastore() self.database_pool = store.db_pool self.reactor.lookups["testserv"] = "1.2.3.4" + self.reactor.lookups["localhost"] = "127.0.0.1" + + # A map from a HS instance to the associated HTTP Site to use for + # handling inbound HTTP requests to that instance. + self._hs_to_site = {self.hs: self.site} + + if self.hs.config.redis.redis_enabled: + # Handle attempts to connect to fake redis server. + self.reactor.add_tcp_client_callback( + "localhost", 6379, self.connect_any_redis_attempts, + ) - self._worker_hs_to_resource = {} + self.hs.get_tcp_replication().start_replication(self.hs) # When we see a connection attempt to the master replication listener we # automatically set up the connection. This is so that tests don't # manually have to go and explicitly set it up each time (plus sometimes # it is impossible to write the handling explicitly in the tests). + # + # Register the master replication listener: self.reactor.add_tcp_client_callback( - "1.2.3.4", 8765, self._handle_http_replication_attempt + "1.2.3.4", + 8765, + lambda: self._handle_http_replication_attempt(self.hs, 8765), ) def create_test_json_resource(self): @@ -253,28 +272,63 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): **kwargs ) + # If the instance is in the `instance_map` config then workers may try + # and send HTTP requests to it, so we register it with + # `_handle_http_replication_attempt` like we do with the master HS. + instance_name = worker_hs.get_instance_name() + instance_loc = worker_hs.config.worker.instance_map.get(instance_name) + if instance_loc: + # Ensure the host is one that has a fake DNS entry. + if instance_loc.host not in self.reactor.lookups: + raise Exception( + "Host does not have an IP for instance_map[%r].host = %r" + % (instance_name, instance_loc.host,) + ) + + self.reactor.add_tcp_client_callback( + self.reactor.lookups[instance_loc.host], + instance_loc.port, + lambda: self._handle_http_replication_attempt( + worker_hs, instance_loc.port + ), + ) + store = worker_hs.get_datastore() store.db_pool._db_pool = self.database_pool._db_pool - repl_handler = ReplicationCommandHandler(worker_hs) - client = ClientReplicationStreamProtocol( - worker_hs, "client", "test", self.clock, repl_handler, - ) - server = self.server_factory.buildProtocol(None) + # Set up TCP replication between master and the new worker if we don't + # have Redis support enabled. 
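+        # (With Redis enabled the worker instead connects to the fake Redis
+        # server via `start_replication`, below.)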
+ if not worker_hs.config.redis_enabled: + repl_handler = ReplicationCommandHandler(worker_hs) + client = ClientReplicationStreamProtocol( + worker_hs, "client", "test", self.clock, repl_handler, + ) + server = self.server_factory.buildProtocol(None) - client_transport = FakeTransport(server, self.reactor) - client.makeConnection(client_transport) + client_transport = FakeTransport(server, self.reactor) + client.makeConnection(client_transport) - server_transport = FakeTransport(client, self.reactor) - server.makeConnection(server_transport) + server_transport = FakeTransport(client, self.reactor) + server.makeConnection(server_transport) # Set up a resource for the worker - resource = ReplicationRestResource(self.hs) + resource = ReplicationRestResource(worker_hs) for servlet in self.servlets: servlet(worker_hs, resource) - self._worker_hs_to_resource[worker_hs] = resource + self._hs_to_site[worker_hs] = SynapseSite( + logger_name="synapse.access.http.fake", + site_tag="{}-{}".format( + worker_hs.config.server.server_name, worker_hs.get_instance_name() + ), + config=worker_hs.config.server.listeners[0], + resource=resource, + server_version_string="1", + ) + + if worker_hs.config.redis.redis_enabled: + worker_hs.get_tcp_replication().start_replication(worker_hs) return worker_hs @@ -285,7 +339,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): return config def render_on_worker(self, worker_hs: HomeServer, request: SynapseRequest): - render(request, self._worker_hs_to_resource[worker_hs], self.reactor) + render(request, self._hs_to_site[worker_hs].resource, self.reactor) def replicate(self): """Tell the master side of replication that something has happened, and then @@ -294,9 +348,9 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): self.streamer.on_notifier_poke() self.pump() - def _handle_http_replication_attempt(self): - """Handles a connection attempt to the master replication HTTP - listener. + def _handle_http_replication_attempt(self, hs, repl_port): + """Handles a connection attempt to the given HS replication HTTP + listener on the given port. """ # We should have at least one outbound connection attempt, where the @@ -305,7 +359,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): self.assertGreaterEqual(len(clients), 1) (host, port, client_factory, _timeout, _bindAddress) = clients.pop() self.assertEqual(host, "1.2.3.4") - self.assertEqual(port, 8765) + self.assertEqual(port, repl_port) # Set up client side protocol client_protocol = client_factory.buildProtocol(None) @@ -315,7 +369,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): # Set up the server side protocol channel = _PushHTTPChannel(self.reactor) channel.requestFactory = request_factory - channel.site = self.site + channel.site = self._hs_to_site[hs] # Connect client to server and vice versa. client_to_server_transport = FakeTransport( @@ -333,6 +387,32 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): # inside `connecTCP` before the connection has been passed back to the # code that requested the TCP connection. + def connect_any_redis_attempts(self): + """If redis is enabled we need to deal with workers connecting to a + redis server. We don't want to use a real Redis server so we use a + fake one. 
+ """ + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) + self.assertEqual(host, "localhost") + self.assertEqual(port, 6379) + + client_protocol = client_factory.buildProtocol(None) + server_protocol = self._redis_server.buildProtocol(None) + + client_to_server_transport = FakeTransport( + server_protocol, self.reactor, client_protocol + ) + client_protocol.makeConnection(client_to_server_transport) + + server_to_client_transport = FakeTransport( + client_protocol, self.reactor, server_protocol + ) + server_protocol.makeConnection(server_to_client_transport) + + return client_to_server_transport, server_to_client_transport + class TestReplicationDataHandler(GenericWorkerReplicationHandler): """Drop-in for ReplicationDataHandler which just collects RDATA rows""" @@ -467,3 +547,105 @@ class _PullToPushProducer: pass self.stopProducing() + + +class FakeRedisPubSubServer: + """A fake Redis server for pub/sub. + """ + + def __init__(self): + self._subscribers = set() + + def add_subscriber(self, conn): + """A connection has called SUBSCRIBE + """ + self._subscribers.add(conn) + + def remove_subscriber(self, conn): + """A connection has called UNSUBSCRIBE + """ + self._subscribers.discard(conn) + + def publish(self, conn, channel, msg) -> int: + """A connection want to publish a message to subscribers. + """ + for sub in self._subscribers: + sub.send(["message", channel, msg]) + + return len(self._subscribers) + + def buildProtocol(self, addr): + return FakeRedisPubSubProtocol(self) + + +class FakeRedisPubSubProtocol(Protocol): + """A connection from a client talking to the fake Redis server. + """ + + def __init__(self, server: FakeRedisPubSubServer): + self._server = server + self._reader = hiredis.Reader() + + def dataReceived(self, data): + self._reader.feed(data) + + # We might get multiple messages in one packet. + while True: + msg = self._reader.gets() + + if msg is False: + # No more messages. + return + + if not isinstance(msg, list): + # Inbound commands should always be a list + raise Exception("Expected redis list") + + self.handle_command(msg[0], *msg[1:]) + + def handle_command(self, command, *args): + """Received a Redis command from the client. + """ + + # We currently only support pub/sub. + if command == b"PUBLISH": + channel, message = args + num_subscribers = self._server.publish(self, channel, message) + self.send(num_subscribers) + elif command == b"SUBSCRIBE": + (channel,) = args + self._server.add_subscriber(self) + self.send(["subscribe", channel, 1]) + else: + raise Exception("Unknown command") + + def send(self, msg): + """Send a message back to the client. + """ + raw = self.encode(msg).encode("utf-8") + + self.transport.write(raw) + self.transport.flush() + + def encode(self, obj): + """Encode an object to its Redis format. + + Supports: strings/bytes, integers and list/tuples. + """ + + if isinstance(obj, bytes): + # We assume bytes are just unicode strings. 
+            obj = obj.decode("utf-8")
+
+        if isinstance(obj, str):
+            return "${len}\r\n{str}\r\n".format(len=len(obj), str=obj)
+        if isinstance(obj, int):
+            return ":{val}\r\n".format(val=obj)
+        if isinstance(obj, (list, tuple)):
+            items = "".join(self.encode(a) for a in obj)
+            return "*{len}\r\n{items}".format(len=len(obj), items=items)
+
+        raise Exception("Unrecognized type for encoding redis: %r: %r", type(obj), obj)
+
+    def connectionLost(self, reason):
+        self._server.remove_subscriber(self)
diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py
new file mode 100644
index 0000000000..6068d14905
--- /dev/null
+++ b/tests/replication/test_sharded_event_persister.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from synapse.rest import admin
+from synapse.rest.client.v1 import login, room
+
+from tests.replication._base import BaseMultiWorkerStreamTestCase
+from tests.utils import USE_POSTGRES_FOR_TESTS
+
+logger = logging.getLogger(__name__)
+
+
+class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
+    """Checks that event persister sharding works
+    """
+
+    # Event persister sharding requires postgres (due to needing
+    # `MultiWriterIdGenerator`).
+    if not USE_POSTGRES_FOR_TESTS:
+        skip = "Requires Postgres"
+
+    servlets = [
+        admin.register_servlets_for_client_rest_resource,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        # Register a user who sends a message that we'll get notified about
+        self.other_user_id = self.register_user("otheruser", "pass")
+        self.other_access_token = self.login("otheruser", "pass")
+
+    def default_config(self):
+        conf = super().default_config()
+        conf["redis"] = {"enabled": "true"}
+        conf["stream_writers"] = {"events": ["worker1", "worker2"]}
+        conf["instance_map"] = {
+            "worker1": {"host": "testserv", "port": 1001},
+            "worker2": {"host": "testserv", "port": 1002},
+        }
+        return conf
+
+    def test_basic(self):
+        """Simple test to ensure that multiple rooms can be created and joined,
+        and that different rooms get handled by different instances.
+        """
+
+        self.make_worker_hs(
+            "synapse.app.generic_worker", {"worker_name": "worker1"},
+        )
+
+        self.make_worker_hs(
+            "synapse.app.generic_worker", {"worker_name": "worker2"},
+        )
+
+        persisted_on_1 = False
+        persisted_on_2 = False
+
+        store = self.hs.get_datastore()
+
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # Keep making new rooms until we see rooms being persisted on both
+        # workers.
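+        # (Rooms are sharded between the event persisters based on their room
+        # ID, so a handful of rooms is very likely to exercise both workers.)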
+        for _ in range(10):
+            # Create a room
+            room = self.helper.create_room_as(user_id, tok=access_token)
+
+            # The other user joins
+            self.helper.join(
+                room=room, user=self.other_user_id, tok=self.other_access_token
+            )
+
+            # The other user sends some messages
+            response = self.helper.send(room, body="Hi!", tok=self.other_access_token)
+            event_id = response["event_id"]
+
+            # The event position includes which instance persisted the event.
+            pos = self.get_success(store.get_position_for_event(event_id))
+
+            persisted_on_1 |= pos.instance_name == "worker1"
+            persisted_on_2 |= pos.instance_name == "worker2"
+
+            if persisted_on_1 and persisted_on_2:
+                break
+
+        self.assertTrue(persisted_on_1)
+        self.assertTrue(persisted_on_2)
--
cgit 1.5.1


From 62894673e69f7beb0d0a748ad01c2e95c5fed106 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 2 Oct 2020 08:23:15 -0400
Subject: Allow background tasks to be run on a separate worker. (#8369)

---
 changelog.d/8369.feature                           |   1 +
 docs/sample_config.yaml                            |   5 +
 docs/workers.md                                    |  17 ++
 synapse/app/_base.py                               |   6 +
 synapse/app/admin_cmd.py                           |   1 +
 synapse/app/generic_worker.py                      |   4 +
 synapse/app/homeserver.py                          | 182 ------------------
 synapse/app/phone_stats_home.py                    | 202 +++++++++++++++++++++
 synapse/config/workers.py                          |  18 ++
 synapse/handlers/auth.py                           |   2 +-
 synapse/handlers/stats.py                          |   2 +-
 synapse/server.py                                  |  17 +-
 synapse/storage/databases/main/__init__.py         | 191 -------------------
 synapse/storage/databases/main/metrics.py          | 195 ++++++++++++++++++++
 .../storage/databases/main/monthly_active_users.py | 109 +++++------
 synapse/storage/databases/main/room.py             |  24 +--
 synapse/storage/databases/main/ui_auth.py          |   6 +-
 tests/test_phone_home.py                           |   2 +-
 tests/utils.py                                     |   2 +-
 19 files changed, 537 insertions(+), 449 deletions(-)
 create mode 100644 changelog.d/8369.feature
 create mode 100644 synapse/app/phone_stats_home.py

(limited to 'tests')

diff --git a/changelog.d/8369.feature b/changelog.d/8369.feature
new file mode 100644
index 0000000000..542993110b
--- /dev/null
+++ b/changelog.d/8369.feature
@@ -0,0 +1 @@
+Allow running background tasks in a separate worker process.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index b2c1d7a737..7126ade2de 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -2504,6 +2504,11 @@ opentracing:
 #   events: worker1
 #   typing: worker1

+# The worker that is used to run background tasks (e.g. cleaning up expired
+# data). If not provided this defaults to the main process.
+#
+#run_background_tasks_on: worker1
+
 # Configuration for Redis when using workers. This *must* be enabled when
 # using workers (unless using old style direct TCP configuration).
diff --git a/docs/workers.md b/docs/workers.md
index ad4d8ca9f2..84a9759e34 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -319,6 +319,23 @@ stream_writers:
     events: event_persister1
 ```

+#### Background tasks
+
+There is also *experimental* support for moving background tasks to a separate
+worker. Background tasks are run periodically or started via replication. 
Exactly +which tasks are configured to run depends on your Synapse configuration (e.g. if +stats is enabled). + +To enable this, the worker must have a `worker_name` and can be configured to run +background tasks. For example, to move background tasks to a dedicated worker, +the shared configuration would include: + +```yaml +run_background_tasks_on: background_worker +``` + +You might also wish to investigate the `update_user_directory` and +`media_instance_running_background_jobs` settings. ### `synapse.app.pusher` diff --git a/synapse/app/_base.py b/synapse/app/_base.py index fb476ddaf5..8bb0b142ca 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -28,6 +28,7 @@ from twisted.protocols.tls import TLSMemoryBIOFactory import synapse from synapse.app import check_bind_error +from synapse.app.phone_stats_home import start_phone_stats_home from synapse.config.server import ListenerConfig from synapse.crypto import context_factory from synapse.logging.context import PreserveLoggingContext @@ -274,6 +275,11 @@ def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]): setup_sentry(hs) setup_sdnotify(hs) + # If background tasks are running on the main process, start collecting the + # phone home stats. + if hs.config.run_background_tasks: + start_phone_stats_home(hs) + # We now freeze all allocated objects in the hopes that (almost) # everything currently allocated are things that will be used for the # rest of time. Doing so means less work each GC (hopefully). diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 7d309b1bb0..f0d65d08d7 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -208,6 +208,7 @@ def start(config_options): # Explicitly disable background processes config.update_user_directory = False + config.run_background_tasks = False config.start_pushers = False config.send_federation = False diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index c38413c893..fc5188ce95 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -128,11 +128,13 @@ from synapse.rest.key.v2 import KeyApiV2Resource from synapse.server import HomeServer, cache_in_self from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.media_repository import MediaRepositoryStore +from synapse.storage.databases.main.metrics import ServerMetricsStore from synapse.storage.databases.main.monthly_active_users import ( MonthlyActiveUsersWorkerStore, ) from synapse.storage.databases.main.presence import UserPresenceState from synapse.storage.databases.main.search import SearchWorkerStore +from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.types import ReadReceipt @@ -454,6 +456,7 @@ class GenericWorkerSlavedStore( # FIXME(#3714): We need to add UserDirectoryStore as we write directly # rather than going via the correct worker. 
UserDirectoryStore, + StatsStore, UIAuthWorkerStore, SlavedDeviceInboxStore, SlavedDeviceStore, @@ -476,6 +479,7 @@ class GenericWorkerSlavedStore( SlavedFilteringStore, MonthlyActiveUsersWorkerStore, MediaRepositoryStore, + ServerMetricsStore, SearchWorkerStore, BaseSlavedStore, ): diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index dff739e106..4ed4a2c253 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -17,14 +17,10 @@ import gc import logging -import math import os -import resource import sys from typing import Iterable -from prometheus_client import Gauge - from twisted.application import service from twisted.internet import defer, reactor from twisted.python.failure import Failure @@ -60,7 +56,6 @@ from synapse.http.server import ( from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.module_api import ModuleApi from synapse.python_dependencies import check_requirements from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource @@ -334,20 +329,6 @@ class SynapseHomeServer(HomeServer): logger.warning("Unrecognized listener type: %s", listener.type) -# Gauges to expose monthly active user control metrics -current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") -current_mau_by_service_gauge = Gauge( - "synapse_admin_mau_current_mau_by_service", - "Current MAU by service", - ["app_service"], -) -max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") -registered_reserved_users_mau_gauge = Gauge( - "synapse_admin_mau:registered_reserved_users", - "Registered users with reserved threepids", -) - - def setup(config_options): """ Args: @@ -389,8 +370,6 @@ def setup(config_options): except UpgradeDatabaseException as e: quit_with_error("Failed to upgrade database: %s" % (e,)) - hs.setup_master() - async def do_acme() -> bool: """ Reprovision an ACME certificate, if it's required. @@ -486,92 +465,6 @@ class SynapseService(service.Service): return self._port.stopListening() -# Contains the list of processes we will be monitoring -# currently either 0 or 1 -_stats_process = [] - - -async def phone_stats_home(hs, stats, stats_process=_stats_process): - logger.info("Gathering stats for reporting") - now = int(hs.get_clock().time()) - uptime = int(now - hs.start_time) - if uptime < 0: - uptime = 0 - - # - # Performance statistics. Keep this early in the function to maintain reliability of `test_performance_100` test. 
- # - old = stats_process[0] - new = (now, resource.getrusage(resource.RUSAGE_SELF)) - stats_process[0] = new - - # Get RSS in bytes - stats["memory_rss"] = new[1].ru_maxrss - - # Get CPU time in % of a single core, not % of all cores - used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - ( - old[1].ru_utime + old[1].ru_stime - ) - if used_cpu_time == 0 or new[0] == old[0]: - stats["cpu_average"] = 0 - else: - stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100) - - # - # General statistics - # - - stats["homeserver"] = hs.config.server_name - stats["server_context"] = hs.config.server_context - stats["timestamp"] = now - stats["uptime_seconds"] = uptime - version = sys.version_info - stats["python_version"] = "{}.{}.{}".format( - version.major, version.minor, version.micro - ) - stats["total_users"] = await hs.get_datastore().count_all_users() - - total_nonbridged_users = await hs.get_datastore().count_nonbridged_users() - stats["total_nonbridged_users"] = total_nonbridged_users - - daily_user_type_results = await hs.get_datastore().count_daily_user_type() - for name, count in daily_user_type_results.items(): - stats["daily_user_type_" + name] = count - - room_count = await hs.get_datastore().get_room_count() - stats["total_room_count"] = room_count - - stats["daily_active_users"] = await hs.get_datastore().count_daily_users() - stats["monthly_active_users"] = await hs.get_datastore().count_monthly_users() - stats["daily_active_rooms"] = await hs.get_datastore().count_daily_active_rooms() - stats["daily_messages"] = await hs.get_datastore().count_daily_messages() - - r30_results = await hs.get_datastore().count_r30_users() - for name, count in r30_results.items(): - stats["r30_users_" + name] = count - - daily_sent_messages = await hs.get_datastore().count_daily_sent_messages() - stats["daily_sent_messages"] = daily_sent_messages - stats["cache_factor"] = hs.config.caches.global_factor - stats["event_cache_size"] = hs.config.caches.event_cache_size - - # - # Database version - # - - # This only reports info about the *main* database. - stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__ - stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version - - logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)) - try: - await hs.get_proxied_http_client().put_json( - hs.config.report_stats_endpoint, stats - ) - except Exception as e: - logger.warning("Error reporting stats: %s", e) - - def run(hs): PROFILE_SYNAPSE = False if PROFILE_SYNAPSE: @@ -597,81 +490,6 @@ def run(hs): ThreadPool._worker = profile(ThreadPool._worker) reactor.run = profile(reactor.run) - clock = hs.get_clock() - - stats = {} - - def performance_stats_init(): - _stats_process.clear() - _stats_process.append( - (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF)) - ) - - def start_phone_stats_home(): - return run_as_background_process( - "phone_stats_home", phone_stats_home, hs, stats - ) - - def generate_user_daily_visit_stats(): - return run_as_background_process( - "generate_user_daily_visits", hs.get_datastore().generate_user_daily_visits - ) - - # Rather than update on per session basis, batch up the requests. 
- # If you increase the loop period, the accuracy of user_daily_visits - # table will decrease - clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000) - - # monthly active user limiting functionality - def reap_monthly_active_users(): - return run_as_background_process( - "reap_monthly_active_users", hs.get_datastore().reap_monthly_active_users - ) - - clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60) - reap_monthly_active_users() - - async def generate_monthly_active_users(): - current_mau_count = 0 - current_mau_count_by_service = {} - reserved_users = () - store = hs.get_datastore() - if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: - current_mau_count = await store.get_monthly_active_count() - current_mau_count_by_service = ( - await store.get_monthly_active_count_by_service() - ) - reserved_users = await store.get_registered_reserved_users() - current_mau_gauge.set(float(current_mau_count)) - - for app_service, count in current_mau_count_by_service.items(): - current_mau_by_service_gauge.labels(app_service).set(float(count)) - - registered_reserved_users_mau_gauge.set(float(len(reserved_users))) - max_mau_gauge.set(float(hs.config.max_mau_value)) - - def start_generate_monthly_active_users(): - return run_as_background_process( - "generate_monthly_active_users", generate_monthly_active_users - ) - - start_generate_monthly_active_users() - if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: - clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000) - # End of monthly active user settings - - if hs.config.report_stats: - logger.info("Scheduling stats reporting for 3 hour intervals") - clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000) - - # We need to defer this init for the cases that we daemonize - # otherwise the process ID we get is that of the non-daemon process - clock.call_later(0, performance_stats_init) - - # We wait 5 minutes to send the first set of stats as the server can - # be quite busy the first few minutes - clock.call_later(5 * 60, start_phone_stats_home) - _base.start_reactor( "synapse-homeserver", soft_file_limit=hs.config.soft_file_limit, diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py new file mode 100644 index 0000000000..2c8e14a8c0 --- /dev/null +++ b/synapse/app/phone_stats_home.py @@ -0,0 +1,202 @@ +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
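+#
+# This module contains the phone-home stats reporting logic, moved out of
+# synapse.app.homeserver so that it can also run on the background tasks
+# worker.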
+ +import logging +import math +import resource +import sys + +from prometheus_client import Gauge + +from synapse.metrics.background_process_metrics import run_as_background_process + +logger = logging.getLogger("synapse.app.homeserver") + +# Contains the list of processes we will be monitoring +# currently either 0 or 1 +_stats_process = [] + +# Gauges to expose monthly active user control metrics +current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") +current_mau_by_service_gauge = Gauge( + "synapse_admin_mau_current_mau_by_service", + "Current MAU by service", + ["app_service"], +) +max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") +registered_reserved_users_mau_gauge = Gauge( + "synapse_admin_mau:registered_reserved_users", + "Registered users with reserved threepids", +) + + +async def phone_stats_home(hs, stats, stats_process=_stats_process): + logger.info("Gathering stats for reporting") + now = int(hs.get_clock().time()) + uptime = int(now - hs.start_time) + if uptime < 0: + uptime = 0 + + # + # Performance statistics. Keep this early in the function to maintain reliability of `test_performance_100` test. + # + old = stats_process[0] + new = (now, resource.getrusage(resource.RUSAGE_SELF)) + stats_process[0] = new + + # Get RSS in bytes + stats["memory_rss"] = new[1].ru_maxrss + + # Get CPU time in % of a single core, not % of all cores + used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - ( + old[1].ru_utime + old[1].ru_stime + ) + if used_cpu_time == 0 or new[0] == old[0]: + stats["cpu_average"] = 0 + else: + stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100) + + # + # General statistics + # + + stats["homeserver"] = hs.config.server_name + stats["server_context"] = hs.config.server_context + stats["timestamp"] = now + stats["uptime_seconds"] = uptime + version = sys.version_info + stats["python_version"] = "{}.{}.{}".format( + version.major, version.minor, version.micro + ) + stats["total_users"] = await hs.get_datastore().count_all_users() + + total_nonbridged_users = await hs.get_datastore().count_nonbridged_users() + stats["total_nonbridged_users"] = total_nonbridged_users + + daily_user_type_results = await hs.get_datastore().count_daily_user_type() + for name, count in daily_user_type_results.items(): + stats["daily_user_type_" + name] = count + + room_count = await hs.get_datastore().get_room_count() + stats["total_room_count"] = room_count + + stats["daily_active_users"] = await hs.get_datastore().count_daily_users() + stats["monthly_active_users"] = await hs.get_datastore().count_monthly_users() + stats["daily_active_rooms"] = await hs.get_datastore().count_daily_active_rooms() + stats["daily_messages"] = await hs.get_datastore().count_daily_messages() + + r30_results = await hs.get_datastore().count_r30_users() + for name, count in r30_results.items(): + stats["r30_users_" + name] = count + + daily_sent_messages = await hs.get_datastore().count_daily_sent_messages() + stats["daily_sent_messages"] = daily_sent_messages + stats["cache_factor"] = hs.config.caches.global_factor + stats["event_cache_size"] = hs.config.caches.event_cache_size + + # + # Database version + # + + # This only reports info about the *main* database. 
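+    # (Deployments can be configured with additional physical databases,
+    # which are not included in these stats.)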
+ stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__ + stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version + + logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)) + try: + await hs.get_proxied_http_client().put_json( + hs.config.report_stats_endpoint, stats + ) + except Exception as e: + logger.warning("Error reporting stats: %s", e) + + +def start_phone_stats_home(hs): + """ + Start the background tasks which report phone home stats. + """ + clock = hs.get_clock() + + stats = {} + + def performance_stats_init(): + _stats_process.clear() + _stats_process.append( + (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF)) + ) + + def start_phone_stats_home(): + return run_as_background_process( + "phone_stats_home", phone_stats_home, hs, stats + ) + + def generate_user_daily_visit_stats(): + return run_as_background_process( + "generate_user_daily_visits", hs.get_datastore().generate_user_daily_visits + ) + + # Rather than update on per session basis, batch up the requests. + # If you increase the loop period, the accuracy of user_daily_visits + # table will decrease + clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000) + + # monthly active user limiting functionality + def reap_monthly_active_users(): + return run_as_background_process( + "reap_monthly_active_users", hs.get_datastore().reap_monthly_active_users + ) + + clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60) + reap_monthly_active_users() + + async def generate_monthly_active_users(): + current_mau_count = 0 + current_mau_count_by_service = {} + reserved_users = () + store = hs.get_datastore() + if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: + current_mau_count = await store.get_monthly_active_count() + current_mau_count_by_service = ( + await store.get_monthly_active_count_by_service() + ) + reserved_users = await store.get_registered_reserved_users() + current_mau_gauge.set(float(current_mau_count)) + + for app_service, count in current_mau_count_by_service.items(): + current_mau_by_service_gauge.labels(app_service).set(float(count)) + + registered_reserved_users_mau_gauge.set(float(len(reserved_users))) + max_mau_gauge.set(float(hs.config.max_mau_value)) + + def start_generate_monthly_active_users(): + return run_as_background_process( + "generate_monthly_active_users", generate_monthly_active_users + ) + + if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: + start_generate_monthly_active_users() + clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000) + # End of monthly active user settings + + if hs.config.report_stats: + logger.info("Scheduling stats reporting for 3 hour intervals") + clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000) + + # We need to defer this init for the cases that we daemonize + # otherwise the process ID we get is that of the non-daemon process + clock.call_later(0, performance_stats_init) + + # We wait 5 minutes to send the first set of stats as the server can + # be quite busy the first few minutes + clock.call_later(5 * 60, start_phone_stats_home) diff --git a/synapse/config/workers.py b/synapse/config/workers.py index f23e42cdf9..57ab097eba 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -132,6 +132,19 @@ class WorkerConfig(Config): self.events_shard_config = ShardedWorkerHandlingConfig(self.writers.events) + # Whether this worker should run background tasks or not. 
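+        # This is controlled by the `run_background_tasks_on` setting, read
+        # below.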
+ # + # As a note for developers, the background tasks guarded by this should + # be able to run on only a single instance (meaning that they don't + # depend on any in-memory state of a particular worker). + # + # No effort is made to ensure only a single instance of these tasks is + # running. + background_tasks_instance = config.get("run_background_tasks_on") or "master" + self.run_background_tasks = ( + self.worker_name is None and background_tasks_instance == "master" + ) or self.worker_name == background_tasks_instance + def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ ## Workers ## @@ -167,6 +180,11 @@ class WorkerConfig(Config): #stream_writers: # events: worker1 # typing: worker1 + + # The worker that is used to run background tasks (e.g. cleaning up expired + # data). If not provided this defaults to the main process. + # + #run_background_tasks_on: worker1 """ def read_arguments(self, args): diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 00eae92052..7c4b716b28 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -212,7 +212,7 @@ class AuthHandler(BaseHandler): self._clock = self.hs.get_clock() # Expire old UI auth sessions after a period of time. - if hs.config.worker_app is None: + if hs.config.run_background_tasks: self._clock.looping_call( run_as_background_process, 5 * 60 * 1000, diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 249ffe2a55..dc62b21c06 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -49,7 +49,7 @@ class StatsHandler: # Guard to ensure we only process deltas one at a time self._is_processing = False - if hs.config.stats_enabled: + if self.stats_enabled and hs.config.run_background_tasks: self.notifier.add_replication_callback(self.notify_new_event) # We kick this off so that we don't have to wait for a change before diff --git a/synapse/server.py b/synapse/server.py index 5e3752c333..aa2273955c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -185,7 +185,10 @@ class HomeServer(metaclass=abc.ABCMeta): we are listening on to provide HTTP services. """ - REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"] + REQUIRED_ON_BACKGROUND_TASK_STARTUP = [ + "auth", + "stats", + ] # This is overridden in derived application classes # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be @@ -251,14 +254,20 @@ class HomeServer(metaclass=abc.ABCMeta): self.datastores = Databases(self.DATASTORE_CLASS, self) logger.info("Finished setting up.") - def setup_master(self) -> None: + # Register background tasks required by this server. This must be done + # somewhat manually due to the background tasks not being registered + # unless handlers are instantiated. + if self.config.run_background_tasks: + self.setup_background_tasks() + + def setup_background_tasks(self) -> None: """ Some handlers have side effects on instantiation (like registering background updates). This function causes them to be fetched, and therefore instantiated, to run those side effects. 
""" - for i in self.REQUIRED_ON_MASTER_STARTUP: - getattr(self, "get_" + i)() + for i in self.REQUIRED_ON_BACKGROUND_TASK_STARTUP: + getattr(self, "get_" + i + "_handler")() def get_reactor(self) -> twisted.internet.base.ReactorBase: """ diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 0cb12f4c61..f823d66709 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -15,9 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import calendar import logging -import time from typing import Any, Dict, List, Optional, Tuple from synapse.api.constants import PresenceState @@ -268,9 +266,6 @@ class DataStore( self._stream_order_on_start = self.get_room_max_stream_ordering() self._min_stream_order_on_start = self.get_room_min_stream_ordering() - # Used in _generate_user_daily_visits to keep track of progress - self._last_user_visit_update = self._get_start_of_day() - def get_device_stream_token(self) -> int: return self._device_list_id_gen.get_current_token() @@ -301,192 +296,6 @@ class DataStore( return [UserPresenceState(**row) for row in rows] - async def count_daily_users(self) -> int: - """ - Counts the number of users who used this homeserver in the last 24 hours. - """ - yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) - return await self.db_pool.runInteraction( - "count_daily_users", self._count_users, yesterday - ) - - async def count_monthly_users(self) -> int: - """ - Counts the number of users who used this homeserver in the last 30 days. - Note this method is intended for phonehome metrics only and is different - from the mau figure in synapse.storage.monthly_active_users which, - amongst other things, includes a 3 day grace period before a user counts. - """ - thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) - return await self.db_pool.runInteraction( - "count_monthly_users", self._count_users, thirty_days_ago - ) - - def _count_users(self, txn, time_from): - """ - Returns number of users seen in the past time_from period - """ - sql = """ - SELECT COALESCE(count(*), 0) FROM ( - SELECT user_id FROM user_ips - WHERE last_seen > ? - GROUP BY user_id - ) u - """ - txn.execute(sql, (time_from,)) - (count,) = txn.fetchone() - return count - - async def count_r30_users(self) -> Dict[str, int]: - """ - Counts the number of 30 day retained users, defined as:- - * Users who have created their accounts more than 30 days ago - * Where last seen at most 30 days ago - * Where account creation and last_seen are > 30 days apart - - Returns: - A mapping of counts globally as well as broken out by platform. - """ - - def _count_r30_users(txn): - thirty_days_in_secs = 86400 * 30 - now = int(self._clock.time()) - thirty_days_ago_in_secs = now - thirty_days_in_secs - - sql = """ - SELECT platform, COALESCE(count(*), 0) FROM ( - SELECT - users.name, platform, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen, - CASE - WHEN user_agent LIKE '%%Android%%' THEN 'android' - WHEN user_agent LIKE '%%iOS%%' THEN 'ios' - WHEN user_agent LIKE '%%Electron%%' THEN 'electron' - WHEN user_agent LIKE '%%Mozilla%%' THEN 'web' - WHEN user_agent LIKE '%%Gecko%%' THEN 'web' - ELSE 'unknown' - END - AS platform - FROM user_ips - ) uip - ON users.name = uip.user_id - AND users.appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? 
- AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, platform, users.creation_ts - ) u GROUP BY platform - """ - - results = {} - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - for row in txn: - if row[0] == "unknown": - pass - results[row[0]] = row[1] - - sql = """ - SELECT COALESCE(count(*), 0) FROM ( - SELECT users.name, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen - FROM user_ips - ) uip - ON users.name = uip.user_id - AND appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? - AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, users.creation_ts - ) u - """ - - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - (count,) = txn.fetchone() - results["all"] = count - - return results - - return await self.db_pool.runInteraction("count_r30_users", _count_r30_users) - - def _get_start_of_day(self): - """ - Returns millisecond unixtime for start of UTC day. - """ - now = time.gmtime() - today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0)) - return today_start * 1000 - - async def generate_user_daily_visits(self) -> None: - """ - Generates daily visit data for use in cohort/ retention analysis - """ - - def _generate_user_daily_visits(txn): - logger.info("Calling _generate_user_daily_visits") - today_start = self._get_start_of_day() - a_day_in_milliseconds = 24 * 60 * 60 * 1000 - now = self.clock.time_msec() - - sql = """ - INSERT INTO user_daily_visits (user_id, device_id, timestamp) - SELECT u.user_id, u.device_id, ? - FROM user_ips AS u - LEFT JOIN ( - SELECT user_id, device_id, timestamp FROM user_daily_visits - WHERE timestamp = ? - ) udv - ON u.user_id = udv.user_id AND u.device_id=udv.device_id - INNER JOIN users ON users.name=u.user_id - WHERE last_seen > ? AND last_seen <= ? - AND udv.timestamp IS NULL AND users.is_guest=0 - AND users.appservice_id IS NULL - GROUP BY u.user_id, u.device_id - """ - - # This means that the day has rolled over but there could still - # be entries from the previous day. There is an edge case - # where if the user logs in at 23:59 and overwrites their - # last_seen at 00:01 then they will not be counted in the - # previous day's stats - it is important that the query is run - # often to minimise this case. - if today_start > self._last_user_visit_update: - yesterday_start = today_start - a_day_in_milliseconds - txn.execute( - sql, - ( - yesterday_start, - yesterday_start, - self._last_user_visit_update, - today_start, - ), - ) - self._last_user_visit_update = today_start - - txn.execute( - sql, (today_start, today_start, self._last_user_visit_update, now) - ) - # Update _last_user_visit_update to now. The reason to do this - # rather just clamping to the beginning of the day is to limit - # the size of the join - meaning that the query can be run more - # frequently - self._last_user_visit_update = now - - await self.db_pool.runInteraction( - "generate_user_daily_visits", _generate_user_daily_visits - ) - async def get_users(self) -> List[Dict[str, Any]]: """Function to retrieve a list of users in users table. diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 92099f95ce..2c5a4fdbf6 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -12,6 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +import calendar +import logging +import time +from typing import Dict from synapse.metrics import GaugeBucketCollector from synapse.metrics.background_process_metrics import run_as_background_process @@ -21,6 +25,8 @@ from synapse.storage.databases.main.event_push_actions import ( EventPushActionsWorkerStore, ) +logger = logging.getLogger(__name__) + # Collect metrics on the number of forward extremities that exist. _extremities_collecter = GaugeBucketCollector( "synapse_forward_extremities", @@ -60,6 +66,9 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000) + # Used in _generate_user_daily_visits to keep track of progress + self._last_user_visit_update = self._get_start_of_day() + async def _read_forward_extremities(self): def fetch(txn): txn.execute( @@ -137,3 +146,189 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): return count return await self.db_pool.runInteraction("count_daily_active_rooms", _count) + + async def count_daily_users(self) -> int: + """ + Counts the number of users who used this homeserver in the last 24 hours. + """ + yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) + return await self.db_pool.runInteraction( + "count_daily_users", self._count_users, yesterday + ) + + async def count_monthly_users(self) -> int: + """ + Counts the number of users who used this homeserver in the last 30 days. + Note this method is intended for phonehome metrics only and is different + from the mau figure in synapse.storage.monthly_active_users which, + amongst other things, includes a 3 day grace period before a user counts. + """ + thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) + return await self.db_pool.runInteraction( + "count_monthly_users", self._count_users, thirty_days_ago + ) + + def _count_users(self, txn, time_from): + """ + Returns number of users seen in the past time_from period + """ + sql = """ + SELECT COALESCE(count(*), 0) FROM ( + SELECT user_id FROM user_ips + WHERE last_seen > ? + GROUP BY user_id + ) u + """ + txn.execute(sql, (time_from,)) + (count,) = txn.fetchone() + return count + + async def count_r30_users(self) -> Dict[str, int]: + """ + Counts the number of 30 day retained users, defined as:- + * Users who have created their accounts more than 30 days ago + * Where last seen at most 30 days ago + * Where account creation and last_seen are > 30 days apart + + Returns: + A mapping of counts globally as well as broken out by platform. + """ + + def _count_r30_users(txn): + thirty_days_in_secs = 86400 * 30 + now = int(self._clock.time()) + thirty_days_ago_in_secs = now - thirty_days_in_secs + + sql = """ + SELECT platform, COALESCE(count(*), 0) FROM ( + SELECT + users.name, platform, users.creation_ts * 1000, + MAX(uip.last_seen) + FROM users + INNER JOIN ( + SELECT + user_id, + last_seen, + CASE + WHEN user_agent LIKE '%%Android%%' THEN 'android' + WHEN user_agent LIKE '%%iOS%%' THEN 'ios' + WHEN user_agent LIKE '%%Electron%%' THEN 'electron' + WHEN user_agent LIKE '%%Mozilla%%' THEN 'web' + WHEN user_agent LIKE '%%Gecko%%' THEN 'web' + ELSE 'unknown' + END + AS platform + FROM user_ips + ) uip + ON users.name = uip.user_id + AND users.appservice_id is NULL + AND users.creation_ts < ? + AND uip.last_seen/1000 > ? 
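+                    -- last_seen is in ms while creation_ts is in seconds,
+                    -- hence the division by 1000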
+ AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 + GROUP BY users.name, platform, users.creation_ts + ) u GROUP BY platform + """ + + results = {} + txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) + + for row in txn: + if row[0] == "unknown": + pass + results[row[0]] = row[1] + + sql = """ + SELECT COALESCE(count(*), 0) FROM ( + SELECT users.name, users.creation_ts * 1000, + MAX(uip.last_seen) + FROM users + INNER JOIN ( + SELECT + user_id, + last_seen + FROM user_ips + ) uip + ON users.name = uip.user_id + AND appservice_id is NULL + AND users.creation_ts < ? + AND uip.last_seen/1000 > ? + AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 + GROUP BY users.name, users.creation_ts + ) u + """ + + txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) + + (count,) = txn.fetchone() + results["all"] = count + + return results + + return await self.db_pool.runInteraction("count_r30_users", _count_r30_users) + + def _get_start_of_day(self): + """ + Returns millisecond unixtime for start of UTC day. + """ + now = time.gmtime() + today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0)) + return today_start * 1000 + + async def generate_user_daily_visits(self) -> None: + """ + Generates daily visit data for use in cohort/ retention analysis + """ + + def _generate_user_daily_visits(txn): + logger.info("Calling _generate_user_daily_visits") + today_start = self._get_start_of_day() + a_day_in_milliseconds = 24 * 60 * 60 * 1000 + now = self._clock.time_msec() + + sql = """ + INSERT INTO user_daily_visits (user_id, device_id, timestamp) + SELECT u.user_id, u.device_id, ? + FROM user_ips AS u + LEFT JOIN ( + SELECT user_id, device_id, timestamp FROM user_daily_visits + WHERE timestamp = ? + ) udv + ON u.user_id = udv.user_id AND u.device_id=udv.device_id + INNER JOIN users ON users.name=u.user_id + WHERE last_seen > ? AND last_seen <= ? + AND udv.timestamp IS NULL AND users.is_guest=0 + AND users.appservice_id IS NULL + GROUP BY u.user_id, u.device_id + """ + + # This means that the day has rolled over but there could still + # be entries from the previous day. There is an edge case + # where if the user logs in at 23:59 and overwrites their + # last_seen at 00:01 then they will not be counted in the + # previous day's stats - it is important that the query is run + # often to minimise this case. + if today_start > self._last_user_visit_update: + yesterday_start = today_start - a_day_in_milliseconds + txn.execute( + sql, + ( + yesterday_start, + yesterday_start, + self._last_user_visit_update, + today_start, + ), + ) + self._last_user_visit_update = today_start + + txn.execute( + sql, (today_start, today_start, self._last_user_visit_update, now) + ) + # Update _last_user_visit_update to now. 
The reason to do this + # rather just clamping to the beginning of the day is to limit + # the size of the join - meaning that the query can be run more + # frequently + self._last_user_visit_update = now + + await self.db_pool.runInteraction( + "generate_user_daily_visits", _generate_user_daily_visits + ) diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index e93aad33cd..b2127598ef 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -32,6 +32,9 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): self._clock = hs.get_clock() self.hs = hs + self._limit_usage_by_mau = hs.config.limit_usage_by_mau + self._max_mau_value = hs.config.max_mau_value + @cached(num_args=0) async def get_monthly_active_count(self) -> int: """Generates current count of monthly active users @@ -124,60 +127,6 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): desc="user_last_seen_monthly_active", ) - -class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): - def __init__(self, database: DatabasePool, db_conn, hs): - super().__init__(database, db_conn, hs) - - self._limit_usage_by_mau = hs.config.limit_usage_by_mau - self._mau_stats_only = hs.config.mau_stats_only - self._max_mau_value = hs.config.max_mau_value - - # Do not add more reserved users than the total allowable number - # cur = LoggingTransaction( - self.db_pool.new_transaction( - db_conn, - "initialise_mau_threepids", - [], - [], - self._initialise_reserved_users, - hs.config.mau_limits_reserved_threepids[: self._max_mau_value], - ) - - def _initialise_reserved_users(self, txn, threepids): - """Ensures that reserved threepids are accounted for in the MAU table, should - be called on start up. - - Args: - txn (cursor): - threepids (list[dict]): List of threepid dicts to reserve - """ - - # XXX what is this function trying to achieve? It upserts into - # monthly_active_users for each *registered* reserved mau user, but why? - # - # - shouldn't there already be an entry for each reserved user (at least - # if they have been active recently)? - # - # - if it's important that the timestamp is kept up to date, why do we only - # run this at startup? - - for tp in threepids: - user_id = self.get_user_id_by_threepid_txn(txn, tp["medium"], tp["address"]) - - if user_id: - is_support = self.is_support_user_txn(txn, user_id) - if not is_support: - # We do this manually here to avoid hitting #6791 - self.db_pool.simple_upsert_txn( - txn, - table="monthly_active_users", - keyvalues={"user_id": user_id}, - values={"timestamp": int(self._clock.time_msec())}, - ) - else: - logger.warning("mau limit reserved threepid %s not found in db" % tp) - async def reap_monthly_active_users(self): """Cleans out monthly active user table to ensure that no stale entries exist. 
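
The same pattern repeats across the stores this commit touches: methods that the phone-home/background machinery calls move down into the worker store, together with any config they read, while master-only initialisation stays on the subclass. Schematically (hypothetical names; a sketch of the split, not the real classes):

    class FakeConfig:
        limit_usage_by_mau = False
        max_mau_value = 100
        mau_stats_only = False

    class WorkerStoreSketch:
        """Holds the methods (and the config reads they depend on) that
        background tasks need, so any process can run them."""

        def __init__(self, config):
            self._limit_usage_by_mau = config.limit_usage_by_mau
            self._max_mau_value = config.max_mau_value

        async def reap_monthly_active_users(self):
            print("runs on whichever process handles background tasks")

    class MasterStoreSketch(WorkerStoreSketch):
        """Initialisation that should still happen in one place only."""

        def __init__(self, config):
            super().__init__(config)
            self._mau_stats_only = config.mau_stats_only

    store = MasterStoreSketch(FakeConfig())
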
@@ -257,6 +206,58 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): "reap_monthly_active_users", _reap_users, reserved_users ) + +class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): + def __init__(self, database: DatabasePool, db_conn, hs): + super().__init__(database, db_conn, hs) + + self._mau_stats_only = hs.config.mau_stats_only + + # Do not add more reserved users than the total allowable number + # cur = LoggingTransaction( + self.db_pool.new_transaction( + db_conn, + "initialise_mau_threepids", + [], + [], + self._initialise_reserved_users, + hs.config.mau_limits_reserved_threepids[: self._max_mau_value], + ) + + def _initialise_reserved_users(self, txn, threepids): + """Ensures that reserved threepids are accounted for in the MAU table, should + be called on start up. + + Args: + txn (cursor): + threepids (list[dict]): List of threepid dicts to reserve + """ + + # XXX what is this function trying to achieve? It upserts into + # monthly_active_users for each *registered* reserved mau user, but why? + # + # - shouldn't there already be an entry for each reserved user (at least + # if they have been active recently)? + # + # - if it's important that the timestamp is kept up to date, why do we only + # run this at startup? + + for tp in threepids: + user_id = self.get_user_id_by_threepid_txn(txn, tp["medium"], tp["address"]) + + if user_id: + is_support = self.is_support_user_txn(txn, user_id) + if not is_support: + # We do this manually here to avoid hitting #6791 + self.db_pool.simple_upsert_txn( + txn, + table="monthly_active_users", + keyvalues={"user_id": user_id}, + values={"timestamp": int(self._clock.time_msec())}, + ) + else: + logger.warning("mau limit reserved threepid %s not found in db" % tp) + async def upsert_monthly_active_user(self, user_id: str) -> None: """Updates or inserts the user into the monthly active user table, which is used to track the current MAU usage of the server diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 3c7630857f..c0f2af0785 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -192,6 +192,18 @@ class RoomWorkerStore(SQLBaseStore): "count_public_rooms", _count_public_rooms_txn ) + async def get_room_count(self) -> int: + """Retrieve the total number of rooms. + """ + + def f(txn): + sql = "SELECT count(*) FROM rooms" + txn.execute(sql) + row = txn.fetchone() + return row[0] or 0 + + return await self.db_pool.runInteraction("get_rooms", f) + async def get_largest_public_rooms( self, network_tuple: Optional[ThirdPartyInstanceID], @@ -1292,18 +1304,6 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): ) self.hs.get_notifier().on_new_replication_data() - async def get_room_count(self) -> int: - """Retrieve the total number of rooms. 
- """ - - def f(txn): - sql = "SELECT count(*) FROM rooms" - txn.execute(sql) - row = txn.fetchone() - return row[0] or 0 - - return await self.db_pool.runInteraction("get_rooms", f) - async def add_event_report( self, room_id: str, diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index 3b9211a6d2..79b7ece330 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -288,8 +288,6 @@ class UIAuthWorkerStore(SQLBaseStore): ) return [(row["user_agent"], row["ip"]) for row in rows] - -class UIAuthStore(UIAuthWorkerStore): async def delete_old_ui_auth_sessions(self, expiration_time: int) -> None: """ Remove sessions which were last used earlier than the expiration time. @@ -339,3 +337,7 @@ class UIAuthStore(UIAuthWorkerStore): iterable=session_ids, keyvalues={}, ) + + +class UIAuthStore(UIAuthWorkerStore): + pass diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py index 7657bddea5..e7aed092c2 100644 --- a/tests/test_phone_home.py +++ b/tests/test_phone_home.py @@ -17,7 +17,7 @@ import resource import mock -from synapse.app.homeserver import phone_stats_home +from synapse.app.phone_stats_home import phone_stats_home from tests.unittest import HomeserverTestCase diff --git a/tests/utils.py b/tests/utils.py index 4673872f88..7a927c7f74 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -276,7 +276,7 @@ def setup_test_homeserver( hs.setup() if homeserverToUse.__name__ == "TestHomeServer": - hs.setup_master() + hs.setup_background_tasks() if isinstance(db_engine, PostgresEngine): database = hs.get_datastores().databases[0] -- cgit 1.5.1 From ec10bdd32bb52af73789f5f60b39135578a739b1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Oct 2020 15:09:31 +0100 Subject: Speed up unit tests when using PostgreSQL (#8450) --- changelog.d/8450.misc | 1 + synapse/storage/databases/main/events_worker.py | 13 ++++++++++++- tests/server.py | 4 ++++ 3 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8450.misc (limited to 'tests') diff --git a/changelog.d/8450.misc b/changelog.d/8450.misc new file mode 100644 index 0000000000..4e04c523ab --- /dev/null +++ b/changelog.d/8450.misc @@ -0,0 +1 @@ +Speed up unit tests when using PostgreSQL. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index f95679ebc4..723ced4ff0 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -74,6 +74,13 @@ class EventRedactBehaviour(Names): class EventsWorkerStore(SQLBaseStore): + # Whether to use dedicated DB threads for event fetching. This is only used + # if there are multiple DB threads available. When used will lock the DB + # thread for periods of time (so unit tests want to disable this when they + # run DB transactions on the main thread). See EVENT_QUEUE_* for more + # options controlling this. 
+ USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = True + def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) @@ -522,7 +529,11 @@ class EventsWorkerStore(SQLBaseStore): if not event_list: single_threaded = self.database_engine.single_threaded - if single_threaded or i > EVENT_QUEUE_ITERATIONS: + if ( + not self.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING + or single_threaded + or i > EVENT_QUEUE_ITERATIONS + ): self._event_fetch_ongoing -= 1 return else: diff --git a/tests/server.py b/tests/server.py index b404ad4e2a..f7f5276b21 100644 --- a/tests/server.py +++ b/tests/server.py @@ -372,6 +372,10 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs): pool.threadpool = ThreadPool(clock._reactor) pool.running = True + # We've just changed the Databases to run DB transactions on the same + # thread, so we need to disable the dedicated thread behaviour. + server.get_datastores().main.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = False + return server -- cgit 1.5.1 From e3debf9682ed59b2972f236fe2982b6af0a9bb9a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Oct 2020 15:20:45 +0100 Subject: Add logging on startup/shutdown (#8448) This is so we can tell what is going on when things are taking a while to start up. The main change here is to ensure that transactions that are created during startup get correctly logged like normal transactions. --- changelog.d/8448.misc | 1 + scripts/synapse_port_db | 2 +- synapse/app/_base.py | 5 ++ synapse/storage/database.py | 89 ++++++++++++++++++---- synapse/storage/databases/__init__.py | 2 +- synapse/storage/databases/main/__init__.py | 1 - .../storage/databases/main/event_push_actions.py | 8 +- .../storage/databases/main/monthly_active_users.py | 1 - synapse/storage/databases/main/roommember.py | 13 +--- .../databases/main/schema/delta/20/pushers.py | 19 +++-- .../storage/databases/main/schema/delta/25/fts.py | 2 - .../storage/databases/main/schema/delta/27/ts.py | 2 - .../databases/main/schema/delta/30/as_users.py | 6 +- .../databases/main/schema/delta/31/pushers.py | 19 +++-- .../main/schema/delta/31/search_update.py | 2 - .../databases/main/schema/delta/33/event_fields.py | 2 - .../main/schema/delta/33/remote_media_ts.py | 5 +- .../schema/delta/56/unique_user_filter_index.py | 7 +- .../schema/delta/57/local_current_membership.py | 1 - synapse/storage/prepare_database.py | 33 +++----- synapse/storage/types.py | 6 ++ synapse/storage/util/id_generators.py | 8 +- synapse/storage/util/sequence.py | 15 +++- tests/storage/test_appservice.py | 14 ++-- tests/utils.py | 2 + 25 files changed, 152 insertions(+), 113 deletions(-) create mode 100644 changelog.d/8448.misc (limited to 'tests') diff --git a/changelog.d/8448.misc b/changelog.d/8448.misc new file mode 100644 index 0000000000..5ddda1803b --- /dev/null +++ b/changelog.d/8448.misc @@ -0,0 +1 @@ +Add SQL logging on queries that happen during startup. 
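
The heart of the change is a wrapper around the raw DB-API connection whose cursor() hands back logging cursors keyed by a transaction name. A minimal sketch of that pattern, with made-up names and sqlite3 standing in for Synapse's real classes:

    import sqlite3

    class LoggingConnectionSketch:
        """Proxy a DB-API connection so every cursor it hands out is
        tagged with a transaction name that can be logged (a stand-in
        for LoggingDatabaseConnection/LoggingTransaction)."""

        def __init__(self, conn, default_txn_name):
            self._conn = conn
            self._default_txn_name = default_txn_name

        def cursor(self, *, txn_name=None):
            name = txn_name or self._default_txn_name
            # The real code logs, times and meters the transaction here.
            print("[TXN START] %s" % name)
            return self._conn.cursor()

        def __getattr__(self, attr):
            # commit, rollback, close, ... fall through to the wrapped
            # connection unchanged.
            return getattr(self._conn, attr)

    conn = LoggingConnectionSketch(sqlite3.connect(":memory:"), "startup")
    cur = conn.cursor(txn_name="prepare_database")
    cur.execute("CREATE TABLE t (x INTEGER)")
    conn.commit()

Proxying unknown attributes through __getattr__ keeps the wrapper drop-in compatible with code that still expects a plain connection.
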
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index ae2887b7d2..7e12f5440c 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -489,7 +489,7 @@ class Porter(object): hs = MockHomeserver(self.hs_config) - with make_conn(db_config, engine) as db_conn: + with make_conn(db_config, engine, "portdb") as db_conn: engine.check_database( db_conn, allow_outdated_version=allow_outdated_version ) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 8bb0b142ca..f6f7b2bf42 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -272,6 +272,11 @@ def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]): hs.get_datastore().db_pool.start_profiling() hs.get_pusherpool().start() + # Log when we start the shut down process. + hs.get_reactor().addSystemEventTrigger( + "before", "shutdown", logger.info, "Shutting down..." + ) + setup_sentry(hs) setup_sdnotify(hs) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 79ec8f119d..0d9d9b7cc0 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -32,6 +32,7 @@ from typing import ( overload, ) +import attr from prometheus_client import Histogram from typing_extensions import Literal @@ -90,13 +91,17 @@ def make_pool( return adbapi.ConnectionPool( db_config.config["name"], cp_reactor=reactor, - cp_openfun=engine.on_new_connection, + cp_openfun=lambda conn: engine.on_new_connection( + LoggingDatabaseConnection(conn, engine, "on_new_connection") + ), **db_config.config.get("args", {}) ) def make_conn( - db_config: DatabaseConnectionConfig, engine: BaseDatabaseEngine + db_config: DatabaseConnectionConfig, + engine: BaseDatabaseEngine, + default_txn_name: str, ) -> Connection: """Make a new connection to the database and return it. @@ -109,11 +114,60 @@ def make_conn( for k, v in db_config.config.get("args", {}).items() if not k.startswith("cp_") } - db_conn = engine.module.connect(**db_params) + native_db_conn = engine.module.connect(**db_params) + db_conn = LoggingDatabaseConnection(native_db_conn, engine, default_txn_name) + engine.on_new_connection(db_conn) return db_conn +@attr.s(slots=True) +class LoggingDatabaseConnection: + """A wrapper around a database connection that returns `LoggingTransaction` + as its cursor class. + + This is mainly used on startup to ensure that queries get logged correctly + """ + + conn = attr.ib(type=Connection) + engine = attr.ib(type=BaseDatabaseEngine) + default_txn_name = attr.ib(type=str) + + def cursor( + self, *, txn_name=None, after_callbacks=None, exception_callbacks=None + ) -> "LoggingTransaction": + if not txn_name: + txn_name = self.default_txn_name + + return LoggingTransaction( + self.conn.cursor(), + name=txn_name, + database_engine=self.engine, + after_callbacks=after_callbacks, + exception_callbacks=exception_callbacks, + ) + + def close(self) -> None: + self.conn.close() + + def commit(self) -> None: + self.conn.commit() + + def rollback(self, *args, **kwargs) -> None: + self.conn.rollback(*args, **kwargs) + + def __enter__(self) -> "Connection": + self.conn.__enter__() + return self + + def __exit__(self, exc_type, exc_value, traceback) -> bool: + return self.conn.__exit__(exc_type, exc_value, traceback) + + # Proxy through any unknown lookups to the DB conn class. + def __getattr__(self, name): + return getattr(self.conn, name) + + # The type of entry which goes on our after_callbacks and exception_callbacks lists. 
# # Python 3.5.2 doesn't support Callable with an ellipsis, so we wrap it in quotes so @@ -247,6 +301,12 @@ class LoggingTransaction: def close(self) -> None: self.txn.close() + def __enter__(self) -> "LoggingTransaction": + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + class PerformanceCounters: def __init__(self): @@ -395,7 +455,7 @@ class DatabasePool: def new_transaction( self, - conn: Connection, + conn: LoggingDatabaseConnection, desc: str, after_callbacks: List[_CallbackListEntry], exception_callbacks: List[_CallbackListEntry], @@ -418,12 +478,10 @@ class DatabasePool: i = 0 N = 5 while True: - cursor = LoggingTransaction( - conn.cursor(), - name, - self.engine, - after_callbacks, - exception_callbacks, + cursor = conn.cursor( + txn_name=name, + after_callbacks=after_callbacks, + exception_callbacks=exception_callbacks, ) try: r = func(cursor, *args, **kwargs) @@ -584,7 +642,10 @@ class DatabasePool: logger.debug("Reconnecting closed database connection") conn.reconnect() - return func(conn, *args, **kwargs) + db_conn = LoggingDatabaseConnection( + conn, self.engine, "runWithConnection" + ) + return func(db_conn, *args, **kwargs) return await make_deferred_yieldable( self._db_pool.runWithConnection(inner_func, *args, **kwargs) @@ -1621,7 +1682,7 @@ class DatabasePool: def get_cache_dict( self, - db_conn: Connection, + db_conn: LoggingDatabaseConnection, table: str, entity_column: str, stream_column: str, @@ -1642,9 +1703,7 @@ class DatabasePool: "limit": limit, } - sql = self.engine.convert_param_style(sql) - - txn = db_conn.cursor() + txn = db_conn.cursor(txn_name="get_cache_dict") txn.execute(sql, (int(max_value),)) cache = {row[0]: int(row[1]) for row in txn} diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py index aa5d490624..0c24325011 100644 --- a/synapse/storage/databases/__init__.py +++ b/synapse/storage/databases/__init__.py @@ -46,7 +46,7 @@ class Databases: db_name = database_config.name engine = create_engine(database_config.config) - with make_conn(database_config, engine) as db_conn: + with make_conn(database_config, engine, "startup") as db_conn: logger.info("[database config %r]: Checking database server", db_name) engine.check_database(db_conn) diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index f823d66709..9b16f45f3e 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -284,7 +284,6 @@ class DataStore( " last_user_sync_ts, status_msg, currently_active FROM presence_stream" " WHERE state != ?" 
) - sql = self.database_engine.convert_param_style(sql) txn = db_conn.cursor() txn.execute(sql, (PresenceState.OFFLINE,)) diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 62f1738732..80f3b4d740 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -20,7 +20,7 @@ from typing import Dict, List, Optional, Tuple, Union import attr from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.storage._base import LoggingTransaction, SQLBaseStore, db_to_json +from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import DatabasePool from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -74,11 +74,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): self.stream_ordering_month_ago = None self.stream_ordering_day_ago = None - cur = LoggingTransaction( - db_conn.cursor(), - name="_find_stream_orderings_for_times_txn", - database_engine=self.database_engine, - ) + cur = db_conn.cursor(txn_name="_find_stream_orderings_for_times_txn") self._find_stream_orderings_for_times_txn(cur) cur.close() diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index b2127598ef..c66f558567 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -214,7 +214,6 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): self._mau_stats_only = hs.config.mau_stats_only # Do not add more reserved users than the total allowable number - # cur = LoggingTransaction( self.db_pool.new_transaction( db_conn, "initialise_mau_threepids", diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 86ffe2479e..bae1bd22d3 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -21,12 +21,7 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.storage._base import ( - LoggingTransaction, - SQLBaseStore, - db_to_json, - make_in_list_sql_clause, -) +from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import DatabasePool from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import Sqlite3Engine @@ -60,10 +55,8 @@ class RoomMemberWorkerStore(EventsWorkerStore): # background update still running? 
self._current_state_events_membership_up_to_date = False - txn = LoggingTransaction( - db_conn.cursor(), - name="_check_safe_current_state_events_membership_updated", - database_engine=self.database_engine, + txn = db_conn.cursor( + txn_name="_check_safe_current_state_events_membership_updated" ) self._check_safe_current_state_events_membership_updated_txn(txn) txn.close() diff --git a/synapse/storage/databases/main/schema/delta/20/pushers.py b/synapse/storage/databases/main/schema/delta/20/pushers.py index 3edfcfd783..45b846e6a7 100644 --- a/synapse/storage/databases/main/schema/delta/20/pushers.py +++ b/synapse/storage/databases/main/schema/delta/20/pushers.py @@ -66,16 +66,15 @@ def run_create(cur, database_engine, *args, **kwargs): row[8] = bytes(row[8]).decode("utf-8") row[11] = bytes(row[11]).decode("utf-8") cur.execute( - database_engine.convert_param_style( - """ - INSERT into pushers2 ( - id, user_name, access_token, profile_tag, kind, - app_id, app_display_name, device_display_name, - pushkey, ts, lang, data, last_token, last_success, - failing_since - ) values (%s)""" - % (",".join(["?" for _ in range(len(row))])) - ), + """ + INSERT into pushers2 ( + id, user_name, access_token, profile_tag, kind, + app_id, app_display_name, device_display_name, + pushkey, ts, lang, data, last_token, last_success, + failing_since + ) values (%s) + """ + % (",".join(["?" for _ in range(len(row))])), row, ) count += 1 diff --git a/synapse/storage/databases/main/schema/delta/25/fts.py b/synapse/storage/databases/main/schema/delta/25/fts.py index ee675e71ff..21f57825d4 100644 --- a/synapse/storage/databases/main/schema/delta/25/fts.py +++ b/synapse/storage/databases/main/schema/delta/25/fts.py @@ -71,8 +71,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_search", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/27/ts.py b/synapse/storage/databases/main/schema/delta/27/ts.py index b7972cfa8e..1c6058063f 100644 --- a/synapse/storage/databases/main/schema/delta/27/ts.py +++ b/synapse/storage/databases/main/schema/delta/27/ts.py @@ -50,8 +50,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_origin_server_ts", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/30/as_users.py b/synapse/storage/databases/main/schema/delta/30/as_users.py index b42c02710a..7f08fabe9f 100644 --- a/synapse/storage/databases/main/schema/delta/30/as_users.py +++ b/synapse/storage/databases/main/schema/delta/30/as_users.py @@ -59,9 +59,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): user_chunks = (user_ids[i : i + 100] for i in range(0, len(user_ids), n)) for chunk in user_chunks: cur.execute( - database_engine.convert_param_style( - "UPDATE users SET appservice_id = ? WHERE name IN (%s)" - % (",".join("?" for _ in chunk),) - ), + "UPDATE users SET appservice_id = ? WHERE name IN (%s)" + % (",".join("?" 
for _ in chunk),), [as_id] + chunk, ) diff --git a/synapse/storage/databases/main/schema/delta/31/pushers.py b/synapse/storage/databases/main/schema/delta/31/pushers.py index 9bb504aad5..5be81c806a 100644 --- a/synapse/storage/databases/main/schema/delta/31/pushers.py +++ b/synapse/storage/databases/main/schema/delta/31/pushers.py @@ -65,16 +65,15 @@ def run_create(cur, database_engine, *args, **kwargs): row = list(row) row[12] = token_to_stream_ordering(row[12]) cur.execute( - database_engine.convert_param_style( - """ - INSERT into pushers2 ( - id, user_name, access_token, profile_tag, kind, - app_id, app_display_name, device_display_name, - pushkey, ts, lang, data, last_stream_ordering, last_success, - failing_since - ) values (%s)""" - % (",".join(["?" for _ in range(len(row))])) - ), + """ + INSERT into pushers2 ( + id, user_name, access_token, profile_tag, kind, + app_id, app_display_name, device_display_name, + pushkey, ts, lang, data, last_stream_ordering, last_success, + failing_since + ) values (%s) + """ + % (",".join(["?" for _ in range(len(row))])), row, ) count += 1 diff --git a/synapse/storage/databases/main/schema/delta/31/search_update.py b/synapse/storage/databases/main/schema/delta/31/search_update.py index 63b757ade6..b84c844e3a 100644 --- a/synapse/storage/databases/main/schema/delta/31/search_update.py +++ b/synapse/storage/databases/main/schema/delta/31/search_update.py @@ -55,8 +55,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_search_order", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/33/event_fields.py b/synapse/storage/databases/main/schema/delta/33/event_fields.py index a3e81eeac7..e928c66a8f 100644 --- a/synapse/storage/databases/main/schema/delta/33/event_fields.py +++ b/synapse/storage/databases/main/schema/delta/33/event_fields.py @@ -50,8 +50,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_fields_sender_url", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py b/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py index a26057dfb6..ad875c733a 100644 --- a/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py +++ b/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py @@ -23,8 +23,5 @@ def run_create(cur, database_engine, *args, **kwargs): def run_upgrade(cur, database_engine, *args, **kwargs): cur.execute( - database_engine.convert_param_style( - "UPDATE remote_media_cache SET last_access_ts = ?" 
- ), - (int(time.time() * 1000),), + "UPDATE remote_media_cache SET last_access_ts = ?", (int(time.time() * 1000),), ) diff --git a/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py b/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py index 1de8b54961..bb7296852a 100644 --- a/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py +++ b/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py @@ -1,6 +1,8 @@ import logging +from io import StringIO from synapse.storage.engines import PostgresEngine +from synapse.storage.prepare_database import execute_statements_from_stream logger = logging.getLogger(__name__) @@ -46,7 +48,4 @@ def run_create(cur, database_engine, *args, **kwargs): select_clause, ) - if isinstance(database_engine, PostgresEngine): - cur.execute(sql) - else: - cur.executescript(sql) + execute_statements_from_stream(cur, StringIO(sql)) diff --git a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py b/synapse/storage/databases/main/schema/delta/57/local_current_membership.py index 63b5acdcf7..44917f0a2e 100644 --- a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py +++ b/synapse/storage/databases/main/schema/delta/57/local_current_membership.py @@ -68,7 +68,6 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): INNER JOIN room_memberships AS r USING (event_id) WHERE type = 'm.room.member' AND state_key LIKE ? """ - sql = database_engine.convert_param_style(sql) cur.execute(sql, ("%:" + config.server_name,)) cur.execute( diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 4957e77f4c..459754feab 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import imp import logging import os @@ -24,9 +23,10 @@ from typing import Optional, TextIO import attr from synapse.config.homeserver import HomeServerConfig +from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.engines.postgres import PostgresEngine -from synapse.storage.types import Connection, Cursor +from synapse.storage.types import Cursor from synapse.types import Collection logger = logging.getLogger(__name__) @@ -67,7 +67,7 @@ UNAPPLIED_DELTA_ON_WORKER_ERROR = ( def prepare_database( - db_conn: Connection, + db_conn: LoggingDatabaseConnection, database_engine: BaseDatabaseEngine, config: Optional[HomeServerConfig], databases: Collection[str] = ["main", "state"], @@ -89,7 +89,7 @@ def prepare_database( """ try: - cur = db_conn.cursor() + cur = db_conn.cursor(txn_name="prepare_database") # sqlite does not automatically start transactions for DDL / SELECT statements, # so we start one before running anything. This ensures that any upgrades @@ -258,9 +258,7 @@ def _setup_new_database(cur, database_engine, databases): executescript(cur, entry.absolute_path) cur.execute( - database_engine.convert_param_style( - "INSERT INTO schema_version (version, upgraded) VALUES (?,?)" - ), + "INSERT INTO schema_version (version, upgraded) VALUES (?,?)", (max_current_ver, False), ) @@ -486,17 +484,13 @@ def _upgrade_existing_database( # Mark as done. 
cur.execute( - database_engine.convert_param_style( - "INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)" - ), + "INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)", (v, relative_path), ) cur.execute("DELETE FROM schema_version") cur.execute( - database_engine.convert_param_style( - "INSERT INTO schema_version (version, upgraded) VALUES (?,?)" - ), + "INSERT INTO schema_version (version, upgraded) VALUES (?,?)", (v, True), ) @@ -532,10 +526,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams) schemas to be applied """ cur.execute( - database_engine.convert_param_style( - "SELECT file FROM applied_module_schemas WHERE module_name = ?" - ), - (modname,), + "SELECT file FROM applied_module_schemas WHERE module_name = ?", (modname,), ) applied_deltas = {d for d, in cur} for (name, stream) in names_and_streams: @@ -553,9 +544,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams) # Mark as done. cur.execute( - database_engine.convert_param_style( - "INSERT INTO applied_module_schemas (module_name, file) VALUES (?,?)" - ), + "INSERT INTO applied_module_schemas (module_name, file) VALUES (?,?)", (modname, name), ) @@ -627,9 +616,7 @@ def _get_or_create_schema_state(txn, database_engine): if current_version: txn.execute( - database_engine.convert_param_style( - "SELECT file FROM applied_schema_deltas WHERE version >= ?" - ), + "SELECT file FROM applied_schema_deltas WHERE version >= ?", (current_version,), ) applied_deltas = [d for d, in txn] diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 2d2b560e74..970bb1b9da 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -61,3 +61,9 @@ class Connection(Protocol): def rollback(self, *args, **kwargs) -> None: ... + + def __enter__(self) -> "Connection": + ... + + def __exit__(self, exc_type, exc_value, traceback) -> bool: + ... diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index c92cd4a6ba..51f680d05d 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -54,7 +54,7 @@ def _load_current_id(db_conn, table, column, step=1): """ # debug logging for https://github.com/matrix-org/synapse/issues/7968 logger.info("initialising stream generator for %s(%s)", table, column) - cur = db_conn.cursor() + cur = db_conn.cursor(txn_name="_load_current_id") if step == 1: cur.execute("SELECT MAX(%s) FROM %s" % (column, table)) else: @@ -269,7 +269,7 @@ class MultiWriterIdGenerator: def _load_current_ids( self, db_conn, table: str, instance_column: str, id_column: str ): - cur = db_conn.cursor() + cur = db_conn.cursor(txn_name="_load_current_ids") # Load the current positions of all writers for the stream. if self._writers: @@ -283,15 +283,12 @@ class MultiWriterIdGenerator: stream_name = ? AND instance_name != ALL(?) """ - sql = self._db.engine.convert_param_style(sql) cur.execute(sql, (self._stream_name, self._writers)) sql = """ SELECT instance_name, stream_id FROM stream_positions WHERE stream_name = ? 
""" - sql = self._db.engine.convert_param_style(sql) - cur.execute(sql, (self._stream_name,)) self._current_positions = { @@ -340,7 +337,6 @@ class MultiWriterIdGenerator: "instance": instance_column, "cmp": "<=" if self._positive else ">=", } - sql = self._db.engine.convert_param_style(sql) cur.execute(sql, (min_stream_id * self._return_factor,)) self._persisted_upto_position = min_stream_id diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index 2dd95e2709..ff2d038ad2 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -17,6 +17,7 @@ import logging import threading from typing import Callable, List, Optional +from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import ( BaseDatabaseEngine, IncorrectDatabaseSetup, @@ -53,7 +54,11 @@ class SequenceGenerator(metaclass=abc.ABCMeta): @abc.abstractmethod def check_consistency( - self, db_conn: Connection, table: str, id_column: str, positive: bool = True + self, + db_conn: LoggingDatabaseConnection, + table: str, + id_column: str, + positive: bool = True, ): """Should be called during start up to test that the current value of the sequence is greater than or equal to the maximum ID in the table. @@ -82,9 +87,13 @@ class PostgresSequenceGenerator(SequenceGenerator): return [i for (i,) in txn] def check_consistency( - self, db_conn: Connection, table: str, id_column: str, positive: bool = True + self, + db_conn: LoggingDatabaseConnection, + table: str, + id_column: str, + positive: bool = True, ): - txn = db_conn.cursor() + txn = db_conn.cursor(txn_name="sequence.check_consistency") # First we get the current max ID from the table. table_sql = "SELECT GREATEST(%(agg)s(%(id)s), 0) FROM %(table)s" % { diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 46f94914ff..c905a38930 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -58,7 +58,7 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): # must be done after inserts database = hs.get_datastores().databases[0] self.store = ApplicationServiceStore( - database, make_conn(database._database_config, database.engine), hs + database, make_conn(database._database_config, database.engine, "test"), hs ) def tearDown(self): @@ -132,7 +132,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): db_config = hs.config.get_single_database() self.store = TestTransactionStore( - database, make_conn(db_config, self.engine), hs + database, make_conn(db_config, self.engine, "test"), hs ) def _add_service(self, url, as_token, id): @@ -448,7 +448,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): database = hs.get_datastores().databases[0] ApplicationServiceStore( - database, make_conn(database._database_config, database.engine), hs + database, make_conn(database._database_config, database.engine, "test"), hs ) @defer.inlineCallbacks @@ -467,7 +467,9 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): with self.assertRaises(ConfigError) as cm: database = hs.get_datastores().databases[0] ApplicationServiceStore( - database, make_conn(database._database_config, database.engine), hs + database, + make_conn(database._database_config, database.engine, "test"), + hs, ) e = cm.exception @@ -491,7 +493,9 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): with self.assertRaises(ConfigError) as cm: database = hs.get_datastores().databases[0] ApplicationServiceStore( - database, 
make_conn(database._database_config, database.engine), hs + database, + make_conn(database._database_config, database.engine, "test"), + hs, ) e = cm.exception diff --git a/tests/utils.py b/tests/utils.py index 7a927c7f74..af563ffe0f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -38,6 +38,7 @@ from synapse.http.server import HttpServer from synapse.logging.context import current_context, set_current_context from synapse.server import HomeServer from synapse.storage import DataStore +from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import PostgresEngine, create_engine from synapse.storage.prepare_database import prepare_database from synapse.util.ratelimitutils import FederationRateLimiter @@ -88,6 +89,7 @@ def setupdb(): host=POSTGRES_HOST, password=POSTGRES_PASSWORD, ) + db_conn = LoggingDatabaseConnection(db_conn, db_engine, "tests") prepare_database(db_conn, db_engine, None) db_conn.close() -- cgit 1.5.1 From 0991a2da93b6b2010e6ef8f732ffdc3b5b382bab Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 5 Oct 2020 14:57:46 +0100 Subject: Allow ThirdPartyEventRules modules to manipulate public room state (#8292) This PR allows `ThirdPartyEventRules` modules to view, manipulate and block changes to the state of whether a room is published in the public rooms directory. While the idea of whether a room is in the public rooms list is not kept within an event in the room, `ThirdPartyEventRules` generally deal with controlling which modifications can happen to a room. Public rooms fits within that idea, even if its toggle state isn't controlled through a state event. --- UPGRADE.rst | 17 +++++++++ changelog.d/8292.feature | 1 + synapse/events/third_party_rules.py | 51 +++++++++++++++++++++++--- synapse/handlers/directory.py | 10 +++++ synapse/handlers/room.py | 9 +++++ synapse/module_api/__init__.py | 67 ++++++++++++++++++++++++++++++++++ tests/module_api/test_api.py | 56 +++++++++++++++++++++++++++- tests/rest/client/third_party_rules.py | 31 ++++++++++------ 8 files changed, 223 insertions(+), 19 deletions(-) create mode 100644 changelog.d/8292.feature (limited to 'tests') diff --git a/UPGRADE.rst b/UPGRADE.rst index 49e86e628f..5a68312217 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -75,6 +75,23 @@ for example: wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb +Upgrading to v1.22.0 +==================== + +ThirdPartyEventRules breaking changes +------------------------------------- + +This release introduces a backwards-incompatible change to modules making use of +``ThirdPartyEventRules`` in Synapse. If you make use of a module defined under the +``third_party_event_rules`` config option, please make sure it is updated to handle +the below change: + +The ``http_client`` argument is no longer passed to modules as they are initialised. Instead, +modules are expected to make use of the ``http_client`` property on the ``ModuleApi`` class. +Modules are now passed a ``module_api`` argument during initialisation, which is an instance of +``ModuleApi``. ``ModuleApi`` instances have a ``http_client`` property which acts the same as +the ``http_client`` argument previously passed to ``ThirdPartyEventRules`` modules. 
+ Upgrading to v1.21.0 ==================== diff --git a/changelog.d/8292.feature b/changelog.d/8292.feature new file mode 100644 index 0000000000..6d0335e2c8 --- /dev/null +++ b/changelog.d/8292.feature @@ -0,0 +1 @@ +Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. \ No newline at end of file diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 9d5310851c..fed459198a 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -12,10 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Callable from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.types import Requester +from synapse.module_api import ModuleApi +from synapse.types import Requester, StateMap class ThirdPartyEventRules: @@ -38,7 +40,7 @@ class ThirdPartyEventRules: if module is not None: self.third_party_rules = module( - config=config, http_client=hs.get_simple_http_client() + config=config, module_api=ModuleApi(hs, hs.get_auth_handler()), ) async def check_event_allowed( @@ -106,6 +108,46 @@ class ThirdPartyEventRules: if self.third_party_rules is None: return True + state_events = await self._get_state_map_for_room(room_id) + + ret = await self.third_party_rules.check_threepid_can_be_invited( + medium, address, state_events + ) + return ret + + async def check_visibility_can_be_modified( + self, room_id: str, new_visibility: str + ) -> bool: + """Check if a room is allowed to be published to, or removed from, the public room + list. + + Args: + room_id: The ID of the room. + new_visibility: The new visibility state. Either "public" or "private". + + Returns: + True if the room's visibility can be modified, False if not. + """ + if self.third_party_rules is None: + return True + + check_func = getattr(self.third_party_rules, "check_visibility_can_be_modified") + if not check_func or not isinstance(check_func, Callable): + return True + + state_events = await self._get_state_map_for_room(room_id) + + return await check_func(room_id, state_events, new_visibility) + + async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]: + """Given a room ID, return the state events of that room. + + Args: + room_id: The ID of the room. + + Returns: + A dict mapping (event type, state key) to state event. 
+ """ state_ids = await self.store.get_filtered_current_state_ids(room_id) room_state_events = await self.store.get_events(state_ids.values()) @@ -113,7 +155,4 @@ class ThirdPartyEventRules: for key, event_id in state_ids.items(): state_events[key] = room_state_events[event_id] - ret = await self.third_party_rules.check_threepid_can_be_invited( - medium, address, state_events - ) - return ret + return state_events diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 6f15c68240..ad5683d251 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -46,6 +46,7 @@ class DirectoryHandler(BaseHandler): self.config = hs.config self.enable_room_list_search = hs.config.enable_room_list_search self.require_membership = hs.config.require_membership_for_aliases + self.third_party_event_rules = hs.get_third_party_event_rules() self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -454,6 +455,15 @@ class DirectoryHandler(BaseHandler): # per alias creation rule? raise SynapseError(403, "Not allowed to publish room") + # Check if publishing is blocked by a third party module + allowed_by_third_party_rules = await ( + self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) + ) + if not allowed_by_third_party_rules: + raise SynapseError(403, "Not allowed to publish room") + await self.store.set_room_is_public(room_id, making_public) async def edit_published_appservice_room_list( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index f1a6699cd4..f14f791586 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -681,6 +681,15 @@ class RoomCreationHandler(BaseHandler): creator_id=user_id, is_public=is_public, room_version=room_version, ) + # Check whether this visibility value is blocked by a third party module + allowed_by_third_party_rules = await ( + self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) + ) + if not allowed_by_third_party_rules: + raise SynapseError(403, "Room visibility value not allowed.") + directory_handler = self.hs.get_handlers().directory_handler if room_alias: await directory_handler.create_association( diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index fcbd5378c4..646f09d2bc 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -14,13 +14,18 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import TYPE_CHECKING from twisted.internet import defer +from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.types import UserID +if TYPE_CHECKING: + from synapse.server import HomeServer + """ This package defines the 'stable' API which can be used by extension modules which are loaded into Synapse. @@ -43,6 +48,27 @@ class ModuleApi: self._auth = hs.get_auth() self._auth_handler = auth_handler + # We expose these as properties below in order to attach a helpful docstring. + self._http_client = hs.get_simple_http_client() # type: SimpleHttpClient + self._public_room_list_manager = PublicRoomListManager(hs) + + @property + def http_client(self): + """Allows making outbound HTTP requests to remote resources. 
+ + An instance of synapse.http.client.SimpleHttpClient + """ + return self._http_client + + @property + def public_room_list_manager(self): + """Allows adding to, removing from and checking the status of rooms in the + public room list. + + An instance of synapse.module_api.PublicRoomListManager + """ + return self._public_room_list_manager + def get_user_by_req(self, req, allow_guest=False): """Check the access_token provided for a request @@ -266,3 +292,44 @@ class ModuleApi: await self._auth_handler.complete_sso_login( registered_user_id, request, client_redirect_url, ) + + +class PublicRoomListManager: + """Contains methods for adding to, removing from and querying whether a room + is in the public room list. + """ + + def __init__(self, hs: "HomeServer"): + self._store = hs.get_datastore() + + async def room_is_in_public_room_list(self, room_id: str) -> bool: + """Checks whether a room is in the public room list. + + Args: + room_id: The ID of the room. + + Returns: + Whether the room is in the public room list. Returns False if the room does + not exist. + """ + room = await self._store.get_room(room_id) + if not room: + return False + + return room.get("is_public", False) + + async def add_room_to_public_room_list(self, room_id: str) -> None: + """Publishes a room to the public room list. + + Args: + room_id: The ID of the room. + """ + await self._store.set_room_is_public(room_id, True) + + async def remove_room_from_public_room_list(self, room_id: str) -> None: + """Removes a room from the public room list. + + Args: + room_id: The ID of the room. + """ + await self._store.set_room_is_public(room_id, False) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 04de0b9dbe..54600ad983 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -12,13 +12,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from synapse.module_api import ModuleApi +from synapse.rest import admin +from synapse.rest.client.v1 import login, room from tests.unittest import HomeserverTestCase class ModuleApiTestCase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() self.module_api = ModuleApi(homeserver, homeserver.get_auth_handler()) @@ -52,3 +59,50 @@ class ModuleApiTestCase(HomeserverTestCase): # Check that the displayname was assigned displayname = self.get_success(self.store.get_profile_displayname("bob")) self.assertEqual(displayname, "Bobberino") + + def test_public_rooms(self): + """Tests that a room can be added and removed from the public rooms list, + as well as have its public rooms directory state queried. + """ + # Create a user and room to play with + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + room_id = self.helper.create_room_as(user_id, tok=tok) + + # The room should not currently be in the public rooms directory + is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertFalse(is_in_public_rooms) + + # Let's try adding it to the public rooms directory + self.get_success( + self.module_api.public_room_list_manager.add_room_to_public_room_list( + room_id + ) + ) + + # And checking whether it's in there... 
+ is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertTrue(is_in_public_rooms) + + # Let's remove it again + self.get_success( + self.module_api.public_room_list_manager.remove_room_from_public_room_list( + room_id + ) + ) + + # Should be gone + is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertFalse(is_in_public_rooms) diff --git a/tests/rest/client/third_party_rules.py b/tests/rest/client/third_party_rules.py index 8c24add530..715e87de08 100644 --- a/tests/rest/client/third_party_rules.py +++ b/tests/rest/client/third_party_rules.py @@ -12,18 +12,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.types import Requester from tests import unittest class ThirdPartyRulesTestModule: - def __init__(self, config): + def __init__(self, config, *args, **kwargs): pass - def check_event_allowed(self, event, context): + async def on_create_room( + self, requester: Requester, config: dict, is_requester_admin: bool + ): + return True + + async def check_event_allowed(self, event, context): if event.type == "foo.bar.forbidden": return False else: @@ -51,29 +56,31 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): self.hs = self.setup_test_homeserver(config=config) return self.hs + def prepare(self, reactor, clock, homeserver): + # Create a user and room to play with during the tests + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + def test_third_party_rules(self): """Tests that a forbidden event is forbidden from being sent, but an allowed one can be sent. """ - user_id = self.register_user("kermit", "monkey") - tok = self.login("kermit", "monkey") - - room_id = self.helper.create_room_as(user_id, tok=tok) - request, channel = self.make_request( "PUT", - "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % room_id, + "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % self.room_id, {}, - access_token=tok, + access_token=self.tok, ) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) request, channel = self.make_request( "PUT", - "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % room_id, + "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % self.room_id, {}, - access_token=tok, + access_token=self.tok, ) self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) -- cgit 1.5.1 From e775b5bb5bb2b9e42607f9514ae2e270270a8932 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 18:10:55 +0100 Subject: kill off `send_nonmember_event` This is now redundant, and we can just call `handle_new_client_event` directly. 
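
As a toy illustration of why the wrapper was redundant (stand-in names and dict events, not the real types; the member-event type check itself moves into create_and_send_nonmember_event, as the diff shows):

    import asyncio

    class EventCreationHandlerSketch:
        async def handle_new_client_event(self, requester, event, context):
            event["stream_ordering"] = 42  # pretend persistence assigned one
            return event

        # The removed wrapper added only a type check and result unwrapping:
        async def send_nonmember_event(self, requester, event, context):
            if event["type"] == "m.room.member":
                raise ValueError("member events use a different codepath")
            ev = await self.handle_new_client_event(requester, event, context)
            return ev["stream_ordering"]

    async def main():
        handler = EventCreationHandlerSketch()
        # Callers now invoke the underlying method directly and read the
        # stream ordering off the returned event:
        ev = await handler.handle_new_client_event(
            None, {"type": "m.room.message"}, None
        )
        print(ev["stream_ordering"])

    asyncio.run(main())
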
--- synapse/handlers/message.py | 74 ++++++++++++++--------------------------- synapse/handlers/room.py | 4 +-- tests/handlers/test_register.py | 2 +- tests/unittest.py | 4 ++- 4 files changed, 31 insertions(+), 53 deletions(-) (limited to 'tests') diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5969a7130f..6d136930bf 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -635,47 +635,6 @@ class EventCreationHandler: msg = self._block_events_without_consent_error % {"consent_uri": consent_uri} raise ConsentNotGivenError(msg=msg, consent_uri=consent_uri) - async def send_nonmember_event( - self, - requester: Requester, - event: EventBase, - context: EventContext, - ratelimit: bool = True, - ignore_shadow_ban: bool = False, - ) -> int: - """ - Persists and notifies local clients and federation of an event. - - Args: - requester: The requester sending the event. - event: The event to send. - context: The context of the event. - ratelimit: Whether to rate limit this send. - ignore_shadow_ban: True if shadow-banned users should be allowed to - send this event. - - Return: - The stream_id of the persisted event. - - - """ - if event.type == EventTypes.Member: - raise SynapseError( - 500, "Tried to send member event through non-member codepath" - ) - - ev = await self.handle_new_client_event( - requester=requester, - event=event, - context=context, - ratelimit=ratelimit, - ignore_shadow_ban=ignore_shadow_ban, - ) - - # we know it was persisted, so must have a stream ordering - assert ev.internal_metadata.stream_ordering - return ev.internal_metadata.stream_ordering - async def deduplicate_state_event( self, event: EventBase, context: EventContext ) -> Optional[EventBase]: @@ -716,7 +675,7 @@ class EventCreationHandler: """ Creates an event, then sends it. - See self.create_event and self.send_nonmember_event. + See self.create_event and self.handle_new_client_event. Args: requester: The requester sending the event. @@ -726,9 +685,19 @@ class EventCreationHandler: ignore_shadow_ban: True if shadow-banned users should be allowed to send this event. + Returns: + The event, and its stream ordering (if state event deduplication happened, + the previous, duplicate event). + Raises: ShadowBanError if the requester has been shadow-banned. """ + + if event_dict["type"] == EventTypes.Member: + raise SynapseError( + 500, "Tried to send member event through non-member codepath" + ) + if not ignore_shadow_ban and requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. await self.clock.sleep(random.randint(1, 10)) @@ -754,14 +723,17 @@ class EventCreationHandler: spam_error = "Spam is not permitted here" raise SynapseError(403, spam_error, Codes.FORBIDDEN) - stream_id = await self.send_nonmember_event( - requester, - event, - context, + ev = await self.handle_new_client_event( + requester=requester, + event=event, + context=context, ratelimit=ratelimit, ignore_shadow_ban=ignore_shadow_ban, ) - return event, stream_id + + # we know it was persisted, so must have a stream ordering + assert ev.internal_metadata.stream_ordering + return ev, ev.internal_metadata.stream_ordering @measure_func("create_new_client_event") async def create_new_client_event( @@ -1255,8 +1227,12 @@ class EventCreationHandler: # Since this is a dummy-event it is OK if it is sent by a # shadow-banned user. 
- await self.send_nonmember_event( - requester, event, context, ratelimit=False, ignore_shadow_ban=True, + await self.handle_new_client_event( + requester=requester, + event=event, + context=context, + ratelimit=False, + ignore_shadow_ban=True, ) return True except ConsentNotGivenError: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 530bf0ab88..d0530a446c 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -230,8 +230,8 @@ class RoomCreationHandler(BaseHandler): ) # now send the tombstone - await self.event_creation_handler.send_nonmember_event( - requester, tombstone_event, tombstone_context + await self.event_creation_handler.handle_new_client_event( + requester=requester, event=tombstone_event, context=tombstone_context, ) old_room_state = await tombstone_context.get_current_state_ids() diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index cb7c0ed51a..702c6aa089 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -413,7 +413,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): ) ) self.get_success( - event_creation_handler.send_nonmember_event(requester, event, context) + event_creation_handler.handle_new_client_event(requester, event, context) ) # Register a second user, which won't be be in the room (or even have an invite) diff --git a/tests/unittest.py b/tests/unittest.py index 82ede9de34..5c87f6097e 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -608,7 +608,9 @@ class HomeserverTestCase(TestCase): if soft_failed: event.internal_metadata.soft_failed = True - self.get_success(event_creator.send_nonmember_event(requester, event, context)) + self.get_success( + event_creator.handle_new_client_event(requester, event, context) + ) return event.event_id -- cgit 1.5.1 From 4cd1448d0e16d19a1f255ed6746a7372221e84cd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 5 Oct 2020 20:27:14 +0100 Subject: Fix third-party event modules for `check_visibility_can_be_modified` check PR #8292 tried to maintain backwards compat with modules which don't provide a `check_visibility_can_be_modified` method, but the tests weren't being run, and the check didn't work. --- changelog.d/8467.feature | 1 + synapse/events/third_party_rules.py | 4 +- tests/rest/client/test_third_party_rules.py | 86 +++++++++++++++++++++++++++++ tests/rest/client/third_party_rules.py | 86 ----------------------------- 4 files changed, 90 insertions(+), 87 deletions(-) create mode 100644 changelog.d/8467.feature create mode 100644 tests/rest/client/test_third_party_rules.py delete mode 100644 tests/rest/client/third_party_rules.py (limited to 'tests') diff --git a/changelog.d/8467.feature b/changelog.d/8467.feature new file mode 100644 index 0000000000..6d0335e2c8 --- /dev/null +++ b/changelog.d/8467.feature @@ -0,0 +1 @@ +Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. 
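
The root cause is a standard getattr pitfall, reproducible without any Synapse code (the class below is made up for illustration):

    class LegacyModule:
        """A third-party rules module that predates the new hook."""

    mod = LegacyModule()

    # Broken: two-argument getattr raises AttributeError for a missing
    # hook, so the backwards-compat fallback after it never ran.
    try:
        getattr(mod, "check_visibility_can_be_modified")
    except AttributeError:
        print("old code raised here instead of allowing the change")

    # Fixed: an explicit None default lets legacy modules fall through.
    check_func = getattr(mod, "check_visibility_can_be_modified", None)
    if check_func is None or not callable(check_func):
        print("hook not implemented; visibility change allowed by default")
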
\ No newline at end of file diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index fed459198a..1ca77519d5 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -131,7 +131,9 @@ class ThirdPartyEventRules: if self.third_party_rules is None: return True - check_func = getattr(self.third_party_rules, "check_visibility_can_be_modified") + check_func = getattr( + self.third_party_rules, "check_visibility_can_be_modified", None + ) if not check_func or not isinstance(check_func, Callable): return True diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py new file mode 100644 index 0000000000..7b322f526c --- /dev/null +++ b/tests/rest/client/test_third_party_rules.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from synapse.rest import admin +from synapse.rest.client.v1 import login, room +from synapse.types import Requester + +from tests import unittest + + +class ThirdPartyRulesTestModule: + def __init__(self, config, *args, **kwargs): + pass + + async def on_create_room( + self, requester: Requester, config: dict, is_requester_admin: bool + ): + return True + + async def check_event_allowed(self, event, context): + if event.type == "foo.bar.forbidden": + return False + else: + return True + + @staticmethod + def parse_config(config): + return config + + +class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config["third_party_event_rules"] = { + "module": __name__ + ".ThirdPartyRulesTestModule", + "config": {}, + } + + self.hs = self.setup_test_homeserver(config=config) + return self.hs + + def prepare(self, reactor, clock, homeserver): + # Create a user and room to play with during the tests + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + + def test_third_party_rules(self): + """Tests that a forbidden event is forbidden from being sent, but an allowed one + can be sent. 
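The `getattr(..., None)` fix above amounts to a general pattern for optional module hooks; a standalone sketch, with the hook's arguments simplified for illustration:

    async def call_optional_hook(module, room_id: str, new_visibility: str) -> bool:
        # Look the hook up with a default of None: modules written before the
        # hook existed simply do not define it.
        check_func = getattr(module, "check_visibility_can_be_modified", None)
        if not check_func or not callable(check_func):
            # No usable hook, so allow the change, preserving old behaviour.
            return True
        return await check_func(room_id, new_visibility)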
+ """ + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % self.room_id, + {}, + access_token=self.tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % self.room_id, + {}, + access_token=self.tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"403", channel.result) diff --git a/tests/rest/client/third_party_rules.py b/tests/rest/client/third_party_rules.py deleted file mode 100644 index 715e87de08..0000000000 --- a/tests/rest/client/third_party_rules.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the 'License'); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an 'AS IS' BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from synapse.rest import admin -from synapse.rest.client.v1 import login, room -from synapse.types import Requester - -from tests import unittest - - -class ThirdPartyRulesTestModule: - def __init__(self, config, *args, **kwargs): - pass - - async def on_create_room( - self, requester: Requester, config: dict, is_requester_admin: bool - ): - return True - - async def check_event_allowed(self, event, context): - if event.type == "foo.bar.forbidden": - return False - else: - return True - - @staticmethod - def parse_config(config): - return config - - -class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): - servlets = [ - admin.register_servlets, - login.register_servlets, - room.register_servlets, - ] - - def make_homeserver(self, reactor, clock): - config = self.default_config() - config["third_party_event_rules"] = { - "module": "tests.rest.client.third_party_rules.ThirdPartyRulesTestModule", - "config": {}, - } - - self.hs = self.setup_test_homeserver(config=config) - return self.hs - - def prepare(self, reactor, clock, homeserver): - # Create a user and room to play with during the tests - self.user_id = self.register_user("kermit", "monkey") - self.tok = self.login("kermit", "monkey") - - self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - - def test_third_party_rules(self): - """Tests that a forbidden event is forbidden from being sent, but an allowed one - can be sent. 
- """ - request, channel = self.make_request( - "PUT", - "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % self.room_id, - {}, - access_token=self.tok, - ) - self.render(request) - self.assertEquals(channel.result["code"], b"200", channel.result) - - request, channel = self.make_request( - "PUT", - "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % self.room_id, - {}, - access_token=self.tok, - ) - self.render(request) - self.assertEquals(channel.result["code"], b"403", channel.result) -- cgit 1.5.1 From 785437dc0ddfb23012748895e1f8665338e4f3df Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 5 Oct 2020 21:40:51 +0100 Subject: Update default room version to 6 (#8461) Per https://github.com/matrix-org/matrix-doc/pull/2788 --- changelog.d/8461.feature | 1 + docs/sample_config.yaml | 2 +- synapse/config/server.py | 2 +- tests/rest/client/v1/test_directory.py | 11 ++++++++++- 4 files changed, 13 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8461.feature (limited to 'tests') diff --git a/changelog.d/8461.feature b/changelog.d/8461.feature new file mode 100644 index 0000000000..3665d670e1 --- /dev/null +++ b/changelog.d/8461.feature @@ -0,0 +1 @@ +Change default room version to "6", per [MSC2788](https://github.com/matrix-org/matrix-doc/pull/2788). diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 7126ade2de..bb64662e28 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -119,7 +119,7 @@ pid_file: DATADIR/homeserver.pid # For example, for room version 1, default_room_version should be set # to "1". # -#default_room_version: "5" +#default_room_version: "6" # The GC threshold parameters to pass to `gc.set_threshold`, if defined # diff --git a/synapse/config/server.py b/synapse/config/server.py index ef6d70e3f8..85aa49c02d 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -39,7 +39,7 @@ logger = logging.Logger(__name__) # in the list. DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"] -DEFAULT_ROOM_VERSION = "5" +DEFAULT_ROOM_VERSION = "6" ROOM_COMPLEXITY_TOO_GREAT = ( "Your homeserver is unable to join rooms this large or complex. 
" diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py index 633b7dbda0..ea5a7f3739 100644 --- a/tests/rest/client/v1/test_directory.py +++ b/tests/rest/client/v1/test_directory.py @@ -21,6 +21,7 @@ from synapse.types import RoomAlias from synapse.util.stringutils import random_string from tests import unittest +from tests.unittest import override_config class DirectoryTestCase(unittest.HomeserverTestCase): @@ -67,10 +68,18 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.ensure_user_joined_room() self.set_alias_via_directory(400, alias_length=256) - def test_state_event_in_room(self): + @override_config({"default_room_version": 5}) + def test_state_event_user_in_v5_room(self): + """Test that a regular user can add alias events before room v6""" self.ensure_user_joined_room() self.set_alias_via_state_event(200) + @override_config({"default_room_version": 6}) + def test_state_event_v6_room(self): + """Test that a regular user can *not* add alias events from room v6""" + self.ensure_user_joined_room() + self.set_alias_via_state_event(403) + def test_directory_in_room(self): self.ensure_user_joined_room() self.set_alias_via_directory(200) -- cgit 1.5.1 From a02446113012920c92264f632832308588649ed8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 6 Oct 2020 16:31:31 +0100 Subject: Additional tests for third-party event rules (#8468) * Optimise and test state fetching for 3p event rules Getting all the events at once is much more efficient than getting them individually * Test that 3p event rules can modify events --- changelog.d/8468.misc | 1 + synapse/events/third_party_rules.py | 12 +++-- tests/rest/client/test_third_party_rules.py | 84 ++++++++++++++++++++++++----- 3 files changed, 79 insertions(+), 18 deletions(-) create mode 100644 changelog.d/8468.misc (limited to 'tests') diff --git a/changelog.d/8468.misc b/changelog.d/8468.misc new file mode 100644 index 0000000000..32ba991e64 --- /dev/null +++ b/changelog.d/8468.misc @@ -0,0 +1 @@ +Additional testing for `ThirdPartyEventRules`. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 1ca77519d5..e38b8e67fb 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -61,12 +61,14 @@ class ThirdPartyEventRules: prev_state_ids = await context.get_prev_state_ids() # Retrieve the state events from the database. - state_events = {} - for key, event_id in prev_state_ids.items(): - state_events[key] = await self.store.get_event(event_id, allow_none=True) + events = await self.store.get_events(prev_state_ids.values()) + state_events = {(ev.type, ev.state_key): ev for ev in events.values()} - ret = await self.third_party_rules.check_event_allowed(event, state_events) - return ret + # The module can modify the event slightly if it wants, but caution should be + # exercised, and it's likely to go very wrong if applied to events received over + # federation. + + return await self.third_party_rules.check_event_allowed(event, state_events) async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 7b322f526c..c12518c931 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -12,33 +12,43 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +import threading + +from mock import Mock + +from synapse.events import EventBase from synapse.rest import admin from synapse.rest.client.v1 import login, room -from synapse.types import Requester +from synapse.types import Requester, StateMap from tests import unittest +thread_local = threading.local() + class ThirdPartyRulesTestModule: - def __init__(self, config, *args, **kwargs): - pass + def __init__(self, config, module_api): + # keep a record of the "current" rules module, so that the test can patch + # it if desired. + thread_local.rules_module = self async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool ): return True - async def check_event_allowed(self, event, context): - if event.type == "foo.bar.forbidden": - return False - else: - return True + async def check_event_allowed(self, event: EventBase, state: StateMap[EventBase]): + return True @staticmethod def parse_config(config): return config +def current_rules_module() -> ThirdPartyRulesTestModule: + return thread_local.rules_module + + class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): servlets = [ admin.register_servlets, @@ -46,15 +56,13 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): - config = self.default_config() + def default_config(self): + config = super().default_config() config["third_party_event_rules"] = { "module": __name__ + ".ThirdPartyRulesTestModule", "config": {}, } - - self.hs = self.setup_test_homeserver(config=config) - return self.hs + return config def prepare(self, reactor, clock, homeserver): # Create a user and room to play with during the tests @@ -67,6 +75,14 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): """Tests that a forbidden event is forbidden from being sent, but an allowed one can be sent. """ + # patch the rules module with a Mock which will return False for some event + # types + async def check(ev, state): + return ev.type != "foo.bar.forbidden" + + callback = Mock(spec=[], side_effect=check) + current_rules_module().check_event_allowed = callback + request, channel = self.make_request( "PUT", "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % self.room_id, @@ -76,6 +92,16 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) + callback.assert_called_once() + + # there should be various state events in the state arg: do some basic checks + state_arg = callback.call_args[0][1] + for k in (("m.room.create", ""), ("m.room.member", self.user_id)): + self.assertIn(k, state_arg) + ev = state_arg[k] + self.assertEqual(ev.type, k[0]) + self.assertEqual(ev.state_key, k[1]) + request, channel = self.make_request( "PUT", "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % self.room_id, @@ -84,3 +110,35 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) + + def test_modify_event(self): + """Tests that the module can successfully tweak an event before it is persisted. 
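The `Mock(spec=[], side_effect=...)` trick used above generalises to any awaited module callback; a minimal sketch outside the test case:

    from mock import Mock

    async def check(ev, state):
        # Forbid one event type, allow everything else.
        return ev.type != "foo.bar.forbidden"

    # spec=[] stops the mock from inventing attributes; because side_effect is
    # an async function, calling the mock returns a coroutine that the code
    # under test awaits as usual.
    callback = Mock(spec=[], side_effect=check)
    # After exercising the code under test:
    #     callback.assert_called_once()
    #     state_arg = callback.call_args[0][1]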
+ """ + # first patch the event checker so that it will modify the event + async def check(ev: EventBase, state): + ev.content = {"x": "y"} + return True + + current_rules_module().check_event_allowed = check + + # now send the event + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/send/modifyme/1" % self.room_id, + {"x": "x"}, + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.result["code"], b"200", channel.result) + event_id = channel.json_body["event_id"] + + # ... and check that it got modified + request, channel = self.make_request( + "GET", + "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id), + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.result["code"], b"200", channel.result) + ev = channel.json_body + self.assertEqual(ev["content"]["x"], "y") -- cgit 1.5.1 From 3cd78bbe9e208d2e93ccebee5d3586ee5f5a5d31 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 6 Oct 2020 13:26:29 -0400 Subject: Add support for MSC2732: olm fallback keys (#8312) --- changelog.d/8312.feature | 1 + scripts/synapse_port_db | 1 + synapse/handlers/e2e_keys.py | 16 ++++ synapse/handlers/sync.py | 8 ++ synapse/rest/client/v2_alpha/sync.py | 1 + synapse/storage/databases/main/end_to_end_keys.py | 100 ++++++++++++++++++++- .../databases/main/schema/delta/58/11fallback.sql | 24 +++++ tests/handlers/test_e2e_keys.py | 65 ++++++++++++++ 8 files changed, 215 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8312.feature create mode 100644 synapse/storage/databases/main/schema/delta/58/11fallback.sql (limited to 'tests') diff --git a/changelog.d/8312.feature b/changelog.d/8312.feature new file mode 100644 index 0000000000..222a1b032a --- /dev/null +++ b/changelog.d/8312.feature @@ -0,0 +1 @@ +Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). \ No newline at end of file diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 7e12f5440c..2d0b59ab53 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -90,6 +90,7 @@ BOOLEAN_COLUMNS = { "room_stats_state": ["is_federatable"], "local_media_repository": ["safe_from_quarantine"], "users": ["shadow_banned"], + "e2e_fallback_keys_json": ["used"], } diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index dd40fd1299..611742ae72 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -496,6 +496,22 @@ class E2eKeysHandler: log_kv( {"message": "Did not update one_time_keys", "reason": "no keys given"} ) + fallback_keys = keys.get("org.matrix.msc2732.fallback_keys", None) + if fallback_keys and isinstance(fallback_keys, dict): + log_kv( + { + "message": "Updating fallback_keys for device.", + "user_id": user_id, + "device_id": device_id, + } + ) + await self.store.set_e2e_fallback_keys(user_id, device_id, fallback_keys) + elif fallback_keys: + log_kv({"message": "Did not update fallback_keys", "reason": "not a dict"}) + else: + log_kv( + {"message": "Did not update fallback_keys", "reason": "no keys given"} + ) # the device should have been registered already, but it may have been # deleted due to a race with a DELETE request. 
Or we may be using an diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a998e6b7f6..dd1f90e359 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -201,6 +201,8 @@ class SyncResult: device_lists: List of user_ids whose devices have changed device_one_time_keys_count: Dict of algorithm to count for one time keys for this device + device_unused_fallback_key_types: List of key types that have an unused fallback + key groups: Group updates, if any """ @@ -213,6 +215,7 @@ class SyncResult: to_device = attr.ib(type=List[JsonDict]) device_lists = attr.ib(type=DeviceLists) device_one_time_keys_count = attr.ib(type=JsonDict) + device_unused_fallback_key_types = attr.ib(type=List[str]) groups = attr.ib(type=Optional[GroupsSyncResult]) def __bool__(self) -> bool: @@ -1014,10 +1017,14 @@ class SyncHandler: logger.debug("Fetching OTK data") device_id = sync_config.device_id one_time_key_counts = {} # type: JsonDict + unused_fallback_key_types = [] # type: List[str] if device_id: one_time_key_counts = await self.store.count_e2e_one_time_keys( user_id, device_id ) + unused_fallback_key_types = await self.store.get_e2e_unused_fallback_key_types( + user_id, device_id + ) logger.debug("Fetching group data") await self._generate_sync_entry_for_groups(sync_result_builder) @@ -1041,6 +1048,7 @@ class SyncHandler: device_lists=device_lists, groups=sync_result_builder.groups, device_one_time_keys_count=one_time_key_counts, + device_unused_fallback_key_types=unused_fallback_key_types, next_batch=sync_result_builder.now_token, ) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 6779df952f..2b84eb89c0 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -236,6 +236,7 @@ class SyncRestServlet(RestServlet): "leave": sync_result.groups.leave, }, "device_one_time_keys_count": sync_result.device_one_time_keys_count, + "org.matrix.msc2732.device_unused_fallback_key_types": sync_result.device_unused_fallback_key_types, "next_batch": await sync_result.next_batch.to_string(self.store), } diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 22e1ed15d0..8c97f2af5c 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -367,6 +367,57 @@ class EndToEndKeyWorkerStore(SQLBaseStore): "count_e2e_one_time_keys", _count_e2e_one_time_keys ) + async def set_e2e_fallback_keys( + self, user_id: str, device_id: str, fallback_keys: JsonDict + ) -> None: + """Set the user's e2e fallback keys. + + Args: + user_id: the user whose keys are being set + device_id: the device whose keys are being set + fallback_keys: the keys to set. This is a map from key ID (which is + of the form "algorithm:id") to key data. 
+ """ + # fallback_keys will usually only have one item in it, so using a for + # loop (as opposed to calling simple_upsert_many_txn) won't be too bad + # FIXME: make sure that only one key per algorithm is uploaded + for key_id, fallback_key in fallback_keys.items(): + algorithm, key_id = key_id.split(":", 1) + await self.db_pool.simple_upsert( + "e2e_fallback_keys_json", + keyvalues={ + "user_id": user_id, + "device_id": device_id, + "algorithm": algorithm, + }, + values={ + "key_id": key_id, + "key_json": json_encoder.encode(fallback_key), + "used": False, + }, + desc="set_e2e_fallback_key", + ) + + @cached(max_entries=10000) + async def get_e2e_unused_fallback_key_types( + self, user_id: str, device_id: str + ) -> List[str]: + """Returns the fallback key types that have an unused key. + + Args: + user_id: the user whose keys are being queried + device_id: the device whose keys are being queried + + Returns: + a list of key types + """ + return await self.db_pool.simple_select_onecol( + "e2e_fallback_keys_json", + keyvalues={"user_id": user_id, "device_id": device_id, "used": False}, + retcol="algorithm", + desc="get_e2e_unused_fallback_key_types", + ) + async def get_e2e_cross_signing_key( self, user_id: str, key_type: str, from_user_id: Optional[str] = None ) -> Optional[dict]: @@ -701,15 +752,37 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): " WHERE user_id = ? AND device_id = ? AND algorithm = ?" " LIMIT 1" ) + fallback_sql = ( + "SELECT key_id, key_json, used FROM e2e_fallback_keys_json" + " WHERE user_id = ? AND device_id = ? AND algorithm = ?" + " LIMIT 1" + ) result = {} delete = [] + used_fallbacks = [] for user_id, device_id, algorithm in query_list: user_result = result.setdefault(user_id, {}) device_result = user_result.setdefault(device_id, {}) txn.execute(sql, (user_id, device_id, algorithm)) - for key_id, key_json in txn: + otk_row = txn.fetchone() + if otk_row is not None: + key_id, key_json = otk_row device_result[algorithm + ":" + key_id] = key_json delete.append((user_id, device_id, algorithm, key_id)) + else: + # no one-time key available, so see if there's a fallback + # key + txn.execute(fallback_sql, (user_id, device_id, algorithm)) + fallback_row = txn.fetchone() + if fallback_row is not None: + key_id, key_json, used = fallback_row + device_result[algorithm + ":" + key_id] = key_json + if not used: + used_fallbacks.append( + (user_id, device_id, algorithm, key_id) + ) + + # drop any one-time keys that were claimed sql = ( "DELETE FROM e2e_one_time_keys_json" " WHERE user_id = ? AND device_id = ? AND algorithm = ?" 
@@ -726,6 +799,23 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) + # mark fallback keys as used + for user_id, device_id, algorithm, key_id in used_fallbacks: + self.db_pool.simple_update_txn( + txn, + "e2e_fallback_keys_json", + { + "user_id": user_id, + "device_id": device_id, + "algorithm": algorithm, + "key_id": key_id, + }, + {"used": True}, + ) + self._invalidate_cache_and_stream( + txn, self.get_e2e_unused_fallback_key_types, (user_id, device_id) + ) + return result return await self.db_pool.runInteraction( @@ -754,6 +844,14 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) + self.db_pool.simple_delete_txn( + txn, + table="e2e_fallback_keys_json", + keyvalues={"user_id": user_id, "device_id": device_id}, + ) + self._invalidate_cache_and_stream( + txn, self.get_e2e_unused_fallback_key_types, (user_id, device_id) + ) await self.db_pool.runInteraction( "delete_e2e_keys_by_device", delete_e2e_keys_by_device_txn diff --git a/synapse/storage/databases/main/schema/delta/58/11fallback.sql b/synapse/storage/databases/main/schema/delta/58/11fallback.sql new file mode 100644 index 0000000000..4ed981dbf8 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/11fallback.sql @@ -0,0 +1,24 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS e2e_fallback_keys_json ( + user_id TEXT NOT NULL, -- The user this fallback key is for. + device_id TEXT NOT NULL, -- The device this fallback key is for. + algorithm TEXT NOT NULL, -- Which algorithm this fallback key is for. + key_id TEXT NOT NULL, -- An id for suppressing duplicate uploads. + key_json TEXT NOT NULL, -- The key as a JSON blob. + used BOOLEAN NOT NULL DEFAULT FALSE, -- Whether the key has been used or not. 
+ CONSTRAINT e2e_fallback_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm) +); diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 366dcfb670..4e9e3dcbc2 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -171,6 +171,71 @@ class E2eKeysHandlerTestCase(unittest.TestCase): }, ) + @defer.inlineCallbacks + def test_fallback_key(self): + local_user = "@boris:" + self.hs.hostname + device_id = "xyz" + fallback_key = {"alg1:k1": "key1"} + otk = {"alg1:k2": "key2"} + + yield defer.ensureDeferred( + self.handler.upload_keys_for_user( + local_user, + device_id, + {"org.matrix.msc2732.fallback_keys": fallback_key}, + ) + ) + + # claiming an OTK when no OTKs are available should return the fallback + # key + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, + {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, + ) + + # claiming an OTK again should return the same fallback key + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, + {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, + ) + + # if the user uploads a one-time key, the next claim should fetch the + # one-time key, and then go back to the fallback + yield defer.ensureDeferred( + self.handler.upload_keys_for_user( + local_user, device_id, {"one_time_keys": otk} + ) + ) + + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, {"failures": {}, "one_time_keys": {local_user: {device_id: otk}}}, + ) + + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, + {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, + ) + @defer.inlineCallbacks def test_replace_master_key(self): """uploading a new signing key should make the old signing key unavailable""" -- cgit 1.5.1 From 4f0637346a194a3343b4fea6cf38c1548e56648d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 7 Oct 2020 12:03:26 +0100 Subject: Combine `SpamCheckerApi` with the more generic `ModuleApi`. (#8464) Lots of different module apis is not easy to maintain. Rather than adding yet another ModuleApi(hs, hs.get_auth_handler()) incantation, first add an hs.get_module_api() method and use it where possible. --- changelog.d/8464.misc | 1 + docs/spam_checker.md | 9 +++----- synapse/app/homeserver.py | 3 +-- synapse/events/spamcheck.py | 5 +++-- synapse/events/third_party_rules.py | 3 +-- synapse/handlers/auth.py | 7 ++++++ synapse/module_api/__init__.py | 29 +++++++++++++++++++++++- synapse/server.py | 5 +++++ synapse/spam_checker_api/__init__.py | 43 ------------------------------------ tests/module_api/test_api.py | 4 ++-- 10 files changed, 51 insertions(+), 58 deletions(-) create mode 100644 changelog.d/8464.misc (limited to 'tests') diff --git a/changelog.d/8464.misc b/changelog.d/8464.misc new file mode 100644 index 0000000000..a552e88f9f --- /dev/null +++ b/changelog.d/8464.misc @@ -0,0 +1 @@ +Combine `SpamCheckerApi` with the more generic `ModuleApi`. 
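To make the new contract concrete, here is a skeleton of what a spam checker module sees after this change. The class is invented for illustration; the only difference this commit makes is the type of the `api` argument:

    class ExampleSpamChecker:
        def __init__(self, config, api):
            # `api` is now a synapse.module_api.ModuleApi; the old
            # SpamCheckerApi surface (get_state_events_in_room) is preserved.
            self._config = config
            self._api = api

        @staticmethod
        def parse_config(config):
            return config

        async def check_event_for_spam(self, event):
            # False means "not spam".
            return False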
diff --git a/docs/spam_checker.md b/docs/spam_checker.md index eb10e115f9..7fc08f1b70 100644 --- a/docs/spam_checker.md +++ b/docs/spam_checker.md @@ -11,7 +11,7 @@ able to be imported by the running Synapse. The Python class is instantiated with two objects: * Any configuration (see below). -* An instance of `synapse.spam_checker_api.SpamCheckerApi`. +* An instance of `synapse.module_api.ModuleApi`. It then implements methods which return a boolean to alter behavior in Synapse. @@ -26,11 +26,8 @@ well as some specific methods: The details of the each of these methods (as well as their inputs and outputs) are documented in the `synapse.events.spamcheck.SpamChecker` class. -The `SpamCheckerApi` class provides a way for the custom spam checker class to -call back into the homeserver internals. It currently implements the following -methods: - -* `get_state_events_in_room` +The `ModuleApi` class provides a way for the custom spam checker class to +call back into the homeserver internals. ### Example diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 4ed4a2c253..2b5465417f 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -56,7 +56,6 @@ from synapse.http.server import ( from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.module_api import ModuleApi from synapse.python_dependencies import check_requirements from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory @@ -106,7 +105,7 @@ class SynapseHomeServer(HomeServer): additional_resources = listener_config.http_options.additional_resources logger.debug("Configuring additional resources: %r", additional_resources) - module_api = ModuleApi(self, self.get_auth_handler()) + module_api = self.get_module_api() for path, resmodule in additional_resources.items(): handler_cls, config = load_module(resmodule) handler = handler_cls(config, module_api) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index b0fc859a47..bad18f7fdf 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -17,24 +17,25 @@ import inspect from typing import Any, Dict, List, Optional, Tuple -from synapse.spam_checker_api import RegistrationBehaviour, SpamCheckerApi +from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import Collection MYPY = False if MYPY: + import synapse.events import synapse.server class SpamChecker: def __init__(self, hs: "synapse.server.HomeServer"): self.spam_checkers = [] # type: List[Any] + api = hs.get_module_api() for module, config in hs.config.spam_checkers: # Older spam checkers don't accept the `api` argument, so we # try and detect support. 
spam_args = inspect.getfullargspec(module) if "api" in spam_args.args: - api = SpamCheckerApi(hs) self.spam_checkers.append(module(config=config, api=api)) else: self.spam_checkers.append(module(config=config)) diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index e38b8e67fb..1535cc5339 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -16,7 +16,6 @@ from typing import Callable from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.module_api import ModuleApi from synapse.types import Requester, StateMap @@ -40,7 +39,7 @@ class ThirdPartyEventRules: if module is not None: self.third_party_rules = module( - config=config, module_api=ModuleApi(hs, hs.get_auth_handler()), + config=config, module_api=hs.get_module_api(), ) async def check_event_allowed( diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 7c4b716b28..f6d17c53b1 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -164,7 +164,14 @@ class AuthHandler(BaseHandler): self.bcrypt_rounds = hs.config.bcrypt_rounds + # we can't use hs.get_module_api() here, because to do so will create an + # import loop. + # + # TODO: refactor this class to separate the lower-level stuff that + # ModuleApi can use from the higher-level stuff that uses ModuleApi, as + # better way to break the loop account_handler = ModuleApi(hs, self) + self.password_providers = [ module(config=config, account_handler=account_handler) for module, config in hs.config.password_providers diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 646f09d2bc..b410e3ad9c 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -14,13 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Iterable, Optional, Tuple from twisted.internet import defer from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.storage.state import StateFilter from synapse.types import UserID if TYPE_CHECKING: @@ -293,6 +294,32 @@ class ModuleApi: registered_user_id, request, client_redirect_url, ) + @defer.inlineCallbacks + def get_state_events_in_room( + self, room_id: str, types: Iterable[Tuple[str, Optional[str]]] + ) -> defer.Deferred: + """Gets current state events for the given room. + + (This is exposed for compatibility with the old SpamCheckerApi. We should + probably deprecate it and replace it with an async method in a subclass.) + + Args: + room_id: The room ID to get state events in. + types: The event type and state key (using None + to represent 'any') of the room state to acquire. + + Returns: + twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]: + The filtered state events in the room. 
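A usage sketch from inside such a module; the room ID and join-rule check are illustrative, and the Deferred the method returns can simply be awaited:

    async def room_is_public(module_api, room_id: str) -> bool:
        # Ask for a single piece of state, keyed by (event type, state key).
        events = await module_api.get_state_events_in_room(
            room_id, [("m.room.join_rules", "")]
        )
        return any(ev.content.get("join_rule") == "public" for ev in events)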
+ """ + state_ids = yield defer.ensureDeferred( + self._store.get_filtered_current_state_ids( + room_id=room_id, state_filter=StateFilter.from_types(types) + ) + ) + state = yield defer.ensureDeferred(self._store.get_events(state_ids.values())) + return state.values() + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/synapse/server.py b/synapse/server.py index aa2273955c..f83dd6148c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -91,6 +91,7 @@ from synapse.handlers.typing import FollowerTypingHandler, TypingWriterHandler from synapse.handlers.user_directory import UserDirectoryHandler from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient from synapse.http.matrixfederationclient import MatrixFederationHttpClient +from synapse.module_api import ModuleApi from synapse.notifier import Notifier from synapse.push.action_generator import ActionGenerator from synapse.push.pusherpool import PusherPool @@ -656,6 +657,10 @@ class HomeServer(metaclass=abc.ABCMeta): def get_federation_ratelimiter(self) -> FederationRateLimiter: return FederationRateLimiter(self.clock, config=self.config.rc_federation) + @cache_in_self + def get_module_api(self) -> ModuleApi: + return ModuleApi(self, self.get_auth_handler()) + async def remove_pusher(self, app_id: str, push_key: str, user_id: str): return await self.get_pusherpool().remove_pusher(app_id, push_key, user_id) diff --git a/synapse/spam_checker_api/__init__.py b/synapse/spam_checker_api/__init__.py index 395ac5ab02..3ce25bb012 100644 --- a/synapse/spam_checker_api/__init__.py +++ b/synapse/spam_checker_api/__init__.py @@ -12,19 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import logging from enum import Enum -from twisted.internet import defer - -from synapse.storage.state import StateFilter - -MYPY = False -if MYPY: - import synapse.server - -logger = logging.getLogger(__name__) - class RegistrationBehaviour(Enum): """ @@ -34,35 +23,3 @@ class RegistrationBehaviour(Enum): ALLOW = "allow" SHADOW_BAN = "shadow_ban" DENY = "deny" - - -class SpamCheckerApi: - """A proxy object that gets passed to spam checkers so they can get - access to rooms and other relevant information. - """ - - def __init__(self, hs: "synapse.server.HomeServer"): - self.hs = hs - - self._store = hs.get_datastore() - - @defer.inlineCallbacks - def get_state_events_in_room(self, room_id: str, types: tuple) -> defer.Deferred: - """Gets state events for the given room. - - Args: - room_id: The room ID to get state events in. - types: The event type and state key (using None - to represent 'any') of the room state to acquire. - - Returns: - twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]: - The filtered state events in the room. - """ - state_ids = yield defer.ensureDeferred( - self._store.get_filtered_current_state_ids( - room_id=room_id, state_filter=StateFilter.from_types(types) - ) - ) - state = yield defer.ensureDeferred(self._store.get_events(state_ids.values())) - return state.values() diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 54600ad983..7c790bee7d 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -from synapse.module_api import ModuleApi + from synapse.rest import admin from synapse.rest.client.v1 import login, room @@ -28,7 +28,7 @@ class ModuleApiTestCase(HomeserverTestCase): def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() - self.module_api = ModuleApi(homeserver, homeserver.get_auth_handler()) + self.module_api = homeserver.get_module_api() def test_can_register_user(self): """Tests that an external module can register a user""" -- cgit 1.5.1 From 4cb44a158549e83d42061b02a8b704e7d5873b21 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Wed, 7 Oct 2020 08:00:17 -0400 Subject: Add support for MSC2697: Dehydrated devices (#8380) This allows a user to store an offline device on the server and then restore it at a subsequent login. --- changelog.d/8380.feature | 1 + synapse/handlers/device.py | 84 ++++++++++++- synapse/rest/client/v2_alpha/devices.py | 134 +++++++++++++++++++++ synapse/rest/client/v2_alpha/keys.py | 37 +++--- synapse/storage/databases/main/devices.py | 78 +++++++++++- synapse/storage/databases/main/end_to_end_keys.py | 7 +- synapse/storage/databases/main/registration.py | 32 ++++- .../main/schema/delta/58/11dehydration.sql | 20 +++ tests/handlers/test_device.py | 82 +++++++++++++ 9 files changed, 454 insertions(+), 21 deletions(-) create mode 100644 changelog.d/8380.feature create mode 100644 synapse/storage/databases/main/schema/delta/58/11dehydration.sql (limited to 'tests') diff --git a/changelog.d/8380.feature b/changelog.d/8380.feature new file mode 100644 index 0000000000..05ccea19dc --- /dev/null +++ b/changelog.d/8380.feature @@ -0,0 +1 @@ +Add support for device dehydration ([MSC2697](https://github.com/matrix-org/matrix-doc/pull/2697)). diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index b9d9098104..e883ed1e37 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Tuple from synapse.api import errors from synapse.api.constants import EventTypes @@ -29,6 +29,7 @@ from synapse.api.errors import ( from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ( + JsonDict, StreamToken, get_domain_from_id, get_verify_key_from_cross_signing_key, @@ -505,6 +506,85 @@ class DeviceHandler(DeviceWorkerHandler): # receive device updates. Mark this in DB. await self.store.mark_remote_user_device_list_as_unsubscribed(user_id) + async def store_dehydrated_device( + self, + user_id: str, + device_data: JsonDict, + initial_device_display_name: Optional[str] = None, + ) -> str: + """Store a dehydrated device for a user. If the user had a previous + dehydrated device, it is removed. 
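Taken together, the three handler methods added in this commit give the lifecycle below; the `device_data` payload is illustrative and error handling is elided:

    async def dehydration_round_trip(handler, user_id: str, access_token: str):
        # 1. Store a dehydrated device, replacing any previous one.
        device_id = await handler.store_dehydrated_device(
            user_id, {"algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm"}
        )
        # 2. A later login can look the device up...
        dehydrated = await handler.get_dehydrated_device(user_id)
        assert dehydrated is not None and dehydrated[0] == device_id
        # 3. ...and rehydrate it, re-pointing this login's access token at it.
        return await handler.rehydrate_device(user_id, access_token, device_id)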
+ + Args: + user_id: the user that we are storing the device for + device_data: the dehydrated device information + initial_device_display_name: The display name to use for the device + Returns: + device id of the dehydrated device + """ + device_id = await self.check_device_registered( + user_id, None, initial_device_display_name, + ) + old_device_id = await self.store.store_dehydrated_device( + user_id, device_id, device_data + ) + if old_device_id is not None: + await self.delete_device(user_id, old_device_id) + return device_id + + async def get_dehydrated_device( + self, user_id: str + ) -> Optional[Tuple[str, JsonDict]]: + """Retrieve the information for a dehydrated device. + + Args: + user_id: the user whose dehydrated device we are looking for + Returns: + a tuple whose first item is the device ID, and the second item is + the dehydrated device information + """ + return await self.store.get_dehydrated_device(user_id) + + async def rehydrate_device( + self, user_id: str, access_token: str, device_id: str + ) -> dict: + """Process a rehydration request from the user. + + Args: + user_id: the user who is rehydrating the device + access_token: the access token used for the request + device_id: the ID of the device that will be rehydrated + Returns: + a dict containing {"success": True} + """ + success = await self.store.remove_dehydrated_device(user_id, device_id) + + if not success: + raise errors.NotFoundError() + + # If the dehydrated device was successfully deleted (the device ID + # matched the stored dehydrated device), then modify the access + # token to use the dehydrated device's ID and copy the old device + # display name to the dehydrated device, and destroy the old device + # ID + old_device_id = await self.store.set_device_for_access_token( + access_token, device_id + ) + old_device = await self.store.get_device(user_id, old_device_id) + await self.store.update_device(user_id, device_id, old_device["display_name"]) + # can't call self.delete_device because that will clobber the + # access token so call the storage layer directly + await self.store.delete_device(user_id, old_device_id) + await self.store.delete_e2e_keys_by_device( + user_id=user_id, device_id=old_device_id + ) + + # tell everyone that the old device is gone and that the dehydrated + # device has a new display name + await self.notify_device_update(user_id, [old_device_id, device_id]) + + return {"success": True} + def _update_device_from_client_ips(device, client_ips): ip = client_ips.get((device["user_id"], device["device_id"]), {}) diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 7e174de692..af117cb27c 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,6 +22,7 @@ from synapse.http.servlet import ( assert_params_in_dict, parse_json_object_from_request, ) +from synapse.http.site import SynapseRequest from ._base import client_patterns, interactive_auth_handler @@ -151,7 +153,139 @@ class DeviceRestServlet(RestServlet): return 200, {} +class DehydratedDeviceServlet(RestServlet): + """Retrieve or store a dehydrated device. 
+ + GET /org.matrix.msc2697.v2/dehydrated_device + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "device_id": "dehydrated_device_id", + "device_data": { + "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm", + "account": "dehydrated_device" + } + } + + PUT /org.matrix.msc2697/dehydrated_device + Content-Type: application/json + + { + "device_data": { + "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm", + "account": "dehydrated_device" + } + } + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "device_id": "dehydrated_device_id" + } + + """ + + PATTERNS = client_patterns("/org.matrix.msc2697.v2/dehydrated_device", releases=()) + + def __init__(self, hs): + super().__init__() + self.hs = hs + self.auth = hs.get_auth() + self.device_handler = hs.get_device_handler() + + async def on_GET(self, request: SynapseRequest): + requester = await self.auth.get_user_by_req(request) + dehydrated_device = await self.device_handler.get_dehydrated_device( + requester.user.to_string() + ) + if dehydrated_device is not None: + (device_id, device_data) = dehydrated_device + result = {"device_id": device_id, "device_data": device_data} + return (200, result) + else: + raise errors.NotFoundError("No dehydrated device available") + + async def on_PUT(self, request: SynapseRequest): + submission = parse_json_object_from_request(request) + requester = await self.auth.get_user_by_req(request) + + if "device_data" not in submission: + raise errors.SynapseError( + 400, "device_data missing", errcode=errors.Codes.MISSING_PARAM, + ) + elif not isinstance(submission["device_data"], dict): + raise errors.SynapseError( + 400, + "device_data must be an object", + errcode=errors.Codes.INVALID_PARAM, + ) + + device_id = await self.device_handler.store_dehydrated_device( + requester.user.to_string(), + submission["device_data"], + submission.get("initial_device_display_name", None), + ) + return 200, {"device_id": device_id} + + +class ClaimDehydratedDeviceServlet(RestServlet): + """Claim a dehydrated device. 
+ + POST /org.matrix.msc2697.v2/dehydrated_device/claim + Content-Type: application/json + + { + "device_id": "dehydrated_device_id" + } + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "success": true, + } + + """ + + PATTERNS = client_patterns( + "/org.matrix.msc2697.v2/dehydrated_device/claim", releases=() + ) + + def __init__(self, hs): + super().__init__() + self.hs = hs + self.auth = hs.get_auth() + self.device_handler = hs.get_device_handler() + + async def on_POST(self, request: SynapseRequest): + requester = await self.auth.get_user_by_req(request) + + submission = parse_json_object_from_request(request) + + if "device_id" not in submission: + raise errors.SynapseError( + 400, "device_id missing", errcode=errors.Codes.MISSING_PARAM, + ) + elif not isinstance(submission["device_id"], str): + raise errors.SynapseError( + 400, "device_id must be a string", errcode=errors.Codes.INVALID_PARAM, + ) + + result = await self.device_handler.rehydrate_device( + requester.user.to_string(), + self.auth.get_access_token_from_request(request), + submission["device_id"], + ) + + return (200, result) + + def register_servlets(hs, http_server): DeleteDevicesRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server) DeviceRestServlet(hs).register(http_server) + DehydratedDeviceServlet(hs).register(http_server) + ClaimDehydratedDeviceServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 55c4606569..b91996c738 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -67,6 +68,7 @@ class KeyUploadServlet(RestServlet): super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() + self.device_handler = hs.get_device_handler() @trace(opname="upload_keys") async def on_POST(self, request, device_id): @@ -75,23 +77,28 @@ class KeyUploadServlet(RestServlet): body = parse_json_object_from_request(request) if device_id is not None: - # passing the device_id here is deprecated; however, we allow it - # for now for compatibility with older clients. + # Providing the device_id should only be done for setting keys + # for dehydrated devices; however, we allow it for any device for + # compatibility with older clients. 
if requester.device_id is not None and device_id != requester.device_id: - set_tag("error", True) - log_kv( - { - "message": "Client uploading keys for a different device", - "logged_in_id": requester.device_id, - "key_being_uploaded": device_id, - } - ) - logger.warning( - "Client uploading keys for a different device " - "(logged in as %s, uploading for %s)", - requester.device_id, - device_id, + dehydrated_device = await self.device_handler.get_dehydrated_device( + user_id ) + if dehydrated_device is not None and device_id != dehydrated_device[0]: + set_tag("error", True) + log_kv( + { + "message": "Client uploading keys for a different device", + "logged_in_id": requester.device_id, + "key_being_uploaded": device_id, + } + ) + logger.warning( + "Client uploading keys for a different device " + "(logged in as %s, uploading for %s)", + requester.device_id, + device_id, + ) else: device_id = requester.device_id diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index fdf394c612..317d6cde95 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,7 +33,7 @@ from synapse.storage.database import ( make_tuple_comparison_clause, ) from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key -from synapse.util import json_encoder +from synapse.util import json_decoder, json_encoder from synapse.util.caches.descriptors import Cache, cached, cachedList from synapse.util.iterutils import batch_iter from synapse.util.stringutils import shortstr @@ -698,6 +698,80 @@ class DeviceWorkerStore(SQLBaseStore): _mark_remote_user_device_list_as_unsubscribed_txn, ) + async def get_dehydrated_device( + self, user_id: str + ) -> Optional[Tuple[str, JsonDict]]: + """Retrieve the information for a dehydrated device. + + Args: + user_id: the user whose dehydrated device we are looking for + Returns: + a tuple whose first item is the device ID, and the second item is + the dehydrated device information + """ + # FIXME: make sure device ID still exists in devices table + row = await self.db_pool.simple_select_one( + table="dehydrated_devices", + keyvalues={"user_id": user_id}, + retcols=["device_id", "device_data"], + allow_none=True, + ) + return ( + (row["device_id"], json_decoder.decode(row["device_data"])) if row else None + ) + + def _store_dehydrated_device_txn( + self, txn, user_id: str, device_id: str, device_data: str + ) -> Optional[str]: + old_device_id = self.db_pool.simple_select_one_onecol_txn( + txn, + table="dehydrated_devices", + keyvalues={"user_id": user_id}, + retcol="device_id", + allow_none=True, + ) + self.db_pool.simple_upsert_txn( + txn, + table="dehydrated_devices", + keyvalues={"user_id": user_id}, + values={"device_id": device_id, "device_data": device_data}, + ) + return old_device_id + + async def store_dehydrated_device( + self, user_id: str, device_id: str, device_data: JsonDict + ) -> Optional[str]: + """Store a dehydrated device for a user. 
+ + Args: + user_id: the user that we are storing the device for + device_id: the ID of the dehydrated device + device_data: the dehydrated device information + Returns: + device id of the user's previous dehydrated device, if any + """ + return await self.db_pool.runInteraction( + "store_dehydrated_device_txn", + self._store_dehydrated_device_txn, + user_id, + device_id, + json_encoder.encode(device_data), + ) + + async def remove_dehydrated_device(self, user_id: str, device_id: str) -> bool: + """Remove a dehydrated device. + + Args: + user_id: the user that the dehydrated device belongs to + device_id: the ID of the dehydrated device + """ + count = await self.db_pool.simple_delete( + "dehydrated_devices", + {"user_id": user_id, "device_id": device_id}, + desc="remove_dehydrated_device", + ) + return count >= 1 + class DeviceBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 8c97f2af5c..359dc6e968 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -844,6 +844,11 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) + self.db_pool.simple_delete_txn( + txn, + table="dehydrated_devices", + keyvalues={"user_id": user_id, "device_id": device_id}, + ) self.db_pool.simple_delete_txn( txn, table="e2e_fallback_keys_json", diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index a83df7759d..16ba545740 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -964,6 +964,36 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): desc="add_access_token_to_user", ) + def _set_device_for_access_token_txn(self, txn, token: str, device_id: str) -> str: + old_device_id = self.db_pool.simple_select_one_onecol_txn( + txn, "access_tokens", {"token": token}, "device_id" + ) + + self.db_pool.simple_update_txn( + txn, "access_tokens", {"token": token}, {"device_id": device_id} + ) + + self._invalidate_cache_and_stream(txn, self.get_user_by_access_token, (token,)) + + return old_device_id + + async def set_device_for_access_token(self, token: str, device_id: str) -> str: + """Sets the device ID associated with an access token. + + Args: + token: The access token to modify. + device_id: The new device ID. + Returns: + The old device ID associated with the access token. 
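The transaction above follows the read-modify-write shape used throughout this store: select the old value, update in place, invalidate the cache within the same transaction, and hand the old value back. A generic sketch, where the helper and its `db_pool` argument are stand-ins and only the table name is kept from the patch:

    def _swap_column_txn(txn, db_pool, keyvalues: dict, column: str, new_value):
        # Read the value we are about to overwrite, so callers can clean up
        # state attached to it (here, the old device).
        old_value = db_pool.simple_select_one_onecol_txn(
            txn, "access_tokens", keyvalues, column
        )
        db_pool.simple_update_txn(txn, "access_tokens", keyvalues, {column: new_value})
        # A cache invalidation belongs here, inside the same transaction, so
        # that no worker serves a stale row after commit.
        return old_value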
+ """ + + return await self.db_pool.runInteraction( + "set_device_for_access_token", + self._set_device_for_access_token_txn, + token, + device_id, + ) + async def register_user( self, user_id: str, diff --git a/synapse/storage/databases/main/schema/delta/58/11dehydration.sql b/synapse/storage/databases/main/schema/delta/58/11dehydration.sql new file mode 100644 index 0000000000..7851a0a825 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/11dehydration.sql @@ -0,0 +1,20 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS dehydrated_devices( + user_id TEXT NOT NULL PRIMARY KEY, + device_id TEXT NOT NULL, + device_data TEXT NOT NULL -- JSON-encoded client-defined data +); diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 969d44c787..4512c51311 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -224,3 +225,84 @@ class DeviceTestCase(unittest.HomeserverTestCase): ) ) self.reactor.advance(1000) + + +class DehydrationTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver("server", http_client=None) + self.handler = hs.get_device_handler() + self.registration = hs.get_registration_handler() + self.auth = hs.get_auth() + self.store = hs.get_datastore() + return hs + + def test_dehydrate_and_rehydrate_device(self): + user_id = "@boris:dehydration" + + self.get_success(self.store.register_user(user_id, "foobar")) + + # First check if we can store and fetch a dehydrated device + stored_dehydrated_device_id = self.get_success( + self.handler.store_dehydrated_device( + user_id=user_id, + device_data={"device_data": {"foo": "bar"}}, + initial_device_display_name="dehydrated device", + ) + ) + + retrieved_device_id, device_data = self.get_success( + self.handler.get_dehydrated_device(user_id=user_id) + ) + + self.assertEqual(retrieved_device_id, stored_dehydrated_device_id) + self.assertEqual(device_data, {"device_data": {"foo": "bar"}}) + + # Create a new login for the user and dehydrated the device + device_id, access_token = self.get_success( + self.registration.register_device( + user_id=user_id, device_id=None, initial_display_name="new device", + ) + ) + + # Trying to claim a nonexistent device should throw an error + self.get_failure( + self.handler.rehydrate_device( + user_id=user_id, + access_token=access_token, + device_id="not the right device ID", + ), + synapse.api.errors.NotFoundError, + ) + + # dehydrating the right devices should succeed and change our device ID + # to the dehydrated device's ID + res = self.get_success( + self.handler.rehydrate_device( + user_id=user_id, 
+ access_token=access_token, + device_id=retrieved_device_id, + ) + ) + + self.assertEqual(res, {"success": True}) + + # make sure that our device ID has changed + user_info = self.get_success(self.auth.get_user_by_access_token(access_token)) + + self.assertEqual(user_info["device_id"], retrieved_device_id) + + # make sure the device has the display name that was set from the login + res = self.get_success(self.handler.get_device(user_id, retrieved_device_id)) + + self.assertEqual(res["display_name"], "new device") + + # make sure that the device ID that we were initially assigned no longer exists + self.get_failure( + self.handler.get_device(user_id, device_id), + synapse.api.errors.NotFoundError, + ) + + # make sure that there's no device available for dehydrating now + ret = self.get_success(self.handler.get_dehydrated_device(user_id=user_id)) + + self.assertIsNone(ret) -- cgit 1.5.1 From ae5b2a72c09d67311c9830f5a6fae1decce03e1f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Oct 2020 15:15:57 +0100 Subject: Reduce serialization errors in MultiWriterIdGen (#8456) We call `_update_stream_positions_table_txn` a lot, which is an UPSERT that can conflict in `REPEATABLE READ` isolation level. Instead of doing a transaction consisting of a single query we may as well run it outside of a transaction. --- changelog.d/8456.misc | 1 + synapse/storage/database.py | 69 ++++++++++++++++++++++++++++++++--- synapse/storage/engines/_base.py | 17 +++++++++ synapse/storage/engines/postgres.py | 10 ++++- synapse/storage/engines/sqlite.py | 10 +++++ synapse/storage/util/id_generators.py | 12 +++++- tests/storage/test_base.py | 1 + 7 files changed, 112 insertions(+), 8 deletions(-) create mode 100644 changelog.d/8456.misc (limited to 'tests') diff --git a/changelog.d/8456.misc b/changelog.d/8456.misc new file mode 100644 index 0000000000..ccd260069b --- /dev/null +++ b/changelog.d/8456.misc @@ -0,0 +1 @@ +Reduce number of serialization errors of `MultiWriterIdGenerator._update_table`. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 0d9d9b7cc0..0ba3a025cf 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -463,6 +463,24 @@ class DatabasePool: *args: Any, **kwargs: Any ) -> R: + """Start a new database transaction with the given connection. + + Note: The given func may be called multiple times under certain + failure modes. This is normally fine when in a standard transaction, + but care must be taken if the connection is in `autocommit` mode that + the function will correctly handle being aborted and retried half way + through its execution. + + Args: + conn + desc + after_callbacks + exception_callbacks + func + *args + **kwargs + """ + start = monotonic_time() txn_id = self._TXN_ID @@ -566,7 +584,12 @@ class DatabasePool: sql_txn_timer.labels(desc).observe(duration) async def runInteraction( - self, desc: str, func: "Callable[..., R]", *args: Any, **kwargs: Any + self, + desc: str, + func: "Callable[..., R]", + *args: Any, + db_autocommit: bool = False, + **kwargs: Any ) -> R: """Starts a transaction on the database and runs a given function @@ -576,6 +599,18 @@ class DatabasePool: database transaction (twisted.enterprise.adbapi.Transaction) as its first argument, followed by `args` and `kwargs`. + db_autocommit: Whether to run the function in "autocommit" mode, + i.e. outside of a transaction. This is useful for transactions + that are only a single query. + + Currently, this is only implemented for Postgres. 
SQLite will still
+                run the function inside a transaction.
+
+                WARNING: This means that if func fails halfway through then
+                the changes will *not* be rolled back. `func` may also get
+                called multiple times if the transaction is retried, so must
+                correctly handle that case.
+
             args: positional args to pass to `func`
             kwargs: named args to pass to `func`

@@ -596,6 +631,7 @@ class DatabasePool:
             exception_callbacks,
             func,
             *args,
+            db_autocommit=db_autocommit,
             **kwargs
         )

@@ -609,7 +645,11 @@ class DatabasePool:
         return cast(R, result)

     async def runWithConnection(
-        self, func: "Callable[..., R]", *args: Any, **kwargs: Any
+        self,
+        func: "Callable[..., R]",
+        *args: Any,
+        db_autocommit: bool = False,
+        **kwargs: Any
     ) -> R:
         """Wraps the .runWithConnection() method on the underlying db_pool.

@@ -618,6 +658,9 @@ class DatabasePool:
             database connection (twisted.enterprise.adbapi.Connection) as
             its first argument, followed by `args` and `kwargs`.
             args: positional args to pass to `func`
+            db_autocommit: Whether to run the function in "autocommit" mode,
+                i.e. outside of a transaction. This is useful for transactions
+                that are only a single query. Currently only affects postgres.
             kwargs: named args to pass to `func`

         Returns:
@@ -633,6 +676,13 @@ class DatabasePool:
         start_time = monotonic_time()

         def inner_func(conn, *args, **kwargs):
+            # We shouldn't be in a transaction. If we are then something
+            # somewhere hasn't committed after doing work. (This is likely only
+            # possible during startup, as `run*` will ensure changes are
+            # committed/rolled back before putting the connection back in the
+            # pool).
+            assert not self.engine.in_transaction(conn)
+
             with LoggingContext("runWithConnection", parent_context) as context:
                 sched_duration_sec = monotonic_time() - start_time
                 sql_scheduling_timer.observe(sched_duration_sec)
@@ -642,10 +692,17 @@ class DatabasePool:
                 logger.debug("Reconnecting closed database connection")
                 conn.reconnect()

-            db_conn = LoggingDatabaseConnection(
-                conn, self.engine, "runWithConnection"
-            )
-            return func(db_conn, *args, **kwargs)
+            try:
+                if db_autocommit:
+                    self.engine.attempt_to_set_autocommit(conn, True)
+
+                db_conn = LoggingDatabaseConnection(
+                    conn, self.engine, "runWithConnection"
+                )
+                return func(db_conn, *args, **kwargs)
+            finally:
+                if db_autocommit:
+                    self.engine.attempt_to_set_autocommit(conn, False)

         return await make_deferred_yieldable(
             self._db_pool.runWithConnection(inner_func, *args, **kwargs)
diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
index 908cbc79e3..d6d632dc10 100644
--- a/synapse/storage/engines/_base.py
+++ b/synapse/storage/engines/_base.py
@@ -97,3 +97,20 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
         """Gets a string giving the server version. For example: '3.22.0'
         """
         ...
+
+    @abc.abstractmethod
+    def in_transaction(self, conn: Connection) -> bool:
+        """Whether the connection is currently in a transaction.
+        """
+        ...
+
+    @abc.abstractmethod
+    def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool):
+        """Attempt to set the connection's autocommit mode.
+
+        When True, queries are run outside of transactions.
+
+        Note: This has no effect on SQLite3, so callers still need to
+        commit/rollback the connections.
+        """
+        ...
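
[Editor's note: the engine hooks above exist because an UPSERT issued inside a REPEATABLE READ transaction can abort with a serialization error when two writers race. Below is a minimal, standalone sketch of that failure mode and the autocommit workaround. It is not Synapse code: it assumes a local Postgres database named "test", the psycopg2 driver, and an illustrative stream_positions table.]

# Sketch only. Assumes psycopg2 is installed and a table exists:
#   CREATE TABLE stream_positions(instance_name TEXT PRIMARY KEY, stream_id BIGINT);
import psycopg2
import psycopg2.extensions

conn = psycopg2.connect("dbname=test")

UPSERT = """
    INSERT INTO stream_positions (instance_name, stream_id)
    VALUES (%s, %s)
    ON CONFLICT (instance_name) DO UPDATE SET stream_id = EXCLUDED.stream_id
"""

# Under REPEATABLE READ, two workers running this UPSERT concurrently can
# abort with "could not serialize access due to concurrent update", forcing
# a retry of the whole transaction.
conn.set_session(
    isolation_level=psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ
)
with conn.cursor() as cur:
    cur.execute(UPSERT, ("event_persister1", 42))
conn.commit()

# In autocommit mode each execute() is its own short implicit transaction,
# so there is no long-lived snapshot to conflict with. This is the effect of
# attempt_to_set_autocommit(conn, True) for a single-query interaction.
conn.set_session(autocommit=True)
with conn.cursor() as cur:
    cur.execute(UPSERT, ("event_persister1", 43))

conn.close()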
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index ff39281f85..7719ac32f7 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -15,7 +15,8 @@ import logging -from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup +from synapse.storage.engines._base import BaseDatabaseEngine, IncorrectDatabaseSetup +from synapse.storage.types import Connection logger = logging.getLogger(__name__) @@ -119,6 +120,7 @@ class PostgresEngine(BaseDatabaseEngine): cursor.execute("SET synchronous_commit TO OFF") cursor.close() + db_conn.commit() @property def can_native_upsert(self): @@ -171,3 +173,9 @@ class PostgresEngine(BaseDatabaseEngine): return "%i.%i" % (numver / 10000, numver % 10000) else: return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100) + + def in_transaction(self, conn: Connection) -> bool: + return conn.status != self.module.extensions.STATUS_READY # type: ignore + + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + return conn.set_session(autocommit=autocommit) # type: ignore diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 8a0f8c89d1..5db0f0b520 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -17,6 +17,7 @@ import threading import typing from synapse.storage.engines import BaseDatabaseEngine +from synapse.storage.types import Connection if typing.TYPE_CHECKING: import sqlite3 # noqa: F401 @@ -86,6 +87,7 @@ class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]): db_conn.create_function("rank", 1, _rank) db_conn.execute("PRAGMA foreign_keys = ON;") + db_conn.commit() def is_deadlock(self, error): return False @@ -105,6 +107,14 @@ class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]): """ return "%i.%i.%i" % self.module.sqlite_version_info + def in_transaction(self, conn: Connection) -> bool: + return conn.in_transaction # type: ignore + + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + # Twisted doesn't let us set attributes on the connections, so we can't + # set the connection to autocommit mode. + pass + # Following functions taken from: https://github.com/coleifer/peewee diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 51f680d05d..d7e40aaa8b 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -24,6 +24,7 @@ from typing_extensions import Deque from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.database import DatabasePool, LoggingTransaction +from synapse.storage.types import Cursor from synapse.storage.util.sequence import PostgresSequenceGenerator logger = logging.getLogger(__name__) @@ -548,7 +549,7 @@ class MultiWriterIdGenerator: # do. break - def _update_stream_positions_table_txn(self, txn): + def _update_stream_positions_table_txn(self, txn: Cursor): """Update the `stream_positions` table with newly persisted position. """ @@ -598,10 +599,13 @@ class _MultiWriterCtxManager: stream_ids = attr.ib(type=List[int], factory=list) async def __aenter__(self) -> Union[int, List[int]]: + # It's safe to run this in autocommit mode as fetching values from a + # sequence ignores transaction semantics anyway. 
self.stream_ids = await self.id_gen._db.runInteraction( "_load_next_mult_id", self.id_gen._load_next_mult_id_txn, self.multiple_ids or 1, + db_autocommit=True, ) # Assert the fetched ID is actually greater than any ID we've already @@ -632,10 +636,16 @@ class _MultiWriterCtxManager: # # We only do this on the success path so that the persisted current # position points to a persisted row with the correct instance name. + # + # We do this in autocommit mode as a) the upsert works correctly outside + # transactions and b) reduces the amount of time the rows are locked + # for. If we don't do this then we'll often hit serialization errors due + # to the fact we default to REPEATABLE READ isolation levels. if self.id_gen._writers: await self.id_gen._db.runInteraction( "MultiWriterIdGenerator._update_table", self.id_gen._update_stream_positions_table_txn, + db_autocommit=True, ) return False diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 40ba652248..eac7e4dcd2 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -56,6 +56,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): engine = create_engine(sqlite_config) fake_engine = Mock(wraps=engine) fake_engine.can_native_upsert = False + fake_engine.in_transaction.return_value = False db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine) db._db_pool = self.db_pool -- cgit 1.5.1 From a97cec18bba42e5cb743f61e79f253d6d95a0c0c Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Thu, 8 Oct 2020 13:24:46 -0400 Subject: Invalidate the cache when an olm fallback key is uploaded (#8501) --- changelog.d/8501.feature | 1 + synapse/storage/databases/main/end_to_end_keys.py | 4 ++++ tests/handlers/test_e2e_keys.py | 20 ++++++++++++++++++++ 3 files changed, 25 insertions(+) create mode 100644 changelog.d/8501.feature (limited to 'tests') diff --git a/changelog.d/8501.feature b/changelog.d/8501.feature new file mode 100644 index 0000000000..5220ddd482 --- /dev/null +++ b/changelog.d/8501.feature @@ -0,0 +1 @@ +Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). 
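
[Editor's note: before the storage change below, get_e2e_unused_fallback_key_types was cached but never invalidated when a new fallback key was uploaded, so a device could keep reading a stale answer. The following is a toy illustration of that failure mode and the fix, using a plain dict rather than Synapse's cache machinery; all names here are illustrative.]

from typing import Dict, List, Tuple

_fallback_keys: Dict[Tuple[str, str], Dict[str, str]] = {}
_unused_cache: Dict[Tuple[str, str], List[str]] = {}

def get_unused_fallback_key_types(user_id: str, device_id: str) -> List[str]:
    key = (user_id, device_id)
    if key not in _unused_cache:  # cache miss: compute and memoize
        _unused_cache[key] = sorted(_fallback_keys.get(key, {}))
    return _unused_cache[key]

def set_fallback_key(user_id: str, device_id: str, algorithm: str, key: str) -> None:
    _fallback_keys.setdefault((user_id, device_id), {})[algorithm] = key
    # Without this invalidation, a device that queried its unused fallback
    # keys before uploading one would keep seeing the stale empty list.
    _unused_cache.pop((user_id, device_id), None)

assert get_unused_fallback_key_types("@u:hs", "DEV") == []
set_fallback_key("@u:hs", "DEV", "signed_curve25519", "key1")
assert get_unused_fallback_key_types("@u:hs", "DEV") == ["signed_curve25519"]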
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 359dc6e968..4415909414 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -398,6 +398,10 @@ class EndToEndKeyWorkerStore(SQLBaseStore): desc="set_e2e_fallback_key", ) + await self.invalidate_cache_and_stream( + "get_e2e_unused_fallback_key_types", (user_id, device_id) + ) + @cached(max_entries=10000) async def get_e2e_unused_fallback_key_types( self, user_id: str, device_id: str diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 4e9e3dcbc2..e79d612f7a 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -33,6 +33,7 @@ class E2eKeysHandlerTestCase(unittest.TestCase): super().__init__(*args, **kwargs) self.hs = None # type: synapse.server.HomeServer self.handler = None # type: synapse.handlers.e2e_keys.E2eKeysHandler + self.store = None # type: synapse.storage.Storage @defer.inlineCallbacks def setUp(self): @@ -40,6 +41,7 @@ class E2eKeysHandlerTestCase(unittest.TestCase): self.addCleanup, handlers=None, federation_client=mock.Mock() ) self.handler = synapse.handlers.e2e_keys.E2eKeysHandler(self.hs) + self.store = self.hs.get_datastore() @defer.inlineCallbacks def test_query_local_devices_no_devices(self): @@ -178,6 +180,12 @@ class E2eKeysHandlerTestCase(unittest.TestCase): fallback_key = {"alg1:k1": "key1"} otk = {"alg1:k2": "key2"} + # we shouldn't have any unused fallback keys yet + res = yield defer.ensureDeferred( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id) + ) + self.assertEqual(res, []) + yield defer.ensureDeferred( self.handler.upload_keys_for_user( local_user, @@ -186,6 +194,12 @@ class E2eKeysHandlerTestCase(unittest.TestCase): ) ) + # we should now have an unused alg1 key + res = yield defer.ensureDeferred( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id) + ) + self.assertEqual(res, ["alg1"]) + # claiming an OTK when no OTKs are available should return the fallback # key res = yield defer.ensureDeferred( @@ -198,6 +212,12 @@ class E2eKeysHandlerTestCase(unittest.TestCase): {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, ) + # we shouldn't have any unused fallback keys again + res = yield defer.ensureDeferred( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id) + ) + self.assertEqual(res, []) + # claiming an OTK again should return the same fallback key res = yield defer.ensureDeferred( self.handler.claim_one_time_keys( -- cgit 1.5.1 From c9c0ad5e204f309f2686dbe250382e481e0f82c2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 9 Oct 2020 07:24:34 -0400 Subject: Remove the deprecated Handlers object (#8494) All handlers now available via get_*_handler() methods on the HomeServer. 
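
[Editor's note: the get_*_handler() accessors added in the next commit rely on the existing cache_in_self decorator to build each handler at most once per homeserver. Below is a simplified, self-contained sketch of that memoisation pattern; it is an approximation for illustration, not Synapse's actual implementation.]

import functools
from typing import Callable, TypeVar

T = TypeVar("T")

def cache_in_self(builder: Callable[["HomeServer"], T]) -> Callable[["HomeServer"], T]:
    depname = builder.__name__.replace("get_", "")

    @functools.wraps(builder)
    def _get(self: "HomeServer") -> T:
        # Build each dependency at most once per homeserver instance.
        if depname not in self._cache:
            self._cache[depname] = builder(self)
        return self._cache[depname]

    return _get

class DirectoryHandler:
    def __init__(self, hs: "HomeServer"):
        self.hs = hs

class HomeServer:
    def __init__(self) -> None:
        self._cache: dict = {}

    @cache_in_self
    def get_directory_handler(self) -> DirectoryHandler:
        return DirectoryHandler(self)

hs = HomeServer()
# Repeated calls return the same memoised handler instance.
assert hs.get_directory_handler() is hs.get_directory_handler()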
--- changelog.d/8494.misc | 1 + synapse/app/admin_cmd.py | 2 +- synapse/federation/federation_server.py | 7 ++++- synapse/handlers/__init__.py | 33 ----------------------- synapse/handlers/auth.py | 2 +- synapse/handlers/deactivate_account.py | 2 +- synapse/handlers/message.py | 4 +-- synapse/handlers/pagination.py | 2 +- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/room_member.py | 6 ++--- synapse/handlers/ui_auth/checkers.py | 2 +- synapse/replication/http/federation.py | 2 +- synapse/replication/http/membership.py | 2 +- synapse/rest/admin/rooms.py | 4 +-- synapse/rest/admin/users.py | 13 +++++---- synapse/rest/client/v1/directory.py | 25 +++++++++-------- synapse/rest/client/v1/login.py | 1 - synapse/rest/client/v1/room.py | 10 +++---- synapse/rest/client/v2_alpha/account.py | 16 +++++------ synapse/rest/client/v2_alpha/register.py | 6 ++--- synapse/server.py | 30 +++++++++++++++++---- tests/api/test_auth.py | 12 +++------ tests/api/test_filtering.py | 5 +--- tests/crypto/test_keyring.py | 6 ++--- tests/handlers/test_admin.py | 2 +- tests/handlers/test_auth.py | 11 ++------ tests/handlers/test_directory.py | 10 +++---- tests/handlers/test_e2e_keys.py | 2 +- tests/handlers/test_e2e_room_keys.py | 2 +- tests/handlers/test_federation.py | 2 +- tests/handlers/test_presence.py | 2 +- tests/handlers/test_profile.py | 7 ----- tests/handlers/test_register.py | 22 ++++++--------- tests/replication/test_federation_sender_shard.py | 2 +- tests/rest/client/test_shadow_banned.py | 2 +- tests/rest/client/v1/test_events.py | 2 +- tests/rest/client/v1/test_rooms.py | 6 ++++- tests/rest/client/v1/test_typing.py | 2 +- tests/test_federation.py | 2 +- 40 files changed, 116 insertions(+), 157 deletions(-) create mode 100644 changelog.d/8494.misc (limited to 'tests') diff --git a/changelog.d/8494.misc b/changelog.d/8494.misc new file mode 100644 index 0000000000..6e56c6b854 --- /dev/null +++ b/changelog.d/8494.misc @@ -0,0 +1 @@ +Remove the deprecated `Handlers` object. diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index f0d65d08d7..b4bd4d8e7a 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -89,7 +89,7 @@ async def export_data_command(hs, args): user_id = args.user_id directory = args.output_directory - res = await hs.get_handlers().admin_handler.export_user_data( + res = await hs.get_admin_handler().export_user_data( user_id, FileExfiltrationWriter(user_id, directory=directory) ) print(res) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 1c7ea886c9..e8039e244c 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -99,10 +99,15 @@ class FederationServer(FederationBase): super().__init__(hs) self.auth = hs.get_auth() - self.handler = hs.get_handlers().federation_handler + self.handler = hs.get_federation_handler() self.state = hs.get_state_handler() self.device_handler = hs.get_device_handler() + + # Ensure the following handlers are loaded since they register callbacks + # with FederationHandlerRegistry. + hs.get_directory_handler() + self._federation_ratelimiter = hs.get_federation_ratelimiter() self._server_linearizer = Linearizer("fed_server") diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py index 286f0054be..bfebb0f644 100644 --- a/synapse/handlers/__init__.py +++ b/synapse/handlers/__init__.py @@ -12,36 +12,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. - -from .admin import AdminHandler -from .directory import DirectoryHandler -from .federation import FederationHandler -from .identity import IdentityHandler -from .search import SearchHandler - - -class Handlers: - - """ Deprecated. A collection of handlers. - - At some point most of the classes whose name ended "Handler" were - accessed through this class. - - However this makes it painful to unit test the handlers and to run cut - down versions of synapse that only use specific handlers because using a - single handler required creating all of the handlers. So some of the - handlers have been lifted out of the Handlers object and are now accessed - directly through the homeserver object itself. - - Any new handlers should follow the new pattern of being accessed through - the homeserver object and should not be added to the Handlers object. - - The remaining handlers should be moved out of the handlers object. - """ - - def __init__(self, hs): - self.federation_handler = FederationHandler(hs) - self.directory_handler = DirectoryHandler(hs) - self.admin_handler = AdminHandler(hs) - self.identity_handler = IdentityHandler(hs) - self.search_handler = SearchHandler(hs) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index f6d17c53b1..1d1ddc2245 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1080,7 +1080,7 @@ class AuthHandler(BaseHandler): if medium == "email": address = canonicalise_email(address) - identity_handler = self.hs.get_handlers().identity_handler + identity_handler = self.hs.get_identity_handler() result = await identity_handler.try_unbind_threepid( user_id, {"medium": medium, "address": address, "id_server": id_server} ) diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 72a5831531..58c9f12686 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -37,7 +37,7 @@ class DeactivateAccountHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() self._room_member_handler = hs.get_room_member_handler() - self._identity_handler = hs.get_handlers().identity_handler + self._identity_handler = hs.get_identity_handler() self.user_directory_handler = hs.get_user_directory_handler() # Flag that indicates whether the process to part users from rooms is running diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 3e9a22e8f3..33d133a4b2 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1014,7 +1014,7 @@ class EventCreationHandler: # Check the alias is currently valid (if it has changed). 
room_alias_str = event.content.get("alias", None) - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() if room_alias_str and room_alias_str != original_alias: await self._validate_canonical_alias( directory_handler, room_alias_str, event.room_id @@ -1040,7 +1040,7 @@ class EventCreationHandler: directory_handler, alias_str, event.room_id ) - federation_handler = self.hs.get_handlers().federation_handler + federation_handler = self.hs.get_federation_handler() if event.type == EventTypes.Member: if event.content["membership"] == Membership.INVITE: diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 2c2a633938..085b685959 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -383,7 +383,7 @@ class PaginationHandler: "room_key", leave_token ) - await self.hs.get_handlers().federation_handler.maybe_backfill( + await self.hs.get_federation_handler().maybe_backfill( room_id, curr_topo, limit=pagin_config.limit, ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 538f4b2a61..a6f1d21674 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -48,7 +48,7 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() - self.identity_handler = self.hs.get_handlers().identity_handler + self.identity_handler = self.hs.get_identity_handler() self.ratelimiter = hs.get_registration_ratelimiter() self.macaroon_gen = hs.get_macaroon_generator() self._server_notices_mxid = hs.config.server_notices_mxid diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d0530a446c..1d04d41e98 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -691,7 +691,7 @@ class RoomCreationHandler(BaseHandler): if not allowed_by_third_party_rules: raise SynapseError(403, "Room visibility value not allowed.") - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() if room_alias: await directory_handler.create_association( requester=requester, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index fd8114a64d..ffbc62ff44 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -64,9 +64,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self.state_handler = hs.get_state_handler() self.config = hs.config - self.federation_handler = hs.get_handlers().federation_handler - self.directory_handler = hs.get_handlers().directory_handler - self.identity_handler = hs.get_handlers().identity_handler + self.federation_handler = hs.get_federation_handler() + self.directory_handler = hs.get_directory_handler() + self.identity_handler = hs.get_identity_handler() self.registration_handler = hs.get_registration_handler() self.profile_handler = hs.get_profile_handler() self.event_creation_handler = hs.get_event_creation_handler() diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 9146dc1a3b..3d66bf305e 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -143,7 +143,7 @@ class _BaseThreepidAuthChecker: threepid_creds = authdict["threepid_creds"] - identity_handler = self.hs.get_handlers().identity_handler + identity_handler = self.hs.get_identity_handler() logger.info("Getting validated threepid. 
threepidcreds: %r", (threepid_creds,)) diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 5393b9a9e7..b4f4a68b5c 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -62,7 +62,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): self.store = hs.get_datastore() self.storage = hs.get_storage() self.clock = hs.get_clock() - self.federation_handler = hs.get_handlers().federation_handler + self.federation_handler = hs.get_federation_handler() @staticmethod async def _serialize_payload(store, room_id, event_and_contexts, backfilled): diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 30680baee8..e7cc74a5d2 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -47,7 +47,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint): def __init__(self, hs): super().__init__(hs) - self.federation_handler = hs.get_handlers().federation_handler + self.federation_handler = hs.get_federation_handler() self.store = hs.get_datastore() self.clock = hs.get_clock() diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 09726d52d6..f5304ff43d 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -138,7 +138,7 @@ class ListRoomRestServlet(RestServlet): def __init__(self, hs): self.store = hs.get_datastore() self.auth = hs.get_auth() - self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() async def on_GET(self, request): requester = await self.auth.get_user_by_req(request) @@ -273,7 +273,7 @@ class JoinRoomAliasServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() self.room_member_handler = hs.get_room_member_handler() - self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() self.state_handler = hs.get_state_handler() async def on_POST(self, request, room_identifier): diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 20dc1d0e05..8efefbc0a0 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -45,7 +45,7 @@ class UsersRestServlet(RestServlet): self.hs = hs self.store = hs.get_datastore() self.auth = hs.get_auth() - self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() async def on_GET(self, request, user_id): target_user = UserID.from_string(user_id) @@ -82,7 +82,7 @@ class UsersRestServletV2(RestServlet): self.hs = hs self.store = hs.get_datastore() self.auth = hs.get_auth() - self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() async def on_GET(self, request): await assert_requester_is_admin(self.auth, request) @@ -135,7 +135,7 @@ class UserRestServletV2(RestServlet): def __init__(self, hs): self.hs = hs self.auth = hs.get_auth() - self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() self.store = hs.get_datastore() self.auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() @@ -448,7 +448,7 @@ class WhoisRestServlet(RestServlet): def __init__(self, hs): self.hs = hs self.auth = hs.get_auth() - self.handlers = hs.get_handlers() + self.admin_handler = hs.get_admin_handler() async def on_GET(self, request, user_id): target_user = UserID.from_string(user_id) @@ -461,7 +461,7 @@ class WhoisRestServlet(RestServlet): if not 
self.hs.is_mine(target_user): raise SynapseError(400, "Can only whois a local user") - ret = await self.handlers.admin_handler.get_whois(target_user) + ret = await self.admin_handler.get_whois(target_user) return 200, ret @@ -591,7 +591,6 @@ class SearchUsersRestServlet(RestServlet): self.hs = hs self.store = hs.get_datastore() self.auth = hs.get_auth() - self.handlers = hs.get_handlers() async def on_GET(self, request, target_user_id): """Get request to search user table for specific users according to @@ -612,7 +611,7 @@ class SearchUsersRestServlet(RestServlet): term = parse_string(request, "term", required=True) logger.info("term: %s ", term) - ret = await self.handlers.store.search_users(term) + ret = await self.store.search_users(term) return 200, ret diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index faabeeb91c..e5af26b176 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -42,14 +42,13 @@ class ClientDirectoryServer(RestServlet): def __init__(self, hs): super().__init__() self.store = hs.get_datastore() - self.handlers = hs.get_handlers() + self.directory_handler = hs.get_directory_handler() self.auth = hs.get_auth() async def on_GET(self, request, room_alias): room_alias = RoomAlias.from_string(room_alias) - dir_handler = self.handlers.directory_handler - res = await dir_handler.get_association(room_alias) + res = await self.directory_handler.get_association(room_alias) return 200, res @@ -79,19 +78,19 @@ class ClientDirectoryServer(RestServlet): requester = await self.auth.get_user_by_req(request) - await self.handlers.directory_handler.create_association( + await self.directory_handler.create_association( requester, room_alias, room_id, servers ) return 200, {} async def on_DELETE(self, request, room_alias): - dir_handler = self.handlers.directory_handler - try: service = self.auth.get_appservice_by_req(request) room_alias = RoomAlias.from_string(room_alias) - await dir_handler.delete_appservice_association(service, room_alias) + await self.directory_handler.delete_appservice_association( + service, room_alias + ) logger.info( "Application service at %s deleted alias %s", service.url, @@ -107,7 +106,7 @@ class ClientDirectoryServer(RestServlet): room_alias = RoomAlias.from_string(room_alias) - await dir_handler.delete_association(requester, room_alias) + await self.directory_handler.delete_association(requester, room_alias) logger.info( "User %s deleted alias %s", user.to_string(), room_alias.to_string() @@ -122,7 +121,7 @@ class ClientDirectoryListServer(RestServlet): def __init__(self, hs): super().__init__() self.store = hs.get_datastore() - self.handlers = hs.get_handlers() + self.directory_handler = hs.get_directory_handler() self.auth = hs.get_auth() async def on_GET(self, request, room_id): @@ -138,7 +137,7 @@ class ClientDirectoryListServer(RestServlet): content = parse_json_object_from_request(request) visibility = content.get("visibility", "public") - await self.handlers.directory_handler.edit_published_room_list( + await self.directory_handler.edit_published_room_list( requester, room_id, visibility ) @@ -147,7 +146,7 @@ class ClientDirectoryListServer(RestServlet): async def on_DELETE(self, request, room_id): requester = await self.auth.get_user_by_req(request) - await self.handlers.directory_handler.edit_published_room_list( + await self.directory_handler.edit_published_room_list( requester, room_id, "private" ) @@ -162,7 +161,7 @@ class 
ClientAppserviceDirectoryListServer(RestServlet): def __init__(self, hs): super().__init__() self.store = hs.get_datastore() - self.handlers = hs.get_handlers() + self.directory_handler = hs.get_directory_handler() self.auth = hs.get_auth() def on_PUT(self, request, network_id, room_id): @@ -180,7 +179,7 @@ class ClientAppserviceDirectoryListServer(RestServlet): 403, "Only appservices can edit the appservice published room list" ) - await self.handlers.directory_handler.edit_published_appservice_room_list( + await self.directory_handler.edit_published_appservice_room_list( requester.app_service.id, network_id, room_id, visibility ) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 3d1693d7ac..d7deb9300d 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -67,7 +67,6 @@ class LoginRestServlet(RestServlet): self.auth_handler = self.hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() - self.handlers = hs.get_handlers() self._well_known_builder = WellKnownBuilder(hs) self._address_ratelimiter = Ratelimiter( clock=hs.get_clock(), diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index b63389e5fe..00b4397082 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -112,7 +112,6 @@ class RoomCreateRestServlet(TransactionRestServlet): class RoomStateEventRestServlet(TransactionRestServlet): def __init__(self, hs): super().__init__(hs) - self.handlers = hs.get_handlers() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self.message_handler = hs.get_message_handler() @@ -798,7 +797,6 @@ class RoomMembershipRestServlet(TransactionRestServlet): class RoomRedactEventRestServlet(TransactionRestServlet): def __init__(self, hs): super().__init__(hs) - self.handlers = hs.get_handlers() self.event_creation_handler = hs.get_event_creation_handler() self.auth = hs.get_auth() @@ -903,7 +901,7 @@ class RoomAliasListServlet(RestServlet): def __init__(self, hs: "synapse.server.HomeServer"): super().__init__() self.auth = hs.get_auth() - self.directory_handler = hs.get_handlers().directory_handler + self.directory_handler = hs.get_directory_handler() async def on_GET(self, request, room_id): requester = await self.auth.get_user_by_req(request) @@ -920,7 +918,7 @@ class SearchRestServlet(RestServlet): def __init__(self, hs): super().__init__() - self.handlers = hs.get_handlers() + self.search_handler = hs.get_search_handler() self.auth = hs.get_auth() async def on_POST(self, request): @@ -929,9 +927,7 @@ class SearchRestServlet(RestServlet): content = parse_json_object_from_request(request) batch = parse_string(request, "next_batch") - results = await self.handlers.search_handler.search( - requester.user, content, batch - ) + results = await self.search_handler.search(requester.user, content, batch) return 200, results diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index ab5815e7f7..e857cff176 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -56,7 +56,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): self.hs = hs self.datastore = hs.get_datastore() self.config = hs.config - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self.mailer = Mailer( @@ -327,7 +327,7 
@@ class EmailThreepidRequestTokenRestServlet(RestServlet): super().__init__() self.hs = hs self.config = hs.config - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.store = self.hs.get_datastore() if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: @@ -424,7 +424,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): self.hs = hs super().__init__() self.store = self.hs.get_datastore() - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() async def on_POST(self, request): body = parse_json_object_from_request(request) @@ -574,7 +574,7 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet): self.config = hs.config self.clock = hs.get_clock() self.store = hs.get_datastore() - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() async def on_POST(self, request): if not self.config.account_threepid_delegate_msisdn: @@ -604,7 +604,7 @@ class ThreepidRestServlet(RestServlet): def __init__(self, hs): super().__init__() self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() self.datastore = self.hs.get_datastore() @@ -660,7 +660,7 @@ class ThreepidAddRestServlet(RestServlet): def __init__(self, hs): super().__init__() self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() @@ -711,7 +711,7 @@ class ThreepidBindRestServlet(RestServlet): def __init__(self, hs): super().__init__() self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.auth = hs.get_auth() async def on_POST(self, request): @@ -740,7 +740,7 @@ class ThreepidUnbindRestServlet(RestServlet): def __init__(self, hs): super().__init__() self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.auth = hs.get_auth() self.datastore = self.hs.get_datastore() diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index ffa2dfce42..395b6a82a9 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -78,7 +78,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): """ super().__init__() self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.config = hs.config if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: @@ -176,7 +176,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): """ super().__init__() self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() async def on_POST(self, request): body = parse_json_object_from_request(request) @@ -370,7 +370,7 @@ class RegisterRestServlet(RestServlet): self.store = hs.get_datastore() self.auth_handler = hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.room_member_handler = hs.get_room_member_handler() self.macaroon_gen = hs.get_macaroon_generator() self.ratelimiter = 
hs.get_registration_ratelimiter() diff --git a/synapse/server.py b/synapse/server.py index f83dd6148c..e793793cdc 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -54,19 +54,22 @@ from synapse.federation.sender import FederationSender from synapse.federation.transport.client import TransportLayerClient from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer from synapse.groups.groups_server import GroupsServerHandler, GroupsServerWorkerHandler -from synapse.handlers import Handlers from synapse.handlers.account_validity import AccountValidityHandler from synapse.handlers.acme import AcmeHandler +from synapse.handlers.admin import AdminHandler from synapse.handlers.appservice import ApplicationServicesHandler from synapse.handlers.auth import AuthHandler, MacaroonGenerator from synapse.handlers.cas_handler import CasHandler from synapse.handlers.deactivate_account import DeactivateAccountHandler from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler from synapse.handlers.devicemessage import DeviceMessageHandler +from synapse.handlers.directory import DirectoryHandler from synapse.handlers.e2e_keys import E2eKeysHandler from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler from synapse.handlers.events import EventHandler, EventStreamHandler +from synapse.handlers.federation import FederationHandler from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler +from synapse.handlers.identity import IdentityHandler from synapse.handlers.initial_sync import InitialSyncHandler from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler @@ -84,6 +87,7 @@ from synapse.handlers.room import ( from synapse.handlers.room_list import RoomListHandler from synapse.handlers.room_member import RoomMemberMasterHandler from synapse.handlers.room_member_worker import RoomMemberWorkerHandler +from synapse.handlers.search import SearchHandler from synapse.handlers.set_password import SetPasswordHandler from synapse.handlers.stats import StatsHandler from synapse.handlers.sync import SyncHandler @@ -318,10 +322,6 @@ class HomeServer(metaclass=abc.ABCMeta): def get_federation_server(self) -> FederationServer: return FederationServer(self) - @cache_in_self - def get_handlers(self) -> Handlers: - return Handlers(self) - @cache_in_self def get_notifier(self) -> Notifier: return Notifier(self) @@ -408,6 +408,10 @@ class HomeServer(metaclass=abc.ABCMeta): def get_device_message_handler(self) -> DeviceMessageHandler: return DeviceMessageHandler(self) + @cache_in_self + def get_directory_handler(self) -> DirectoryHandler: + return DirectoryHandler(self) + @cache_in_self def get_e2e_keys_handler(self) -> E2eKeysHandler: return E2eKeysHandler(self) @@ -420,6 +424,10 @@ class HomeServer(metaclass=abc.ABCMeta): def get_acme_handler(self) -> AcmeHandler: return AcmeHandler(self) + @cache_in_self + def get_admin_handler(self) -> AdminHandler: + return AdminHandler(self) + @cache_in_self def get_application_service_api(self) -> ApplicationServiceApi: return ApplicationServiceApi(self) @@ -440,6 +448,14 @@ class HomeServer(metaclass=abc.ABCMeta): def get_event_stream_handler(self) -> EventStreamHandler: return EventStreamHandler(self) + @cache_in_self + def get_federation_handler(self) -> FederationHandler: + return FederationHandler(self) + + @cache_in_self + def get_identity_handler(self) -> IdentityHandler: + return IdentityHandler(self) + @cache_in_self def 
get_initial_sync_handler(self) -> InitialSyncHandler: return InitialSyncHandler(self) @@ -459,6 +475,10 @@ class HomeServer(metaclass=abc.ABCMeta): def get_deactivate_account_handler(self) -> DeactivateAccountHandler: return DeactivateAccountHandler(self) + @cache_in_self + def get_search_handler(self) -> SearchHandler: + return SearchHandler(self) + @cache_in_self def get_set_password_handler(self) -> SetPasswordHandler: return SetPasswordHandler(self) diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 8ab56ec94c..cb6f29d670 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -19,7 +19,6 @@ import pymacaroons from twisted.internet import defer -import synapse.handlers.auth from synapse.api.auth import Auth from synapse.api.constants import UserTypes from synapse.api.errors import ( @@ -36,20 +35,15 @@ from tests import unittest from tests.utils import mock_getRawHeaders, setup_test_homeserver -class TestHandlers: - def __init__(self, hs): - self.auth_handler = synapse.handlers.auth.AuthHandler(hs) - - class AuthTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.state_handler = Mock() self.store = Mock() - self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None) + self.hs = yield setup_test_homeserver(self.addCleanup) self.hs.get_datastore = Mock(return_value=self.store) - self.hs.handlers = TestHandlers(self.hs) + self.hs.get_auth_handler().store = self.store self.auth = Auth(self.hs) # AuthBlocking reads from the hs' config on initialization. We need to @@ -283,7 +277,7 @@ class AuthTestCase(unittest.TestCase): self.store.get_device = Mock(return_value=defer.succeed(None)) token = yield defer.ensureDeferred( - self.hs.handlers.auth_handler.get_access_token_for_user_id( + self.hs.get_auth_handler().get_access_token_for_user_id( USER_ID, "DEVICE", valid_until_ms=None ) ) diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index d2d535d23c..c98ae75974 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -50,10 +50,7 @@ class FilteringTestCase(unittest.TestCase): self.mock_http_client.put_json = DeferredMockCallable() hs = yield setup_test_homeserver( - self.addCleanup, - handlers=None, - http_client=self.mock_http_client, - keyring=Mock(), + self.addCleanup, http_client=self.mock_http_client, keyring=Mock(), ) self.filtering = hs.get_filtering() diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 8ff1460c0d..697916a019 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -315,7 +315,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.http_client = Mock() - hs = self.setup_test_homeserver(handlers=None, http_client=self.http_client) + hs = self.setup_test_homeserver(http_client=self.http_client) return hs def test_get_keys_from_server(self): @@ -395,9 +395,7 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): } ] - return self.setup_test_homeserver( - handlers=None, http_client=self.http_client, config=config - ) + return self.setup_test_homeserver(http_client=self.http_client, config=config) def build_perspectives_response( self, server_name: str, signing_key: SigningKey, valid_until_ts: int, diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py index fc37c4328c..5c2b4de1a6 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py @@ -35,7 +35,7 @@ class 
ExfiltrateData(unittest.HomeserverTestCase): ] def prepare(self, reactor, clock, hs): - self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() self.user1 = self.register_user("user1", "password") self.token1 = self.login("user1", "password") diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index 97877c2e42..b5055e018c 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -21,24 +21,17 @@ from twisted.internet import defer import synapse import synapse.api.errors from synapse.api.errors import ResourceLimitError -from synapse.handlers.auth import AuthHandler from tests import unittest from tests.test_utils import make_awaitable from tests.utils import setup_test_homeserver -class AuthHandlers: - def __init__(self, hs): - self.auth_handler = AuthHandler(hs) - - class AuthTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): - self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None) - self.hs.handlers = AuthHandlers(self.hs) - self.auth_handler = self.hs.handlers.auth_handler + self.hs = yield setup_test_homeserver(self.addCleanup) + self.auth_handler = self.hs.get_auth_handler() self.macaroon_generator = self.hs.get_macaroon_generator() # MAU tests diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index bc0c5aefdc..2ce6dc9528 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -48,7 +48,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): federation_registry=self.mock_registry, ) - self.handler = hs.get_handlers().directory_handler + self.handler = hs.get_directory_handler() self.store = hs.get_datastore() @@ -110,7 +110,7 @@ class TestCreateAlias(unittest.HomeserverTestCase): ] def prepare(self, reactor, clock, hs): - self.handler = hs.get_handlers().directory_handler + self.handler = hs.get_directory_handler() # Create user self.admin_user = self.register_user("admin", "pass", admin=True) @@ -173,7 +173,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() - self.handler = hs.get_handlers().directory_handler + self.handler = hs.get_directory_handler() self.state_handler = hs.get_state_handler() # Create user @@ -289,7 +289,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() - self.handler = hs.get_handlers().directory_handler + self.handler = hs.get_directory_handler() self.state_handler = hs.get_state_handler() # Create user @@ -442,7 +442,7 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): self.assertEquals(200, channel.code, channel.result) self.room_list_handler = hs.get_room_list_handler() - self.directory_handler = hs.get_handlers().directory_handler + self.directory_handler = hs.get_directory_handler() return hs diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index e79d612f7a..924f29f051 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -38,7 +38,7 @@ class E2eKeysHandlerTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.hs = yield utils.setup_test_homeserver( - self.addCleanup, handlers=None, federation_client=mock.Mock() + self.addCleanup, federation_client=mock.Mock() ) self.handler = synapse.handlers.e2e_keys.E2eKeysHandler(self.hs) self.store = self.hs.get_datastore() diff --git a/tests/handlers/test_e2e_room_keys.py 
b/tests/handlers/test_e2e_room_keys.py index 7adde9b9de..45f201a399 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -54,7 +54,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.hs = yield utils.setup_test_homeserver( - self.addCleanup, handlers=None, replication_layer=mock.Mock() + self.addCleanup, replication_layer=mock.Mock() ) self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs) self.local_user = "@boris:" + self.hs.hostname diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 96fea58673..9ef80fe502 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -38,7 +38,7 @@ class FederationTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver(http_client=None) - self.handler = hs.get_handlers().federation_handler + self.handler = hs.get_federation_handler() self.store = hs.get_datastore() return hs diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 306dcfe944..914c82e7a8 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -470,7 +470,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.federation_sender = hs.get_federation_sender() self.event_builder_factory = hs.get_event_builder_factory() - self.federation_handler = hs.get_handlers().federation_handler + self.federation_handler = hs.get_federation_handler() self.presence_handler = hs.get_presence_handler() # self.event_builder_for_2 = EventBuilderFactory(hs) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 8e95e53d9e..a69fa28b41 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -20,7 +20,6 @@ from twisted.internet import defer import synapse.types from synapse.api.errors import AuthError, SynapseError -from synapse.handlers.profile import MasterProfileHandler from synapse.types import UserID from tests import unittest @@ -28,11 +27,6 @@ from tests.test_utils import make_awaitable from tests.utils import setup_test_homeserver -class ProfileHandlers: - def __init__(self, hs): - self.profile_handler = MasterProfileHandler(hs) - - class ProfileTestCase(unittest.TestCase): """ Tests profile management. """ @@ -51,7 +45,6 @@ class ProfileTestCase(unittest.TestCase): hs = yield setup_test_homeserver( self.addCleanup, http_client=None, - handlers=None, resource_for_federation=Mock(), federation_client=self.mock_federation, federation_server=Mock(), diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 702c6aa089..bdf3d0a8a2 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -18,7 +18,6 @@ from mock import Mock from synapse.api.auth import Auth from synapse.api.constants import UserTypes from synapse.api.errors import Codes, ResourceLimitError, SynapseError -from synapse.handlers.register import RegistrationHandler from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import RoomAlias, UserID, create_requester @@ -29,11 +28,6 @@ from tests.utils import mock_getRawHeaders from .. import unittest -class RegistrationHandlers: - def __init__(self, hs): - self.registration_handler = RegistrationHandler(hs) - - class RegistrationTestCase(unittest.HomeserverTestCase): """ Tests the RegistrationHandler. 
""" @@ -154,7 +148,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): room_alias_str = "#room:test" user_id = self.get_success(self.handler.register_user(localpart="jeff")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -193,7 +187,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): user_id = self.get_success(self.handler.register_user(localpart="support")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) self.get_failure(directory_handler.get_association(room_alias), SynapseError) @@ -205,7 +199,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): self.store.is_real_user = Mock(return_value=make_awaitable(True)) user_id = self.get_success(self.handler.register_user(localpart="real")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -237,7 +231,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): user_id = self.get_success(self.handler.register_user(localpart="jeff")) # Ensure the room was created. - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -266,7 +260,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): user_id = self.get_success(self.handler.register_user(localpart="jeff")) # Ensure the room was created. - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -304,7 +298,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): user_id = self.get_success(self.handler.register_user(localpart="jeff")) # Ensure the room was created. - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -347,7 +341,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): ) # Ensure the room was created. - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -384,7 +378,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): user_id = self.get_success(self.handler.register_user(localpart="jeff")) # Ensure the room was created. 
- directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index 1d7edee5ba..9c4a9c3563 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -207,7 +207,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): def create_room_with_remote_server(self, user, token, remote_server="other_server"): room = self.helper.create_room_as(user, tok=token) store = self.hs.get_datastore() - federation = self.hs.get_handlers().federation_handler + federation = self.hs.get_federation_handler() prev_event_ids = self.get_success(store.get_latest_event_ids_in_room(room)) room_version = self.get_success(store.get_room_version(room)) diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index dfe4bf7762..6bb02b9630 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -78,7 +78,7 @@ class RoomTestCase(_ShadowBannedBase): def test_invite_3pid(self): """Ensure that a 3PID invite does not attempt to contact the identity server.""" - identity_handler = self.hs.get_handlers().identity_handler + identity_handler = self.hs.get_identity_handler() identity_handler.lookup_3pid = Mock( side_effect=AssertionError("This should not get called") ) diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py index f75520877f..3397ba5579 100644 --- a/tests/rest/client/v1/test_events.py +++ b/tests/rest/client/v1/test_events.py @@ -42,7 +42,7 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase): hs = self.setup_test_homeserver(config=config) - hs.get_handlers().federation_handler = Mock() + hs.get_federation_handler = Mock() return hs diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 0d809d25d5..9ba5f9d943 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -32,6 +32,7 @@ from synapse.types import JsonDict, RoomAlias, UserID from synapse.util.stringutils import random_string from tests import unittest +from tests.test_utils import make_awaitable PATH_PREFIX = b"/_matrix/client/api/v1" @@ -47,7 +48,10 @@ class RoomBase(unittest.HomeserverTestCase): "red", http_client=None, federation_client=Mock(), ) - self.hs.get_federation_handler = Mock(return_value=Mock()) + self.hs.get_federation_handler = Mock() + self.hs.get_federation_handler.return_value.maybe_backfill = Mock( + return_value=make_awaitable(None) + ) async def _insert_client_ip(*args, **kwargs): return None diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py index 94d2bf2eb1..cd58ee7792 100644 --- a/tests/rest/client/v1/test_typing.py +++ b/tests/rest/client/v1/test_typing.py @@ -44,7 +44,7 @@ class RoomTypingTestCase(unittest.HomeserverTestCase): self.event_source = hs.get_event_sources().sources["typing"] - hs.get_handlers().federation_handler = Mock() + hs.get_federation_handler = Mock() async def get_user_by_access_token(token=None, allow_guest=False): return { diff --git a/tests/test_federation.py b/tests/test_federation.py index 27a7fc9ed7..d39e792580 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -75,7 +75,7 @@ class 
MessageAcceptTests(unittest.HomeserverTestCase): } ) - self.handler = self.homeserver.get_handlers().federation_handler + self.handler = self.homeserver.get_federation_handler() self.handler.do_auth = lambda origin, event, context, auth_events: succeed( context ) -- cgit 1.5.1 From 66ac4b1e34193f5aadfbe1966427093fde09befb Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 9 Oct 2020 13:46:36 +0100 Subject: Allow modules to create and send events into rooms (#8479) This PR allows Synapse modules making use of the `ModuleApi` to create and send non-membership events into a room. This can be useful to have modules send messages, or change power levels in a room, etc. Note that they must send events through a user that's already in the room. The non-membership event limitation is currently arbitrary, as it's another chunk of work and not necessary at the moment. --- changelog.d/8479.feature | 1 + synapse/handlers/message.py | 11 ++-- synapse/module_api/__init__.py | 30 ++++++++- tests/module_api/test_api.py | 95 +++++++++++++++++++++++++++++ tests/rest/client/test_third_party_rules.py | 28 ++++++++- 5 files changed, 157 insertions(+), 8 deletions(-) create mode 100644 changelog.d/8479.feature (limited to 'tests') diff --git a/changelog.d/8479.feature b/changelog.d/8479.feature new file mode 100644 index 0000000000..11adeec8a9 --- /dev/null +++ b/changelog.d/8479.feature @@ -0,0 +1 @@ +Add the ability to send non-membership events into a room via the `ModuleApi`. \ No newline at end of file diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 33d133a4b2..ad0b7bd868 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -59,6 +59,7 @@ from synapse.visibility import filter_events_for_client from ._base import BaseHandler if TYPE_CHECKING: + from synapse.events.third_party_rules import ThirdPartyEventRules from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -393,7 +394,9 @@ class EventCreationHandler: self.action_generator = hs.get_action_generator() self.spam_checker = hs.get_spam_checker() - self.third_party_event_rules = hs.get_third_party_event_rules() + self.third_party_event_rules = ( + self.hs.get_third_party_event_rules() + ) # type: ThirdPartyEventRules self._block_events_without_consent_error = ( self.config.block_events_without_consent_error @@ -1229,11 +1232,7 @@ class EventCreationHandler: # Since this is a dummy-event it is OK if it is sent by a # shadow-banned user.
await self.handle_new_client_event( - requester=requester, - event=event, - context=context, - ratelimit=False, - ignore_shadow_ban=True, + requester, event, context, ratelimit=False, ignore_shadow_ban=True, ) return True except ConsentNotGivenError: diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index b410e3ad9c..0142542852 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -18,11 +18,12 @@ from typing import TYPE_CHECKING, Iterable, Optional, Tuple from twisted.internet import defer +from synapse.events import EventBase from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage.state import StateFilter -from synapse.types import UserID +from synapse.types import JsonDict, UserID, create_requester if TYPE_CHECKING: from synapse.server import HomeServer @@ -320,6 +321,33 @@ class ModuleApi: state = yield defer.ensureDeferred(self._store.get_events(state_ids.values())) return state.values() + async def create_and_send_event_into_room(self, event_dict: JsonDict) -> EventBase: + """Create and send an event into a room. Membership events are currently not supported. + + Args: + event_dict: A dictionary representing the event to send. + Required keys are `type`, `room_id`, `sender` and `content`. + + Returns: + The event that was sent. If state event deduplication happened, then + the previous, duplicate event instead. + + Raises: + SynapseError if the event was not allowed. + """ + # Create a requester object + requester = create_requester(event_dict["sender"]) + + # Create and send the event + ( + event, + _, + ) = await self._hs.get_event_creation_handler().create_and_send_nonmember_event( + requester, event_dict, ratelimit=False, ignore_shadow_ban=True, + ) + + return event + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 7c790bee7d..9b573ac24d 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -12,9 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from mock import Mock +from synapse.events import EventBase from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.types import create_requester from tests.unittest import HomeserverTestCase @@ -29,6 +32,7 @@ class ModuleApiTestCase(HomeserverTestCase): def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() self.module_api = homeserver.get_module_api() + self.event_creation_handler = homeserver.get_event_creation_handler() def test_can_register_user(self): """Tests that an external module can register a user""" @@ -60,6 +64,97 @@ class ModuleApiTestCase(HomeserverTestCase): displayname = self.get_success(self.store.get_profile_displayname("bob")) self.assertEqual(displayname, "Bobberino") + def test_sending_events_into_room(self): + """Tests that a module can send events into a room""" + # Mock out create_and_send_nonmember_event to check whether events are being sent + self.event_creation_handler.create_and_send_nonmember_event = Mock( + spec=[], + side_effect=self.event_creation_handler.create_and_send_nonmember_event, + ) + + # Create a user and room to play with + user_id = self.register_user("summer", "monkey") + tok = self.login("summer", "monkey") + room_id = self.helper.create_room_as(user_id, tok=tok) + + # Create and send a non-state event + content = {"body": "I am a puppet", "msgtype": "m.text"} + event_dict = { + "room_id": room_id, + "type": "m.room.message", + "content": content, + "sender": user_id, + } + event = self.get_success( + self.module_api.create_and_send_event_into_room(event_dict) + ) # type: EventBase + self.assertEqual(event.sender, user_id) + self.assertEqual(event.type, "m.room.message") + self.assertEqual(event.room_id, room_id) + self.assertFalse(hasattr(event, "state_key")) + self.assertDictEqual(event.content, content) + + # Check that the event was sent + self.event_creation_handler.create_and_send_nonmember_event.assert_called_with( + create_requester(user_id), + event_dict, + ratelimit=False, + ignore_shadow_ban=True, + ) + + # Create and send a state event + content = { + "events_default": 0, + "users": {user_id: 100}, + "state_default": 50, + "users_default": 0, + "events": {"test.event.type": 25}, + } + event_dict = { + "room_id": room_id, + "type": "m.room.power_levels", + "content": content, + "sender": user_id, + "state_key": "", + } + event = self.get_success( + self.module_api.create_and_send_event_into_room(event_dict) + ) # type: EventBase + self.assertEqual(event.sender, user_id) + self.assertEqual(event.type, "m.room.power_levels") + self.assertEqual(event.room_id, room_id) + self.assertEqual(event.state_key, "") + self.assertDictEqual(event.content, content) + + # Check that the event was sent + self.event_creation_handler.create_and_send_nonmember_event.assert_called_with( + create_requester(user_id), + { + "type": "m.room.power_levels", + "content": content, + "room_id": room_id, + "sender": user_id, + "state_key": "", + }, + ratelimit=False, + ignore_shadow_ban=True, + ) + + # Check that we can't send membership events + content = { + "membership": "leave", + } + event_dict = { + "room_id": room_id, + "type": "m.room.member", + "content": content, + "sender": user_id, + "state_key": user_id, + } + self.get_failure( + self.module_api.create_and_send_event_into_room(event_dict), Exception + ) + def test_public_rooms(self): """Tests that a room can be added and removed from the public rooms list, as well as have its public rooms directory state queried. 
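As an illustration of the new API (this is a sketch, not part of the patch: the module class, its config handling, and the helper name below are hypothetical; only `create_and_send_event_into_room` and the required event-dict keys `type`, `room_id`, `sender` and `content` come from this change), a module might use it roughly like so:

    from synapse.module_api import ModuleApi

    class ExampleModule:
        # Hypothetical module: Synapse passes the parsed config and a
        # ModuleApi instance to module constructors.
        def __init__(self, config: dict, module_api: ModuleApi):
            self._api = module_api

        async def send_notice(self, room_id: str, sender: str, body: str):
            # The sender must already be in the room, and membership events
            # (type "m.room.member") are rejected by this API.
            return await self._api.create_and_send_event_into_room(
                {
                    "room_id": room_id,
                    "type": "m.room.message",
                    "sender": sender,
                    "content": {"msgtype": "m.text", "body": body},
                }
            )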
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index c12518c931..d03e121664 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -13,10 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. import threading +from typing import Dict from mock import Mock from synapse.events import EventBase +from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client.v1 import login, room from synapse.types import Requester, StateMap @@ -27,10 +29,11 @@ thread_local = threading.local() class ThirdPartyRulesTestModule: - def __init__(self, config, module_api): + def __init__(self, config: Dict, module_api: ModuleApi): # keep a record of the "current" rules module, so that the test can patch # it if desired. thread_local.rules_module = self + self.module_api = module_api async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool @@ -142,3 +145,26 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"200", channel.result) ev = channel.json_body self.assertEqual(ev["content"]["x"], "y") + + def test_send_event(self): + """Tests that the module can send an event into a room via the module api""" + content = { + "msgtype": "m.text", + "body": "Hello!", + } + event_dict = { + "room_id": self.room_id, + "type": "m.room.message", + "content": content, + "sender": self.user_id, + } + event = self.get_success( + current_rules_module().module_api.create_and_send_event_into_room( + event_dict + ) + ) # type: EventBase + + self.assertEquals(event.sender, self.user_id) + self.assertEquals(event.room_id, self.room_id) + self.assertEquals(event.type, "m.room.message") + self.assertEquals(event.content, content) -- cgit 1.5.1 From 9789b1fba541a5ae01b946770416729e5b7e5b7e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 9 Oct 2020 17:22:25 +0100 Subject: Fix threadsafety in ThreadedMemoryReactorClock (#8497) This could, very occasionally, cause: ``` tests.test_visibility.FilterEventsForServerTestCase.test_large_room =============================================================================== [ERROR] Traceback (most recent call last): File "/src/tests/rest/media/v1/test_media_storage.py", line 86, in test_ensure_media_is_in_local_cache self.wait_on_thread(x) File "/src/tests/unittest.py", line 296, in wait_on_thread self.reactor.advance(0.01) File "/src/.tox/py35/lib/python3.5/site-packages/twisted/internet/task.py", line 826, in advance self._sortCalls() File "/src/.tox/py35/lib/python3.5/site-packages/twisted/internet/task.py", line 787, in _sortCalls self.calls.sort(key=lambda a: a.getTime()) builtins.ValueError: list modified during sort tests.rest.media.v1.test_media_storage.MediaStorageTests.test_ensure_media_is_in_local_cache ``` --- changelog.d/8497.misc | 1 + tests/server.py | 36 ++++++++++++++++++++++++++++++++---- 2 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 changelog.d/8497.misc (limited to 'tests') diff --git a/changelog.d/8497.misc b/changelog.d/8497.misc new file mode 100644 index 0000000000..8bc05e8df6 --- /dev/null +++ b/changelog.d/8497.misc @@ -0,0 +1 @@ +Fix a threadsafety bug in unit tests. 
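The race being fixed: `callFromThread` was previously implemented via `callLater`, so worker threads mutated the reactor's pending-calls list while `advance()` was sorting it on the test thread. The diff below instead queues such callbacks and drains them from `advance()`. A stripped-down sketch of the pattern (illustrative names, not the Twisted API; `deque.append` and `deque.popleft` are atomic in CPython, so no explicit lock is needed):

    from collections import deque

    class ThreadCallbackQueue:
        def __init__(self):
            self._thread_callbacks = deque()

        def call_from_thread(self, callback, *args, **kwargs):
            # Safe to call from any thread: nothing here touches the
            # (unsynchronised) list of pending timed calls.
            self._thread_callbacks.append(lambda: callback(*args, **kwargs))

        def drain(self):
            # Run pending callbacks; main/test thread only.
            while self._thread_callbacks:
                self._thread_callbacks.popleft()()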
diff --git a/tests/server.py b/tests/server.py index f7f5276b21..422c8b42ca 100644 --- a/tests/server.py +++ b/tests/server.py @@ -1,8 +1,11 @@ import json import logging +from collections import deque from io import SEEK_END, BytesIO +from typing import Callable import attr +from typing_extensions import Deque from zope.interface import implementer from twisted.internet import address, threads, udp @@ -251,6 +254,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): self._tcp_callbacks = {} self._udp = [] lookups = self.lookups = {} + self._thread_callbacks = deque() # type: Deque[Callable[[], None]] @implementer(IResolverSimple) class FakeResolver: @@ -272,10 +276,10 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): """ Make the callback fire in the next reactor iteration. """ - d = Deferred() - d.addCallback(lambda x: callback(*args, **kwargs)) - self.callLater(0, d.callback, True) - return d + cb = lambda: callback(*args, **kwargs) + # it's not safe to call callLater() here, so we append the callback to a + # separate queue. + self._thread_callbacks.append(cb) def getThreadPool(self): return self.threadpool @@ -303,6 +307,30 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): return conn + def advance(self, amount): + # first advance our reactor's time, and run any "callLater" callbacks that + # makes ready + super().advance(amount) + + # now run any "callFromThread" callbacks + while True: + try: + callback = self._thread_callbacks.popleft() + except IndexError: + break + callback() + + # check for more "callLater" callbacks added by the thread callback + # This isn't required in a regular reactor, but it ends up meaning that + # our database queries can complete in a single call to `advance` [1] which + # simplifies tests. + # + # [1]: we replace the threadpool backing the db connection pool with a + # mock ThreadPool which doesn't really use threads; but we still use + # reactor.callFromThread to feed results back from the db functions to the + # main thread. + super().advance(0) + class ThreadPool: """ -- cgit 1.5.1 From d35a451399d5bb15ba0b452c26719474371298d7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 9 Oct 2020 14:19:29 -0400 Subject: Clean-up some broken/unused code in the test framework (#8514) --- changelog.d/8514.misc | 1 + tests/server.py | 2 - tests/utils.py | 122 ++++++++++++++++++++++---------------------- 3 files changed, 55 insertions(+), 70 deletions(-) create mode 100644 changelog.d/8514.misc (limited to 'tests') diff --git a/changelog.d/8514.misc b/changelog.d/8514.misc new file mode 100644 index 0000000000..0e7ac4f220 --- /dev/null +++ b/changelog.d/8514.misc @@ -0,0 +1 @@ +Remove unused code from the test framework. diff --git a/tests/server.py b/tests/server.py index 422c8b42ca..4d33b84097 100644 --- a/tests/server.py +++ b/tests/server.py @@ -367,8 +367,6 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs): """ server = _sth(cleanup_func, *args, **kwargs) - database = server.config.database.get_single_database() - # Make the thread pool synchronous.
clock = server.get_clock() diff --git a/tests/utils.py b/tests/utils.py index af563ffe0f..0c09f5457f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -192,7 +192,6 @@ class TestHomeServer(HomeServer): def setup_test_homeserver( cleanup_func, name="test", - datastore=None, config=None, reactor=None, homeserverToUse=TestHomeServer, @@ -249,7 +248,7 @@ def setup_test_homeserver( # Create the database before we actually try and connect to it, based off # the template database we generate in setupdb() - if datastore is None and isinstance(db_engine, PostgresEngine): + if isinstance(db_engine, PostgresEngine): db_conn = db_engine.module.connect( database=POSTGRES_BASE_DB, user=POSTGRES_USER, @@ -265,79 +264,66 @@ def setup_test_homeserver( cur.close() db_conn.close() - if datastore is None: - hs = homeserverToUse( - name, - config=config, - version_string="Synapse/tests", - tls_server_context_factory=Mock(), - tls_client_options_factory=Mock(), - reactor=reactor, - **kargs - ) + hs = homeserverToUse( + name, + config=config, + version_string="Synapse/tests", + tls_server_context_factory=Mock(), + tls_client_options_factory=Mock(), + reactor=reactor, + **kargs + ) - hs.setup() - if homeserverToUse.__name__ == "TestHomeServer": - hs.setup_background_tasks() + hs.setup() + if homeserverToUse.__name__ == "TestHomeServer": + hs.setup_background_tasks() - if isinstance(db_engine, PostgresEngine): - database = hs.get_datastores().databases[0] + if isinstance(db_engine, PostgresEngine): + database = hs.get_datastores().databases[0] - # We need to do cleanup on PostgreSQL - def cleanup(): - import psycopg2 + # We need to do cleanup on PostgreSQL + def cleanup(): + import psycopg2 - # Close all the db pools - database._db_pool.close() + # Close all the db pools + database._db_pool.close() - dropped = False + dropped = False - # Drop the test database - db_conn = db_engine.module.connect( - database=POSTGRES_BASE_DB, - user=POSTGRES_USER, - host=POSTGRES_HOST, - password=POSTGRES_PASSWORD, - ) - db_conn.autocommit = True - cur = db_conn.cursor() - - # Try a few times to drop the DB. Some things may hold on to the - # database for a few more seconds due to flakiness, preventing - # us from dropping it when the test is over. If we can't drop - # it, warn and move on. - for x in range(5): - try: - cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) - db_conn.commit() - dropped = True - except psycopg2.OperationalError as e: - warnings.warn( - "Couldn't drop old db: " + str(e), category=UserWarning - ) - time.sleep(0.5) - - cur.close() - db_conn.close() - - if not dropped: - warnings.warn("Failed to drop old DB.", category=UserWarning) - - if not LEAVE_DB: - # Register the cleanup hook - cleanup_func(cleanup) + # Drop the test database + db_conn = db_engine.module.connect( + database=POSTGRES_BASE_DB, + user=POSTGRES_USER, + host=POSTGRES_HOST, + password=POSTGRES_PASSWORD, + ) + db_conn.autocommit = True + cur = db_conn.cursor() - else: - hs = homeserverToUse( - name, - datastore=datastore, - config=config, - version_string="Synapse/tests", - tls_server_context_factory=Mock(), - tls_client_options_factory=Mock(), - reactor=reactor, - **kargs - ) + # Try a few times to drop the DB. Some things may hold on to the + # database for a few more seconds due to flakiness, preventing + # us from dropping it when the test is over. If we can't drop + # it, warn and move on. 
+ for x in range(5): + try: + cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) + db_conn.commit() + dropped = True + except psycopg2.OperationalError as e: + warnings.warn( + "Couldn't drop old db: " + str(e), category=UserWarning + ) + time.sleep(0.5) + + cur.close() + db_conn.close() + + if not dropped: + warnings.warn("Failed to drop old DB.", category=UserWarning) + + if not LEAVE_DB: + # Register the cleanup hook + cleanup_func(cleanup) # bcrypt is far too slow to be doing in unit tests # Need to let the HS build an auth handler and then mess with it -- cgit 1.5.1 From 8de3703d214c814ad637793a0cc2220e20579ffa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 12 Oct 2020 15:51:41 +0100 Subject: Make event persisters periodically announce position over replication. (#8499) Currently, background processes that stream the events stream use the "minimum persisted position" (i.e. `get_current_token()`) rather than the vector clock style tokens. This is broadly fine as it doesn't matter if the background processes lag a small amount. However, in extreme cases (e.g. SyTest) where we only write to one event persister the background processes will never make progress. This PR changes it so that the `MultiWriterIDGenerator` keeps the current position of a given instance as up to date as possible (i.e. using the latest token it sees if it's not in the process of persisting anything), and then periodically announces that over replication. This then allows the "minimum persisted position" to advance, albeit with a small lag. --- changelog.d/8499.misc | 1 + docs/tcp_replication.md | 13 +++++++--- synapse/replication/tcp/client.py | 4 +++ synapse/replication/tcp/commands.py | 36 +++++++++++++++++++-------- synapse/replication/tcp/handler.py | 24 ++++++++++-------- synapse/replication/tcp/resource.py | 47 ++++++++++++++++++++++++++++++++++- synapse/storage/util/id_generators.py | 10 ++++++++ synapse/storage/util/sequence.py | 2 ++ tests/storage/test_id_generators.py | 25 ++++++++++++------- 9 files changed, 128 insertions(+), 34 deletions(-) create mode 100644 changelog.d/8499.misc (limited to 'tests') diff --git a/changelog.d/8499.misc b/changelog.d/8499.misc new file mode 100644 index 0000000000..237cb3b311 --- /dev/null +++ b/changelog.d/8499.misc @@ -0,0 +1 @@ +Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md index db318baa9d..ad145439b4 100644 --- a/docs/tcp_replication.md +++ b/docs/tcp_replication.md @@ -15,7 +15,7 @@ example flow would be (where '>' indicates master to worker and > SERVER example.com < REPLICATE - > POSITION events master 53 + > POSITION events master 53 53 > RDATA events master 54 ["$foo1:bar.com", ...] > RDATA events master 55 ["$foo4:bar.com", ...] @@ -138,9 +138,9 @@ the wire: < NAME synapse.app.appservice < PING 1490197665618 < REPLICATE - > POSITION events master 1 - > POSITION backfill master 1 - > POSITION caches master 1 + > POSITION events master 1 1 + > POSITION backfill master 1 1 + > POSITION caches master 1 1 > RDATA caches master 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513] > RDATA events master 14 ["$149019767112vOHxz:localhost:8823", "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null] @@ -185,6 +185,11 @@ client (C): updates via HTTP API, rather than via the DB, then processes should make the request to the appropriate process. + Two positions are included, the "new" position and the last position sent respectively.
+ This allows servers to tell instances that the positions have advanced but no data has been written, without clients needlessly checking to see if they have missed any updates. + #### ERROR (S, C) There was an error diff --git a/synapse/replication/tcp/client.py index e165429cad..e27ee216f0 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -191,6 +191,10 @@ class ReplicationDataHandler: async def on_position(self, stream_name: str, instance_name: str, token: int): self.store.process_replication_rows(stream_name, instance_name, token, []) + # We poke the generic "replication" notifier to wake anything up that + # may be streaming. + self.notifier.notify_replication() + def on_remote_server_up(self, server: str): """Called when get a new REMOTE_SERVER_UP command.""" diff --git a/synapse/replication/tcp/commands.py index 8cd47770c1..ac532ed588 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -141,15 +141,23 @@ class RdataCommand(Command): class PositionCommand(Command): - """Sent by the server to tell the client the stream position without - needing to send an RDATA. + """Sent by an instance to tell others the stream position without needing to + send an RDATA. + + Two tokens are sent, the new position and the last position sent by the + instance (in an RDATA or other POSITION). The tokens are chosen so that *no* + rows were written by the instance between the `prev_token` and `new_token`. + (If an instance hasn't sent a position before then the new position can be + used for both.) Format:: - POSITION <stream_name> <instance_name> <token> + POSITION <stream_name> <instance_name> <prev_token> <new_token> - On receipt of a POSITION command clients should check if they have missed - any updates, and if so then fetch them out of band. + On receipt of a POSITION command instances should check if they have missed + any updates, and if so then fetch them out of band. Instances can check this + by comparing their view of the current token for the sending instance with + the included `prev_token`. The `<instance_name>` is the process that sent the command and is the source of the stream. @@ -157,18 +165,26 @@ class PositionCommand(Command): NAME = "POSITION" - def __init__(self, stream_name, instance_name, token): + def __init__(self, stream_name, instance_name, prev_token, new_token): self.stream_name = stream_name self.instance_name = instance_name - self.token = token + self.prev_token = prev_token + self.new_token = new_token @classmethod def from_line(cls, line): - stream_name, instance_name, token = line.split(" ", 2) - return cls(stream_name, instance_name, int(token)) + stream_name, instance_name, prev_token, new_token = line.split(" ", 3) + return cls(stream_name, instance_name, int(prev_token), int(new_token)) def to_line(self): - return " ".join((self.stream_name, self.instance_name, str(self.token))) + return " ".join( + ( + self.stream_name, + self.instance_name, + str(self.prev_token), + str(self.new_token), + ) + ) class ErrorCommand(_SimpleCommand): diff --git a/synapse/replication/tcp/handler.py index e92da7b263..95e5502bf2 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -101,8 +101,9 @@ class ReplicationCommandHandler: self._streams_to_replicate = [] # type: List[Stream] for stream in self._streams.values(): - if stream.NAME == CachesStream.NAME: - # All workers can write to the cache invalidation stream.
+ if hs.config.redis.redis_enabled and stream.NAME == CachesStream.NAME: + # All workers can write to the cache invalidation stream when + # using redis. self._streams_to_replicate.append(stream) continue @@ -313,11 +314,14 @@ class ReplicationCommandHandler: # We respond with current position of all streams this instance # replicates. for stream in self.get_streams_to_replicate(): + # Note that we use the current token as the prev token here (rather + # than stream.last_token), as we can't be sure that there have been + # no rows written between last token and the current token (since we + # might be racing with the replication sending bg process). + current_token = stream.current_token(self._instance_name) self.send_command( PositionCommand( - stream.NAME, - self._instance_name, - stream.current_token(self._instance_name), + stream.NAME, self._instance_name, current_token, current_token, ) ) @@ -511,16 +515,16 @@ class ReplicationCommandHandler: # If the position token matches our current token then we're up to # date and there's nothing to do. Otherwise, fetch all updates # between then and now. - missing_updates = cmd.token != current_token + missing_updates = cmd.prev_token != current_token while missing_updates: logger.info( "Fetching replication rows for '%s' between %i and %i", stream_name, current_token, - cmd.token, + cmd.new_token, ) (updates, current_token, missing_updates) = await stream.get_updates_since( - cmd.instance_name, current_token, cmd.token + cmd.instance_name, current_token, cmd.new_token ) # TODO: add some tests for this @@ -536,11 +540,11 @@ class ReplicationCommandHandler: [stream.parse_row(row) for row in rows], ) - logger.info("Caught up with stream '%s' to %i", stream_name, cmd.token) + logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token) # We've now caught up to position sent to us, notify handler. await self._replication_data_handler.on_position( - cmd.stream_name, cmd.instance_name, cmd.token + cmd.stream_name, cmd.instance_name, cmd.new_token ) self._streams_by_connection.setdefault(conn, set()).add(stream_name) diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 687984e7a8..666c13fdb7 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -23,7 +23,9 @@ from prometheus_client import Counter from twisted.internet.protocol import Factory from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.replication.tcp.commands import PositionCommand from synapse.replication.tcp.protocol import ServerReplicationStreamProtocol +from synapse.replication.tcp.streams import EventsStream from synapse.util.metrics import Measure stream_updates_counter = Counter( @@ -84,6 +86,23 @@ class ReplicationStreamer: # Set of streams to replicate. self.streams = self.command_handler.get_streams_to_replicate() + # If we have streams then we must have redis enabled or on master + assert ( + not self.streams + or hs.config.redis.redis_enabled + or not hs.config.worker.worker_app + ) + + # If we are replicating an event stream we want to periodically check if + # we should send updated POSITIONs. We do this as a looping call rather + # explicitly poking when the position advances (without new data to + # replicate) to reduce replication traffic (otherwise each writer would + # likely send a POSITION for each new event received over replication). + # + # Note that if the position hasn't advanced then we won't send anything. 
+ if any(EventsStream.NAME == s.NAME for s in self.streams): + self.clock.looping_call(self.on_notifier_poke, 1000) + def on_notifier_poke(self): """Checks if there is actually any new data and sends it to the connections if there are. @@ -91,7 +110,7 @@ class ReplicationStreamer: This should get called each time new data is available, even if it is currently being executed, so that nothing gets missed """ - if not self.command_handler.connected(): + if not self.command_handler.connected() or not self.streams: # Don't bother if nothing is listening. We still need to advance # the stream tokens otherwise they'll fall behind forever for stream in self.streams: @@ -136,6 +155,8 @@ class ReplicationStreamer: self._replication_torture_level / 1000.0 ) + last_token = stream.last_token + logger.debug( "Getting stream: %s: %s -> %s", stream.NAME, @@ -159,6 +180,30 @@ class ReplicationStreamer: ) stream_updates_counter.labels(stream.NAME).inc(len(updates)) + else: + # The token has advanced but there is no data to + # send, so we send a `POSITION` to inform other + # workers of the updated position. + if stream.NAME == EventsStream.NAME: + # XXX: We only do this for the EventStream as it + # turns out that e.g. account data streams share + # their "current token" with each other, meaning + # that it is *not* safe to send a POSITION. + logger.info( + "Sending position: %s -> %s", + stream.NAME, + current_token, + ) + self.command_handler.send_command( + PositionCommand( + stream.NAME, + self._instance_name, + last_token, + current_token, + ) + ) + continue + # Some streams return multiple rows with the same stream IDs, # we need to make sure they get sent out in batches. We do # this by setting the current token to all but the last of diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index d7e40aaa8b..3d8da48f2d 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -524,6 +524,16 @@ class MultiWriterIdGenerator: heapq.heappush(self._known_persisted_positions, new_id) + # If we're a writer and we don't have any active writes we update our + # current position to the latest position seen. This allows the instance + # to report a recent position when asked, rather than a potentially old + # one (if this instance hasn't written anything for a while). + our_current_position = self._current_positions.get(self._instance_name) + if our_current_position and not self._unfinished_ids: + self._current_positions[self._instance_name] = max( + our_current_position, new_id + ) + # We move the current min position up if the minimum current positions # of all instances is higher (since by definition all positions less # that that have been persisted). 
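To make the two-token POSITION semantics concrete: suppose an event persister (call it `persister1`; the instance name and token values are made up for illustration) last sent token 53 and has since advanced to 60 without writing any rows. It would periodically announce:

    > POSITION events persister1 53 60

A receiver whose current token for `persister1` is already 53 can fast-forward straight to 60 without fetching anything, while a receiver still at 50 knows it may have missed rows and fetches updates between 50 and 60 out of band; this is exactly the `cmd.prev_token != current_token` check in the `on_POSITION` handling above.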
diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index ff2d038ad2..4386b6101e 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -126,6 +126,8 @@ class PostgresSequenceGenerator(SequenceGenerator): if max_stream_id > last_value: logger.warning( "Postgres sequence %s is behind table %s: %d < %d", + self._sequence_name, + table, last_value, max_stream_id, ) diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 392b08832b..cc0612cf65 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -199,10 +199,17 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): first_id_gen = self._create_id_generator("first", writers=["first", "second"]) second_id_gen = self._create_id_generator("second", writers=["first", "second"]) - self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7}) - self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 3) + # The first ID gen will notice that it can advance its token to 7 as it + # has no in progress writes... + self.assertEqual(first_id_gen.get_positions(), {"first": 7, "second": 7}) + self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7) self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 7) + # ... but the second ID gen doesn't know that. + self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 7}) + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 3) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7) + # Try allocating a new ID gen and check that we only see position # advanced after we leave the context manager. @@ -211,7 +218,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.assertEqual(stream_id, 8) self.assertEqual( - first_id_gen.get_positions(), {"first": 3, "second": 7} + first_id_gen.get_positions(), {"first": 7, "second": 7} ) self.get_success(_get_next_async()) @@ -279,7 +286,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self._insert_row_with_id("first", 3) self._insert_row_with_id("second", 5) - id_gen = self._create_id_generator("first", writers=["first", "second"]) + id_gen = self._create_id_generator("worker", writers=["first", "second"]) self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5}) @@ -319,14 +326,14 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): id_gen = self._create_id_generator("first", writers=["first", "second"]) - self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5}) + self.assertEqual(id_gen.get_positions(), {"first": 5, "second": 5}) - self.assertEqual(id_gen.get_persisted_upto_position(), 3) + self.assertEqual(id_gen.get_persisted_upto_position(), 5) async def _get_next_async(): async with id_gen.get_next() as stream_id: self.assertEqual(stream_id, 6) - self.assertEqual(id_gen.get_persisted_upto_position(), 3) + self.assertEqual(id_gen.get_persisted_upto_position(), 5) self.get_success(_get_next_async()) @@ -388,7 +395,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self._insert_row_with_id("second", 5) # Initial config has two writers - id_gen = self._create_id_generator("first", writers=["first", "second"]) + id_gen = self._create_id_generator("worker", writers=["first", "second"]) self.assertEqual(id_gen.get_persisted_upto_position(), 3) self.assertEqual(id_gen.get_current_token_for_writer("first"), 3) 
self.assertEqual(id_gen.get_current_token_for_writer("second"), 5) @@ -568,7 +575,7 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): self.get_success(_get_next_async2()) - self.assertEqual(id_gen_1.get_positions(), {"first": -1, "second": -2}) + self.assertEqual(id_gen_1.get_positions(), {"first": -2, "second": -2}) self.assertEqual(id_gen_2.get_positions(), {"first": -1, "second": -2}) self.assertEqual(id_gen_1.get_persisted_upto_position(), -2) self.assertEqual(id_gen_2.get_persisted_upto_position(), -2) -- cgit 1.5.1 From b2486f6656bec2307e62de19d2830994a42b879d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Oct 2020 12:07:56 +0100 Subject: Fix message duplication if something goes wrong after persisting the event (#8476) Should fix #3365. --- changelog.d/8476.bugfix | 1 + synapse/handlers/federation.py | 9 +- synapse/handlers/message.py | 48 +++++-- synapse/handlers/room_member.py | 13 +- synapse/replication/http/send_event.py | 16 ++- synapse/storage/databases/main/events.py | 31 ++++ synapse/storage/databases/main/events_worker.py | 83 ++++++++++- synapse/storage/databases/main/registration.py | 6 +- .../databases/main/schema/delta/58/19txn_id.sql | 40 ++++++ synapse/storage/persist_events.py | 96 +++++++++++-- tests/handlers/test_message.py | 157 +++++++++++++++++++++ tests/rest/client/test_third_party_rules.py | 2 +- tests/unittest.py | 11 +- 13 files changed, 481 insertions(+), 32 deletions(-) create mode 100644 changelog.d/8476.bugfix create mode 100644 synapse/storage/databases/main/schema/delta/58/19txn_id.sql create mode 100644 tests/handlers/test_message.py (limited to 'tests') diff --git a/changelog.d/8476.bugfix b/changelog.d/8476.bugfix new file mode 100644 index 0000000000..993a269979 --- /dev/null +++ b/changelog.d/8476.bugfix @@ -0,0 +1 @@ +Fix message duplication if something goes wrong after persisting the event. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 5ac2fc5656..455acd7669 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2966,17 +2966,20 @@ class FederationHandler(BaseHandler): return result["max_stream_id"] else: assert self.storage.persistence - max_stream_token = await self.storage.persistence.persist_events( + + # Note that this returns the events that were persisted, which may not be + # the same as were passed in if some were deduplicated due to transaction IDs. + events, max_stream_token = await self.storage.persistence.persist_events( event_and_contexts, backfilled=backfilled ) if self._ephemeral_messages_enabled: - for (event, context) in event_and_contexts: + for event in events: # If there's an expiry timestamp on the event, schedule its expiry. self._message_handler.maybe_schedule_expiry(event) if not backfilled: # Never notify for backfilled events - for event, _ in event_and_contexts: + for event in events: await self._notify_persisted_event(event, max_stream_token) return max_stream_token.stream diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ad0b7bd868..b0da938aa9 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -689,7 +689,7 @@ class EventCreationHandler: send this event. Returns: - The event, and its stream ordering (if state event deduplication happened, + The event, and its stream ordering (if deduplication happened, the previous, duplicate event). 
Raises: @@ -712,6 +712,19 @@ class EventCreationHandler: # extremities to pile up, which in turn leads to state resolution # taking longer. with (await self.limiter.queue(event_dict["room_id"])): + if txn_id and requester.access_token_id: + existing_event_id = await self.store.get_event_id_from_transaction_id( + event_dict["room_id"], + requester.user.to_string(), + requester.access_token_id, + txn_id, + ) + if existing_event_id: + event = await self.store.get_event(existing_event_id) + # we know it was persisted, so must have a stream ordering + assert event.internal_metadata.stream_ordering + return event, event.internal_metadata.stream_ordering + event, context = await self.create_event( requester, event_dict, token_id=requester.access_token_id, txn_id=txn_id ) @@ -913,10 +926,20 @@ class EventCreationHandler: extra_users=extra_users, ) stream_id = result["stream_id"] - event.internal_metadata.stream_ordering = stream_id + event_id = result["event_id"] + if event_id != event.event_id: + # If we get a different event back then it means that its + # been de-duplicated, so we replace the given event with the + # one already persisted. + event = await self.store.get_event(event_id) + else: + # If we newly persisted the event then we need to update its + # stream_ordering entry manually (as it was persisted on + # another worker). + event.internal_metadata.stream_ordering = stream_id return event - stream_id = await self.persist_and_notify_client_event( + event = await self.persist_and_notify_client_event( requester, event, context, ratelimit=ratelimit, extra_users=extra_users ) @@ -965,11 +988,16 @@ class EventCreationHandler: context: EventContext, ratelimit: bool = True, extra_users: List[UserID] = [], - ) -> int: + ) -> EventBase: """Called when we have fully built the event, have already calculated the push actions for the event, and checked auth. This should only be run on the instance in charge of persisting events. + + Returns: + The persisted event. This may be different than the given event if + it was de-duplicated (e.g. because we had already persisted an + event with the same transaction ID.) """ assert self.storage.persistence is not None assert self._events_shard_config.should_handle( @@ -1137,9 +1165,13 @@ class EventCreationHandler: if prev_state_ids: raise AuthError(403, "Changing the room create event is forbidden") - event_pos, max_stream_token = await self.storage.persistence.persist_event( - event, context=context - ) + # Note that this returns the event that was persisted, which may not be + # the same as we passed in if it was deduplicated due transaction IDs. + ( + event, + event_pos, + max_stream_token, + ) = await self.storage.persistence.persist_event(event, context=context) if self._ephemeral_events_enabled: # If there's an expiry timestamp on the event, schedule its expiry. @@ -1160,7 +1192,7 @@ class EventCreationHandler: # matters as sometimes presence code can take a while. run_in_background(self._bump_active_time, requester.user) - return event_pos.stream + return event async def _bump_active_time(self, user: UserID) -> None: try: diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index ffbc62ff44..0080eeaf8d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -171,6 +171,17 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): if requester.is_guest: content["kind"] = "guest" + # Check if we already have an event with a matching transaction ID. 
(We + # do this check just before we persist an event as well, but may as well + # do it up front for efficiency.) + if txn_id and requester.access_token_id: + existing_event_id = await self.store.get_event_id_from_transaction_id( + room_id, requester.user.to_string(), requester.access_token_id, txn_id, + ) + if existing_event_id: + event_pos = await self.store.get_position_for_event(existing_event_id) + return existing_event_id, event_pos.stream + event, context = await self.event_creation_handler.create_event( requester, { @@ -679,7 +690,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): if is_blocked: raise SynapseError(403, "This room has been blocked on this server") - await self.event_creation_handler.handle_new_client_event( + event = await self.event_creation_handler.handle_new_client_event( requester, event, context, extra_users=[target_user], ratelimit=ratelimit ) diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index 9a3a694d5d..fc129dbaa7 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -46,6 +46,12 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): "ratelimit": true, "extra_users": [], } + + 200 OK + + { "stream_id": 12345, "event_id": "$abcdef..." } + + The returned event ID may not match the sent event if it was deduplicated. """ NAME = "send_event" @@ -116,11 +122,17 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): "Got event to send with ID: %s into room: %s", event.event_id, event.room_id ) - stream_id = await self.event_creation_handler.persist_and_notify_client_event( + event = await self.event_creation_handler.persist_and_notify_client_event( requester, event, context, ratelimit=ratelimit, extra_users=extra_users ) - return 200, {"stream_id": stream_id} + return ( + 200, + { + "stream_id": event.internal_metadata.stream_ordering, + "event_id": event.event_id, + }, + ) def register_servlets(hs, http_server): diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index b19c424ba9..fdb17745f6 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -361,6 +361,8 @@ class PersistEventsStore: self._store_event_txn(txn, events_and_contexts=events_and_contexts) + self._persist_transaction_ids_txn(txn, events_and_contexts) + # Insert into event_to_state_groups. self._store_event_state_mappings_txn(txn, events_and_contexts) @@ -405,6 +407,35 @@ class PersistEventsStore: # room_memberships, where applicable. self._update_current_state_txn(txn, state_delta_for_room, min_stream_order) + def _persist_transaction_ids_txn( + self, + txn: LoggingTransaction, + events_and_contexts: List[Tuple[EventBase, EventContext]], + ): + """Persist the mapping from transaction IDs to event IDs (if defined). 
+ """ + + to_insert = [] + for event, _ in events_and_contexts: + token_id = getattr(event.internal_metadata, "token_id", None) + txn_id = getattr(event.internal_metadata, "txn_id", None) + if token_id and txn_id: + to_insert.append( + { + "event_id": event.event_id, + "room_id": event.room_id, + "user_id": event.sender, + "token_id": token_id, + "txn_id": txn_id, + "inserted_ts": self._clock.time_msec(), + } + ) + + if to_insert: + self.db_pool.simple_insert_many_txn( + txn, table="event_txn_id", values=to_insert, + ) + def _update_current_state_txn( self, txn: LoggingTransaction, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 4e74fafe43..3ec4d1d9c2 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import itertools import logging import threading @@ -137,6 +136,15 @@ class EventsWorkerStore(SQLBaseStore): db_conn, "events", "stream_ordering", step=-1 ) + if not hs.config.worker.worker_app: + # We periodically clean out old transaction ID mappings + self._clock.looping_call( + run_as_background_process, + 5 * 60 * 1000, + "_cleanup_old_transaction_ids", + self._cleanup_old_transaction_ids, + ) + self._get_event_cache = Cache( "*getEvent*", keylen=3, @@ -1308,3 +1316,76 @@ class EventsWorkerStore(SQLBaseStore): return await self.db_pool.runInteraction( desc="get_next_event_to_expire", func=get_next_event_to_expire_txn ) + + async def get_event_id_from_transaction_id( + self, room_id: str, user_id: str, token_id: int, txn_id: str + ) -> Optional[str]: + """Look up if we have already persisted an event for the transaction ID, + returning the event ID if so. + """ + return await self.db_pool.simple_select_one_onecol( + table="event_txn_id", + keyvalues={ + "room_id": room_id, + "user_id": user_id, + "token_id": token_id, + "txn_id": txn_id, + }, + retcol="event_id", + allow_none=True, + desc="get_event_id_from_transaction_id", + ) + + async def get_already_persisted_events( + self, events: Iterable[EventBase] + ) -> Dict[str, str]: + """Look up if we have already persisted an event for the transaction ID, + returning a mapping from event ID in the given list to the event ID of + an existing event. + + Also checks if there are duplicates in the given events, if there are + will map duplicates to the *first* event. + """ + + mapping = {} + txn_id_to_event = {} # type: Dict[Tuple[str, int, str], str] + + for event in events: + token_id = getattr(event.internal_metadata, "token_id", None) + txn_id = getattr(event.internal_metadata, "txn_id", None) + + if token_id and txn_id: + # Check if this is a duplicate of an event in the given events. + existing = txn_id_to_event.get((event.room_id, token_id, txn_id)) + if existing: + mapping[event.event_id] = existing + continue + + # Check if this is a duplicate of an event we've already + # persisted. + existing = await self.get_event_id_from_transaction_id( + event.room_id, event.sender, token_id, txn_id + ) + if existing: + mapping[event.event_id] = existing + txn_id_to_event[(event.room_id, token_id, txn_id)] = existing + else: + txn_id_to_event[(event.room_id, token_id, txn_id)] = event.event_id + + return mapping + + async def _cleanup_old_transaction_ids(self): + """Cleans out transaction id mappings older than 24hrs. 
+ """ + + def _cleanup_old_transaction_ids_txn(txn): + sql = """ + DELETE FROM event_txn_id + WHERE inserted_ts < ? + """ + one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000 + txn.execute(sql, (one_day_ago,)) + + return await self.db_pool.runInteraction( + "_cleanup_old_transaction_ids", _cleanup_old_transaction_ids_txn, + ) diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 236d3cdbe3..9a003e30f9 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1003,7 +1003,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): token: str, device_id: Optional[str], valid_until_ms: Optional[int], - ) -> None: + ) -> int: """Adds an access token for the given user. Args: @@ -1013,6 +1013,8 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): valid_until_ms: when the token is valid until. None for no expiry. Raises: StoreError if there was a problem adding this. + Returns: + The token ID """ next_id = self._access_tokens_id_gen.get_next() @@ -1028,6 +1030,8 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): desc="add_access_token_to_user", ) + return next_id + def _set_device_for_access_token_txn(self, txn, token: str, device_id: str) -> str: old_device_id = self.db_pool.simple_select_one_onecol_txn( txn, "access_tokens", {"token": token}, "device_id" diff --git a/synapse/storage/databases/main/schema/delta/58/19txn_id.sql b/synapse/storage/databases/main/schema/delta/58/19txn_id.sql new file mode 100644 index 0000000000..b2454121a8 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/19txn_id.sql @@ -0,0 +1,40 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- A map of recent events persisted with transaction IDs. Used to deduplicate +-- send event requests with the same transaction ID. +-- +-- Note: transaction IDs are scoped to the room ID/user ID/access token that was +-- used to make the request. +-- +-- Note: The foreign key constraints are ON DELETE CASCADE, as if we delete the +-- events or access token we don't want to try and de-duplicate the event. 
+CREATE TABLE IF NOT EXISTS event_txn_id (
+    event_id TEXT NOT NULL,
+    room_id TEXT NOT NULL,
+    user_id TEXT NOT NULL,
+    token_id BIGINT NOT NULL,
+    txn_id TEXT NOT NULL,
+    inserted_ts BIGINT NOT NULL,
+    FOREIGN KEY (event_id)
+        REFERENCES events (event_id) ON DELETE CASCADE,
+    FOREIGN KEY (token_id)
+        REFERENCES access_tokens (id) ON DELETE CASCADE
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS event_txn_id_event_id ON event_txn_id(event_id);
+CREATE UNIQUE INDEX IF NOT EXISTS event_txn_id_txn_id ON event_txn_id(room_id, user_id, token_id, txn_id);
+CREATE INDEX IF NOT EXISTS event_txn_id_ts ON event_txn_id(inserted_ts);
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index 4d2d88d1f0..70e636b0ba 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -96,7 +96,9 @@ class _EventPeristenceQueue:
 
         Returns:
             defer.Deferred: a deferred which will resolve once the events are
-                persisted. Runs its callbacks *without* a logcontext.
+                persisted. Runs its callbacks *without* a logcontext. The result
+                is the same as that returned by the callback passed to
+                `handle_queue`.
         """
         queue = self._event_persist_queues.setdefault(room_id, deque())
         if queue:
@@ -199,7 +201,7 @@ class EventsPersistenceStorage:
         self,
         events_and_contexts: Iterable[Tuple[EventBase, EventContext]],
         backfilled: bool = False,
-    ) -> RoomStreamToken:
+    ) -> Tuple[List[EventBase], RoomStreamToken]:
         """
         Write events to the database
         Args:
@@ -209,7 +211,11 @@ class EventsPersistenceStorage:
                 which might update the current state etc.
 
         Returns:
-            the stream ordering of the latest persisted event
+            The list of events persisted and the current room stream position.
+            The list of events persisted may not be the same as those passed in
+            if they were deduplicated due to an event already existing that
+            matched the transaction ID; the existing event is returned in such
+            a case.
         """
         partitioned = {}  # type: Dict[str, List[Tuple[EventBase, EventContext]]]
         for event, ctx in events_and_contexts:
@@ -225,19 +231,41 @@ class EventsPersistenceStorage:
         for room_id in partitioned:
             self._maybe_start_persisting(room_id)
 
-        await make_deferred_yieldable(
+        # Each deferred returns a map from event ID to existing event ID if the
+        # event was deduplicated. (The dict may also include other entries if
+        # the event was persisted in a batch with other events).
+        #
+        # Since we use `defer.gatherResults` we need to merge the returned list
+        # of dicts into one.
+        ret_vals = await make_deferred_yieldable(
             defer.gatherResults(deferreds, consumeErrors=True)
         )
+        replaced_events = {}
+        for d in ret_vals:
+            replaced_events.update(d)
+
+        events = []
+        for event, _ in events_and_contexts:
+            existing_event_id = replaced_events.get(event.event_id)
+            if existing_event_id:
+                events.append(await self.main_store.get_event(existing_event_id))
+            else:
+                events.append(event)
 
-        return self.main_store.get_room_max_token()
+        return (
+            events,
+            self.main_store.get_room_max_token(),
+        )
 
     async def persist_event(
         self, event: EventBase, context: EventContext, backfilled: bool = False
-    ) -> Tuple[PersistedEventPosition, RoomStreamToken]:
+    ) -> Tuple[EventBase, PersistedEventPosition, RoomStreamToken]:
         """
         Returns:
-            The stream ordering of `event`, and the stream ordering of the
-                latest persisted event
+            The event, stream ordering of `event`, and the stream ordering of the
+                latest persisted event.
The returned event may not match the given + event if it was deduplicated due to an existing event matching the + transaction ID. """ deferred = self._event_persist_queue.add_to_queue( event.room_id, [(event, context)], backfilled=backfilled @@ -245,19 +273,33 @@ class EventsPersistenceStorage: self._maybe_start_persisting(event.room_id) - await make_deferred_yieldable(deferred) + # The deferred returns a map from event ID to existing event ID if the + # event was deduplicated. (The dict may also include other entries if + # the event was persisted in a batch with other events.) + replaced_events = await make_deferred_yieldable(deferred) + replaced_event = replaced_events.get(event.event_id) + if replaced_event: + event = await self.main_store.get_event(replaced_event) event_stream_id = event.internal_metadata.stream_ordering # stream ordering should have been assigned by now assert event_stream_id pos = PersistedEventPosition(self._instance_name, event_stream_id) - return pos, self.main_store.get_room_max_token() + return event, pos, self.main_store.get_room_max_token() def _maybe_start_persisting(self, room_id: str): + """Pokes the `_event_persist_queue` to start handling new items in the + queue, if not already in progress. + + Causes the deferreds returned by `add_to_queue` to resolve with: a + dictionary of event ID to event ID we didn't persist as we already had + another event persisted with the same TXN ID. + """ + async def persisting_queue(item): with Measure(self._clock, "persist_events"): - await self._persist_events( + return await self._persist_events( item.events_and_contexts, backfilled=item.backfilled ) @@ -267,12 +309,38 @@ class EventsPersistenceStorage: self, events_and_contexts: List[Tuple[EventBase, EventContext]], backfilled: bool = False, - ): + ) -> Dict[str, str]: """Calculates the change to current state and forward extremities, and persists the given events and with those updates. + + Returns: + A dictionary of event ID to event ID we didn't persist as we already + had another event persisted with the same TXN ID. """ + replaced_events = {} # type: Dict[str, str] if not events_and_contexts: - return + return replaced_events + + # Check if any of the events have a transaction ID that has already been + # persisted, and if so we don't persist it again. + # + # We should have checked this a long time before we get here, but it's + # possible that different send event requests race in such a way that + # they both pass the earlier checks. Checking here isn't racey as we can + # have only one `_persist_events` per room being called at a time. + replaced_events = await self.main_store.get_already_persisted_events( + (event for event, _ in events_and_contexts) + ) + + if replaced_events: + events_and_contexts = [ + (e, ctx) + for e, ctx in events_and_contexts + if e.event_id not in replaced_events + ] + + if not events_and_contexts: + return replaced_events chunks = [ events_and_contexts[x : x + 100] @@ -441,6 +509,8 @@ class EventsPersistenceStorage: await self._handle_potentially_left_users(potentially_left_users) + return replaced_events + async def _calculate_new_extremities( self, room_id: str, diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py new file mode 100644 index 0000000000..64e28bc639 --- /dev/null +++ b/tests/handlers/test_message.py @@ -0,0 +1,157 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import Tuple
+
+from synapse.api.constants import EventTypes
+from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
+from synapse.rest import admin
+from synapse.rest.client.v1 import login, room
+from synapse.types import create_requester
+from synapse.util.stringutils import random_string
+
+from tests import unittest
+
+logger = logging.getLogger(__name__)
+
+
+class EventCreationTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.handler = self.hs.get_event_creation_handler()
+        self.persist_event_storage = self.hs.get_storage().persistence
+
+        self.user_id = self.register_user("tester", "foobar")
+        self.access_token = self.login("tester", "foobar")
+        self.room_id = self.helper.create_room_as(self.user_id, tok=self.access_token)
+
+        self.info = self.get_success(
+            self.hs.get_datastore().get_user_by_access_token(self.access_token,)
+        )
+        self.token_id = self.info["token_id"]
+
+        self.requester = create_requester(self.user_id, access_token_id=self.token_id)
+
+    def _create_duplicate_event(self, txn_id: str) -> Tuple[EventBase, EventContext]:
+        """Create a new event with the given transaction ID. All events produced
+        by this method will be considered duplicates.
+        """
+
+        # We create a new event with a random body, as otherwise we'll produce
+        # *exactly* the same event with the same hash, and so same event ID.
+        return self.get_success(
+            self.handler.create_event(
+                self.requester,
+                {
+                    "type": EventTypes.Message,
+                    "room_id": self.room_id,
+                    "sender": self.requester.user.to_string(),
+                    "content": {"msgtype": "m.text", "body": random_string(5)},
+                },
+                token_id=self.token_id,
+                txn_id=txn_id,
+            )
+        )
+
+    def test_duplicated_txn_id(self):
+        """Test that attempting to handle/persist an event with a transaction ID
+        that has already been persisted correctly returns the old event and does
+        *not* produce duplicate messages.
+        """
+
+        txn_id = "something_suitably_random"
+
+        event1, context = self._create_duplicate_event(txn_id)
+
+        ret_event1 = self.get_success(
+            self.handler.handle_new_client_event(self.requester, event1, context)
+        )
+        stream_id1 = ret_event1.internal_metadata.stream_ordering
+
+        self.assertEqual(event1.event_id, ret_event1.event_id)
+
+        event2, context = self._create_duplicate_event(txn_id)
+
+        # We want to test that the deduplication at the persist event end works,
+        # so we want to make sure we test with different events.
+        self.assertNotEqual(event1.event_id, event2.event_id)
+
+        ret_event2 = self.get_success(
+            self.handler.handle_new_client_event(self.requester, event2, context)
+        )
+        stream_id2 = ret_event2.internal_metadata.stream_ordering
+
+        # Assert that the returned values match those from the initial event
+        # rather than the new one.
+        self.assertEqual(ret_event1.event_id, ret_event2.event_id)
+        self.assertEqual(stream_id1, stream_id2)
+
+        # Let's test that calling `persist_event` directly also does the right
+        # thing.
+        event3, context = self._create_duplicate_event(txn_id)
+        self.assertNotEqual(event1.event_id, event3.event_id)
+
+        ret_event3, event_pos3, _ = self.get_success(
+            self.persist_event_storage.persist_event(event3, context)
+        )
+
+        # Assert that the returned values match those from the initial event
+        # rather than the new one.
+        self.assertEqual(ret_event1.event_id, ret_event3.event_id)
+        self.assertEqual(stream_id1, event_pos3.stream)
+
+        # Let's test that calling `persist_events` directly also does the right
+        # thing.
+        event4, context = self._create_duplicate_event(txn_id)
+        self.assertNotEqual(event1.event_id, event4.event_id)
+
+        events, _ = self.get_success(
+            self.persist_event_storage.persist_events([(event4, context)])
+        )
+        ret_event4 = events[0]
+
+        # Assert that the returned values match those from the initial event
+        # rather than the new one.
+        self.assertEqual(ret_event1.event_id, ret_event4.event_id)
+
+    def test_duplicated_txn_id_one_call(self):
+        """Test that we correctly handle duplicates that we try and persist at
+        the same time.
+        """
+
+        txn_id = "something_else_suitably_random"
+
+        # Create two duplicate events to persist at the same time
+        event1, context1 = self._create_duplicate_event(txn_id)
+        event2, context2 = self._create_duplicate_event(txn_id)
+
+        # Ensure their event IDs are different to start with
+        self.assertNotEqual(event1.event_id, event2.event_id)
+
+        events, _ = self.get_success(
+            self.persist_event_storage.persist_events(
+                [(event1, context1), (event2, context2)]
+            )
+        )
+
+        # Check that we've deduplicated the events.
+        self.assertEqual(len(events), 2)
+        self.assertEqual(events[0].event_id, events[1].event_id)
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py
index d03e121664..b737625e33 100644
--- a/tests/rest/client/test_third_party_rules.py
+++ b/tests/rest/client/test_third_party_rules.py
@@ -107,7 +107,7 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase):
 
         request, channel = self.make_request(
             "PUT",
-            "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % self.room_id,
+            "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/2" % self.room_id,
             {},
             access_token=self.tok,
         )
diff --git a/tests/unittest.py b/tests/unittest.py
index 5c87f6097e..6c1661c92c 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -254,17 +254,24 @@ class HomeserverTestCase(TestCase):
 
         if hasattr(self, "user_id"):
             if self.hijack_auth:
+                # We need a valid token ID to satisfy foreign key constraints.
+                token_id = self.get_success(
+                    self.hs.get_datastore().add_access_token_to_user(
+                        self.helper.auth_user_id, "some_fake_token", None, None,
+                    )
+                )
+
             async def get_user_by_access_token(token=None, allow_guest=False):
                 return {
                     "user": UserID.from_string(self.helper.auth_user_id),
-                    "token_id": 1,
+                    "token_id": token_id,
                     "is_guest": False,
                 }
 
             async def get_user_by_req(request, allow_guest=False, rights="access"):
                 return create_requester(
                     UserID.from_string(self.helper.auth_user_id),
-                    1,
+                    token_id,
                     False,
                     False,
                     None,
-- cgit 1.5.1

From 629a951b49ae58af43323e6829cf49d7452ebf39 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 13 Oct 2020 08:20:32 -0400
Subject: Move additional tasks to the background worker, part 4 (#8513)

---
 changelog.d/8513.feature                       |   1 +
 synapse/handlers/account_validity.py           |   3 +-
 synapse/handlers/deactivate_account.py         |   2 +-
 synapse/handlers/message.py                    |  18 +--
 synapse/handlers/pagination.py                 |   2 +-
 synapse/handlers/profile.py                    |  29 ++---
 synapse/server.py                              |  12 +-
 synapse/storage/databases/main/profile.py      |  82 ++++++------
 synapse/storage/databases/main/registration.py |  52 ++++----
 synapse/storage/databases/main/room.py         | 168 ++++++++++++-------------
 tests/handlers/test_typing.py                  |  48 +++----
 11 files changed, 196 insertions(+), 221 deletions(-)
 create mode 100644 changelog.d/8513.feature

(limited to 'tests')

diff --git a/changelog.d/8513.feature b/changelog.d/8513.feature
new file mode 100644
index 0000000000..542993110b
--- /dev/null
+++ b/changelog.d/8513.feature
@@ -0,0 +1 @@
+Allow running background tasks in a separate worker process.
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 4caf6d591a..f33044e97a 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -70,7 +70,8 @@ class AccountValidityHandler:
             "send_renewals", self._send_renewal_emails
         )
 
-        self.clock.looping_call(send_emails, 30 * 60 * 1000)
+        if hs.config.run_background_tasks:
+            self.clock.looping_call(send_emails, 30 * 60 * 1000)
 
     async def _send_renewal_emails(self):
         """Gets the list of users whose account is expiring in the amount of time
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 58c9f12686..4efe6c530a 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -45,7 +45,7 @@ class DeactivateAccountHandler(BaseHandler):
 
         # Start the user parter loop so it can resume parting users from rooms where
         # it left off (if it has work left to do).
-        if hs.config.worker_app is None:
+        if hs.config.run_background_tasks:
             hs.get_reactor().callWhenRunning(self._start_user_parting)
 
         self._account_validity_enabled = hs.config.account_validity.enabled
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index b0da938aa9..c52e6824d3 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -402,21 +402,23 @@ class EventCreationHandler:
             self.config.block_events_without_consent_error
         )
 
+        # we need to construct a ConsentURIBuilder here, as it checks that the necessary
+        # config options are set, but *only* if we have a configuration for which we are
+        # going to need it.
+        if self._block_events_without_consent_error:
+            self._consent_uri_builder = ConsentURIBuilder(self.config)
+
         # Rooms which should be excluded from dummy insertion. (For instance,
         # those without local users who can send events into the room).
        #
        # map from room id to time-of-last-attempt.
+        #
         self._rooms_to_exclude_from_dummy_event_insertion = {}  # type: Dict[str, int]
-
-        # we need to construct a ConsentURIBuilder here, as it checks that the necessary
-        # config options, but *only* if we have a configuration for which we are
-        # going to need it.
-        if self._block_events_without_consent_error:
-            self._consent_uri_builder = ConsentURIBuilder(self.config)
+        # The number of forward extremities before a dummy event is sent.
+        self._dummy_events_threshold = hs.config.dummy_events_threshold
 
         if (
-            not self.config.worker_app
+            self.config.run_background_tasks
             and self.config.cleanup_extremities_with_dummy_events
         ):
             self.clock.looping_call(
@@ -431,8 +433,6 @@ class EventCreationHandler:
 
         self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages
 
-        self._dummy_events_threshold = hs.config.dummy_events_threshold
-
     async def create_event(
         self,
         requester: Requester,
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 085b685959..426b58da9e 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -92,7 +92,7 @@ class PaginationHandler:
         self._retention_allowed_lifetime_min = hs.config.retention_allowed_lifetime_min
         self._retention_allowed_lifetime_max = hs.config.retention_allowed_lifetime_max
 
-        if hs.config.retention_enabled:
+        if hs.config.run_background_tasks and hs.config.retention_enabled:
             # Run the purge jobs described in the configuration file.
             for job in hs.config.retention_purge_jobs:
                 logger.info("Setting up purge job with config: %s", job)
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 5453e6dfc8..b784938755 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -35,14 +35,16 @@ MAX_DISPLAYNAME_LEN = 256
 MAX_AVATAR_URL_LEN = 1000
 
 
-class BaseProfileHandler(BaseHandler):
+class ProfileHandler(BaseHandler):
     """Handles fetching and updating user profile information.
 
-    BaseProfileHandler can be instantiated directly on workers and will
-    delegate to master when necessary. The master process should use the
-    subclass MasterProfileHandler
+    ProfileHandler can be instantiated directly on workers and will
+    delegate to master when necessary.
""" + PROFILE_UPDATE_MS = 60 * 1000 + PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 + def __init__(self, hs): super().__init__(hs) @@ -53,6 +55,11 @@ class BaseProfileHandler(BaseHandler): self.user_directory_handler = hs.get_user_directory_handler() + if hs.config.run_background_tasks: + self.clock.looping_call( + self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS + ) + async def get_profile(self, user_id): target_user = UserID.from_string(user_id) @@ -363,20 +370,6 @@ class BaseProfileHandler(BaseHandler): raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN) raise - -class MasterProfileHandler(BaseProfileHandler): - PROFILE_UPDATE_MS = 60 * 1000 - PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 - - def __init__(self, hs): - super().__init__(hs) - - assert hs.config.worker_app is None - - self.clock.looping_call( - self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS - ) - def _start_update_remote_profile_cache(self): return run_as_background_process( "Update remote profile", self._update_remote_profile_cache diff --git a/synapse/server.py b/synapse/server.py index e793793cdc..f921ee4b53 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -75,7 +75,7 @@ from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler from synapse.handlers.password_policy import PasswordPolicyHandler from synapse.handlers.presence import PresenceHandler -from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler +from synapse.handlers.profile import ProfileHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.receipts import ReceiptsHandler from synapse.handlers.register import RegistrationHandler @@ -191,7 +191,12 @@ class HomeServer(metaclass=abc.ABCMeta): """ REQUIRED_ON_BACKGROUND_TASK_STARTUP = [ + "account_validity", "auth", + "deactivate_account", + "message", + "pagination", + "profile", "stats", ] @@ -462,10 +467,7 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_profile_handler(self): - if self.config.worker_app: - return BaseProfileHandler(self) - else: - return MasterProfileHandler(self) + return ProfileHandler(self) @cache_in_self def get_event_creation_handler(self) -> EventCreationHandler: diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index d2e0685e9e..1681caa1f0 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -91,27 +91,6 @@ class ProfileWorkerStore(SQLBaseStore): desc="set_profile_avatar_url", ) - -class ProfileStore(ProfileWorkerStore): - async def add_remote_profile_cache( - self, user_id: str, displayname: str, avatar_url: str - ) -> None: - """Ensure we are caching the remote user's profiles. - - This should only be called when `is_subscribed_remote_profile_for_user` - would return true for the user. - """ - await self.db_pool.simple_upsert( - table="remote_profile_cache", - keyvalues={"user_id": user_id}, - values={ - "displayname": displayname, - "avatar_url": avatar_url, - "last_check": self._clock.time_msec(), - }, - desc="add_remote_profile_cache", - ) - async def update_remote_profile_cache( self, user_id: str, displayname: str, avatar_url: str ) -> int: @@ -138,6 +117,31 @@ class ProfileStore(ProfileWorkerStore): desc="delete_remote_profile_cache", ) + async def is_subscribed_remote_profile_for_user(self, user_id): + """Check whether we are interested in a remote user's profile. 
+        """
+        res = await self.db_pool.simple_select_one_onecol(
+            table="group_users",
+            keyvalues={"user_id": user_id},
+            retcol="user_id",
+            allow_none=True,
+            desc="should_update_remote_profile_cache_for_user",
+        )
+
+        if res:
+            return True
+
+        res = await self.db_pool.simple_select_one_onecol(
+            table="group_invites",
+            keyvalues={"user_id": user_id},
+            retcol="user_id",
+            allow_none=True,
+            desc="should_update_remote_profile_cache_for_user",
+        )
+
+        if res:
+            return True
+
     async def get_remote_profile_cache_entries_that_expire(
         self, last_checked: int
     ) -> Dict[str, str]:
@@ -160,27 +164,23 @@ class ProfileStore(ProfileWorkerStore):
             _get_remote_profile_cache_entries_that_expire_txn,
         )
 
-    async def is_subscribed_remote_profile_for_user(self, user_id):
-        """Check whether we are interested in a remote user's profile.
-        """
-        res = await self.db_pool.simple_select_one_onecol(
-            table="group_users",
-            keyvalues={"user_id": user_id},
-            retcol="user_id",
-            allow_none=True,
-            desc="should_update_remote_profile_cache_for_user",
-        )
 
-        if res:
-            return True
+class ProfileStore(ProfileWorkerStore):
+    async def add_remote_profile_cache(
+        self, user_id: str, displayname: str, avatar_url: str
+    ) -> None:
+        """Ensure we are caching the remote user's profiles.
 
-        res = await self.db_pool.simple_select_one_onecol(
-            table="group_invites",
+        This should only be called when `is_subscribed_remote_profile_for_user`
+        would return true for the user.
+        """
+        await self.db_pool.simple_upsert(
+            table="remote_profile_cache",
             keyvalues={"user_id": user_id},
-            retcol="user_id",
-            allow_none=True,
-            desc="should_update_remote_profile_cache_for_user",
+            values={
+                "displayname": displayname,
+                "avatar_url": avatar_url,
+                "last_check": self._clock.time_msec(),
+            },
+            desc="add_remote_profile_cache",
         )
-
-        if res:
-            return True
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 9a003e30f9..4c843b7679 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -862,6 +862,32 @@ class RegistrationWorkerStore(SQLBaseStore):
             values={"expiration_ts_ms": expiration_ts, "email_sent": False},
         )
 
+    async def get_user_pending_deactivation(self) -> Optional[str]:
+        """
+        Gets one user from the table of users waiting to be parted from all the rooms
+        they're in.
+        """
+        return await self.db_pool.simple_select_one_onecol(
+            "users_pending_deactivation",
+            keyvalues={},
+            retcol="user_id",
+            allow_none=True,
+            desc="get_users_pending_deactivation",
+        )
+
+    async def del_user_pending_deactivation(self, user_id: str) -> None:
+        """
+        Removes the given user from the table of users who need to be parted from all the
+        rooms they're in, effectively marking that user as fully deactivated.
+        """
+        # XXX: This should be simple_delete_one but we failed to put a unique index on
+        # the table, so somehow duplicate entries have ended up in it.
+        await self.db_pool.simple_delete(
+            "users_pending_deactivation",
+            keyvalues={"user_id": user_id},
+            desc="del_user_pending_deactivation",
+        )
+
 
 class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
     def __init__(self, database: DatabasePool, db_conn, hs):
@@ -1371,32 +1397,6 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             desc="add_user_pending_deactivation",
         )
 
-    async def del_user_pending_deactivation(self, user_id: str) -> None:
-        """
-        Removes the given user to the table of users who need to be parted from all the
-        rooms they're in, effectively marking that user as fully deactivated.
-        """
-        # XXX: This should be simple_delete_one but we failed to put a unique index on
-        # the table, so somehow duplicate entries have ended up in it.
-        await self.db_pool.simple_delete(
-            "users_pending_deactivation",
-            keyvalues={"user_id": user_id},
-            desc="del_user_pending_deactivation",
-        )
-
-    async def get_user_pending_deactivation(self) -> Optional[str]:
-        """
-        Gets one user from the table of users waiting to be parted from all the rooms
-        they're in.
-        """
-        return await self.db_pool.simple_select_one_onecol(
-            "users_pending_deactivation",
-            keyvalues={},
-            retcol="user_id",
-            allow_none=True,
-            desc="get_users_pending_deactivation",
-        )
-
     async def validate_threepid_session(
         self, session_id: str, client_secret: str, token: str, current_ts: int
     ) -> Optional[str]:
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index c0f2af0785..e83d961c20 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -869,6 +869,89 @@ class RoomWorkerStore(SQLBaseStore):
             "get_all_new_public_rooms", get_all_new_public_rooms
         )
 
+    async def get_rooms_for_retention_period_in_range(
+        self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False
+    ) -> Dict[str, dict]:
+        """Retrieves all of the rooms within the given retention range.
+
+        Optionally includes the rooms which don't have a retention policy.
+
+        Args:
+            min_ms: Duration in milliseconds that defines the lower limit of
+                the range to handle (exclusive). If None, doesn't set a lower limit.
+            max_ms: Duration in milliseconds that defines the upper limit of
+                the range to handle (inclusive). If None, doesn't set an upper limit.
+            include_null: Whether to include rooms whose retention policy is NULL
+                in the returned set.
+
+        Returns:
+            The rooms within this range, along with their retention
+            policy. The key is "room_id", and maps to a dict describing the retention
+            policy associated with this room ID. The keys for this nested dict are
+            "min_lifetime" (int|None), and "max_lifetime" (int|None).
+        """
+
+        def get_rooms_for_retention_period_in_range_txn(txn):
+            range_conditions = []
+            args = []
+
+            if min_ms is not None:
+                range_conditions.append("max_lifetime > ?")
+                args.append(min_ms)
+
+            if max_ms is not None:
+                range_conditions.append("max_lifetime <= ?")
+                args.append(max_ms)
+
+            # Do a first query which will retrieve the rooms that have a retention policy
+            # in their current state.
+ sql = """ + SELECT room_id, min_lifetime, max_lifetime FROM room_retention + INNER JOIN current_state_events USING (event_id, room_id) + """ + + if len(range_conditions): + sql += " WHERE (" + " AND ".join(range_conditions) + ")" + + if include_null: + sql += " OR max_lifetime IS NULL" + + txn.execute(sql, args) + + rows = self.db_pool.cursor_to_dict(txn) + rooms_dict = {} + + for row in rows: + rooms_dict[row["room_id"]] = { + "min_lifetime": row["min_lifetime"], + "max_lifetime": row["max_lifetime"], + } + + if include_null: + # If required, do a second query that retrieves all of the rooms we know + # of so we can handle rooms with no retention policy. + sql = "SELECT DISTINCT room_id FROM current_state_events" + + txn.execute(sql) + + rows = self.db_pool.cursor_to_dict(txn) + + # If a room isn't already in the dict (i.e. it doesn't have a retention + # policy in its state), add it with a null policy. + for row in rows: + if row["room_id"] not in rooms_dict: + rooms_dict[row["room_id"]] = { + "min_lifetime": None, + "max_lifetime": None, + } + + return rooms_dict + + return await self.db_pool.runInteraction( + "get_rooms_for_retention_period_in_range", + get_rooms_for_retention_period_in_range_txn, + ) + class RoomBackgroundUpdateStore(SQLBaseStore): REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory" @@ -1446,88 +1529,3 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): self.is_room_blocked, (room_id,), ) - - async def get_rooms_for_retention_period_in_range( - self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False - ) -> Dict[str, dict]: - """Retrieves all of the rooms within the given retention range. - - Optionally includes the rooms which don't have a retention policy. - - Args: - min_ms: Duration in milliseconds that define the lower limit of - the range to handle (exclusive). If None, doesn't set a lower limit. - max_ms: Duration in milliseconds that define the upper limit of - the range to handle (inclusive). If None, doesn't set an upper limit. - include_null: Whether to include rooms which retention policy is NULL - in the returned set. - - Returns: - The rooms within this range, along with their retention - policy. The key is "room_id", and maps to a dict describing the retention - policy associated with this room ID. The keys for this nested dict are - "min_lifetime" (int|None), and "max_lifetime" (int|None). - """ - - def get_rooms_for_retention_period_in_range_txn(txn): - range_conditions = [] - args = [] - - if min_ms is not None: - range_conditions.append("max_lifetime > ?") - args.append(min_ms) - - if max_ms is not None: - range_conditions.append("max_lifetime <= ?") - args.append(max_ms) - - # Do a first query which will retrieve the rooms that have a retention policy - # in their current state. - sql = """ - SELECT room_id, min_lifetime, max_lifetime FROM room_retention - INNER JOIN current_state_events USING (event_id, room_id) - """ - - if len(range_conditions): - sql += " WHERE (" + " AND ".join(range_conditions) + ")" - - if include_null: - sql += " OR max_lifetime IS NULL" - - txn.execute(sql, args) - - rows = self.db_pool.cursor_to_dict(txn) - rooms_dict = {} - - for row in rows: - rooms_dict[row["room_id"]] = { - "min_lifetime": row["min_lifetime"], - "max_lifetime": row["max_lifetime"], - } - - if include_null: - # If required, do a second query that retrieves all of the rooms we know - # of so we can handle rooms with no retention policy. 
- sql = "SELECT DISTINCT room_id FROM current_state_events" - - txn.execute(sql) - - rows = self.db_pool.cursor_to_dict(txn) - - # If a room isn't already in the dict (i.e. it doesn't have a retention - # policy in its state), add it with a null policy. - for row in rows: - if row["room_id"] not in rooms_dict: - rooms_dict[row["room_id"]] = { - "min_lifetime": None, - "max_lifetime": None, - } - - return rooms_dict - - rooms = await self.db_pool.runInteraction( - "get_rooms_for_retention_period_in_range", - get_rooms_for_retention_period_in_range_txn, - ) - - return rooms diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 3fec09ea8a..16ff2e22d2 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -65,26 +65,6 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): mock_federation_client = Mock(spec=["put_json"]) mock_federation_client.put_json.return_value = defer.succeed((200, "OK")) - datastores = Mock() - datastores.main = Mock( - spec=[ - # Bits that Federation needs - "prep_send_transaction", - "delivered_txn", - "get_received_txn_response", - "set_received_txn_response", - "get_destination_last_successful_stream_ordering", - "get_destination_retry_timings", - "get_devices_by_remote", - "maybe_store_room_on_invite", - # Bits that user_directory needs - "get_user_directory_stream_pos", - "get_current_state_deltas", - "get_device_updates_by_remote", - "get_room_max_stream_ordering", - ] - ) - # the tests assume that we are starting at unix time 1000 reactor.pump((1000,)) @@ -95,8 +75,6 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): replication_streams={}, ) - hs.datastores = datastores - return hs def prepare(self, reactor, clock, hs): @@ -114,16 +92,16 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): "retry_interval": 0, "failure_ts": None, } - self.datastore.get_destination_retry_timings.return_value = defer.succeed( - retry_timings_res + self.datastore.get_destination_retry_timings = Mock( + return_value=defer.succeed(retry_timings_res) ) - self.datastore.get_device_updates_by_remote.return_value = make_awaitable( - (0, []) + self.datastore.get_device_updates_by_remote = Mock( + return_value=make_awaitable((0, [])) ) - self.datastore.get_destination_last_successful_stream_ordering.return_value = make_awaitable( - None + self.datastore.get_destination_last_successful_stream_ordering = Mock( + return_value=make_awaitable(None) ) def get_received_txn_response(*args): @@ -145,17 +123,19 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.datastore.get_joined_hosts_for_room = get_joined_hosts_for_room - def get_users_in_room(room_id): - return defer.succeed({str(u) for u in self.room_members}) + async def get_users_in_room(room_id): + return {str(u) for u in self.room_members} self.datastore.get_users_in_room = get_users_in_room - self.datastore.get_user_directory_stream_pos.side_effect = ( - # we deliberately return a non-None stream pos to avoid doing an initial_spam - lambda: make_awaitable(1) + self.datastore.get_user_directory_stream_pos = Mock( + side_effect=( + # we deliberately return a non-None stream pos to avoid doing an initial_spam + lambda: make_awaitable(1) + ) ) - self.datastore.get_current_state_deltas.return_value = (0, None) + self.datastore.get_current_state_deltas = Mock(return_value=(0, None)) self.datastore.get_to_device_stream_token = lambda: 0 self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: make_awaitable( -- cgit 
1.5.1 From d9d86c29965ceaf92927f061375a0d7a888aea67 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 13 Oct 2020 23:06:36 +0100 Subject: Remove redundant `token_id` parameter to create_event this is always the same as requester.access_token_id. --- synapse/handlers/message.py | 8 +++----- synapse/handlers/room.py | 1 - synapse/handlers/room_member.py | 1 - tests/handlers/test_message.py | 1 - 4 files changed, 3 insertions(+), 8 deletions(-) (limited to 'tests') diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index c52e6824d3..b8f541425b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -437,7 +437,6 @@ class EventCreationHandler: self, requester: Requester, event_dict: dict, - token_id: Optional[str] = None, txn_id: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, require_consent: bool = True, @@ -453,7 +452,6 @@ class EventCreationHandler: Args: requester event_dict: An entire event - token_id txn_id prev_event_ids: the forward extremities to use as the prev_events for the @@ -511,8 +509,8 @@ class EventCreationHandler: if require_consent and not is_exempt: await self.assert_accepted_privacy_policy(requester) - if token_id is not None: - builder.internal_metadata.token_id = token_id + if requester.access_token_id is not None: + builder.internal_metadata.token_id = requester.access_token_id if txn_id is not None: builder.internal_metadata.txn_id = txn_id @@ -726,7 +724,7 @@ class EventCreationHandler: return event, event.internal_metadata.stream_ordering event, context = await self.create_event( - requester, event_dict, token_id=requester.access_token_id, txn_id=txn_id + requester, event_dict, txn_id=txn_id ) assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % ( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 93ed51063a..ec300d8877 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -214,7 +214,6 @@ class RoomCreationHandler(BaseHandler): "replacement_room": new_room_id, }, }, - token_id=requester.access_token_id, ) old_room_version = await self.store.get_room_version_id(old_room_id) await self.auth.check_from_context( diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 0080eeaf8d..d9b98d8acd 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -193,7 +193,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # For backwards compatibility: "membership": membership, }, - token_id=requester.access_token_id, txn_id=txn_id, prev_event_ids=prev_event_ids, require_consent=require_consent, diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py index 64e28bc639..9f6f21a6e2 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -66,7 +66,6 @@ class EventCreationTestCase(unittest.HomeserverTestCase): "sender": self.requester.user.to_string(), "content": {"msgtype": "m.text", "body": random_string(5)}, }, - token_id=self.token_id, txn_id=txn_id, ) ) -- cgit 1.5.1 From 617e8a46538af56bd5abbb2c9e4df8025841c338 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 13 Oct 2020 18:53:56 +0100 Subject: Allow ThirdPartyRules modules to replace event content Support returning a new event dict from `check_event_allowed`. 
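As an illustration of the new contract, a module might look like the sketch
below. The sketch is not part of the patch: the module class and the
`foo.bar.tagged` content key are invented, and only the
`check_event_allowed(event, state)` signature and its bool-or-dict return
value come from the change itself.

from typing import Union

from synapse.events import EventBase


class ExampleThirdPartyRules:
    """Hypothetical module configured under `third_party_event_rules`."""

    def __init__(self, config, http_client):
        self._config = config

    async def check_event_allowed(
        self, event: EventBase, state: dict
    ) -> Union[bool, dict]:
        if event.type == "foo.bar.forbidden":
            # As before: reject the event with M_FORBIDDEN.
            return False

        if event.type == "m.room.message":
            # New in this commit: return a replacement event dict instead of
            # mutating `event` in place; Synapse rebuilds (and re-signs) the
            # event from the returned dict.
            d = event.get_dict()
            d["content"] = dict(d["content"])  # copy, in case content is frozen
            d["content"]["foo.bar.tagged"] = True
            return d

        # Everything else is allowed through unchanged.
        return True

The updated `test_modify_event` below exercises exactly this dict-returning
path, and a later commit in this series freezes the event before the callback,
which is why the sketch copies `content` rather than assigning to attributes.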
--- synapse/events/third_party_rules.py | 12 ++++-- synapse/handlers/message.py | 64 ++++++++++++++++++++++++++++- tests/rest/client/test_third_party_rules.py | 8 ++-- 3 files changed, 75 insertions(+), 9 deletions(-) (limited to 'tests') diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 1535cc5339..a9aabe00df 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -12,7 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Callable + +from typing import Callable, Union from synapse.events import EventBase from synapse.events.snapshot import EventContext @@ -44,15 +45,20 @@ class ThirdPartyEventRules: async def check_event_allowed( self, event: EventBase, context: EventContext - ) -> bool: + ) -> Union[bool, dict]: """Check if a provided event should be allowed in the given context. + The module can return: + * True: the event is allowed. + * False: the event is not allowed, and should be rejected with M_FORBIDDEN. + * a dict: replacement event data. + Args: event: The event to be checked. context: The context of the event. Returns: - True if the event should be allowed, False if not. + The result from the ThirdPartyRules module, as above """ if self.third_party_rules is None: return True diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 987c759791..0c6aec347e 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -795,16 +795,22 @@ class EventCreationHandler: if requester: context.app_service = requester.app_service - event_allowed = await self.third_party_event_rules.check_event_allowed( + third_party_result = await self.third_party_event_rules.check_event_allowed( event, context ) - if not event_allowed: + if not third_party_result: logger.info( "Event %s forbidden by third-party rules", event, ) raise SynapseError( 403, "This event is not allowed in this context", Codes.FORBIDDEN ) + elif isinstance(third_party_result, dict): + # the third-party rules want to replace the event. We'll need to build a new + # event. + event, context = await self._rebuild_event_after_third_party_rules( + third_party_result, event + ) self.validator.validate_new(event, self.config) @@ -1294,3 +1300,57 @@ class EventCreationHandler: room_id, ) del self._rooms_to_exclude_from_dummy_event_insertion[room_id] + + async def _rebuild_event_after_third_party_rules( + self, third_party_result: dict, original_event: EventBase + ) -> Tuple[EventBase, EventContext]: + # the third_party_event_rules want to replace the event. + # we do some basic checks, and then return the replacement event and context. + + # Construct a new EventBuilder and validate it, which helps with the + # rest of these checks. + try: + builder = self.event_builder_factory.for_room_version( + original_event.room_version, third_party_result + ) + self.validator.validate_builder(builder) + except SynapseError as e: + raise Exception( + "Third party rules module created an invalid event: " + e.msg, + ) + + immutable_fields = [ + # changing the room is going to break things: we've already checked that the + # room exists, and are holding a concurrency limiter token for that room. + # Also, we might need to use a different room version. 
+ "room_id", + # changing the type or state key might work, but we'd need to check that the + # calling functions aren't making assumptions about them. + "type", + "state_key", + ] + + for k in immutable_fields: + if getattr(builder, k, None) != original_event.get(k): + raise Exception( + "Third party rules module created an invalid event: " + "cannot change field " + k + ) + + # check that the new sender belongs to this HS + if not self.hs.is_mine_id(builder.sender): + raise Exception( + "Third party rules module created an invalid event: " + "invalid sender " + builder.sender + ) + + # copy over the original internal metadata + for k, v in original_event.internal_metadata.get_dict().items(): + setattr(builder.internal_metadata, k, v) + + event = await builder.build(prev_event_ids=original_event.prev_event_ids()) + + # we rebuild the event context, to be on the safe side. If nothing else, + # delta_ids might need an update. + context = await self.state.compute_event_context(event) + return event, context diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index b737625e33..d404550800 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -115,12 +115,12 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): self.assertEquals(channel.result["code"], b"403", channel.result) def test_modify_event(self): - """Tests that the module can successfully tweak an event before it is persisted. - """ + """The module can return a modified version of the event""" # first patch the event checker so that it will modify the event async def check(ev: EventBase, state): - ev.content = {"x": "y"} - return True + d = ev.get_dict() + d["content"] = {"x": "y"} + return d current_rules_module().check_event_allowed = check -- cgit 1.5.1 From 898196f1cca419c0d2b60529c86ddff3cea83072 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 13 Oct 2020 22:02:41 +0100 Subject: guard against accidental modification --- synapse/events/__init__.py | 6 ++++++ synapse/events/third_party_rules.py | 7 ++++--- tests/rest/client/test_third_party_rules.py | 20 ++++++++++++++++++++ 3 files changed, 30 insertions(+), 3 deletions(-) (limited to 'tests') diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 7a51d0a22f..65df62107f 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -312,6 +312,12 @@ class EventBase(metaclass=abc.ABCMeta): """ return [e for e, _ in self.auth_events] + def freeze(self): + """'Freeze' the event dict, so it cannot be modified by accident""" + + # this will be a no-op if the event dict is already frozen. + self._dict = freeze(self._dict) + class FrozenEvent(EventBase): format_version = EventFormatVersions.V1 # All events of this type are V1 diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index a9aabe00df..77fbd3f68a 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -69,9 +69,10 @@ class ThirdPartyEventRules: events = await self.store.get_events(prev_state_ids.values()) state_events = {(ev.type, ev.state_key): ev for ev in events.values()} - # The module can modify the event slightly if it wants, but caution should be - # exercised, and it's likely to go very wrong if applied to events received over - # federation. + # Ensure that the event is frozen, to make sure that the module is not tempted + # to try to modify it. 
Any attempt to modify it at this point will invalidate + # the hashes and signatures. + event.freeze() return await self.third_party_rules.check_event_allowed(event, state_events) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index d404550800..0048bea54a 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -114,6 +114,26 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) + def test_cannot_modify_event(self): + """cannot accidentally modify an event before it is persisted""" + + # first patch the event checker so that it will try to modify the event + async def check(ev: EventBase, state): + ev.content = {"x": "y"} + return True + + current_rules_module().check_event_allowed = check + + # now send the event + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/send/modifyme/1" % self.room_id, + {"x": "x"}, + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.result["code"], b"500", channel.result) + def test_modify_event(self): """The module can return a modified version of the event""" # first patch the event checker so that it will modify the event -- cgit 1.5.1 From a34b17e492129adba260915de401ab1cbabf6215 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 13 Oct 2020 23:14:35 +0100 Subject: Simplify `_locally_reject_invite` Update `EventCreationHandler.create_event` to accept an auth_events param, and use it in `_locally_reject_invite` instead of reinventing the wheel. --- synapse/events/builder.py | 21 ++++---- synapse/handlers/message.py | 22 ++++++++- synapse/handlers/room_member.py | 58 ++++++----------------- tests/handlers/test_presence.py | 2 +- tests/replication/test_federation_sender_shard.py | 2 +- tests/storage/test_redaction.py | 4 +- 6 files changed, 52 insertions(+), 57 deletions(-) (limited to 'tests') diff --git a/synapse/events/builder.py b/synapse/events/builder.py index b6c47be646..df4f950fec 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -97,32 +97,37 @@ class EventBuilder: def is_state(self): return self._state_key is not None - async def build(self, prev_event_ids: List[str]) -> EventBase: + async def build( + self, prev_event_ids: List[str], auth_event_ids: Optional[List[str]] + ) -> EventBase: """Transform into a fully signed and hashed event Args: prev_event_ids: The event IDs to use as the prev events + auth_event_ids: The event IDs to use as the auth events. + Should normally be set to None, which will cause them to be calculated + based on the room state at the prev_events. Returns: The signed and hashed event. """ - - state_ids = await self._state.get_current_state_ids( - self.room_id, prev_event_ids - ) - auth_ids = self._auth.compute_auth_events(self, state_ids) + if auth_event_ids is None: + state_ids = await self._state.get_current_state_ids( + self.room_id, prev_event_ids + ) + auth_event_ids = self._auth.compute_auth_events(self, state_ids) format_version = self.room_version.event_format if format_version == EventFormatVersions.V1: # The types of auth/prev events changes between event versions. 
auth_events = await self._store.add_event_hashes( - auth_ids + auth_event_ids ) # type: Union[List[str], List[Tuple[str, Dict[str, str]]]] prev_events = await self._store.add_event_hashes( prev_event_ids ) # type: Union[List[str], List[Tuple[str, Dict[str, str]]]] else: - auth_events = auth_ids + auth_events = auth_event_ids prev_events = prev_event_ids old_depth = await self._store.get_max_depth_of(prev_event_ids) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index b8f541425b..f18f882596 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -439,6 +439,7 @@ class EventCreationHandler: event_dict: dict, txn_id: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, + auth_event_ids: Optional[List[str]] = None, require_consent: bool = True, ) -> Tuple[EventBase, EventContext]: """ @@ -458,6 +459,12 @@ class EventCreationHandler: new event. If None, they will be requested from the database. + + auth_event_ids: + The event ids to use as the auth_events for the new event. + Should normally be left as None, which will cause them to be calculated + based on the room state at the prev_events. + require_consent: Whether to check if the requester has consented to the privacy policy. Raises: @@ -516,7 +523,10 @@ class EventCreationHandler: builder.internal_metadata.txn_id = txn_id event, context = await self.create_new_client_event( - builder=builder, requester=requester, prev_event_ids=prev_event_ids, + builder=builder, + requester=requester, + prev_event_ids=prev_event_ids, + auth_event_ids=auth_event_ids, ) # In an ideal world we wouldn't need the second part of this condition. However, @@ -755,6 +765,7 @@ class EventCreationHandler: builder: EventBuilder, requester: Optional[Requester] = None, prev_event_ids: Optional[List[str]] = None, + auth_event_ids: Optional[List[str]] = None, ) -> Tuple[EventBase, EventContext]: """Create a new event for a local client @@ -767,6 +778,11 @@ class EventCreationHandler: If None, they will be requested from the database. + auth_event_ids: + The event ids to use as the auth_events for the new event. + Should normally be left as None, which will cause them to be calculated + based on the room state at the prev_events. 
+ Returns: Tuple of created event, context """ @@ -788,7 +804,9 @@ class EventCreationHandler: builder.type == EventTypes.Create or len(prev_event_ids) > 0 ), "Attempting to create an event with no prev_events" - event = await builder.build(prev_event_ids=prev_event_ids) + event = await builder.build( + prev_event_ids=prev_event_ids, auth_event_ids=auth_event_ids + ) context = await self.state.compute_event_context(event) if requester: context.app_service = requester.app_service diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index d9b98d8acd..ec784030e9 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -17,12 +17,10 @@ import abc import logging import random from http import HTTPStatus -from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union - -from unpaddedbase64 import encode_base64 +from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple from synapse import types -from synapse.api.constants import MAX_DEPTH, AccountDataTypes, EventTypes, Membership +from synapse.api.constants import AccountDataTypes, EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, @@ -31,12 +29,8 @@ from synapse.api.errors import ( SynapseError, ) from synapse.api.ratelimiting import Ratelimiter -from synapse.api.room_versions import EventFormatVersions -from synapse.crypto.event_signing import compute_event_reference_hash from synapse.events import EventBase -from synapse.events.builder import create_local_event_from_event_dict from synapse.events.snapshot import EventContext -from synapse.events.validator import EventValidator from synapse.storage.roommember import RoomsForUser from synapse.types import JsonDict, Requester, RoomAlias, RoomID, StateMap, UserID from synapse.util.async_helpers import Linearizer @@ -1132,31 +1126,10 @@ class RoomMemberMasterHandler(RoomMemberHandler): room_id = invite_event.room_id target_user = invite_event.state_key - room_version = await self.store.get_room_version(room_id) content["membership"] = Membership.LEAVE - # the auth events for the new event are the same as that of the invite, plus - # the invite itself. - # - # the prev_events are just the invite. - invite_hash = invite_event.event_id # type: Union[str, Tuple] - if room_version.event_format == EventFormatVersions.V1: - alg, h = compute_event_reference_hash(invite_event) - invite_hash = (invite_event.event_id, {alg: encode_base64(h)}) - - auth_events = tuple(invite_event.auth_events) + (invite_hash,) - prev_events = (invite_hash,) - - # we cap depth of generated events, to ensure that they are not - # rejected by other servers (and so that they can be persisted in - # the db) - depth = min(invite_event.depth + 1, MAX_DEPTH) - event_dict = { - "depth": depth, - "auth_events": auth_events, - "prev_events": prev_events, "type": EventTypes.Member, "room_id": room_id, "sender": target_user, @@ -1164,24 +1137,23 @@ class RoomMemberMasterHandler(RoomMemberHandler): "state_key": target_user, } - event = create_local_event_from_event_dict( - clock=self.clock, - hostname=self.hs.hostname, - signing_key=self.hs.signing_key, - room_version=room_version, - event_dict=event_dict, + # the auth events for the new event are the same as that of the invite, plus + # the invite itself. + # + # the prev_events are just the invite. 
+        prev_event_ids = [invite_event.event_id]
+        auth_event_ids = invite_event.auth_event_ids() + prev_event_ids
+
+        event, context = await self.event_creation_handler.create_event(
+            requester,
+            event_dict,
+            txn_id=txn_id,
+            prev_event_ids=prev_event_ids,
+            auth_event_ids=auth_event_ids,
+        )
         event.internal_metadata.outlier = True
         event.internal_metadata.out_of_band_membership = True
-        if txn_id is not None:
-            event.internal_metadata.txn_id = txn_id
-        if requester.access_token_id is not None:
-            event.internal_metadata.token_id = requester.access_token_id
-
-        EventValidator().validate_new(event, self.config)
-        context = await self.state_handler.compute_event_context(event)
-        context.app_service = requester.app_service
         result_event = await self.event_creation_handler.handle_new_client_event(
             requester, event, context, extra_users=[UserID.from_string(target_user)],
         )
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index 914c82e7a8..8ed67640f8 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -615,7 +615,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
             self.store.get_latest_event_ids_in_room(room_id)
         )
 
-        event = self.get_success(builder.build(prev_event_ids))
+        event = self.get_success(builder.build(prev_event_ids, None))
 
         self.get_success(self.federation_handler.on_receive_pdu(hostname, event))
diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py
index 9c4a9c3563..779745ae9d 100644
--- a/tests/replication/test_federation_sender_shard.py
+++ b/tests/replication/test_federation_sender_shard.py
@@ -226,7 +226,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
         }
 
         builder = factory.for_room_version(room_version, event_dict)
-        join_event = self.get_success(builder.build(prev_event_ids))
+        join_event = self.get_success(builder.build(prev_event_ids, None))
 
         self.get_success(federation.on_send_join_request(remote_server, join_event))
         self.replicate()
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index 1ea35d60c1..d4f9e809db 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -236,9 +236,9 @@ class RedactionTestCase(unittest.HomeserverTestCase):
             self._event_id = event_id
 
         @defer.inlineCallbacks
-        def build(self, prev_event_ids):
+        def build(self, prev_event_ids, auth_event_ids):
             built_event = yield defer.ensureDeferred(
-                self._base_builder.build(prev_event_ids)
+                self._base_builder.build(prev_event_ids, auth_event_ids)
            )

            built_event._event_id = self._event_id
-- cgit 1.5.1

From 921a3f8a59da0f8fe706a22627f464a74b54c992 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 14 Oct 2020 13:27:51 +0100
Subject: Fix not sending events over federation when using sharded event persisters (#8536)

* Fix outbound federation with multiple event persisters.

We incorrectly notified federation senders that the minimum persisted
stream position had advanced when we got an `RDATA` from an event
persister. Notifying federation senders already happens correctly in
the notifier, so we just delete the offending line.

* Change some interfaces to use RoomStreamToken.

By enforcing use of `RoomStreamTokens` we make it less likely that
people pass in random ints that they got from somewhere random.
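To sketch why the type change helps, consider a simplified stand-in for
`synapse.types.RoomStreamToken`. The real class is attrs-based and also
carries per-writer stream positions when several event persisters are active;
this cut-down version keeps only the fields the diff below relies on.

import attr


@attr.s(slots=True, frozen=True)
class RoomStreamToken:
    # Simplified: the real token also tracks a map of per-instance stream
    # positions, which is what the "vector clock component" comments below
    # refer to.
    topological = attr.ib(type=int, default=None)
    stream = attr.ib(type=int, default=0)


def notify_new_events(max_token: RoomStreamToken) -> None:
    # Callers can no longer hand over a bare int of unclear provenance;
    # consumers that only want a single linear position must opt in
    # explicitly by taking the minimum (`.stream`) component.
    current_id = max_token.stream
    print("poking senders up to stream position", current_id)


notify_new_events(RoomStreamToken(None, 42))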
--- changelog.d/8536.bugfix | 1 + synapse/app/generic_worker.py | 4 ---- synapse/federation/send_queue.py | 2 +- synapse/federation/sender/__init__.py | 9 +++++++-- synapse/handlers/appservice.py | 11 +++++++---- synapse/notifier.py | 6 +++--- synapse/push/emailpusher.py | 8 +++++++- synapse/push/httppusher.py | 8 +++++++- synapse/push/pusherpool.py | 10 ++++++++-- tests/handlers/test_appservice.py | 13 ++++++++++--- 10 files changed, 51 insertions(+), 21 deletions(-) create mode 100644 changelog.d/8536.bugfix (limited to 'tests') diff --git a/changelog.d/8536.bugfix b/changelog.d/8536.bugfix new file mode 100644 index 0000000000..8d238cc008 --- /dev/null +++ b/changelog.d/8536.bugfix @@ -0,0 +1 @@ +Fix not sending events over federation when using sharded event writers. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index d53181deb1..1b511890aa 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -790,10 +790,6 @@ class FederationSenderHandler: send_queue.process_rows_for_federation(self.federation_sender, rows) await self.update_token(token) - # We also need to poke the federation sender when new events happen - elif stream_name == "events": - self.federation_sender.notify_new_events(token) - # ... and when new receipts happen elif stream_name == ReceiptsStream.NAME: await self._on_new_receipts(rows) diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 8e46957d15..5f1bf492c1 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -188,7 +188,7 @@ class FederationRemoteSendQueue: for key in keys[:i]: del self.edus[key] - def notify_new_events(self, current_id): + def notify_new_events(self, max_token): """As per FederationSender""" # We don't need to replicate this as it gets sent down a different # stream. diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index e33b29a42c..604cfd1935 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -40,7 +40,7 @@ from synapse.metrics import ( events_processed_counter, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import ReadReceipt +from synapse.types import ReadReceipt, RoomStreamToken from synapse.util.metrics import Measure, measure_func logger = logging.getLogger(__name__) @@ -154,10 +154,15 @@ class FederationSender: self._per_destination_queues[destination] = queue return queue - def notify_new_events(self, current_id: int) -> None: + def notify_new_events(self, max_token: RoomStreamToken) -> None: """This gets called when we have some new events we might want to send out to other servers. """ + # We just use the minimum stream ordering and ignore the vector clock + # component. This is safe to do as long as we *always* ignore the vector + # clock components. 
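+        # (`max_token.stream` is the minimum stream ordering across all event
+        # persisters, i.e. a position that every writer is known to have
+        # reached.)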
+ current_id = max_token.stream + self._last_poked_id = max(current_id, self._last_poked_id) if self._is_processing: diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 9d4e87dad6..c8d5e58035 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -27,6 +27,7 @@ from synapse.metrics import ( event_processing_loop_room_count, ) from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import RoomStreamToken from synapse.util.metrics import Measure logger = logging.getLogger(__name__) @@ -47,15 +48,17 @@ class ApplicationServicesHandler: self.current_max = 0 self.is_processing = False - async def notify_interested_services(self, current_id): + async def notify_interested_services(self, max_token: RoomStreamToken): """Notifies (pushes) all application services interested in this event. Pushing is done asynchronously, so this method won't block for any prolonged length of time. - - Args: - current_id(int): The current maximum ID. """ + # We just use the minimum stream ordering and ignore the vector clock + # component. This is safe to do as long as we *always* ignore the vector + # clock components. + current_id = max_token.stream + services = self.store.get_app_services() if not services or not self.notify_appservices: return diff --git a/synapse/notifier.py b/synapse/notifier.py index 13adeed01e..51c830c91e 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -319,19 +319,19 @@ class Notifier: ) if self.federation_sender: - self.federation_sender.notify_new_events(max_room_stream_token.stream) + self.federation_sender.notify_new_events(max_room_stream_token) async def _notify_app_services(self, max_room_stream_token: RoomStreamToken): try: await self.appservice_handler.notify_interested_services( - max_room_stream_token.stream + max_room_stream_token ) except Exception: logger.exception("Error notifying application services of event") async def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: - await self._pusher_pool.on_new_notifications(max_room_stream_token.stream) + await self._pusher_pool.on_new_notifications(max_room_stream_token) except Exception: logger.exception("Error pusher pool of event") diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 28bd8ab748..c6763971ee 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -18,6 +18,7 @@ import logging from twisted.internet.error import AlreadyCalled, AlreadyCancelled from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import RoomStreamToken logger = logging.getLogger(__name__) @@ -91,7 +92,12 @@ class EmailPusher: pass self.timed_call = None - def on_new_notifications(self, max_stream_ordering): + def on_new_notifications(self, max_token: RoomStreamToken): + # We just use the minimum stream ordering and ignore the vector clock + # component. This is safe to do as long as we *always* ignore the vector + # clock components. 
+ max_stream_ordering = max_token.stream + if self.max_stream_ordering: self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 26706bf3e1..793d0db2d9 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -23,6 +23,7 @@ from synapse.api.constants import EventTypes from synapse.logging import opentracing from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push import PusherConfigException +from synapse.types import RoomStreamToken from . import push_rule_evaluator, push_tools @@ -114,7 +115,12 @@ class HttpPusher: if should_check_for_notifs: self._start_processing() - def on_new_notifications(self, max_stream_ordering): + def on_new_notifications(self, max_token: RoomStreamToken): + # We just use the minimum stream ordering and ignore the vector clock + # component. This is safe to do as long as we *always* ignore the vector + # clock components. + max_stream_ordering = max_token.stream + self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering or 0 ) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 76150e117b..0080c68ce2 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -24,6 +24,7 @@ from synapse.push import PusherConfigException from synapse.push.emailpusher import EmailPusher from synapse.push.httppusher import HttpPusher from synapse.push.pusher import PusherFactory +from synapse.types import RoomStreamToken from synapse.util.async_helpers import concurrently_execute if TYPE_CHECKING: @@ -186,11 +187,16 @@ class PusherPool: ) await self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"]) - async def on_new_notifications(self, max_stream_id: int): + async def on_new_notifications(self, max_token: RoomStreamToken): if not self.pushers: # nothing to do here. return + # We just use the minimum stream ordering and ignore the vector clock + # component. This is safe to do as long as we *always* ignore the vector + # clock components. 
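+        # (Note that the full token, not just this scalar position, is what
+        # gets passed on to the individual pushers below.)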
+ max_stream_id = max_token.stream + if max_stream_id < self._last_room_stream_id_seen: # Nothing to do return @@ -214,7 +220,7 @@ class PusherPool: if u in self.pushers: for p in self.pushers[u].values(): - p.on_new_notifications(max_stream_id) + p.on_new_notifications(max_token) except Exception: logger.exception("Exception in pusher on_new_notifications") diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 2a0b7c1b56..ee4f3da31c 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -18,6 +18,7 @@ from mock import Mock from twisted.internet import defer from synapse.handlers.appservice import ApplicationServicesHandler +from synapse.types import RoomStreamToken from tests.test_utils import make_awaitable from tests.utils import MockClock @@ -61,7 +62,9 @@ class AppServiceHandlerTestCase(unittest.TestCase): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred(self.handler.notify_interested_services(0)) + yield defer.ensureDeferred( + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + ) self.mock_scheduler.submit_event_for_as.assert_called_once_with( interested_service, event ) @@ -80,7 +83,9 @@ class AppServiceHandlerTestCase(unittest.TestCase): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred(self.handler.notify_interested_services(0)) + yield defer.ensureDeferred( + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + ) self.mock_as_api.query_user.assert_called_once_with(services[0], user_id) @defer.inlineCallbacks @@ -97,7 +102,9 @@ class AppServiceHandlerTestCase(unittest.TestCase): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred(self.handler.notify_interested_services(0)) + yield defer.ensureDeferred( + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + ) self.assertFalse( self.mock_as_api.query_user.called, "query_user called when it shouldn't have been.", -- cgit 1.5.1 From 1264c8ac89e5bfa757acee8d26437f14621206a4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 14 Oct 2020 13:53:20 +0100 Subject: Add basic tests for sync/pagination with vector clock tokens. (#8488) These are tests for #8439 --- changelog.d/8488.misc | 1 + tests/replication/test_sharded_event_persister.py | 217 ++++++++++++++++++++++ tests/unittest.py | 32 +++- 3 files changed, 249 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8488.misc (limited to 'tests') diff --git a/changelog.d/8488.misc b/changelog.d/8488.misc new file mode 100644 index 0000000000..237cb3b311 --- /dev/null +++ b/changelog.d/8488.misc @@ -0,0 +1 @@ +Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py index 6068d14905..82cf033d4e 100644 --- a/tests/replication/test_sharded_event_persister.py +++ b/tests/replication/test_sharded_event_persister.py @@ -14,8 +14,12 @@ # limitations under the License. 
 import logging

+from mock import patch
+
+from synapse.api.room_versions import RoomVersion
 from synapse.rest import admin
 from synapse.rest.client.v1 import login, room
+from synapse.rest.client.v2_alpha import sync

 from tests.replication._base import BaseMultiWorkerStreamTestCase
 from tests.utils import USE_POSTGRES_FOR_TESTS
@@ -36,6 +40,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         admin.register_servlets_for_client_rest_resource,
         room.register_servlets,
         login.register_servlets,
+        sync.register_servlets,
     ]

     def prepare(self, reactor, clock, hs):
@@ -43,6 +48,9 @@
         self.other_user_id = self.register_user("otheruser", "pass")
         self.other_access_token = self.login("otheruser", "pass")

+        self.room_creator = self.hs.get_room_creation_handler()
+        self.store = hs.get_datastore()
+
     def default_config(self):
         conf = super().default_config()
         conf["redis"] = {"enabled": "true"}
@@ -53,6 +61,29 @@
         }
         return conf

+    def _create_room(self, room_id: str, user_id: str, tok: str):
+        """Create a room with the given room_id
+        """
+
+        # We control the room ID generation by patching out the
+        # `_generate_room_id` method
+        async def generate_room(
+            creator_id: str, is_public: bool, room_version: RoomVersion
+        ):
+            await self.store.store_room(
+                room_id=room_id,
+                room_creator_user_id=creator_id,
+                is_public=is_public,
+                room_version=room_version,
+            )
+            return room_id
+
+        with patch(
+            "synapse.handlers.room.RoomCreationHandler._generate_room_id"
+        ) as mock:
+            mock.side_effect = generate_room
+            self.helper.create_room_as(user_id, tok=tok)
+
     def test_basic(self):
         """Simple test to ensure that multiple rooms can be created and joined,
         and that different rooms get handled by different instances.
@@ -100,3 +131,189 @@
         self.assertTrue(persisted_on_1)
         self.assertTrue(persisted_on_2)
+
+    def test_vector_clock_token(self):
+        """Tests that using a stream token with a vector clock component works
+        correctly with basic /sync and /messages usage.
+        """
+
+        self.make_worker_hs(
+            "synapse.app.generic_worker", {"worker_name": "worker1"},
+        )
+
+        worker_hs2 = self.make_worker_hs(
+            "synapse.app.generic_worker", {"worker_name": "worker2"},
+        )
+
+        sync_hs = self.make_worker_hs(
+            "synapse.app.generic_worker", {"worker_name": "sync"},
+        )
+
+        # Specially selected room IDs that get persisted on different workers.
+        room_id1 = "!foo:test"
+        room_id2 = "!baz:test"
+
+        self.assertEqual(
+            self.hs.config.worker.events_shard_config.get_instance(room_id1), "worker1"
+        )
+        self.assertEqual(
+            self.hs.config.worker.events_shard_config.get_instance(room_id2), "worker2"
+        )
+
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        store = self.hs.get_datastore()
+
+        # Create two rooms on the different workers.
+        self._create_room(room_id1, user_id, access_token)
+        self._create_room(room_id2, user_id, access_token)
+
+        # The other user joins
+        self.helper.join(
+            room=room_id1, user=self.other_user_id, tok=self.other_access_token
+        )
+        self.helper.join(
+            room=room_id2, user=self.other_user_id, tok=self.other_access_token
+        )
+
+        # Do an initial sync so that we're up to date.
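+        # (`render_on_worker` handles the request on the given worker
+        # homeserver, here the dedicated "sync" worker, rather than on the
+        # main process.)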
+        request, channel = self.make_request("GET", "/sync", access_token=access_token)
+        self.render_on_worker(sync_hs, request)
+        next_batch = channel.json_body["next_batch"]
+
+        # We now gut wrench into the events stream MultiWriterIdGenerator on
+        # worker2 to mimic it getting stuck persisting an event. This ensures
+        # that when we send an event on worker1 we end up in a state where
+        # worker2's event stream position lags that on worker1, resulting in a
+        # RoomStreamToken with a non-empty instance map component.
+        #
+        # Worker2's event stream position will not advance until we call
+        # __aexit__ again.
+        actx = worker_hs2.get_datastore()._stream_id_gen.get_next()
+        self.get_success(actx.__aenter__())
+
+        response = self.helper.send(room_id1, body="Hi!", tok=self.other_access_token)
+        first_event_in_room1 = response["event_id"]
+
+        # Assert that the current stream token has an instance map component, as
+        # we are trying to test vector clock tokens.
+        room_stream_token = store.get_room_max_token()
+        self.assertNotEqual(len(room_stream_token.instance_map), 0)
+
+        # Check that syncing still gets the new event, despite the gap in the
+        # stream IDs.
+        request, channel = self.make_request(
+            "GET", "/sync?since={}".format(next_batch), access_token=access_token
+        )
+        self.render_on_worker(sync_hs, request)
+
+        # We should only see the new event and nothing else
+        self.assertIn(room_id1, channel.json_body["rooms"]["join"])
+        self.assertNotIn(room_id2, channel.json_body["rooms"]["join"])
+
+        events = channel.json_body["rooms"]["join"][room_id1]["timeline"]["events"]
+        self.assertListEqual(
+            [first_event_in_room1], [event["event_id"] for event in events]
+        )
+
+        # Get the next batch and make sure it's a vector clock style token.
+        vector_clock_token = channel.json_body["next_batch"]
+        self.assertTrue(vector_clock_token.startswith("m"))
+
+        # Now that we've got a vector clock token we finish the fake event
+        # persist that we started above.
+        self.get_success(actx.__aexit__(None, None, None))
+
+        # Now try and send an event to the other room so that we can test that
+        # the vector clock style token works as a `since` token.
+        response = self.helper.send(room_id2, body="Hi!", tok=self.other_access_token)
+        first_event_in_room2 = response["event_id"]
+
+        request, channel = self.make_request(
+            "GET",
+            "/sync?since={}".format(vector_clock_token),
+            access_token=access_token,
+        )
+        self.render_on_worker(sync_hs, request)
+
+        self.assertNotIn(room_id1, channel.json_body["rooms"]["join"])
+        self.assertIn(room_id2, channel.json_body["rooms"]["join"])
+
+        events = channel.json_body["rooms"]["join"][room_id2]["timeline"]["events"]
+        self.assertListEqual(
+            [first_event_in_room2], [event["event_id"] for event in events]
+        )
+
+        next_batch = channel.json_body["next_batch"]
+
+        # We also want to test that the vector clock style token works with
+        # pagination. We do this by sending a couple of new events into each
+        # room and syncing again to get a prev_batch token for each room, then
+        # paginating from there back to the vector clock token.
+ self.helper.send(room_id1, body="Hi again!", tok=self.other_access_token) + self.helper.send(room_id2, body="Hi again!", tok=self.other_access_token) + + request, channel = self.make_request( + "GET", "/sync?since={}".format(next_batch), access_token=access_token + ) + self.render_on_worker(sync_hs, request) + + prev_batch1 = channel.json_body["rooms"]["join"][room_id1]["timeline"][ + "prev_batch" + ] + prev_batch2 = channel.json_body["rooms"]["join"][room_id2]["timeline"][ + "prev_batch" + ] + + # Paginating back in the first room should not produce any results, as + # no events have happened in it. This tests that we are correctly + # filtering results based on the vector clock portion. + request, channel = self.make_request( + "GET", + "/rooms/{}/messages?from={}&to={}&dir=b".format( + room_id1, prev_batch1, vector_clock_token + ), + access_token=access_token, + ) + self.render_on_worker(sync_hs, request) + self.assertListEqual([], channel.json_body["chunk"]) + + # Paginating back on the second room should produce the first event + # again. This tests that pagination isn't completely broken. + request, channel = self.make_request( + "GET", + "/rooms/{}/messages?from={}&to={}&dir=b".format( + room_id2, prev_batch2, vector_clock_token + ), + access_token=access_token, + ) + self.render_on_worker(sync_hs, request) + self.assertEqual(len(channel.json_body["chunk"]), 1) + self.assertEqual( + channel.json_body["chunk"][0]["event_id"], first_event_in_room2 + ) + + # Paginating forwards should give the same results + request, channel = self.make_request( + "GET", + "/rooms/{}/messages?from={}&to={}&dir=f".format( + room_id1, vector_clock_token, prev_batch1 + ), + access_token=access_token, + ) + self.render_on_worker(sync_hs, request) + self.assertListEqual([], channel.json_body["chunk"]) + + request, channel = self.make_request( + "GET", + "/rooms/{}/messages?from={}&to={}&dir=f".format( + room_id2, vector_clock_token, prev_batch2, + ), + access_token=access_token, + ) + self.render_on_worker(sync_hs, request) + self.assertEqual(len(channel.json_body["chunk"]), 1) + self.assertEqual( + channel.json_body["chunk"][0]["event_id"], first_event_in_room2 + ) diff --git a/tests/unittest.py b/tests/unittest.py index 6c1661c92c..040b126a27 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -20,7 +20,7 @@ import hmac import inspect import logging import time -from typing import Optional, Tuple, Type, TypeVar, Union +from typing import Optional, Tuple, Type, TypeVar, Union, overload from mock import Mock, patch @@ -364,6 +364,36 @@ class HomeserverTestCase(TestCase): Function to optionally be overridden in subclasses. """ + # Annoyingly mypy doesn't seem to pick up the fact that T is SynapseRequest + # when the `request` arg isn't given, so we define an explicit override to + # cover that case. + @overload + def make_request( + self, + method: Union[bytes, str], + path: Union[bytes, str], + content: Union[bytes, dict] = b"", + access_token: Optional[str] = None, + shorthand: bool = True, + federation_auth_origin: str = None, + content_is_form: bool = False, + ) -> Tuple[SynapseRequest, FakeChannel]: + ... + + @overload + def make_request( + self, + method: Union[bytes, str], + path: Union[bytes, str], + content: Union[bytes, dict] = b"", + access_token: Optional[str] = None, + request: Type[T] = SynapseRequest, + shorthand: bool = True, + federation_auth_origin: str = None, + content_is_form: bool = False, + ) -> Tuple[T, FakeChannel]: + ... 
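+    # (The two overload stubs above exist purely for the type checker; their
+    # `...` bodies never execute. The implementation below is what runs at
+    # runtime.)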
+ def make_request( self, method: Union[bytes, str], -- cgit 1.5.1 From 9f87da0a84f93096e228f01f1139c9b5db8ea3d4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 14 Oct 2020 19:43:37 +0100 Subject: Rename Cache->DeferredCache --- synapse/replication/slave/storage/client_ips.py | 6 +++--- synapse/storage/databases/main/client_ips.py | 4 ++-- synapse/storage/databases/main/devices.py | 4 ++-- synapse/storage/databases/main/events_worker.py | 4 ++-- synapse/util/caches/descriptors.py | 19 ++++++++++++------- tests/storage/test__base.py | 10 +++++----- tests/test_metrics.py | 4 ++-- tests/util/caches/test_descriptors.py | 4 ++-- 8 files changed, 30 insertions(+), 25 deletions(-) (limited to 'tests') diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index 273d627fad..40ea78a353 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -15,7 +15,7 @@ from synapse.storage.database import DatabasePool from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY -from synapse.util.caches.descriptors import Cache +from synapse.util.caches.descriptors import DeferredCache from ._base import BaseSlavedStore @@ -24,9 +24,9 @@ class SlavedClientIpStore(BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self.client_ip_last_seen = Cache( + self.client_ip_last_seen = DeferredCache( name="client_ip_last_seen", keylen=4, max_entries=50000 - ) # type: Cache[tuple, int] + ) # type: DeferredCache[tuple, int] async def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id): now = int(self._clock.time_msec()) diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index a25a888443..ad32701d36 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -19,7 +19,7 @@ from typing import Dict, Optional, Tuple from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool, make_tuple_comparison_clause -from synapse.util.caches.descriptors import Cache +from synapse.util.caches.descriptors import DeferredCache logger = logging.getLogger(__name__) @@ -410,7 +410,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore): class ClientIpStore(ClientIpWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - self.client_ip_last_seen = Cache( + self.client_ip_last_seen = DeferredCache( name="client_ip_last_seen", keylen=4, max_entries=50000 ) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 88fd97e1df..d903155e89 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -34,7 +34,7 @@ from synapse.storage.database import ( ) from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key from synapse.util import json_decoder, json_encoder -from synapse.util.caches.descriptors import Cache, cached, cachedList +from synapse.util.caches.descriptors import DeferredCache, cached, cachedList from synapse.util.iterutils import batch_iter from synapse.util.stringutils import shortstr @@ -1004,7 +1004,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): # Map of (user_id, device_id) -> bool. 
If there is an entry that implies # the device exists. - self.device_id_exists_cache = Cache( + self.device_id_exists_cache = DeferredCache( name="device_id_exists", keylen=2, max_entries=10000 ) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 3ec4d1d9c2..be7f60f2e8 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -42,7 +42,7 @@ from synapse.storage.database import DatabasePool from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.types import Collection, get_domain_from_id -from synapse.util.caches.descriptors import Cache, cached +from synapse.util.caches.descriptors import DeferredCache, cached from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -145,7 +145,7 @@ class EventsWorkerStore(SQLBaseStore): self._cleanup_old_transaction_ids, ) - self._get_event_cache = Cache( + self._get_event_cache = DeferredCache( "*getEvent*", keylen=3, max_entries=hs.config.caches.event_cache_size, diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 14458bc20f..7c9fe199bf 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -99,7 +99,7 @@ class CacheEntry: self.callbacks.clear() -class Cache(Generic[KT, VT]): +class DeferredCache(Generic[KT, VT]): """Wraps an LruCache, adding support for Deferred results. It expects that each entry added with set() will be a Deferred; likewise get() @@ -225,7 +225,10 @@ class Cache(Generic[KT, VT]): return default def set( - self, key: KT, value: defer.Deferred, callback: Optional[Callable[[], None]] = None + self, + key: KT, + value: defer.Deferred, + callback: Optional[Callable[[], None]] = None, ) -> ObservableDeferred: if not isinstance(value, defer.Deferred): raise TypeError("not a Deferred") @@ -427,13 +430,13 @@ class CacheDescriptor(_CacheDescriptorBase): self.iterable = iterable def __get__(self, obj, owner): - cache = Cache( + cache = DeferredCache( name=self.orig.__name__, max_entries=self.max_entries, keylen=self.num_args, tree=self.tree, iterable=self.iterable, - ) # type: Cache[Tuple, Any] + ) # type: DeferredCache[Tuple, Any] def get_cache_key_gen(args, kwargs): """Given some args/kwargs return a generator that resolves into @@ -677,9 +680,9 @@ class _CacheContext: _cache_context_objects = ( WeakValueDictionary() - ) # type: WeakValueDictionary[Tuple[Cache, CacheKey], _CacheContext] + ) # type: WeakValueDictionary[Tuple[DeferredCache, CacheKey], _CacheContext] - def __init__(self, cache, cache_key): # type: (Cache, CacheKey) -> None + def __init__(self, cache, cache_key): # type: (DeferredCache, CacheKey) -> None self._cache = cache self._cache_key = cache_key @@ -688,7 +691,9 @@ class _CacheContext: self._cache.invalidate(self._cache_key) @classmethod - def get_instance(cls, cache, cache_key): # type: (Cache, CacheKey) -> _CacheContext + def get_instance( + cls, cache, cache_key + ): # type: (DeferredCache, CacheKey) -> _CacheContext """Returns an instance constructed with the given arguments. A new instance is only created if none already exists. 
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index f5afed017c..00adcab7b9 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -20,14 +20,14 @@ from mock import Mock from twisted.internet import defer from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.descriptors import Cache, cached +from synapse.util.caches.descriptors import DeferredCache, cached from tests import unittest -class CacheTestCase(unittest.HomeserverTestCase): +class DeferredCacheTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, homeserver): - self.cache = Cache("test") + self.cache = DeferredCache("test") def test_empty(self): failed = False @@ -56,7 +56,7 @@ class CacheTestCase(unittest.HomeserverTestCase): self.assertTrue(failed) def test_eviction(self): - cache = Cache("test", max_entries=2) + cache = DeferredCache("test", max_entries=2) cache.prefill(1, "one") cache.prefill(2, "two") @@ -74,7 +74,7 @@ class CacheTestCase(unittest.HomeserverTestCase): cache.get(3) def test_eviction_lru(self): - cache = Cache("test", max_entries=2) + cache = DeferredCache("test", max_entries=2) cache.prefill(1, "one") cache.prefill(2, "two") diff --git a/tests/test_metrics.py b/tests/test_metrics.py index f5f63d8ed6..1c03a52f7c 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -15,7 +15,7 @@ # limitations under the License. from synapse.metrics import REGISTRY, InFlightGauge, generate_latest -from synapse.util.caches.descriptors import Cache +from synapse.util.caches.descriptors import DeferredCache from tests import unittest @@ -138,7 +138,7 @@ class CacheMetricsTests(unittest.HomeserverTestCase): Caches produce metrics reflecting their state when scraped. """ CACHE_NAME = "cache_metrics_test_fgjkbdfg" - cache = Cache(CACHE_NAME, max_entries=777) + cache = DeferredCache(CACHE_NAME, max_entries=777) items = { x.split(b"{")[0].decode("ascii"): x.split(b" ")[1].decode("ascii") diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 677e925477..bd870b4a33 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -42,9 +42,9 @@ def run_on_reactor(): return make_deferred_yieldable(d) -class CacheTestCase(unittest.TestCase): +class DeferredCacheTestCase(unittest.TestCase): def test_invalidate_all(self): - cache = descriptors.Cache("testcache") + cache = descriptors.DeferredCache("testcache") callback_record = [False, False] -- cgit 1.5.1 From 4182bb812f21d7231ff0efc5e93e5f2e88f6605e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 14 Oct 2020 23:25:23 +0100 Subject: move DeferredCache into its own module --- synapse/replication/slave/storage/client_ips.py | 2 +- synapse/storage/databases/main/client_ips.py | 2 +- synapse/storage/databases/main/devices.py | 3 +- synapse/storage/databases/main/events_worker.py | 3 +- synapse/util/caches/deferred_cache.py | 292 ++++++++++++++++++++++++ synapse/util/caches/descriptors.py | 284 +---------------------- tests/storage/test__base.py | 3 +- tests/test_metrics.py | 2 +- tests/util/caches/test_deferred_cache.py | 64 ++++++ tests/util/caches/test_descriptors.py | 44 ---- 10 files changed, 367 insertions(+), 332 deletions(-) create mode 100644 synapse/util/caches/deferred_cache.py create mode 100644 tests/util/caches/test_deferred_cache.py (limited to 'tests') diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index 
40ea78a353..4b0ea0cc01 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -15,7 +15,7 @@ from synapse.storage.database import DatabasePool from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY -from synapse.util.caches.descriptors import DeferredCache +from synapse.util.caches.deferred_cache import DeferredCache from ._base import BaseSlavedStore diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index ad32701d36..9e66e6648a 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -19,7 +19,7 @@ from typing import Dict, Optional, Tuple from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool, make_tuple_comparison_clause -from synapse.util.caches.descriptors import DeferredCache +from synapse.util.caches.deferred_cache import DeferredCache logger = logging.getLogger(__name__) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index d903155e89..e662a20d24 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -34,7 +34,8 @@ from synapse.storage.database import ( ) from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key from synapse.util import json_decoder, json_encoder -from synapse.util.caches.descriptors import DeferredCache, cached, cachedList +from synapse.util.caches.deferred_cache import DeferredCache +from synapse.util.caches.descriptors import cached, cachedList from synapse.util.iterutils import batch_iter from synapse.util.stringutils import shortstr diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index be7f60f2e8..ff150f0be7 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -42,7 +42,8 @@ from synapse.storage.database import DatabasePool from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.types import Collection, get_domain_from_id -from synapse.util.caches.descriptors import DeferredCache, cached +from synapse.util.caches.deferred_cache import DeferredCache +from synapse.util.caches.descriptors import cached from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py new file mode 100644 index 0000000000..f728cd2cf2 --- /dev/null +++ b/synapse/util/caches/deferred_cache.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- +# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
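+# (DeferredCache was moved here from synapse/util/caches/descriptors.py;
+# the matching deletions appear in the diff to that file below.)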
+ +import enum +import threading +from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, cast + +from prometheus_client import Gauge + +from twisted.internet import defer + +from synapse.util.async_helpers import ObservableDeferred +from synapse.util.caches import register_cache +from synapse.util.caches.lrucache import LruCache +from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry + +cache_pending_metric = Gauge( + "synapse_util_caches_cache_pending", + "Number of lookups currently pending for this cache", + ["name"], +) + + +KT = TypeVar("KT") +VT = TypeVar("VT") + + +class _Sentinel(enum.Enum): + # defining a sentinel in this way allows mypy to correctly handle the + # type of a dictionary lookup. + sentinel = object() + + +class DeferredCache(Generic[KT, VT]): + """Wraps an LruCache, adding support for Deferred results. + + It expects that each entry added with set() will be a Deferred; likewise get() + may return an ObservableDeferred. + """ + + __slots__ = ( + "cache", + "name", + "keylen", + "thread", + "metrics", + "_pending_deferred_cache", + ) + + def __init__( + self, + name: str, + max_entries: int = 1000, + keylen: int = 1, + tree: bool = False, + iterable: bool = False, + apply_cache_factor_from_config: bool = True, + ): + """ + Args: + name: The name of the cache + max_entries: Maximum amount of entries that the cache will hold + keylen: The length of the tuple used as the cache key. Ignored unless + `tree` is True. + tree: Use a TreeCache instead of a dict as the underlying cache type + iterable: If True, count each item in the cached object as an entry, + rather than each cached object + apply_cache_factor_from_config: Whether cache factors specified in the + config file affect `max_entries` + """ + cache_type = TreeCache if tree else dict + + # _pending_deferred_cache maps from the key value to a `CacheEntry` object. + self._pending_deferred_cache = ( + cache_type() + ) # type: MutableMapping[KT, CacheEntry] + + # cache is used for completed results and maps to the result itself, rather than + # a Deferred. + self.cache = LruCache( + max_size=max_entries, + keylen=keylen, + cache_type=cache_type, + size_callback=(lambda d: len(d)) if iterable else None, + evicted_callback=self._on_evicted, + apply_cache_factor_from_config=apply_cache_factor_from_config, + ) + + self.name = name + self.keylen = keylen + self.thread = None # type: Optional[threading.Thread] + self.metrics = register_cache( + "cache", + name, + self.cache, + collect_callback=self._metrics_collection_callback, + ) + + @property + def max_entries(self): + return self.cache.max_size + + def _on_evicted(self, evicted_count): + self.metrics.inc_evictions(evicted_count) + + def _metrics_collection_callback(self): + cache_pending_metric.labels(self.name).set(len(self._pending_deferred_cache)) + + def check_thread(self): + expected_thread = self.thread + if expected_thread is None: + self.thread = threading.current_thread() + else: + if expected_thread is not threading.current_thread(): + raise ValueError( + "Cache objects can only be accessed from the main thread" + ) + + def get( + self, + key: KT, + default=_Sentinel.sentinel, + callback: Optional[Callable[[], None]] = None, + update_metrics: bool = True, + ): + """Looks the key up in the caches. + + Args: + key(tuple) + default: What is returned if key is not in the caches. 
If not + specified then function throws KeyError instead + callback(fn): Gets called when the entry in the cache is invalidated + update_metrics (bool): whether to update the cache hit rate metrics + + Returns: + Either an ObservableDeferred or the result itself + """ + callbacks = [callback] if callback else [] + val = self._pending_deferred_cache.get(key, _Sentinel.sentinel) + if val is not _Sentinel.sentinel: + val.callbacks.update(callbacks) + if update_metrics: + self.metrics.inc_hits() + return val.deferred + + val = self.cache.get(key, _Sentinel.sentinel, callbacks=callbacks) + if val is not _Sentinel.sentinel: + self.metrics.inc_hits() + return val + + if update_metrics: + self.metrics.inc_misses() + + if default is _Sentinel.sentinel: + raise KeyError() + else: + return default + + def set( + self, + key: KT, + value: defer.Deferred, + callback: Optional[Callable[[], None]] = None, + ) -> ObservableDeferred: + if not isinstance(value, defer.Deferred): + raise TypeError("not a Deferred") + + callbacks = [callback] if callback else [] + self.check_thread() + observable = ObservableDeferred(value, consumeErrors=True) + observer = observable.observe() + entry = CacheEntry(deferred=observable, callbacks=callbacks) + + existing_entry = self._pending_deferred_cache.pop(key, None) + if existing_entry: + existing_entry.invalidate() + + self._pending_deferred_cache[key] = entry + + def compare_and_pop(): + """Check if our entry is still the one in _pending_deferred_cache, and + if so, pop it. + + Returns true if the entries matched. + """ + existing_entry = self._pending_deferred_cache.pop(key, None) + if existing_entry is entry: + return True + + # oops, the _pending_deferred_cache has been updated since + # we started our query, so we are out of date. + # + # Better put back whatever we took out. (We do it this way + # round, rather than peeking into the _pending_deferred_cache + # and then removing on a match, to make the common case faster) + if existing_entry is not None: + self._pending_deferred_cache[key] = existing_entry + + return False + + def cb(result): + if compare_and_pop(): + self.cache.set(key, result, entry.callbacks) + else: + # we're not going to put this entry into the cache, so need + # to make sure that the invalidation callbacks are called. + # That was probably done when _pending_deferred_cache was + # updated, but it's possible that `set` was called without + # `invalidate` being previously called, in which case it may + # not have been. Either way, let's double-check now. + entry.invalidate() + + def eb(_fail): + compare_and_pop() + entry.invalidate() + + # once the deferred completes, we can move the entry from the + # _pending_deferred_cache to the real cache. + # + observer.addCallbacks(cb, eb) + return observable + + def prefill(self, key: KT, value: VT, callback: Callable[[], None] = None): + callbacks = [callback] if callback else [] + self.cache.set(key, value, callbacks=callbacks) + + def invalidate(self, key): + self.check_thread() + self.cache.pop(key, None) + + # if we have a pending lookup for this key, remove it from the + # _pending_deferred_cache, which will (a) stop it being returned + # for future queries and (b) stop it being persisted as a proper entry + # in self.cache. + entry = self._pending_deferred_cache.pop(key, None) + + # run the invalidation callbacks now, rather than waiting for the + # deferred to resolve. 
+ if entry: + entry.invalidate() + + def invalidate_many(self, key: KT): + self.check_thread() + if not isinstance(key, tuple): + raise TypeError("The cache key must be a tuple not %r" % (type(key),)) + self.cache.del_multi(key) + + # if we have a pending lookup for this key, remove it from the + # _pending_deferred_cache, as above + entry_dict = self._pending_deferred_cache.pop(cast(KT, key), None) + if entry_dict is not None: + for entry in iterate_tree_cache_entry(entry_dict): + entry.invalidate() + + def invalidate_all(self): + self.check_thread() + self.cache.clear() + for entry in self._pending_deferred_cache.values(): + entry.invalidate() + self._pending_deferred_cache.clear() + + +class CacheEntry: + __slots__ = ["deferred", "callbacks", "invalidated"] + + def __init__( + self, deferred: ObservableDeferred, callbacks: Iterable[Callable[[], None]] + ): + self.deferred = deferred + self.callbacks = set(callbacks) + self.invalidated = False + + def invalidate(self): + if not self.invalidated: + self.invalidated = True + for callback in self.callbacks: + callback() + self.callbacks.clear() diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 7c9fe199bf..1f43886804 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -13,44 +13,24 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import enum import functools import inspect import logging -import threading -from typing import ( - Any, - Callable, - Generic, - Iterable, - MutableMapping, - Optional, - Tuple, - TypeVar, - Union, - cast, -) +from typing import Any, Callable, Generic, Optional, Tuple, TypeVar, Union, cast from weakref import WeakValueDictionary -from prometheus_client import Gauge - from twisted.internet import defer from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.util import unwrapFirstError from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.lrucache import LruCache -from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry - -from . import register_cache +from synapse.util.caches.deferred_cache import DeferredCache logger = logging.getLogger(__name__) CacheKey = Union[Tuple, Any] F = TypeVar("F", bound=Callable[..., Any]) -KT = TypeVar("KT") -VT = TypeVar("VT") class _CachedFunction(Generic[F]): @@ -68,266 +48,6 @@ class _CachedFunction(Generic[F]): __call__ = None # type: F -cache_pending_metric = Gauge( - "synapse_util_caches_cache_pending", - "Number of lookups currently pending for this cache", - ["name"], -) - - -class _Sentinel(enum.Enum): - # defining a sentinel in this way allows mypy to correctly handle the - # type of a dictionary lookup. - sentinel = object() - - -class CacheEntry: - __slots__ = ["deferred", "callbacks", "invalidated"] - - def __init__( - self, deferred: ObservableDeferred, callbacks: Iterable[Callable[[], None]] - ): - self.deferred = deferred - self.callbacks = set(callbacks) - self.invalidated = False - - def invalidate(self): - if not self.invalidated: - self.invalidated = True - for callback in self.callbacks: - callback() - self.callbacks.clear() - - -class DeferredCache(Generic[KT, VT]): - """Wraps an LruCache, adding support for Deferred results. - - It expects that each entry added with set() will be a Deferred; likewise get() - may return an ObservableDeferred. 
- """ - - __slots__ = ( - "cache", - "name", - "keylen", - "thread", - "metrics", - "_pending_deferred_cache", - ) - - def __init__( - self, - name: str, - max_entries: int = 1000, - keylen: int = 1, - tree: bool = False, - iterable: bool = False, - apply_cache_factor_from_config: bool = True, - ): - """ - Args: - name: The name of the cache - max_entries: Maximum amount of entries that the cache will hold - keylen: The length of the tuple used as the cache key. Ignored unless - `tree` is True. - tree: Use a TreeCache instead of a dict as the underlying cache type - iterable: If True, count each item in the cached object as an entry, - rather than each cached object - apply_cache_factor_from_config: Whether cache factors specified in the - config file affect `max_entries` - """ - cache_type = TreeCache if tree else dict - - # _pending_deferred_cache maps from the key value to a `CacheEntry` object. - self._pending_deferred_cache = ( - cache_type() - ) # type: MutableMapping[KT, CacheEntry] - - # cache is used for completed results and maps to the result itself, rather than - # a Deferred. - self.cache = LruCache( - max_size=max_entries, - keylen=keylen, - cache_type=cache_type, - size_callback=(lambda d: len(d)) if iterable else None, - evicted_callback=self._on_evicted, - apply_cache_factor_from_config=apply_cache_factor_from_config, - ) - - self.name = name - self.keylen = keylen - self.thread = None # type: Optional[threading.Thread] - self.metrics = register_cache( - "cache", - name, - self.cache, - collect_callback=self._metrics_collection_callback, - ) - - @property - def max_entries(self): - return self.cache.max_size - - def _on_evicted(self, evicted_count): - self.metrics.inc_evictions(evicted_count) - - def _metrics_collection_callback(self): - cache_pending_metric.labels(self.name).set(len(self._pending_deferred_cache)) - - def check_thread(self): - expected_thread = self.thread - if expected_thread is None: - self.thread = threading.current_thread() - else: - if expected_thread is not threading.current_thread(): - raise ValueError( - "Cache objects can only be accessed from the main thread" - ) - - def get( - self, - key: KT, - default=_Sentinel.sentinel, - callback: Optional[Callable[[], None]] = None, - update_metrics: bool = True, - ): - """Looks the key up in the caches. - - Args: - key(tuple) - default: What is returned if key is not in the caches. 
If not - specified then function throws KeyError instead - callback(fn): Gets called when the entry in the cache is invalidated - update_metrics (bool): whether to update the cache hit rate metrics - - Returns: - Either an ObservableDeferred or the result itself - """ - callbacks = [callback] if callback else [] - val = self._pending_deferred_cache.get(key, _Sentinel.sentinel) - if val is not _Sentinel.sentinel: - val.callbacks.update(callbacks) - if update_metrics: - self.metrics.inc_hits() - return val.deferred - - val = self.cache.get(key, _Sentinel.sentinel, callbacks=callbacks) - if val is not _Sentinel.sentinel: - self.metrics.inc_hits() - return val - - if update_metrics: - self.metrics.inc_misses() - - if default is _Sentinel.sentinel: - raise KeyError() - else: - return default - - def set( - self, - key: KT, - value: defer.Deferred, - callback: Optional[Callable[[], None]] = None, - ) -> ObservableDeferred: - if not isinstance(value, defer.Deferred): - raise TypeError("not a Deferred") - - callbacks = [callback] if callback else [] - self.check_thread() - observable = ObservableDeferred(value, consumeErrors=True) - observer = observable.observe() - entry = CacheEntry(deferred=observable, callbacks=callbacks) - - existing_entry = self._pending_deferred_cache.pop(key, None) - if existing_entry: - existing_entry.invalidate() - - self._pending_deferred_cache[key] = entry - - def compare_and_pop(): - """Check if our entry is still the one in _pending_deferred_cache, and - if so, pop it. - - Returns true if the entries matched. - """ - existing_entry = self._pending_deferred_cache.pop(key, None) - if existing_entry is entry: - return True - - # oops, the _pending_deferred_cache has been updated since - # we started our query, so we are out of date. - # - # Better put back whatever we took out. (We do it this way - # round, rather than peeking into the _pending_deferred_cache - # and then removing on a match, to make the common case faster) - if existing_entry is not None: - self._pending_deferred_cache[key] = existing_entry - - return False - - def cb(result): - if compare_and_pop(): - self.cache.set(key, result, entry.callbacks) - else: - # we're not going to put this entry into the cache, so need - # to make sure that the invalidation callbacks are called. - # That was probably done when _pending_deferred_cache was - # updated, but it's possible that `set` was called without - # `invalidate` being previously called, in which case it may - # not have been. Either way, let's double-check now. - entry.invalidate() - - def eb(_fail): - compare_and_pop() - entry.invalidate() - - # once the deferred completes, we can move the entry from the - # _pending_deferred_cache to the real cache. - # - observer.addCallbacks(cb, eb) - return observable - - def prefill(self, key: KT, value: VT, callback: Callable[[], None] = None): - callbacks = [callback] if callback else [] - self.cache.set(key, value, callbacks=callbacks) - - def invalidate(self, key): - self.check_thread() - self.cache.pop(key, None) - - # if we have a pending lookup for this key, remove it from the - # _pending_deferred_cache, which will (a) stop it being returned - # for future queries and (b) stop it being persisted as a proper entry - # in self.cache. - entry = self._pending_deferred_cache.pop(key, None) - - # run the invalidation callbacks now, rather than waiting for the - # deferred to resolve. 
- if entry: - entry.invalidate() - - def invalidate_many(self, key: KT): - self.check_thread() - if not isinstance(key, tuple): - raise TypeError("The cache key must be a tuple not %r" % (type(key),)) - self.cache.del_multi(key) - - # if we have a pending lookup for this key, remove it from the - # _pending_deferred_cache, as above - entry_dict = self._pending_deferred_cache.pop(cast(KT, key), None) - if entry_dict is not None: - for entry in iterate_tree_cache_entry(entry_dict): - entry.invalidate() - - def invalidate_all(self): - self.check_thread() - self.cache.clear() - for entry in self._pending_deferred_cache.values(): - entry.invalidate() - self._pending_deferred_cache.clear() - - class _CacheDescriptorBase: def __init__(self, orig: _CachedFunction, num_args, cache_context=False): self.orig = orig diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 00adcab7b9..2598dbe0a7 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -20,7 +20,8 @@ from mock import Mock from twisted.internet import defer from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.descriptors import DeferredCache, cached +from synapse.util.caches.deferred_cache import DeferredCache +from synapse.util.caches.descriptors import cached from tests import unittest diff --git a/tests/test_metrics.py b/tests/test_metrics.py index 1c03a52f7c..759e4cd048 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -15,7 +15,7 @@ # limitations under the License. from synapse.metrics import REGISTRY, InFlightGauge, generate_latest -from synapse.util.caches.descriptors import DeferredCache +from synapse.util.caches.deferred_cache import DeferredCache from tests import unittest diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py new file mode 100644 index 0000000000..9b6acdfc43 --- /dev/null +++ b/tests/util/caches/test_deferred_cache.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +from functools import partial + +from twisted.internet import defer + +import synapse.util.caches.deferred_cache + + +class DeferredCacheTestCase(unittest.TestCase): + def test_invalidate_all(self): + cache = synapse.util.caches.deferred_cache.DeferredCache("testcache") + + callback_record = [False, False] + + def record_callback(idx): + callback_record[idx] = True + + # add a couple of pending entries + d1 = defer.Deferred() + cache.set("key1", d1, partial(record_callback, 0)) + + d2 = defer.Deferred() + cache.set("key2", d2, partial(record_callback, 1)) + + # lookup should return observable deferreds + self.assertFalse(cache.get("key1").has_called()) + self.assertFalse(cache.get("key2").has_called()) + + # let one of the lookups complete + d2.callback("result2") + + # for now at least, the cache will return real results rather than an + # observabledeferred + self.assertEqual(cache.get("key2"), "result2") + + # now do the invalidation + cache.invalidate_all() + + # lookup should return none + self.assertIsNone(cache.get("key1", None)) + self.assertIsNone(cache.get("key2", None)) + + # both callbacks should have been callbacked + self.assertTrue(callback_record[0], "Invalidation callback for key1 not called") + self.assertTrue(callback_record[1], "Invalidation callback for key2 not called") + + # letting the other lookup complete should do nothing + d1.callback("result1") + self.assertIsNone(cache.get("key1", None)) diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index bd870b4a33..3d1f960869 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from functools import partial import mock @@ -42,49 +41,6 @@ def run_on_reactor(): return make_deferred_yieldable(d) -class DeferredCacheTestCase(unittest.TestCase): - def test_invalidate_all(self): - cache = descriptors.DeferredCache("testcache") - - callback_record = [False, False] - - def record_callback(idx): - callback_record[idx] = True - - # add a couple of pending entries - d1 = defer.Deferred() - cache.set("key1", d1, partial(record_callback, 0)) - - d2 = defer.Deferred() - cache.set("key2", d2, partial(record_callback, 1)) - - # lookup should return observable deferreds - self.assertFalse(cache.get("key1").has_called()) - self.assertFalse(cache.get("key2").has_called()) - - # let one of the lookups complete - d2.callback("result2") - - # for now at least, the cache will return real results rather than an - # observabledeferred - self.assertEqual(cache.get("key2"), "result2") - - # now do the invalidation - cache.invalidate_all() - - # lookup should return none - self.assertIsNone(cache.get("key1", None)) - self.assertIsNone(cache.get("key2", None)) - - # both callbacks should have been callbacked - self.assertTrue(callback_record[0], "Invalidation callback for key1 not called") - self.assertTrue(callback_record[1], "Invalidation callback for key2 not called") - - # letting the other lookup complete should do nothing - d1.callback("result1") - self.assertIsNone(cache.get("key1", None)) - - class DescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks def test_cache(self): -- cgit 1.5.1 From 470dedd2662536c309407d05085d04a7d61c5de8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 14 Oct 2020 23:37:23 +0100 Subject: Combine the two sets of DeferredCache tests --- tests/storage/test__base.py | 72 ----------------------------- tests/util/caches/test_deferred_cache.py | 77 +++++++++++++++++++++++++++++++- 2 files changed, 75 insertions(+), 74 deletions(-) (limited to 'tests') diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 2598dbe0a7..8e69b1e9cc 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -20,83 +20,11 @@ from mock import Mock from twisted.internet import defer from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.deferred_cache import DeferredCache from synapse.util.caches.descriptors import cached from tests import unittest -class DeferredCacheTestCase(unittest.HomeserverTestCase): - def prepare(self, reactor, clock, homeserver): - self.cache = DeferredCache("test") - - def test_empty(self): - failed = False - try: - self.cache.get("foo") - except KeyError: - failed = True - - self.assertTrue(failed) - - def test_hit(self): - self.cache.prefill("foo", 123) - - self.assertEquals(self.cache.get("foo"), 123) - - def test_invalidate(self): - self.cache.prefill(("foo",), 123) - self.cache.invalidate(("foo",)) - - failed = False - try: - self.cache.get(("foo",)) - except KeyError: - failed = True - - self.assertTrue(failed) - - def test_eviction(self): - cache = DeferredCache("test", max_entries=2) - - cache.prefill(1, "one") - cache.prefill(2, "two") - cache.prefill(3, "three") # 1 will be evicted - - failed = False - try: - cache.get(1) - except KeyError: - failed = True - - self.assertTrue(failed) - - cache.get(2) - cache.get(3) - - def test_eviction_lru(self): - cache = DeferredCache("test", max_entries=2) - - cache.prefill(1, "one") - cache.prefill(2, "two") - - # Now access 1 again, thus causing 2 to be least-recently used - cache.get(1) - - 
cache.prefill(3, "three") - - failed = False - try: - cache.get(2) - except KeyError: - failed = True - - self.assertTrue(failed) - - cache.get(1) - cache.get(3) - - class CacheDecoratorTestCase(unittest.HomeserverTestCase): @defer.inlineCallbacks def test_passthrough(self): diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index 9b6acdfc43..9717be56b6 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -18,12 +18,41 @@ from functools import partial from twisted.internet import defer -import synapse.util.caches.deferred_cache +from synapse.util.caches.deferred_cache import DeferredCache class DeferredCacheTestCase(unittest.TestCase): + def test_empty(self): + cache = DeferredCache("test") + failed = False + try: + cache.get("foo") + except KeyError: + failed = True + + self.assertTrue(failed) + + def test_hit(self): + cache = DeferredCache("test") + cache.prefill("foo", 123) + + self.assertEquals(cache.get("foo"), 123) + + def test_invalidate(self): + cache = DeferredCache("test") + cache.prefill(("foo",), 123) + cache.invalidate(("foo",)) + + failed = False + try: + cache.get(("foo",)) + except KeyError: + failed = True + + self.assertTrue(failed) + def test_invalidate_all(self): - cache = synapse.util.caches.deferred_cache.DeferredCache("testcache") + cache = DeferredCache("testcache") callback_record = [False, False] @@ -62,3 +91,47 @@ class DeferredCacheTestCase(unittest.TestCase): # letting the other lookup complete should do nothing d1.callback("result1") self.assertIsNone(cache.get("key1", None)) + + def test_eviction(self): + cache = DeferredCache( + "test", max_entries=2, apply_cache_factor_from_config=False + ) + + cache.prefill(1, "one") + cache.prefill(2, "two") + cache.prefill(3, "three") # 1 will be evicted + + failed = False + try: + cache.get(1) + except KeyError: + failed = True + + self.assertTrue(failed) + + cache.get(2) + cache.get(3) + + def test_eviction_lru(self): + cache = DeferredCache( + "test", max_entries=2, apply_cache_factor_from_config=False + ) + + cache.prefill(1, "one") + cache.prefill(2, "two") + + # Now access 1 again, thus causing 2 to be least-recently used + cache.get(1) + + cache.prefill(3, "three") + + failed = False + try: + cache.get(2) + except KeyError: + failed = True + + self.assertTrue(failed) + + cache.get(1) + cache.get(3) -- cgit 1.5.1 From c276bd996916adce899410b9c4c891892f51b992 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Thu, 15 Oct 2020 17:33:28 +0100 Subject: Send some ephemeral events to appservices (#8437) Optionally sends typing, presence, and read receipt information to appservices. 
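At the wire level, an opted-in appservice's transaction body simply grows an
MSC2409-prefixed key next to the usual persistent events. A minimal sketch of
how the body is assembled (the helper name here is illustrative, not part of
the patch; the real logic lives in ApplicationServiceApi.push_bulk in the diff
below):

def build_transaction_body(service, events, ephemeral):
    # Ephemeral events are only included for appservices whose registration
    # sets "de.sorunome.msc2409.push_ephemeral".
    if service.supports_ephemeral:
        return {"events": events, "de.sorunome.msc2409.ephemeral": ephemeral}
    return {"events": events}

The body is then PUT to the appservice's /transactions/{txn_id} endpoint as
before.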
--- changelog.d/8437.feature | 1 + mypy.ini | 1 + synapse/appservice/__init__.py | 180 ++++++++++++++------- synapse/appservice/api.py | 27 +++- synapse/appservice/scheduler.py | 48 ++++-- synapse/config/appservice.py | 3 + synapse/handlers/appservice.py | 109 ++++++++++++- synapse/handlers/receipts.py | 35 +++- synapse/handlers/sync.py | 1 - synapse/handlers/typing.py | 31 +++- synapse/notifier.py | 25 +++ synapse/storage/databases/main/appservice.py | 66 ++++++-- synapse/storage/databases/main/receipts.py | 55 +++++++ .../main/schema/delta/59/19as_device_stream.sql | 18 +++ tests/appservice/test_scheduler.py | 77 ++++++--- tests/storage/test_appservice.py | 8 +- 16 files changed, 563 insertions(+), 122 deletions(-) create mode 100644 changelog.d/8437.feature create mode 100644 synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql (limited to 'tests') diff --git a/changelog.d/8437.feature b/changelog.d/8437.feature new file mode 100644 index 0000000000..4abcccb326 --- /dev/null +++ b/changelog.d/8437.feature @@ -0,0 +1 @@ +Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices. diff --git a/mypy.ini b/mypy.ini index 9748f6258c..b5db54ee3b 100644 --- a/mypy.ini +++ b/mypy.ini @@ -15,6 +15,7 @@ files = synapse/events/builder.py, synapse/events/spamcheck.py, synapse/federation, + synapse/handlers/appservice.py, synapse/handlers/account_data.py, synapse/handlers/auth.py, synapse/handlers/cas_handler.py, diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 13ec1f71a6..3862d9c08f 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -14,14 +14,15 @@ # limitations under the License. import logging import re -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Iterable, List, Match, Optional from synapse.api.constants import EventTypes -from synapse.appservice.api import ApplicationServiceApi -from synapse.types import GroupID, get_domain_from_id +from synapse.events import EventBase +from synapse.types import GroupID, JsonDict, UserID, get_domain_from_id from synapse.util.caches.descriptors import cached if TYPE_CHECKING: + from synapse.appservice.api import ApplicationServiceApi from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) @@ -32,38 +33,6 @@ class ApplicationServiceState: UP = "up" -class AppServiceTransaction: - """Represents an application service transaction.""" - - def __init__(self, service, id, events): - self.service = service - self.id = id - self.events = events - - async def send(self, as_api: ApplicationServiceApi) -> bool: - """Sends this transaction using the provided AS API interface. - - Args: - as_api: The API to use to send. - Returns: - True if the transaction was sent. - """ - return await as_api.push_bulk( - service=self.service, events=self.events, txn_id=self.id - ) - - async def complete(self, store: "DataStore") -> None: - """Completes this transaction as successful. - - Marks this transaction ID on the application service and removes the - transaction contents from the database. - - Args: - store: The database store to operate on. - """ - await store.complete_appservice_txn(service=self.service, txn_id=self.id) - - class ApplicationService: """Defines an application service. This definition is mostly what is provided to the /register AS API. 
@@ -91,6 +60,7 @@ class ApplicationService:
         protocols=None,
         rate_limited=True,
         ip_range_whitelist=None,
+        supports_ephemeral=False,
     ):
         self.token = token
         self.url = (
@@ -102,6 +72,7 @@ class ApplicationService:
         self.namespaces = self._check_namespaces(namespaces)
         self.id = id
         self.ip_range_whitelist = ip_range_whitelist
+        self.supports_ephemeral = supports_ephemeral
 
         if "|" in self.id:
             raise Exception("application service ID cannot contain '|' character")
@@ -161,19 +132,21 @@ class ApplicationService:
                 raise ValueError("Expected string for 'regex' in ns '%s'" % ns)
         return namespaces
 
-    def _matches_regex(self, test_string, namespace_key):
+    def _matches_regex(self, test_string: str, namespace_key: str) -> Optional[Match]:
         for regex_obj in self.namespaces[namespace_key]:
             if regex_obj["regex"].match(test_string):
                 return regex_obj
         return None
 
-    def _is_exclusive(self, ns_key, test_string):
+    def _is_exclusive(self, ns_key: str, test_string: str) -> bool:
         regex_obj = self._matches_regex(test_string, ns_key)
         if regex_obj:
             return regex_obj["exclusive"]
         return False
 
-    async def _matches_user(self, event, store):
+    async def _matches_user(
+        self, event: Optional[EventBase], store: Optional["DataStore"] = None
+    ) -> bool:
         if not event:
             return False
 
@@ -188,14 +161,23 @@ class ApplicationService:
         if not store:
             return False
 
-        does_match = await self._matches_user_in_member_list(event.room_id, store)
+        does_match = await self.matches_user_in_member_list(event.room_id, store)
         return does_match
 
-    @cached(num_args=1, cache_context=True)
-    async def _matches_user_in_member_list(self, room_id, store, cache_context):
-        member_list = await store.get_users_in_room(
-            room_id, on_invalidate=cache_context.invalidate
-        )
+    @cached(num_args=1)
+    async def matches_user_in_member_list(
+        self, room_id: str, store: "DataStore"
+    ) -> bool:
+        """Check if this service is interested in a room based upon its membership
+
+        Args:
+            room_id: The room to check.
+            store: The datastore to query.
+
+        Returns:
+            True if this service would like to know about this room.
+        """
+        member_list = await store.get_users_in_room(room_id)
 
         # check joined member events
         for user_id in member_list:
@@ -203,12 +185,14 @@ class ApplicationService:
                 return True
         return False
 
-    def _matches_room_id(self, event):
+    def _matches_room_id(self, event: EventBase) -> bool:
         if hasattr(event, "room_id"):
             return self.is_interested_in_room(event.room_id)
         return False
 
-    async def _matches_aliases(self, event, store):
+    async def _matches_aliases(
+        self, event: EventBase, store: Optional["DataStore"] = None
+    ) -> bool:
         if not store or not event:
             return False
 
@@ -218,12 +202,15 @@ class ApplicationService:
                 return True
         return False
 
-    async def is_interested(self, event, store=None) -> bool:
+    async def is_interested(
+        self, event: EventBase, store: Optional["DataStore"] = None
+    ) -> bool:
         """Check if this service is interested in this event.
 
         Args:
-            event(Event): The event to check.
-            store(DataStore)
+            event: The event to check.
+            store: The datastore to query.
+
         Returns:
             True if this service would like to know about this event.
         """
         # Do cheap checks first
         if self._matches_room_id(event):
             return True
 
+        # This will check the namespaces first before
+        # checking the store, so should be run before _matches_aliases
+        if await self._matches_user(event, store):
+            return True
+
+        # This will check the store, so should be run last
         if await self._matches_aliases(event, store):
             return True
 
-        if await self._matches_user(event, store):
+        return False
+
+    @cached(num_args=1)
+    async def is_interested_in_presence(
+        self, user_id: UserID, store: "DataStore"
+    ) -> bool:
+        """Check if this service is interested in a user's presence
+
+        Args:
+            user_id: The user to check.
+            store: The datastore to query.
+
+        Returns:
+            True if this service would like to know about presence for this user.
+        """
+        # First check whether the user itself matches one of our namespaces
+        if self.is_interested_in_user(user_id.to_string()):
             return True
+        room_ids = await store.get_rooms_for_user(user_id.to_string())
 
+        # Then find out if the appservice is interested in any of those rooms
+        for room_id in room_ids:
+            if await self.matches_user_in_member_list(room_id, store):
+                return True
         return False
 
-    def is_interested_in_user(self, user_id):
+    def is_interested_in_user(self, user_id: str) -> bool:
         return (
-            self._matches_regex(user_id, ApplicationService.NS_USERS)
+            bool(self._matches_regex(user_id, ApplicationService.NS_USERS))
             or user_id == self.sender
         )
 
-    def is_interested_in_alias(self, alias):
+    def is_interested_in_alias(self, alias: str) -> bool:
         return bool(self._matches_regex(alias, ApplicationService.NS_ALIASES))
 
-    def is_interested_in_room(self, room_id):
+    def is_interested_in_room(self, room_id: str) -> bool:
         return bool(self._matches_regex(room_id, ApplicationService.NS_ROOMS))
 
-    def is_exclusive_user(self, user_id):
+    def is_exclusive_user(self, user_id: str) -> bool:
         return (
             self._is_exclusive(ApplicationService.NS_USERS, user_id)
             or user_id == self.sender
         )
 
-    def is_interested_in_protocol(self, protocol):
+    def is_interested_in_protocol(self, protocol: str) -> bool:
         return protocol in self.protocols
 
-    def is_exclusive_alias(self, alias):
+    def is_exclusive_alias(self, alias: str) -> bool:
         return self._is_exclusive(ApplicationService.NS_ALIASES, alias)
 
-    def is_exclusive_room(self, room_id):
+    def is_exclusive_room(self, room_id: str) -> bool:
         return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)
 
     def get_exclusive_user_regexes(self):
@@ -276,14 +290,14 @@ class ApplicationService:
             if regex_obj["exclusive"]
         ]
 
-    def get_groups_for_user(self, user_id):
+    def get_groups_for_user(self, user_id: str) -> Iterable[str]:
         """Get the groups that this user is associated with by this AS
 
         Args:
-            user_id (str): The ID of the user.
+            user_id: The ID of the user.
 
         Returns:
-            iterable[str]: an iterable that yields group_id strings.
+            An iterable that yields group_id strings.
""" return ( regex_obj["group_id"] @@ -291,7 +305,7 @@ class ApplicationService: if "group_id" in regex_obj and regex_obj["regex"].match(user_id) ) - def is_rate_limited(self): + def is_rate_limited(self) -> bool: return self.rate_limited def __str__(self): @@ -300,3 +314,45 @@ class ApplicationService: dict_copy["token"] = "" dict_copy["hs_token"] = "" return "ApplicationService: %s" % (dict_copy,) + + +class AppServiceTransaction: + """Represents an application service transaction.""" + + def __init__( + self, + service: ApplicationService, + id: int, + events: List[EventBase], + ephemeral: List[JsonDict], + ): + self.service = service + self.id = id + self.events = events + self.ephemeral = ephemeral + + async def send(self, as_api: "ApplicationServiceApi") -> bool: + """Sends this transaction using the provided AS API interface. + + Args: + as_api: The API to use to send. + Returns: + True if the transaction was sent. + """ + return await as_api.push_bulk( + service=self.service, + events=self.events, + ephemeral=self.ephemeral, + txn_id=self.id, + ) + + async def complete(self, store: "DataStore") -> None: + """Completes this transaction as successful. + + Marks this transaction ID on the application service and removes the + transaction contents from the database. + + Args: + store: The database store to operate on. + """ + await store.complete_appservice_txn(service=self.service, txn_id=self.id) diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index e8f0793795..e366a982b8 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -14,12 +14,13 @@ # limitations under the License. import logging import urllib -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, List, Optional, Tuple from prometheus_client import Counter from synapse.api.constants import EventTypes, ThirdPartyEntityKind from synapse.api.errors import CodeMessageException +from synapse.events import EventBase from synapse.events.utils import serialize_event from synapse.http.client import SimpleHttpClient from synapse.types import JsonDict, ThirdPartyInstanceID @@ -201,7 +202,13 @@ class ApplicationServiceApi(SimpleHttpClient): key = (service.id, protocol) return await self.protocol_meta_cache.wrap(key, _get) - async def push_bulk(self, service, events, txn_id=None): + async def push_bulk( + self, + service: "ApplicationService", + events: List[EventBase], + ephemeral: List[JsonDict], + txn_id: Optional[int] = None, + ): if service.url is None: return True @@ -211,15 +218,19 @@ class ApplicationServiceApi(SimpleHttpClient): logger.warning( "push_bulk: Missing txn ID sending events to %s", service.url ) - txn_id = str(0) - txn_id = str(txn_id) + txn_id = 0 + + uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id))) + + # Never send ephemeral events to appservices that do not support it + if service.supports_ephemeral: + body = {"events": events, "de.sorunome.msc2409.ephemeral": ephemeral} + else: + body = {"events": events} - uri = service.url + ("/transactions/%s" % urllib.parse.quote(txn_id)) try: await self.put_json( - uri=uri, - json_body={"events": events}, - args={"access_token": service.hs_token}, + uri=uri, json_body=body, args={"access_token": service.hs_token}, ) sent_transactions_counter.labels(service.id).inc() sent_events_counter.labels(service.id).inc(len(events)) diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 8eb8c6f51c..ad3c408519 100644 --- a/synapse/appservice/scheduler.py +++ 
b/synapse/appservice/scheduler.py @@ -49,10 +49,13 @@ This is all tied together by the AppServiceScheduler which DIs the required components. """ import logging +from typing import List -from synapse.appservice import ApplicationServiceState +from synapse.appservice import ApplicationService, ApplicationServiceState +from synapse.events import EventBase from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import JsonDict logger = logging.getLogger(__name__) @@ -82,8 +85,13 @@ class ApplicationServiceScheduler: for service in services: self.txn_ctrl.start_recoverer(service) - def submit_event_for_as(self, service, event): - self.queuer.enqueue(service, event) + def submit_event_for_as(self, service: ApplicationService, event: EventBase): + self.queuer.enqueue_event(service, event) + + def submit_ephemeral_events_for_as( + self, service: ApplicationService, events: List[JsonDict] + ): + self.queuer.enqueue_ephemeral(service, events) class _ServiceQueuer: @@ -96,17 +104,15 @@ class _ServiceQueuer: def __init__(self, txn_ctrl, clock): self.queued_events = {} # dict of {service_id: [events]} + self.queued_ephemeral = {} # dict of {service_id: [events]} # the appservices which currently have a transaction in flight self.requests_in_flight = set() self.txn_ctrl = txn_ctrl self.clock = clock - def enqueue(self, service, event): - self.queued_events.setdefault(service.id, []).append(event) - + def _start_background_request(self, service): # start a sender for this appservice if we don't already have one - if service.id in self.requests_in_flight: return @@ -114,7 +120,15 @@ class _ServiceQueuer: "as-sender-%s" % (service.id,), self._send_request, service ) - async def _send_request(self, service): + def enqueue_event(self, service: ApplicationService, event: EventBase): + self.queued_events.setdefault(service.id, []).append(event) + self._start_background_request(service) + + def enqueue_ephemeral(self, service: ApplicationService, events: List[JsonDict]): + self.queued_ephemeral.setdefault(service.id, []).extend(events) + self._start_background_request(service) + + async def _send_request(self, service: ApplicationService): # sanity-check: we shouldn't get here if this service already has a sender # running. 
assert service.id not in self.requests_in_flight @@ -123,10 +137,11 @@ class _ServiceQueuer: try: while True: events = self.queued_events.pop(service.id, []) - if not events: + ephemeral = self.queued_ephemeral.pop(service.id, []) + if not events and not ephemeral: return try: - await self.txn_ctrl.send(service, events) + await self.txn_ctrl.send(service, events, ephemeral) except Exception: logger.exception("AS request failed") finally: @@ -158,9 +173,16 @@ class _TransactionController: # for UTs self.RECOVERER_CLASS = _Recoverer - async def send(self, service, events): + async def send( + self, + service: ApplicationService, + events: List[EventBase], + ephemeral: List[JsonDict] = [], + ): try: - txn = await self.store.create_appservice_txn(service=service, events=events) + txn = await self.store.create_appservice_txn( + service=service, events=events, ephemeral=ephemeral + ) service_is_up = await self._is_service_up(service) if service_is_up: sent = await txn.send(self.as_api) @@ -204,7 +226,7 @@ class _TransactionController: recoverer.recover() logger.info("Now %i active recoverers", len(self.recoverers)) - async def _is_service_up(self, service): + async def _is_service_up(self, service: ApplicationService) -> bool: state = await self.store.get_appservice_state(service) return state == ApplicationServiceState.UP or state is None diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 8ed3e24258..746fc3cc02 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -160,6 +160,8 @@ def _load_appservice(hostname, as_info, config_filename): if as_info.get("ip_range_whitelist"): ip_range_whitelist = IPSet(as_info.get("ip_range_whitelist")) + supports_ephemeral = as_info.get("de.sorunome.msc2409.push_ephemeral", False) + return ApplicationService( token=as_info["as_token"], hostname=hostname, @@ -168,6 +170,7 @@ def _load_appservice(hostname, as_info, config_filename): hs_token=as_info["hs_token"], sender=user_id, id=as_info["id"], + supports_ephemeral=supports_ephemeral, protocols=protocols, rate_limited=rate_limited, ip_range_whitelist=ip_range_whitelist, diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index c8d5e58035..07240d3a14 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -14,6 +14,7 @@ # limitations under the License. 
 import logging
+from typing import Dict, List, Optional
 
 from prometheus_client import Counter
 
@@ -21,13 +22,16 @@ from twisted.internet import defer
 
 import synapse
 from synapse.api.constants import EventTypes
+from synapse.appservice import ApplicationService
+from synapse.events import EventBase
+from synapse.handlers.presence import format_user_presence_state
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics import (
     event_processing_loop_counter,
     event_processing_loop_room_count,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import RoomStreamToken
+from synapse.types import Collection, JsonDict, RoomStreamToken, UserID
 from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
@@ -44,6 +48,7 @@ class ApplicationServicesHandler:
         self.started_scheduler = False
         self.clock = hs.get_clock()
         self.notify_appservices = hs.config.notify_appservices
+        self.event_sources = hs.get_event_sources()
 
         self.current_max = 0
         self.is_processing = False
@@ -82,7 +87,7 @@ class ApplicationServicesHandler:
                 if not events:
                     break
 
-                events_by_room = {}
+                events_by_room = {}  # type: Dict[str, List[EventBase]]
                 for event in events:
                     events_by_room.setdefault(event.room_id, []).append(event)
 
@@ -161,6 +166,106 @@ class ApplicationServicesHandler:
         finally:
             self.is_processing = False
 
+    async def notify_interested_services_ephemeral(
+        self, stream_key: str, new_token: Optional[int], users: Collection[UserID] = [],
+    ):
+        """This is called by the notifier in the background
+        when an ephemeral event is handled by the homeserver.
+
+        This will determine which appservices
+        are interested in the event, and submit it to them.
+
+        Events will only be pushed to appservices
+        that have opted into receiving ephemeral events.
+
+        Args:
+            stream_key: The stream the event came from.
+            new_token: The latest stream token.
+            users: The user(s) involved with the event.
+        """
+        services = [
+            service
+            for service in self.store.get_app_services()
+            if service.supports_ephemeral
+        ]
+        if not services or not self.notify_appservices:
+            return
+        logger.info("Checking interested services for %s", stream_key)
+        with Measure(self.clock, "notify_interested_services_ephemeral"):
+            for service in services:
+                # Only handle typing if we have the latest token
+                if stream_key == "typing_key" and new_token is not None:
+                    events = await self._handle_typing(service, new_token)
+                    if events:
+                        self.scheduler.submit_ephemeral_events_for_as(service, events)
+                    # We don't persist the token for typing_key for performance reasons
+                elif stream_key == "receipt_key":
+                    events = await self._handle_receipts(service)
+                    if events:
+                        self.scheduler.submit_ephemeral_events_for_as(service, events)
+                    await self.store.set_type_stream_id_for_appservice(
+                        service, "read_receipt", new_token
+                    )
+                elif stream_key == "presence_key":
+                    events = await self._handle_presence(service, users)
+                    if events:
+                        self.scheduler.submit_ephemeral_events_for_as(service, events)
+                    await self.store.set_type_stream_id_for_appservice(
+                        service, "presence", new_token
+                    )
+
+    async def _handle_typing(self, service: ApplicationService, new_token: int):
+        typing_source = self.event_sources.sources["typing"]
+        # Get the typing events from just before current
+        typing, _ = await typing_source.get_new_events_as(
+            service=service,
+            # For performance reasons, we don't persist the previous
+            # token in the DB and instead fetch the latest typing information
+            # for appservices.
+            from_key=new_token - 1,
+        )
+        return typing
+
+    async def _handle_receipts(self, service: ApplicationService):
+        from_key = await self.store.get_type_stream_id_for_appservice(
+            service, "read_receipt"
+        )
+        receipts_source = self.event_sources.sources["receipt"]
+        receipts, _ = await receipts_source.get_new_events_as(
+            service=service, from_key=from_key
+        )
+        return receipts
+
+    async def _handle_presence(
+        self, service: ApplicationService, users: Collection[UserID]
+    ):
+        events = []  # type: List[JsonDict]
+        presence_source = self.event_sources.sources["presence"]
+        from_key = await self.store.get_type_stream_id_for_appservice(
+            service, "presence"
+        )
+        for user in users:
+            interested = await service.is_interested_in_presence(user, self.store)
+            if not interested:
+                continue
+            presence_events, _ = await presence_source.get_new_events(
+                user=user, service=service, from_key=from_key,
+            )
+            time_now = self.clock.time_msec()
+            presence_events = [
+                {
+                    "type": "m.presence",
+                    "sender": event.user_id,
+                    "content": format_user_presence_state(
+                        event, time_now, include_user_id=False
+                    ),
+                }
+                for event in presence_events
+            ]
+            events = events + presence_events
+
+        return events
+
     async def query_user_exists(self, user_id):
         """Check if any application service knows this user_id exists.
 
@@ -223,7 +326,7 @@ class ApplicationServicesHandler:
     async def get_3pe_protocols(self, only_protocol=None):
         services = self.store.get_app_services()
 
-        protocols = {}
+        protocols = {}  # type: Dict[str, List[JsonDict]]
 
         # Collect up all the individual protocol responses out of the ASes
         for s in services:
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 7225923757..c242c409cf 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -13,9 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import List, Tuple
 
+from synapse.appservice import ApplicationService
 from synapse.handlers._base import BaseHandler
-from synapse.types import ReadReceipt, get_domain_from_id
+from synapse.types import JsonDict, ReadReceipt, get_domain_from_id
 from synapse.util.async_helpers import maybe_awaitable
 
 logger = logging.getLogger(__name__)
@@ -140,5 +142,36 @@ class ReceiptEventSource:
 
         return (events, to_key)
 
+    async def get_new_events_as(
+        self, from_key: int, service: ApplicationService
+    ) -> Tuple[List[JsonDict], int]:
+        """Returns a set of new receipt events that an appservice
+        may be interested in.
+
+        Args:
+            from_key: the stream position from which events should be fetched
+            service: The appservice which may be interested
+        """
+        from_key = int(from_key)
+        to_key = self.get_current_key()
+
+        if from_key == to_key:
+            return [], to_key
+
+        # We first need to fetch all new receipts
+        rooms_to_events = await self.store.get_linearized_receipts_for_all_rooms(
+            from_key=from_key, to_key=to_key
+        )
+
+        # Then filter down to rooms that the AS can read
+        events = []
+        for room_id, event in rooms_to_events.items():
+            if not await service.matches_user_in_member_list(room_id, self.store):
+                continue
+
+            events.append(event)
+
+        return (events, to_key)
+
     def get_current_key(self, direction="f"):
         return self.store.get_max_receipt_stream_id()
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index a306631094..b527724bc4 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. - import itertools import logging from typing import TYPE_CHECKING, Any, Dict, FrozenSet, List, Optional, Set, Tuple diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 3cbfc2d780..d3692842e3 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -12,16 +12,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging import random from collections import namedtuple from typing import TYPE_CHECKING, List, Set, Tuple from synapse.api.errors import AuthError, ShadowBanError, SynapseError +from synapse.appservice import ApplicationService from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.tcp.streams import TypingStream -from synapse.types import UserID, get_domain_from_id +from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -430,6 +430,33 @@ class TypingNotificationEventSource: "content": {"user_ids": list(typing)}, } + async def get_new_events_as( + self, from_key: int, service: ApplicationService + ) -> Tuple[List[JsonDict], int]: + """Returns a set of new typing events that an appservice + may be interested in. + + Args: + from_key: the stream position at which events should be fetched from + service: The appservice which may be interested + """ + with Measure(self.clock, "typing.get_new_events_as"): + from_key = int(from_key) + handler = self.get_typing_handler() + + events = [] + for room_id in handler._room_serials.keys(): + if handler._room_serials[room_id] <= from_key: + continue + if not await service.matches_user_in_member_list( + room_id, handler.store + ): + continue + + events.append(self._make_event_for(room_id)) + + return (events, handler._latest_room_serial) + async def get_new_events(self, from_key, room_ids, **kwargs): with Measure(self.clock, "typing.get_new_events"): from_key = int(from_key) diff --git a/synapse/notifier.py b/synapse/notifier.py index 51c830c91e..2e993411b9 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -329,6 +329,22 @@ class Notifier: except Exception: logger.exception("Error notifying application services of event") + async def _notify_app_services_ephemeral( + self, + stream_key: str, + new_token: Union[int, RoomStreamToken], + users: Collection[UserID] = [], + ): + try: + stream_token = None + if isinstance(new_token, int): + stream_token = new_token + await self.appservice_handler.notify_interested_services_ephemeral( + stream_key, stream_token, users + ) + except Exception: + logger.exception("Error notifying application services of event") + async def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: await self._pusher_pool.on_new_notifications(max_room_stream_token) @@ -367,6 +383,15 @@ class Notifier: self.notify_replication() + # Notify appservices + run_as_background_process( + "_notify_app_services_ephemeral", + self._notify_app_services_ephemeral, + stream_key, + new_token, + users, + ) + def on_new_replication_data(self) -> None: """Used to inform replication listeners that something has happend without waking up any of the normal user event streams""" diff --git 
a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py
index 85f6b1e3fd..43bf0f649a 100644
--- a/synapse/storage/databases/main/appservice.py
+++ b/synapse/storage/databases/main/appservice.py
@@ -15,12 +15,15 @@
 # limitations under the License.
 import logging
 import re
+from typing import List
 
-from synapse.appservice import AppServiceTransaction
+from synapse.appservice import ApplicationService, AppServiceTransaction
 from synapse.config.appservice import load_appservices
+from synapse.events import EventBase
 from synapse.storage._base import SQLBaseStore, db_to_json
 from synapse.storage.database import DatabasePool
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.types import JsonDict
 from synapse.util import json_encoder
 
 logger = logging.getLogger(__name__)
@@ -172,15 +175,23 @@ class ApplicationServiceTransactionWorkerStore(
             "application_services_state", {"as_id": service.id}, {"state": state}
         )
 
-    async def create_appservice_txn(self, service, events):
+    async def create_appservice_txn(
+        self,
+        service: ApplicationService,
+        events: List[EventBase],
+        ephemeral: List[JsonDict],
+    ) -> AppServiceTransaction:
         """Atomically creates a new transaction for this application service
-        with the given list of events.
+        with the given list of events. Ephemeral events are NOT persisted to the
+        database and are not resent if a transaction is retried.
 
         Args:
-            service(ApplicationService): The service who the transaction is for.
-            events(list): A list of events to put in the transaction.
+            service: The service that the transaction is for.
+            events: A list of persistent events to put in the transaction.
+            ephemeral: A list of ephemeral events to put in the transaction.
+
         Returns:
-            AppServiceTransaction: A new transaction.
+            A new transaction.
         """
 
         def _create_appservice_txn(txn):
@@ -207,7 +218,9 @@ class ApplicationServiceTransactionWorkerStore(
                 "VALUES(?,?,?)",
                 (service.id, new_txn_id, event_ids),
             )
-            return AppServiceTransaction(service=service, id=new_txn_id, events=events)
+            return AppServiceTransaction(
+                service=service, id=new_txn_id, events=events, ephemeral=ephemeral
+            )
 
         return await self.db_pool.runInteraction(
            "create_appservice_txn", _create_appservice_txn
@@ -296,7 +309,9 @@ class ApplicationServiceTransactionWorkerStore(
 
         events = await self.get_events_as_list(event_ids)
 
-        return AppServiceTransaction(service=service, id=entry["txn_id"], events=events)
+        return AppServiceTransaction(
+            service=service, id=entry["txn_id"], events=events, ephemeral=[]
+        )
 
     def _get_last_txn(self, txn, service_id):
         txn.execute(
@@ -320,7 +335,7 @@ class ApplicationServiceTransactionWorkerStore(
         )
 
     async def get_new_events_for_appservice(self, current_id, limit):
-        """Get all new evnets"""
+        """Get all new events for an appservice"""
 
         def get_new_events_for_appservice_txn(txn):
             sql = (
@@ -351,6 +366,44 @@ class ApplicationServiceTransactionWorkerStore(
 
         return upper_bound, events
 
+    async def get_type_stream_id_for_appservice(
+        self, service: ApplicationService, type: str
+    ) -> int:
+        def get_type_stream_id_for_appservice_txn(txn):
+            stream_id_type = "%s_stream_id" % type
+            txn.execute(
+                # Column names cannot be bound as SQL parameters, so the
+                # internally-built column name is interpolated instead.
+                "SELECT %s FROM application_services_state WHERE as_id=?"
+                % stream_id_type,
+                (service.id,),
+            )
+            last_txn_id = txn.fetchone()
+            if last_txn_id is None or last_txn_id[0] is None:  # no row exists
+                return 0
+            else:
+                return int(last_txn_id[0])
+
+        return await self.db_pool.runInteraction(
+            "get_type_stream_id_for_appservice", get_type_stream_id_for_appservice_txn
+        )
+
+    async def set_type_stream_id_for_appservice(
+        self, service: ApplicationService, type: str, pos: int
+    ) -> None:
+        def set_type_stream_id_for_appservice_txn(txn):
+            stream_id_type = "%s_stream_id" % type
+            txn.execute(
+                # As above, the column name is interpolated rather than bound.
+                "UPDATE application_services_state SET %s = ? WHERE as_id=?"
+                % stream_id_type,
+                (pos, service.id),
+            )
+
+        await self.db_pool.runInteraction(
+            "set_type_stream_id_for_appservice", set_type_stream_id_for_appservice_txn
+        )
+
 
 class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStore):
     # This is currently empty due to there not being any AS storage functions
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index c79ddff680..5cdf16521c 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -23,6 +23,7 @@ from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.database import DatabasePool
 from synapse.storage.util.id_generators import StreamIdGenerator
+from synapse.types import JsonDict
 from synapse.util import json_encoder
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches.descriptors import cached, cachedList
@@ -274,6 +275,60 @@ class ReceiptsWorkerStore(SQLBaseStore, metaclass=abc.ABCMeta):
         }
         return results
 
+    @cached(num_args=2)
+    async def get_linearized_receipts_for_all_rooms(
+        self, to_key: int, from_key: Optional[int] = None
+    ) -> Dict[str, JsonDict]:
+        """Get receipts for all rooms between two stream_ids.
+
+        Args:
+            to_key: Max stream id to fetch receipts up to.
+            from_key: Min stream id to fetch receipts from. None fetches
+                from the start.
+
+        Returns:
+            A dictionary mapping room IDs to the batched receipt event.
+        """
+
+        def f(txn):
+            if from_key:
+                sql = """
+                    SELECT * FROM receipts_linearized WHERE
+                    stream_id > ? AND stream_id <= ?
+                """
+                txn.execute(sql, [from_key, to_key])
+            else:
+                sql = """
+                    SELECT * FROM receipts_linearized WHERE
+                    stream_id <= ?
+                """
+
+                txn.execute(sql, [to_key])
+
+            return self.db_pool.cursor_to_dict(txn)
+
+        txn_results = await self.db_pool.runInteraction(
+            "get_linearized_receipts_for_all_rooms", f
+        )
+
+        results = {}
+        for row in txn_results:
+            # We want a single event per room, since we want to batch the
+            # receipts by room, event and type.
+            room_event = results.setdefault(
+                row["room_id"],
+                {"type": "m.receipt", "room_id": row["room_id"], "content": {}},
+            )
+
+            # The content is of the form:
+            # {"$foo:bar": { "read": { "@user:host": <receipt>, ..
} + event_entry = room_event["content"].setdefault(row["event_id"], {}) + receipt_type = event_entry.setdefault(row["receipt_type"], {}) + + receipt_type[row["user_id"]] = db_to_json(row["data"]) + + return results + async def get_users_sent_receipts_between( self, last_id: int, current_id: int ) -> List[str]: diff --git a/synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql b/synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql new file mode 100644 index 0000000000..20f5a95a24 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql @@ -0,0 +1,18 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE application_services_state + ADD COLUMN read_receipt_stream_id INT, + ADD COLUMN presence_stream_id INT; \ No newline at end of file diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 68a4caabbf..2acb8b7603 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -60,7 +60,7 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events # txn made and saved + service=service, events=events, ephemeral=[] # txn made and saved ) self.assertEquals(0, len(self.txnctrl.recoverers)) # no recoverer made txn.complete.assert_called_once_with(self.store) # txn completed @@ -81,7 +81,7 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events # txn made and saved + service=service, events=events, ephemeral=[] # txn made and saved ) self.assertEquals(0, txn.send.call_count) # txn not sent though self.assertEquals(0, txn.complete.call_count) # or completed @@ -106,7 +106,7 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events + service=service, events=events, ephemeral=[] ) self.assertEquals(1, self.recoverer_fn.call_count) # recoverer made self.assertEquals(1, self.recoverer.recover.call_count) # and invoked @@ -202,26 +202,28 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase): # Expect the event to be sent immediately. 
service = Mock(id=4) event = Mock() - self.queuer.enqueue(service, event) - self.txn_ctrl.send.assert_called_once_with(service, [event]) + self.queuer.enqueue_event(service, event) + self.txn_ctrl.send.assert_called_once_with(service, [event], []) def test_send_single_event_with_queue(self): d = defer.Deferred() - self.txn_ctrl.send = Mock(side_effect=lambda x, y: make_deferred_yieldable(d)) + self.txn_ctrl.send = Mock( + side_effect=lambda x, y, z: make_deferred_yieldable(d) + ) service = Mock(id=4) event = Mock(event_id="first") event2 = Mock(event_id="second") event3 = Mock(event_id="third") # Send an event and don't resolve it just yet. - self.queuer.enqueue(service, event) + self.queuer.enqueue_event(service, event) # Send more events: expect send() to NOT be called multiple times. - self.queuer.enqueue(service, event2) - self.queuer.enqueue(service, event3) - self.txn_ctrl.send.assert_called_with(service, [event]) + self.queuer.enqueue_event(service, event2) + self.queuer.enqueue_event(service, event3) + self.txn_ctrl.send.assert_called_with(service, [event], []) self.assertEquals(1, self.txn_ctrl.send.call_count) # Resolve the send event: expect the queued events to be sent d.callback(service) - self.txn_ctrl.send.assert_called_with(service, [event2, event3]) + self.txn_ctrl.send.assert_called_with(service, [event2, event3], []) self.assertEquals(2, self.txn_ctrl.send.call_count) def test_multiple_service_queues(self): @@ -239,21 +241,58 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase): send_return_list = [srv_1_defer, srv_2_defer] - def do_send(x, y): + def do_send(x, y, z): return make_deferred_yieldable(send_return_list.pop(0)) self.txn_ctrl.send = Mock(side_effect=do_send) # send events for different ASes and make sure they are sent - self.queuer.enqueue(srv1, srv_1_event) - self.queuer.enqueue(srv1, srv_1_event2) - self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event]) - self.queuer.enqueue(srv2, srv_2_event) - self.queuer.enqueue(srv2, srv_2_event2) - self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event]) + self.queuer.enqueue_event(srv1, srv_1_event) + self.queuer.enqueue_event(srv1, srv_1_event2) + self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event], []) + self.queuer.enqueue_event(srv2, srv_2_event) + self.queuer.enqueue_event(srv2, srv_2_event2) + self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event], []) # make sure callbacks for a service only send queued events for THAT # service srv_2_defer.callback(srv2) - self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2]) + self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], []) self.assertEquals(3, self.txn_ctrl.send.call_count) + + def test_send_single_ephemeral_no_queue(self): + # Expect the event to be sent immediately. + service = Mock(id=4, name="service") + event_list = [Mock(name="event")] + self.queuer.enqueue_ephemeral(service, event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], event_list) + + def test_send_multiple_ephemeral_no_queue(self): + # Expect the event to be sent immediately. 
+ service = Mock(id=4, name="service") + event_list = [Mock(name="event1"), Mock(name="event2"), Mock(name="event3")] + self.queuer.enqueue_ephemeral(service, event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], event_list) + + def test_send_single_ephemeral_with_queue(self): + d = defer.Deferred() + self.txn_ctrl.send = Mock( + side_effect=lambda x, y, z: make_deferred_yieldable(d) + ) + service = Mock(id=4) + event_list_1 = [Mock(event_id="event1"), Mock(event_id="event2")] + event_list_2 = [Mock(event_id="event3"), Mock(event_id="event4")] + event_list_3 = [Mock(event_id="event5"), Mock(event_id="event6")] + + # Send an event and don't resolve it just yet. + self.queuer.enqueue_ephemeral(service, event_list_1) + # Send more events: expect send() to NOT be called multiple times. + self.queuer.enqueue_ephemeral(service, event_list_2) + self.queuer.enqueue_ephemeral(service, event_list_3) + self.txn_ctrl.send.assert_called_with(service, [], event_list_1) + self.assertEquals(1, self.txn_ctrl.send.call_count) + # Resolve txn_ctrl.send + d.callback(service) + # Expect the queued events to be sent + self.txn_ctrl.send.assert_called_with(service, [], event_list_2 + event_list_3) + self.assertEquals(2, self.txn_ctrl.send.call_count) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index c905a38930..c5c7987349 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -244,7 +244,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): service = Mock(id=self.as_list[0]["id"]) events = [Mock(event_id="e1"), Mock(event_id="e2")] txn = yield defer.ensureDeferred( - self.store.create_appservice_txn(service, events) + self.store.create_appservice_txn(service, events, []) ) self.assertEquals(txn.id, 1) self.assertEquals(txn.events, events) @@ -258,7 +258,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): yield self._insert_txn(service.id, 9644, events) yield self._insert_txn(service.id, 9645, events) txn = yield defer.ensureDeferred( - self.store.create_appservice_txn(service, events) + self.store.create_appservice_txn(service, events, []) ) self.assertEquals(txn.id, 9646) self.assertEquals(txn.events, events) @@ -270,7 +270,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): events = [Mock(event_id="e1"), Mock(event_id="e2")] yield self._set_last_txn(service.id, 9643) txn = yield defer.ensureDeferred( - self.store.create_appservice_txn(service, events) + self.store.create_appservice_txn(service, events, []) ) self.assertEquals(txn.id, 9644) self.assertEquals(txn.events, events) @@ -293,7 +293,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): yield self._insert_txn(self.as_list[3]["id"], 9643, events) txn = yield defer.ensureDeferred( - self.store.create_appservice_txn(service, events) + self.store.create_appservice_txn(service, events, []) ) self.assertEquals(txn.id, 9644) self.assertEquals(txn.events, events) -- cgit 1.5.1 From 6b5a115c0a0f9036444cd8686b32afbdf5334915 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 15 Oct 2020 21:29:13 +0200 Subject: Solidify the HomeServer constructor. (#8515) This implements a more standard API for instantiating a homeserver and moves some of the dependency injection into the test suite. More concretely this stops using `setattr` on all `kwargs` passed to `HomeServer`. 
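The practical effect is that dependency overrides move out of the constructor
call and onto the constructed instance. A rough sketch of the new test-side
pattern, assuming the names bound in the test helper shown in the diff below
(config construction elided):

from mock import Mock

# Only a fixed set of arguments is accepted by HomeServer.__init__ now.
hs = homeserver_to_use(
    name, config=config, version_string="Synapse/tests", reactor=reactor,
)

# Dependencies that used to arrive as **kwargs (and be setattr-ed inside
# __init__) are installed on the instance by the test helper instead,
# shadowing the corresponding @cache_in_self accessors.
hs.tls_server_context_factory = Mock()
hs.tls_client_options_factory = Mock()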
--- changelog.d/8515.misc | 1 + synapse/server.py | 14 +++++++++----- tests/app/test_frontend_proxy.py | 2 +- tests/app/test_openid_listener.py | 4 ++-- tests/replication/_base.py | 4 ++-- tests/replication/test_federation_ack.py | 2 +- tests/utils.py | 31 +++++++++++++++++-------------- 7 files changed, 33 insertions(+), 25 deletions(-) create mode 100644 changelog.d/8515.misc (limited to 'tests') diff --git a/changelog.d/8515.misc b/changelog.d/8515.misc new file mode 100644 index 0000000000..1f8aa292d8 --- /dev/null +++ b/changelog.d/8515.misc @@ -0,0 +1 @@ +Apply some internal fixes to the `HomeServer` class to make its code more idiomatic and statically-verifiable. diff --git a/synapse/server.py b/synapse/server.py index f921ee4b53..21a232bbd9 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -205,7 +205,13 @@ class HomeServer(metaclass=abc.ABCMeta): # instantiated during setup() for future return by get_datastore() DATASTORE_CLASS = abc.abstractproperty() - def __init__(self, hostname: str, config: HomeServerConfig, reactor=None, **kwargs): + def __init__( + self, + hostname: str, + config: HomeServerConfig, + reactor=None, + version_string="Synapse", + ): """ Args: hostname : The hostname for the server. @@ -236,11 +242,9 @@ class HomeServer(metaclass=abc.ABCMeta): burst_count=config.rc_registration.burst_count, ) - self.datastores = None # type: Optional[Databases] + self.version_string = version_string - # Other kwargs are explicit dependencies - for depname in kwargs: - setattr(self, depname, kwargs[depname]) + self.datastores = None # type: Optional[Databases] def get_instance_id(self) -> str: """A unique ID for this synapse process instance. diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py index 641093d349..4a301b84e1 100644 --- a/tests/app/test_frontend_proxy.py +++ b/tests/app/test_frontend_proxy.py @@ -22,7 +22,7 @@ class FrontendProxyTests(HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - http_client=None, homeserverToUse=GenericWorkerServer + http_client=None, homeserver_to_use=GenericWorkerServer ) return hs diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 0f016c32eb..c2b10d2c70 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -26,7 +26,7 @@ from tests.unittest import HomeserverTestCase class FederationReaderOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - http_client=None, homeserverToUse=GenericWorkerServer + http_client=None, homeserver_to_use=GenericWorkerServer ) return hs @@ -84,7 +84,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - http_client=None, homeserverToUse=SynapseHomeServer + http_client=None, homeserver_to_use=SynapseHomeServer ) return hs diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 81ea985b9f..093e2faac7 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -59,7 +59,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): self.reactor.lookups["testserv"] = "1.2.3.4" self.worker_hs = self.setup_test_homeserver( http_client=None, - homeserverToUse=GenericWorkerServer, + homeserver_to_use=GenericWorkerServer, config=self._get_worker_hs_config(), reactor=self.reactor, ) @@ -266,7 +266,7 @@ class 
BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): config.update(extra_config) worker_hs = self.setup_test_homeserver( - homeserverToUse=GenericWorkerServer, + homeserver_to_use=GenericWorkerServer, config=config, reactor=self.reactor, **kwargs diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py index 23be1167a3..1853667558 100644 --- a/tests/replication/test_federation_ack.py +++ b/tests/replication/test_federation_ack.py @@ -31,7 +31,7 @@ class FederationAckTestCase(HomeserverTestCase): return config def make_homeserver(self, reactor, clock): - hs = self.setup_test_homeserver(homeserverToUse=GenericWorkerServer) + hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer) return hs diff --git a/tests/utils.py b/tests/utils.py index 0c09f5457f..acec74e9e9 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -21,6 +21,7 @@ import time import uuid import warnings from inspect import getcallargs +from typing import Type from urllib import parse as urlparse from mock import Mock, patch @@ -194,8 +195,8 @@ def setup_test_homeserver( name="test", config=None, reactor=None, - homeserverToUse=TestHomeServer, - **kargs + homeserver_to_use: Type[HomeServer] = TestHomeServer, + **kwargs ): """ Setup a homeserver suitable for running tests against. Keyword arguments @@ -218,8 +219,8 @@ def setup_test_homeserver( config.ldap_enabled = False - if "clock" not in kargs: - kargs["clock"] = MockClock() + if "clock" not in kwargs: + kwargs["clock"] = MockClock() if USE_POSTGRES_FOR_TESTS: test_db = "synapse_test_%s" % uuid.uuid4().hex @@ -264,18 +265,20 @@ def setup_test_homeserver( cur.close() db_conn.close() - hs = homeserverToUse( - name, - config=config, - version_string="Synapse/tests", - tls_server_context_factory=Mock(), - tls_client_options_factory=Mock(), - reactor=reactor, - **kargs + hs = homeserver_to_use( + name, config=config, version_string="Synapse/tests", reactor=reactor, ) + # Install @cache_in_self attributes + for key, val in kwargs.items(): + setattr(hs, key, val) + + # Mock TLS + hs.tls_server_context_factory = Mock() + hs.tls_client_options_factory = Mock() + hs.setup() - if homeserverToUse.__name__ == "TestHomeServer": + if homeserver_to_use == TestHomeServer: hs.setup_background_tasks() if isinstance(db_engine, PostgresEngine): @@ -339,7 +342,7 @@ def setup_test_homeserver( hs.get_auth_handler().validate_hash = validate_hash - fed = kargs.get("resource_for_federation", None) + fed = kwargs.get("resource_for_federation", None) if fed: register_federation_servlets(hs, fed) -- cgit 1.5.1 From 3ee17585cd095e590096683395cfb9a017eac15e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 16 Oct 2020 15:51:57 +0100 Subject: Make LruCache register its own metrics (#8561) rather than have everything that instantiates an LruCache manage metrics separately, have LruCache do it itself. 
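A usage sketch of the resulting API, assuming the names that appear in the
diff below (the surrounding calls are illustrative, not new API surface):

from synapse.util.caches.lrucache import LruCache

# Passing a cache_name makes the cache register its own prometheus metrics,
# replacing the separate register_cache(...) calls at each call site.
token_cache = LruCache(10000, "token_cache")

token_cache["foo"] = "bar"
token_cache.get("foo")  # hit/miss counters are updated internally

# Wrappers such as DeferredCache can still update the metrics object directly:
token_cache.metrics.inc_hits()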
--- changelog.d/8561.misc | 1 + synapse/api/auth.py | 4 +-- synapse/push/push_rule_evaluator.py | 4 +-- synapse/util/caches/__init__.py | 13 ++++++---- synapse/util/caches/deferred_cache.py | 43 ++++++++++-------------------- synapse/util/caches/dictionary_cache.py | 9 +------ synapse/util/caches/lrucache.py | 46 +++++++++++++++++++++++++-------- tests/util/test_lrucache.py | 4 +-- 8 files changed, 62 insertions(+), 62 deletions(-) create mode 100644 changelog.d/8561.misc (limited to 'tests') diff --git a/changelog.d/8561.misc b/changelog.d/8561.misc new file mode 100644 index 0000000000..a40dedfa8e --- /dev/null +++ b/changelog.d/8561.misc @@ -0,0 +1 @@ +Move metric registration code down into `LruCache`. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 1071a0576e..eb6f418b13 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -34,7 +34,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase from synapse.logging import opentracing as opentracing from synapse.types import StateMap, UserID -from synapse.util.caches import register_cache from synapse.util.caches.lrucache import LruCache from synapse.util.metrics import Measure @@ -70,8 +69,7 @@ class Auth: self.store = hs.get_datastore() self.state = hs.get_state_handler() - self.token_cache = LruCache(10000) - register_cache("cache", "token_cache", self.token_cache) + self.token_cache = LruCache(10000, "token_cache") self._auth_blocking = AuthBlocking(self.hs) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 3a68ce636f..4c95b149c5 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -20,7 +20,6 @@ from typing import Any, Dict, List, Optional, Pattern, Union from synapse.events import EventBase from synapse.types import UserID -from synapse.util.caches import register_cache from synapse.util.caches.lrucache import LruCache logger = logging.getLogger(__name__) @@ -186,8 +185,7 @@ class PushRuleEvaluatorForEvent: # Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches -regex_cache = LruCache(50000) -register_cache("cache", "regex_push_cache", regex_cache) +regex_cache = LruCache(50000, "regex_push_cache") def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool: diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 8fc05be278..89f0b38535 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -16,7 +16,7 @@ import logging from sys import intern -from typing import Callable, Dict, Optional +from typing import Callable, Dict, Optional, Sized import attr from prometheus_client.core import Gauge @@ -92,7 +92,7 @@ class CacheMetric: def register_cache( cache_type: str, cache_name: str, - cache, + cache: Sized, collect_callback: Optional[Callable] = None, resizable: bool = True, resize_callback: Optional[Callable] = None, @@ -100,12 +100,15 @@ def register_cache( """Register a cache object for metric collection and resizing. Args: - cache_type + cache_type: a string indicating the "type" of the cache. This is used + only for deduplication so isn't too important provided it's constant. cache_name: name of the cache - cache: cache itself + cache: cache itself, which must implement __len__(), and may optionally implement + a max_size property collect_callback: If given, a function which is called during metric collection to update additional metrics. 
- resizable: Whether this cache supports being resized. + resizable: Whether this cache supports being resized, in which case either + resize_callback must be provided, or the cache must support set_max_size(). resize_callback: A function which can be called to resize the cache. Returns: diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index f728cd2cf2..91fdc8142d 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -24,7 +24,6 @@ from prometheus_client import Gauge from twisted.internet import defer from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches import register_cache from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry @@ -54,10 +53,7 @@ class DeferredCache(Generic[KT, VT]): __slots__ = ( "cache", - "name", - "keylen", "thread", - "metrics", "_pending_deferred_cache", ) @@ -89,37 +85,27 @@ class DeferredCache(Generic[KT, VT]): cache_type() ) # type: MutableMapping[KT, CacheEntry] + def metrics_cb(): + cache_pending_metric.labels(name).set(len(self._pending_deferred_cache)) + # cache is used for completed results and maps to the result itself, rather than # a Deferred. self.cache = LruCache( max_size=max_entries, keylen=keylen, + cache_name=name, cache_type=cache_type, size_callback=(lambda d: len(d)) if iterable else None, - evicted_callback=self._on_evicted, + metrics_collection_callback=metrics_cb, apply_cache_factor_from_config=apply_cache_factor_from_config, ) - self.name = name - self.keylen = keylen self.thread = None # type: Optional[threading.Thread] - self.metrics = register_cache( - "cache", - name, - self.cache, - collect_callback=self._metrics_collection_callback, - ) @property def max_entries(self): return self.cache.max_size - def _on_evicted(self, evicted_count): - self.metrics.inc_evictions(evicted_count) - - def _metrics_collection_callback(self): - cache_pending_metric.labels(self.name).set(len(self._pending_deferred_cache)) - def check_thread(self): expected_thread = self.thread if expected_thread is None: @@ -154,21 +140,18 @@ class DeferredCache(Generic[KT, VT]): if val is not _Sentinel.sentinel: val.callbacks.update(callbacks) if update_metrics: - self.metrics.inc_hits() + m = self.cache.metrics + assert m # we always have a name, so should always have metrics + m.inc_hits() return val.deferred - val = self.cache.get(key, _Sentinel.sentinel, callbacks=callbacks) - if val is not _Sentinel.sentinel: - self.metrics.inc_hits() - return val - - if update_metrics: - self.metrics.inc_misses() - - if default is _Sentinel.sentinel: + val = self.cache.get( + key, default, callbacks=callbacks, update_metrics=update_metrics + ) + if val is _Sentinel.sentinel: raise KeyError() else: - return default + return val def set( self, diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index 8592b93689..8b426c005b 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -19,8 +19,6 @@ from collections import namedtuple from synapse.util.caches.lrucache import LruCache -from . 
import register_cache - logger = logging.getLogger(__name__) @@ -46,18 +44,16 @@ class DictionaryCache: """ def __init__(self, name, max_entries=1000): - self.cache = LruCache(max_size=max_entries, size_callback=len) + self.cache = LruCache(max_size=max_entries, cache_name=name, size_callback=len) self.name = name self.sequence = 0 self.thread = None - # caches_by_name[name] = self.cache class Sentinel: __slots__ = [] self.sentinel = Sentinel() - self.metrics = register_cache("dictionary", name, self.cache) def check_thread(self): expected_thread = self.thread @@ -82,8 +78,6 @@ class DictionaryCache: """ entry = self.cache.get(key, self.sentinel) if entry is not self.sentinel: - self.metrics.inc_hits() - if dict_keys is None: return DictionaryEntry( entry.full, entry.known_absent, dict(entry.value) @@ -95,7 +89,6 @@ class DictionaryCache: {k: entry.value[k] for k in dict_keys if k in entry.value}, ) - self.metrics.inc_misses() return DictionaryEntry(False, set(), {}) def invalidate(self, key): diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 33eae2b7c4..e4804f79e0 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -18,6 +18,7 @@ from functools import wraps from typing import Callable, Optional, Type, Union from synapse.config import cache as cache_config +from synapse.util.caches import CacheMetric, register_cache from synapse.util.caches.treecache import TreeCache @@ -43,27 +44,29 @@ class _Node: class LruCache: """ - Least-recently-used cache. + Least-recently-used cache, supporting prometheus metrics and invalidation callbacks. + Supports del_multi only if cache_type=TreeCache If cache_type=TreeCache, all keys must be tuples. - - Can also set callbacks on objects when getting/setting which are fired - when that key gets invalidated/evicted. """ def __init__( self, max_size: int, + cache_name: Optional[str] = None, keylen: int = 1, cache_type: Type[Union[dict, TreeCache]] = dict, size_callback: Optional[Callable] = None, - evicted_callback: Optional[Callable] = None, + metrics_collection_callback: Optional[Callable[[], None]] = None, apply_cache_factor_from_config: bool = True, ): """ Args: max_size: The maximum amount of entries the cache can hold + cache_name: The name of this cache, for the prometheus metrics. If unset, + no metrics will be reported on this cache. + keylen: The length of the tuple used as the cache key. Ignored unless cache_type is `TreeCache`. @@ -73,9 +76,13 @@ class LruCache: size_callback (func(V) -> int | None): - evicted_callback (func(int)|None): - if not None, called on eviction with the size of the evicted - entry + metrics_collection_callback: + metrics collection callback. This is called early in the metrics + collection process, before any of the metrics registered with the + prometheus Registry are collected, so can be used to update any dynamic + metrics. + + Ignored if cache_name is None. 
apply_cache_factor_from_config (bool): If true, `max_size` will be multiplied by a cache factor derived from the homeserver config @@ -94,6 +101,19 @@ class LruCache: else: self.max_size = int(max_size) + if cache_name is not None: + metrics = register_cache( + "lru_cache", + cache_name, + self, + collect_callback=metrics_collection_callback, + ) # type: Optional[CacheMetric] + else: + metrics = None + + # this is exposed for access from outside this class + self.metrics = metrics + list_root = _Node(None, None, None, None) list_root.next_node = list_root list_root.prev_node = list_root @@ -105,8 +125,8 @@ class LruCache: todelete = list_root.prev_node evicted_len = delete_node(todelete) cache.pop(todelete.key, None) - if evicted_callback: - evicted_callback(evicted_len) + if metrics: + metrics.inc_evictions(evicted_len) def synchronized(f): @wraps(f) @@ -169,13 +189,17 @@ class LruCache: return deleted_len @synchronized - def cache_get(key, default=None, callbacks=[]): + def cache_get(key, default=None, callbacks=[], update_metrics=True): node = cache.get(key, None) if node is not None: move_node_to_front(node) node.callbacks.update(callbacks) + if update_metrics and metrics: + metrics.inc_hits() return node.value else: + if update_metrics and metrics: + metrics.inc_misses() return default @synchronized diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index 0adb2174af..f12834edab 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -59,7 +59,7 @@ class LruCacheTestCase(unittest.HomeserverTestCase): self.assertEquals(cache.pop("key"), None) def test_del_multi(self): - cache = LruCache(4, 2, cache_type=TreeCache) + cache = LruCache(4, keylen=2, cache_type=TreeCache) cache[("animal", "cat")] = "mew" cache[("animal", "dog")] = "woof" cache[("vehicles", "car")] = "vroom" @@ -160,7 +160,7 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): m2 = Mock() m3 = Mock() m4 = Mock() - cache = LruCache(4, 2, cache_type=TreeCache) + cache = LruCache(4, keylen=2, cache_type=TreeCache) cache.set(("a", "1"), "value", callbacks=[m1]) cache.set(("a", "2"), "value", callbacks=[m2]) -- cgit 1.5.1 From c356b4bf422430cd5769c9bf90756fca2efd8451 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 19 Oct 2020 09:12:39 -0400 Subject: Include a simple message in email notifications that include encrypted content (#8545) --- changelog.d/8545.bugfix | 1 + synapse/push/mailer.py | 16 ++++++---- synapse/res/templates/notif.html | 56 +++++++++++++++++++---------------- synapse/res/templates/notif.txt | 24 +++++++++------ synapse/res/templates/notif_mail.html | 26 ++++++++-------- synapse/res/templates/notif_mail.txt | 6 ++-- synapse/res/templates/room.html | 26 ++++++++-------- synapse/res/templates/room.txt | 12 ++++---- tests/push/test_email.py | 15 +++++++++- 9 files changed, 107 insertions(+), 75 deletions(-) create mode 100644 changelog.d/8545.bugfix (limited to 'tests') diff --git a/changelog.d/8545.bugfix b/changelog.d/8545.bugfix new file mode 100644 index 0000000000..64ba307df0 --- /dev/null +++ b/changelog.d/8545.bugfix @@ -0,0 +1 @@ +Fix a long standing bug where email notifications for encrypted messages were blank. 
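To illustrate the mailer change that follows: `get_message_vars` now recognises `m.room.encrypted` events and returns a minimal entry for them (so the templates can render a placeholder) instead of returning nothing. A runnable sketch of that control flow, using a namedtuple as an illustrative stand-in for Synapse's `EventBase` (not the real class):

from collections import namedtuple

# Illustrative stand-in for synapse.events.EventBase; only the fields used here.
Event = namedtuple("Event", ["type", "content"])

def message_vars(event):
    # Anything other than a message or an encrypted event contributes nothing.
    if event.type not in ("m.room.message", "m.room.encrypted"):
        return None
    ret = {"event_type": event.type}
    # Encrypted payloads carry no useful msgtype/body, so stop here; the
    # templates fall back to "An encrypted message." based on event_type.
    if event.type == "m.room.encrypted":
        return ret
    ret["msgtype"] = event.content.get("msgtype")
    return ret

assert message_vars(Event("m.room.encrypted", {})) == {"event_type": "m.room.encrypted"}
assert message_vars(Event("m.room.message", {"msgtype": "m.text"}))["msgtype"] == "m.text"
assert message_vars(Event("m.reaction", {})) is None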
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 455a1acb46..155791b754 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -387,8 +387,8 @@ class Mailer: return ret async def get_message_vars(self, notif, event, room_state_ids): - if event.type != EventTypes.Message: - return + if event.type != EventTypes.Message and event.type != EventTypes.Encrypted: + return None sender_state_event_id = room_state_ids[("m.room.member", event.sender)] sender_state_event = await self.store.get_event(sender_state_event_id) @@ -399,10 +399,8 @@ class Mailer: # sender_hash % the number of default images to choose from sender_hash = string_ordinal_total(event.sender) - msgtype = event.content.get("msgtype") - ret = { - "msgtype": msgtype, + "event_type": event.type, "is_historical": event.event_id != notif["event_id"], "id": event.event_id, "ts": event.origin_server_ts, @@ -411,6 +409,14 @@ class Mailer: "sender_hash": sender_hash, } + # Encrypted messages don't have any additional useful information. + if event.type == EventTypes.Encrypted: + return ret + + msgtype = event.content.get("msgtype") + + ret["msgtype"] = msgtype + if msgtype == "m.text": self.add_text_message_vars(ret, event) elif msgtype == "m.image": diff --git a/synapse/res/templates/notif.html b/synapse/res/templates/notif.html index 1a6c70b562..6d76064d13 100644 --- a/synapse/res/templates/notif.html +++ b/synapse/res/templates/notif.html @@ -1,41 +1,47 @@ -{% for message in notif.messages %} +{%- for message in notif.messages %} - {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} - {% if message.sender_avatar_url %} + {%- if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} + {%- if message.sender_avatar_url %} - {% else %} - {% if message.sender_hash % 3 == 0 %} + {%- else %} + {%- if message.sender_hash % 3 == 0 %} - {% elif message.sender_hash % 3 == 1 %} + {%- elif message.sender_hash % 3 == 1 %} - {% else %} + {%- else %} - {% endif %} - {% endif %} - {% endif %} + {%- endif %} + {%- endif %} + {%- endif %} - {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} -
{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}
- {% endif %} + {%- if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} +
{%- if message.msgtype == "m.emote" %}*{%- endif %} {{ message.sender_name }}
+ {%- endif %}
- {% if message.msgtype == "m.text" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.emote" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.notice" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.image" %} - - {% elif message.msgtype == "m.file" %} - {{ message.body_text_plain }} - {% endif %} + {%- if message.event_type == "m.room.encrypted" %} + An encrypted message. + {%- elif message.event_type == "m.room.message" %} + {%- if message.msgtype == "m.text" %} + {{ message.body_text_html }} + {%- elif message.msgtype == "m.emote" %} + {{ message.body_text_html }} + {%- elif message.msgtype == "m.notice" %} + {{ message.body_text_html }} + {%- elif message.msgtype == "m.image" %} + + {%- elif message.msgtype == "m.file" %} + {{ message.body_text_plain }} + {%- else %} + A message with unrecognised content. + {%- endif %} + {%- endif %}
{{ message.ts|format_ts("%H:%M") }} -{% endfor %} +{%- endfor %} diff --git a/synapse/res/templates/notif.txt b/synapse/res/templates/notif.txt index a37bee9833..1ee7da3c50 100644 --- a/synapse/res/templates/notif.txt +++ b/synapse/res/templates/notif.txt @@ -1,16 +1,22 @@ -{% for message in notif.messages %} -{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) -{% if message.msgtype == "m.text" %} +{%- for message in notif.messages %} +{%- if message.event_type == "m.room.encrypted" %} +An encrypted message. +{%- elif message.event_type == "m.room.message" %} +{%- if message.msgtype == "m.emote" %}* {%- endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) +{%- if message.msgtype == "m.text" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.emote" %} +{%- elif message.msgtype == "m.emote" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.notice" %} +{%- elif message.msgtype == "m.notice" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.image" %} +{%- elif message.msgtype == "m.image" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.file" %} +{%- elif message.msgtype == "m.file" %} {{ message.body_text_plain }} -{% endif %} -{% endfor %} +{%- else %} +A message with unrecognised content. +{%- endif %} +{%- endif %} +{%- endfor %} View {{ room.title }} at {{ notif.link }} diff --git a/synapse/res/templates/notif_mail.html b/synapse/res/templates/notif_mail.html index a2dfeb9e9f..27d4182790 100644 --- a/synapse/res/templates/notif_mail.html +++ b/synapse/res/templates/notif_mail.html @@ -2,8 +2,8 @@ @@ -18,21 +18,21 @@
{{ summary_text }}
- {% if app_name == "Riot" %} + {%- if app_name == "Riot" %} [Riot] - {% elif app_name == "Vector" %} + {%- elif app_name == "Vector" %} [Vector] - {% elif app_name == "Element" %} + {%- elif app_name == "Element" %} [Element] - {% else %} + {%- else %} [matrix] - {% endif %} + {%- endif %} - {% for room in rooms %} - {% include 'room.html' with context %} - {% endfor %} + {%- for room in rooms %} + {%- include 'room.html' with context %} + {%- endfor %} diff --git a/synapse/res/templates/notif_mail.txt b/synapse/res/templates/notif_mail.txt index 24843042a5..df3c253979 100644 --- a/synapse/res/templates/notif_mail.txt +++ b/synapse/res/templates/notif_mail.txt @@ -2,9 +2,9 @@ Hi {{ user_display_name }}, {{ summary_text }} -{% for room in rooms %} -{% include 'room.txt' with context %} -{% endfor %} +{%- for room in rooms %} +{%- include 'room.txt' with context %} +{%- endfor %} You can disable these notifications at {{ unsubscribe_link }} diff --git a/synapse/res/templates/room.html b/synapse/res/templates/room.html index b8525fef88..4fc6f6ac9b 100644 --- a/synapse/res/templates/room.html +++ b/synapse/res/templates/room.html @@ -1,23 +1,23 @@ - {% if room.invite %} + {%- if room.invite %} - {% else %} - {% for notif in room.notifs %} - {% include 'notif.html' with context %} - {% endfor %} - {% endif %} + {%- else %} + {%- for notif in room.notifs %} + {%- include 'notif.html' with context %} + {%- endfor %} + {%- endif %}
- {% if room.avatar_url %} + {%- if room.avatar_url %} - {% else %} - {% if room.hash % 3 == 0 %} + {%- else %} + {%- if room.hash % 3 == 0 %} - {% elif room.hash % 3 == 1 %} + {%- elif room.hash % 3 == 1 %} - {% else %} + {%- else %} - {% endif %} - {% endif %} + {%- endif %} + {%- endif %} {{ room.title }}
@@ -25,9 +25,9 @@
diff --git a/synapse/res/templates/room.txt b/synapse/res/templates/room.txt index 84648c710e..df841e9e6f 100644 --- a/synapse/res/templates/room.txt +++ b/synapse/res/templates/room.txt @@ -1,9 +1,9 @@ {{ room.title }} -{% if room.invite %} +{%- if room.invite %} You've been invited, join at {{ room.link }} -{% else %} - {% for notif in room.notifs %} - {% include 'notif.txt' with context %} - {% endfor %} -{% endif %} +{%- else %} + {%- for notif in room.notifs %} + {%- include 'notif.txt' with context %} + {%- endfor %} +{%- endif %} diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 3224568640..55545d9341 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -158,8 +158,21 @@ class EmailPusherTests(HomeserverTestCase): # We should get emailed about those messages self._check_for_mail() + def test_encrypted_message(self): + room = self.helper.create_room_as(self.user_id, tok=self.access_token) + self.helper.invite( + room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id + ) + self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) + + # The other user sends some messages + self.helper.send_event(room, "m.room.encrypted", {}, tok=self.others[0].token) + + # We should get emailed about that message + self._check_for_mail() + def _check_for_mail(self): - "Check that the user receives an email notification" + """Check that the user receives an email notification""" # Get the stream ordering before it gets sent pushers = self.get_success( -- cgit 1.5.1 From 903d11c43a5df9f704e5dad4d14506a6470524fc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 19 Oct 2020 15:00:12 +0100 Subject: Add `DeferredCache.get_immediate` method (#8568) * Add `DeferredCache.get_immediate` method A bunch of things that are currently calling `DeferredCache.get` are only really interested in the result if it's completed. We can optimise and simplify this case. * Remove unused 'default' parameter to DeferredCache.get() * another get_immediate instance --- changelog.d/8568.misc | 1 + synapse/push/bulk_push_rule_evaluator.py | 2 +- synapse/storage/databases/main/pusher.py | 2 +- synapse/storage/databases/main/receipts.py | 11 +-------- synapse/storage/databases/main/roommember.py | 2 +- synapse/util/caches/deferred_cache.py | 35 ++++++++++++++++++++-------- tests/util/caches/test_deferred_cache.py | 27 +++++++++++++++++---- 7 files changed, 53 insertions(+), 27 deletions(-) create mode 100644 changelog.d/8568.misc (limited to 'tests') diff --git a/changelog.d/8568.misc b/changelog.d/8568.misc new file mode 100644 index 0000000000..0ed7db92d3 --- /dev/null +++ b/changelog.d/8568.misc @@ -0,0 +1 @@ +Add `get_immediate` method to `DeferredCache`. diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index c440f2545c..a701defcdd 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -496,6 +496,6 @@ class _Invalidation(namedtuple("_Invalidation", ("cache", "room_id"))): # dedupe when we add callbacks to lru cache nodes, otherwise the number # of callbacks would grow. 
def __call__(self): - rules = self.cache.get(self.room_id, None, update_metrics=False) + rules = self.cache.get_immediate(self.room_id, None, update_metrics=False) if rules: rules.invalidate_all() diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index df8609b97b..7997242d90 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -303,7 +303,7 @@ class PusherStore(PusherWorkerStore): lock=False, ) - user_has_pusher = self.get_if_user_has_pusher.cache.get( + user_has_pusher = self.get_if_user_has_pusher.cache.get_immediate( (user_id,), None, update_metrics=False ) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 5cdf16521c..ca7917c989 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -25,7 +25,6 @@ from synapse.storage.database import DatabasePool from synapse.storage.util.id_generators import StreamIdGenerator from synapse.types import JsonDict from synapse.util import json_encoder -from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -413,18 +412,10 @@ class ReceiptsWorkerStore(SQLBaseStore, metaclass=abc.ABCMeta): if receipt_type != "m.read": return - # Returns either an ObservableDeferred or the raw result - res = self.get_users_with_read_receipts_in_room.cache.get( + res = self.get_users_with_read_receipts_in_room.cache.get_immediate( room_id, None, update_metrics=False ) - # first handle the ObservableDeferred case - if isinstance(res, ObservableDeferred): - if res.has_called(): - res = res.get_result() - else: - res = None - if res and user_id in res: # We'd only be adding to the set, so no point invalidating if the # user is already there diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 20fcdaa529..9b08b49862 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -531,7 +531,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): # If we do then we can reuse that result and simply update it with # any membership changes in `delta_ids` if context.prev_group and context.delta_ids: - prev_res = self._get_joined_users_from_context.cache.get( + prev_res = self._get_joined_users_from_context.cache.get_immediate( (room_id, context.prev_group), None ) if prev_res and isinstance(prev_res, dict): diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 4026e1f8fa..faeef75506 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -17,7 +17,16 @@ import enum import threading -from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, cast +from typing import ( + Callable, + Generic, + Iterable, + MutableMapping, + Optional, + TypeVar, + Union, + cast, +) from prometheus_client import Gauge @@ -33,7 +42,7 @@ cache_pending_metric = Gauge( ["name"], ) - +T = TypeVar("T") KT = TypeVar("KT") VT = TypeVar("VT") @@ -119,21 +128,21 @@ class DeferredCache(Generic[KT, VT]): def get( self, key: KT, - default=_Sentinel.sentinel, callback: Optional[Callable[[], None]] = None, update_metrics: bool = True, - ): + ) -> Union[ObservableDeferred, VT]: """Looks the key up in the caches. 
Args: key(tuple) - default: What is returned if key is not in the caches. If not - specified then function throws KeyError instead callback(fn): Gets called when the entry in the cache is invalidated update_metrics (bool): whether to update the cache hit rate metrics Returns: Either an ObservableDeferred or the result itself + + Raises: + KeyError if the key is not found in the cache """ callbacks = [callback] if callback else [] val = self._pending_deferred_cache.get(key, _Sentinel.sentinel) @@ -145,13 +154,19 @@ m.inc_hits() return val.deferred - val = self.cache.get( - key, default, callbacks=callbacks, update_metrics=update_metrics + val2 = self.cache.get( + key, _Sentinel.sentinel, callbacks=callbacks, update_metrics=update_metrics ) - if val is _Sentinel.sentinel: + if val2 is _Sentinel.sentinel: raise KeyError() else: - return val + return val2 + + def get_immediate( + self, key: KT, default: T, update_metrics: bool = True + ) -> Union[VT, T]: + """If we have a *completed* cached value, return it.""" + return self.cache.get(key, default, update_metrics=update_metrics) def set( self, diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index 9717be56b6..8a08ab6661 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -38,6 +38,22 @@ class DeferredCacheTestCase(unittest.TestCase): self.assertEquals(cache.get("foo"), 123) + def test_get_immediate(self): + cache = DeferredCache("test") + d1 = defer.Deferred() + cache.set("key1", d1) + + # get_immediate should return default + v = cache.get_immediate("key1", 1) + self.assertEqual(v, 1) + + # now complete the set + d1.callback(2) + + # get_immediate should return result + v = cache.get_immediate("key1", 1) + self.assertEqual(v, 2) + def test_invalidate(self): cache = DeferredCache("test") cache.prefill(("foo",), 123) @@ -80,9 +96,11 @@ class DeferredCacheTestCase(unittest.TestCase): # now do the invalidation cache.invalidate_all() - # lookup should return none - self.assertIsNone(cache.get("key1", None)) - self.assertIsNone(cache.get("key2", None)) + # lookup should fail + with self.assertRaises(KeyError): + cache.get("key1") + with self.assertRaises(KeyError): + cache.get("key2") # both callbacks should have been callbacked self.assertTrue(callback_record[0], "Invalidation callback for key1 not called") @@ -90,7 +108,8 @@ # letting the other lookup complete should do nothing d1.callback("result1") - self.assertIsNone(cache.get("key1", None)) + with self.assertRaises(KeyError): + cache.get("key1", None) def test_eviction(self): cache = DeferredCache( -- cgit 1.5.1 From 96e7d3c4a0feec6d19b873fd550bcfffd485d910 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 19 Oct 2020 21:13:50 +0100 Subject: Fix 'LruCache' object has no attribute '_on_resize' (#8591) We need to make sure we are ready for the `set_cache_factor` callback. --- changelog.d/8591.misc | 1 + synapse/util/caches/lrucache.py | 10 +++++++++- tests/util/test_lrucache.py | 8 +++++++- 3 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8591.misc (limited to 'tests') diff --git a/changelog.d/8591.misc b/changelog.d/8591.misc new file mode 100644 index 0000000000..8f16bc3e7e --- /dev/null +++ b/changelog.d/8591.misc @@ -0,0 +1 @@
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 3b471d8fd3..60bb6ff642 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -124,6 +124,10 @@ class LruCache(Generic[KT, VT]): else: self.max_size = int(max_size) + # register_cache might call our "set_cache_factor" callback; there's nothing to + # do yet when we get resized. + self._on_resize = None # type: Optional[Callable[[],None]] + if cache_name is not None: metrics = register_cache( "lru_cache", @@ -332,7 +336,10 @@ class LruCache(Generic[KT, VT]): return key in cache self.sentinel = object() + + # make sure that we clear out any excess entries after we get resized. self._on_resize = evict + self.get = cache_get self.set = cache_set self.setdefault = cache_set_default @@ -383,6 +390,7 @@ class LruCache(Generic[KT, VT]): new_size = int(self._original_max_size * factor) if new_size != self.max_size: self.max_size = new_size - self._on_resize() + if self._on_resize: + self._on_resize() return True return False diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index f12834edab..a739a6aaaf 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -19,7 +19,8 @@ from mock import Mock from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache -from .. import unittest +from tests import unittest +from tests.unittest import override_config class LruCacheTestCase(unittest.HomeserverTestCase): @@ -83,6 +84,11 @@ class LruCacheTestCase(unittest.HomeserverTestCase): cache.clear() self.assertEquals(len(cache), 0) + @override_config({"caches": {"per_cache_factors": {"mycache": 10}}}) + def test_special_size(self): + cache = LruCache(10, "mycache") + self.assertEqual(cache.max_size, 100) + class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): def test_get(self): -- cgit 1.5.1 From 74f29284aa4311f6cd798b21b0a401a667163a35 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 20 Oct 2020 11:49:15 +0100 Subject: Remove some extraneous @unittest.INFOs on unit tests (#8592) --- changelog.d/8592.misc | 1 + tests/rest/client/v2_alpha/test_account.py | 1 - tests/rest/client/v2_alpha/test_auth.py | 1 - 3 files changed, 1 insertion(+), 2 deletions(-) create mode 100644 changelog.d/8592.misc (limited to 'tests') diff --git a/changelog.d/8592.misc b/changelog.d/8592.misc new file mode 100644 index 0000000000..099e8fb7bb --- /dev/null +++ b/changelog.d/8592.misc @@ -0,0 +1 @@ +Remove extraneous unittest logging decorators from unit tests. 
\ No newline at end of file diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index ae2cd67f35..66ac4dbe85 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -352,7 +352,6 @@ class DeactivateTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEqual(request.code, 401) - @unittest.INFO def test_pending_invites(self): """Tests that deactivating a user rejects every pending invite for them.""" store = self.hs.get_datastore() diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index 293ccfba2b..86184f0d2e 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -104,7 +104,6 @@ class FallbackAuthTests(unittest.HomeserverTestCase): self.assertEqual(len(attempts), 1) self.assertEqual(attempts[0][0]["response"], "a") - @unittest.INFO def test_fallback_captcha(self): """Ensure that fallback auth via a captcha works.""" # Returns a 401 as per the spec -- cgit 1.5.1 From 20a67aa70da0e8ac49c724cdbc144004b03e5a74 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 21 Oct 2020 06:59:54 -0400 Subject: Separate the TCP and terse JSON formatting code. (#8587) This should (theoretically) allow for using the TCP code with a different output type and make it easier to use the JSON code with files / console. --- changelog.d/8587.misc | 1 + synapse/logging/_remote.py | 225 +++++++++++++++++++++++++++++++++++++++ synapse/logging/_terse_json.py | 199 +++------------------------------- tests/logging/test_terse_json.py | 2 +- 4 files changed, 240 insertions(+), 187 deletions(-) create mode 100644 changelog.d/8587.misc create mode 100644 synapse/logging/_remote.py (limited to 'tests') diff --git a/changelog.d/8587.misc b/changelog.d/8587.misc new file mode 100644 index 0000000000..9e56551a34 --- /dev/null +++ b/changelog.d/8587.misc @@ -0,0 +1 @@ +Re-organize the structured logging code to separate the TCP transport handling from the JSON formatting. diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py new file mode 100644 index 0000000000..0caf325916 --- /dev/null +++ b/synapse/logging/_remote.py @@ -0,0 +1,225 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
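+# This module provides a TCP sink for structured logs: `LogProducer` is an
+# IPushProducer that drains an in-memory deque of events into a transport,
+# honouring pause/resume, while `TCPLogObserver` owns that buffer and an
+# auto-reconnecting connection, shedding DEBUG events, then INFO events, then
+# the middle 50% of the buffer whenever `maximum_buffer` is exceeded.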
+ +import sys +import traceback +from collections import deque +from ipaddress import IPv4Address, IPv6Address, ip_address +from math import floor +from typing import Callable, Optional + +import attr +from zope.interface import implementer + +from twisted.application.internet import ClientService +from twisted.internet.defer import Deferred +from twisted.internet.endpoints import ( + HostnameEndpoint, + TCP4ClientEndpoint, + TCP6ClientEndpoint, +) +from twisted.internet.interfaces import IPushProducer, ITransport +from twisted.internet.protocol import Factory, Protocol +from twisted.logger import ILogObserver, Logger, LogLevel + + +@attr.s +@implementer(IPushProducer) +class LogProducer: + """ + An IPushProducer that writes logs from its buffer to its transport when it + is resumed. + + Args: + buffer: Log buffer to read logs from. + transport: Transport to write to. + format_event: A callable to format the log entry to a string. + """ + + transport = attr.ib(type=ITransport) + format_event = attr.ib(type=Callable[[dict], str]) + _buffer = attr.ib(type=deque) + _paused = attr.ib(default=False, type=bool, init=False) + + def pauseProducing(self): + self._paused = True + + def stopProducing(self): + self._paused = True + self._buffer = deque() + + def resumeProducing(self): + self._paused = False + + while self._paused is False and (self._buffer and self.transport.connected): + try: + # Request the next event and format it. + event = self._buffer.popleft() + msg = self.format_event(event) + + # Send it as a new line over the transport. + self.transport.write(msg.encode("utf8")) + except Exception: + # Something has gone wrong writing to the transport -- log it + # and break out of the while. + traceback.print_exc(file=sys.__stderr__) + break + + +@attr.s +@implementer(ILogObserver) +class TCPLogObserver: + """ + An IObserver that writes JSON logs to a TCP target. + + Args: + hs (HomeServer): The homeserver that is being logged for. + host: The host of the logging target. + port: The logging target's port. + format_event: A callable to format the log entry to a string. + maximum_buffer: The maximum buffer size. + """ + + hs = attr.ib() + host = attr.ib(type=str) + port = attr.ib(type=int) + format_event = attr.ib(type=Callable[[dict], str]) + maximum_buffer = attr.ib(type=int) + _buffer = attr.ib(default=attr.Factory(deque), type=deque) + _connection_waiter = attr.ib(default=None, type=Optional[Deferred]) + _logger = attr.ib(default=attr.Factory(Logger)) + _producer = attr.ib(default=None, type=Optional[LogProducer]) + + def start(self) -> None: + + # Connect without DNS lookups if it's a direct IP. + try: + ip = ip_address(self.host) + if isinstance(ip, IPv4Address): + endpoint = TCP4ClientEndpoint( + self.hs.get_reactor(), self.host, self.port + ) + elif isinstance(ip, IPv6Address): + endpoint = TCP6ClientEndpoint( + self.hs.get_reactor(), self.host, self.port + ) + else: + raise ValueError("Unknown IP address provided: %s" % (self.host,)) + except ValueError: + endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port) + + factory = Factory.forProtocol(Protocol) + self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor()) + self._service.startService() + self._connect() + + def stop(self): + self._service.stopService() + + def _connect(self) -> None: + """ + Triggers an attempt to connect then write to the remote if not already writing. 
+ """ + if self._connection_waiter: + return + + self._connection_waiter = self._service.whenConnected(failAfterFailures=1) + + @self._connection_waiter.addErrback + def fail(r): + r.printTraceback(file=sys.__stderr__) + self._connection_waiter = None + self._connect() + + @self._connection_waiter.addCallback + def writer(r): + # We have a connection. If we already have a producer, and its + # transport is the same, just trigger a resumeProducing. + if self._producer and r.transport is self._producer.transport: + self._producer.resumeProducing() + self._connection_waiter = None + return + + # If the producer is still producing, stop it. + if self._producer: + self._producer.stopProducing() + + # Make a new producer and start it. + self._producer = LogProducer( + buffer=self._buffer, + transport=r.transport, + format_event=self.format_event, + ) + r.transport.registerProducer(self._producer, True) + self._producer.resumeProducing() + self._connection_waiter = None + + def _handle_pressure(self) -> None: + """ + Handle backpressure by shedding events. + + The buffer will, in this order, until the buffer is below the maximum: + - Shed DEBUG events + - Shed INFO events + - Shed the middle 50% of the events. + """ + if len(self._buffer) <= self.maximum_buffer: + return + + # Strip out DEBUGs + self._buffer = deque( + filter(lambda event: event["log_level"] != LogLevel.debug, self._buffer) + ) + + if len(self._buffer) <= self.maximum_buffer: + return + + # Strip out INFOs + self._buffer = deque( + filter(lambda event: event["log_level"] != LogLevel.info, self._buffer) + ) + + if len(self._buffer) <= self.maximum_buffer: + return + + # Cut the middle entries out + buffer_split = floor(self.maximum_buffer / 2) + + old_buffer = self._buffer + self._buffer = deque() + + for i in range(buffer_split): + self._buffer.append(old_buffer.popleft()) + + end_buffer = [] + for i in range(buffer_split): + end_buffer.append(old_buffer.pop()) + + self._buffer.extend(reversed(end_buffer)) + + def __call__(self, event: dict) -> None: + self._buffer.append(event) + + # Handle backpressure, if it exists. + try: + self._handle_pressure() + except Exception: + # If handling backpressure fails,clear the buffer and log the + # exception. + self._buffer.clear() + self._logger.failure("Failed clearing backpressure") + + # Try and write immediately. + self._connect() diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index 1b8916cfa2..9b46956ca9 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -18,26 +18,11 @@ Log formatters that output terse JSON. 
""" import json -import sys -import traceback -from collections import deque -from ipaddress import IPv4Address, IPv6Address, ip_address -from math import floor -from typing import IO, Optional +from typing import IO -import attr -from zope.interface import implementer +from twisted.logger import FileLogObserver -from twisted.application.internet import ClientService -from twisted.internet.defer import Deferred -from twisted.internet.endpoints import ( - HostnameEndpoint, - TCP4ClientEndpoint, - TCP6ClientEndpoint, -) -from twisted.internet.interfaces import IPushProducer, ITransport -from twisted.internet.protocol import Factory, Protocol -from twisted.logger import FileLogObserver, ILogObserver, Logger +from synapse.logging._remote import TCPLogObserver _encoder = json.JSONEncoder(ensure_ascii=False, separators=(",", ":")) @@ -150,180 +135,22 @@ def TerseJSONToConsoleLogObserver(outFile: IO[str], metadata: dict) -> FileLogOb return FileLogObserver(outFile, formatEvent) -@attr.s -@implementer(IPushProducer) -class LogProducer: +def TerseJSONToTCPLogObserver( + hs, host: str, port: int, metadata: dict, maximum_buffer: int +) -> FileLogObserver: """ - An IPushProducer that writes logs from its buffer to its transport when it - is resumed. - - Args: - buffer: Log buffer to read logs from. - transport: Transport to write to. - """ - - transport = attr.ib(type=ITransport) - _buffer = attr.ib(type=deque) - _paused = attr.ib(default=False, type=bool, init=False) - - def pauseProducing(self): - self._paused = True - - def stopProducing(self): - self._paused = True - self._buffer = deque() - - def resumeProducing(self): - self._paused = False - - while self._paused is False and (self._buffer and self.transport.connected): - try: - event = self._buffer.popleft() - self.transport.write(_encoder.encode(event).encode("utf8")) - self.transport.write(b"\n") - except Exception: - # Something has gone wrong writing to the transport -- log it - # and break out of the while. - traceback.print_exc(file=sys.__stderr__) - break - - -@attr.s -@implementer(ILogObserver) -class TerseJSONToTCPLogObserver: - """ - An IObserver that writes JSON logs to a TCP target. + A log observer that formats events to a flattened JSON representation. Args: hs (HomeServer): The homeserver that is being logged for. host: The host of the logging target. port: The logging target's port. - metadata: Metadata to be added to each log entry. + metadata: Metadata to be added to each log object. + maximum_buffer: The maximum buffer size. """ - hs = attr.ib() - host = attr.ib(type=str) - port = attr.ib(type=int) - metadata = attr.ib(type=dict) - maximum_buffer = attr.ib(type=int) - _buffer = attr.ib(default=attr.Factory(deque), type=deque) - _connection_waiter = attr.ib(default=None, type=Optional[Deferred]) - _logger = attr.ib(default=attr.Factory(Logger)) - _producer = attr.ib(default=None, type=Optional[LogProducer]) - - def start(self) -> None: - - # Connect without DNS lookups if it's a direct IP. 
- try: - ip = ip_address(self.host) - if isinstance(ip, IPv4Address): - endpoint = TCP4ClientEndpoint( - self.hs.get_reactor(), self.host, self.port - ) - elif isinstance(ip, IPv6Address): - endpoint = TCP6ClientEndpoint( - self.hs.get_reactor(), self.host, self.port - ) - except ValueError: - endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port) - - factory = Factory.forProtocol(Protocol) - self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor()) - self._service.startService() - self._connect() - - def stop(self): - self._service.stopService() - - def _connect(self) -> None: - """ - Triggers an attempt to connect then write to the remote if not already writing. - """ - if self._connection_waiter: - return - - self._connection_waiter = self._service.whenConnected(failAfterFailures=1) - - @self._connection_waiter.addErrback - def fail(r): - r.printTraceback(file=sys.__stderr__) - self._connection_waiter = None - self._connect() - - @self._connection_waiter.addCallback - def writer(r): - # We have a connection. If we already have a producer, and its - # transport is the same, just trigger a resumeProducing. - if self._producer and r.transport is self._producer.transport: - self._producer.resumeProducing() - self._connection_waiter = None - return - - # If the producer is still producing, stop it. - if self._producer: - self._producer.stopProducing() - - # Make a new producer and start it. - self._producer = LogProducer(buffer=self._buffer, transport=r.transport) - r.transport.registerProducer(self._producer, True) - self._producer.resumeProducing() - self._connection_waiter = None - - def _handle_pressure(self) -> None: - """ - Handle backpressure by shedding events. - - The buffer will, in this order, until the buffer is below the maximum: - - Shed DEBUG events - - Shed INFO events - - Shed the middle 50% of the events. - """ - if len(self._buffer) <= self.maximum_buffer: - return - - # Strip out DEBUGs - self._buffer = deque( - filter(lambda event: event["level"] != "DEBUG", self._buffer) - ) - - if len(self._buffer) <= self.maximum_buffer: - return - - # Strip out INFOs - self._buffer = deque( - filter(lambda event: event["level"] != "INFO", self._buffer) - ) - - if len(self._buffer) <= self.maximum_buffer: - return - - # Cut the middle entries out - buffer_split = floor(self.maximum_buffer / 2) - - old_buffer = self._buffer - self._buffer = deque() - - for i in range(buffer_split): - self._buffer.append(old_buffer.popleft()) - - end_buffer = [] - for i in range(buffer_split): - end_buffer.append(old_buffer.pop()) - - self._buffer.extend(reversed(end_buffer)) - - def __call__(self, event: dict) -> None: - flattened = flatten_event(event, self.metadata, include_time=True) - self._buffer.append(flattened) - - # Handle backpressure, if it exists. - try: - self._handle_pressure() - except Exception: - # If handling backpressure fails,clear the buffer and log the - # exception. - self._buffer.clear() - self._logger.failure("Failed clearing backpressure") + def formatEvent(_event: dict) -> str: + flattened = flatten_event(_event, metadata, include_time=True) + return _encoder.encode(flattened) + "\n" - # Try and write immediately. 
- self._connect() + return TCPLogObserver(hs, host, port, formatEvent, maximum_buffer) diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py index 4cf81f7128..fd128b88e0 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py @@ -78,7 +78,7 @@ class TerseJSONTCPTestCase(StructuredLoggingTestBase, HomeserverTestCase): "server_name", "name", ] - self.assertEqual(set(log.keys()), set(expected_log_keys)) + self.assertCountEqual(log.keys(), expected_log_keys) # It contains the data we expect. self.assertEqual(log["name"], "wally") -- cgit 1.5.1 From 70259d8c8c0be71d3588a16211ccb42af87235da Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Wed, 21 Oct 2020 15:36:53 +0100 Subject: Limit AS transactions to 100 events (#8606) * Limit AS transactions to 100 events * Update changelog.d/8606.feature Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> * Add tests * Update synapse/appservice/scheduler.py Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/8606.feature | 1 + synapse/appservice/scheduler.py | 18 +++++++++++++++-- tests/appservice/test_scheduler.py | 41 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8606.feature (limited to 'tests') diff --git a/changelog.d/8606.feature b/changelog.d/8606.feature new file mode 100644 index 0000000000..fad723c108 --- /dev/null +++ b/changelog.d/8606.feature @@ -0,0 +1 @@ +Limit appservice transactions to 100 persistent and 100 ephemeral events. diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index ad3c408519..58291afc22 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -60,6 +60,13 @@ from synapse.types import JsonDict logger = logging.getLogger(__name__) +# Maximum number of events to provide in an AS transaction. +MAX_PERSISTENT_EVENTS_PER_TRANSACTION = 100 + +# Maximum number of ephemeral events to provide in an AS transaction. +MAX_EPHEMERAL_EVENTS_PER_TRANSACTION = 100 + + class ApplicationServiceScheduler: """ Public facing API for this module. Does the required DI to tie the components together. 
This also serves as the "event_pool", which in this @@ -136,10 +143,17 @@ class _ServiceQueuer: self.requests_in_flight.add(service.id) try: while True: - events = self.queued_events.pop(service.id, []) - ephemeral = self.queued_ephemeral.pop(service.id, []) + all_events = self.queued_events.get(service.id, []) + events = all_events[:MAX_PERSISTENT_EVENTS_PER_TRANSACTION] + del all_events[:MAX_PERSISTENT_EVENTS_PER_TRANSACTION] + + all_events_ephemeral = self.queued_ephemeral.get(service.id, []) + ephemeral = all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION] + del all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION] + if not events and not ephemeral: return + try: await self.txn_ctrl.send(service, events, ephemeral) except Exception: diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 2acb8b7603..97f8cad0dd 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -260,6 +260,31 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase): self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], []) self.assertEquals(3, self.txn_ctrl.send.call_count) + def test_send_large_txns(self): + srv_1_defer = defer.Deferred() + srv_2_defer = defer.Deferred() + send_return_list = [srv_1_defer, srv_2_defer] + + def do_send(x, y, z): + return make_deferred_yieldable(send_return_list.pop(0)) + + self.txn_ctrl.send = Mock(side_effect=do_send) + + service = Mock(id=4, name="service") + event_list = [Mock(name="event%i" % (i + 1)) for i in range(200)] + for event in event_list: + self.queuer.enqueue_event(service, event) + + # Expect the first event to be sent immediately. + self.txn_ctrl.send.assert_called_with(service, [event_list[0]], []) + srv_1_defer.callback(service) + # Then send the next 100 events + self.txn_ctrl.send.assert_called_with(service, event_list[1:101], []) + srv_2_defer.callback(service) + # Then the final 99 events + self.txn_ctrl.send.assert_called_with(service, event_list[101:], []) + self.assertEquals(3, self.txn_ctrl.send.call_count) + def test_send_single_ephemeral_no_queue(self): # Expect the event to be sent immediately. service = Mock(id=4, name="service") @@ -296,3 +321,19 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase): # Expect the queued events to be sent self.txn_ctrl.send.assert_called_with(service, [], event_list_2 + event_list_3) self.assertEquals(2, self.txn_ctrl.send.call_count) + + def test_send_large_txns_ephemeral(self): + d = defer.Deferred() + self.txn_ctrl.send = Mock( + side_effect=lambda x, y, z: make_deferred_yieldable(d) + ) + # Expect the event to be sent immediately. 
+ service = Mock(id=4, name="service") + first_chunk = [Mock(name="event%i" % (i + 1)) for i in range(100)] + second_chunk = [Mock(name="event%i" % (i + 101)) for i in range(50)] + event_list = first_chunk + second_chunk + self.queuer.enqueue_ephemeral(service, event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], first_chunk) + d.callback(service) + self.txn_ctrl.send.assert_called_with(service, [], second_chunk) + self.assertEquals(2, self.txn_ctrl.send.call_count) -- cgit 1.5.1 From 7b71695388fa2edd7ea5fd946b3d2afb68f4ef9d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 16 Oct 2020 22:31:16 +0100 Subject: Combine the two sets of tests for CacheDescriptor --- tests/storage/test__base.py | 228 --------------------------------- tests/util/caches/test_descriptors.py | 230 ++++++++++++++++++++++++++++++++++ 2 files changed, 230 insertions(+), 228 deletions(-) (limited to 'tests') diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 8e69b1e9cc..1ac4ebc61d 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -15,237 +15,9 @@ # limitations under the License. -from mock import Mock - -from twisted.internet import defer - -from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.descriptors import cached - from tests import unittest -class CacheDecoratorTestCase(unittest.HomeserverTestCase): - @defer.inlineCallbacks - def test_passthrough(self): - class A: - @cached() - def func(self, key): - return key - - a = A() - - self.assertEquals((yield a.func("foo")), "foo") - self.assertEquals((yield a.func("bar")), "bar") - - @defer.inlineCallbacks - def test_hit(self): - callcount = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - a = A() - yield a.func("foo") - - self.assertEquals(callcount[0], 1) - - self.assertEquals((yield a.func("foo")), "foo") - self.assertEquals(callcount[0], 1) - - @defer.inlineCallbacks - def test_invalidate(self): - callcount = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - a = A() - yield a.func("foo") - - self.assertEquals(callcount[0], 1) - - a.func.invalidate(("foo",)) - - yield a.func("foo") - - self.assertEquals(callcount[0], 2) - - def test_invalidate_missing(self): - class A: - @cached() - def func(self, key): - return key - - A().func.invalidate(("what",)) - - @defer.inlineCallbacks - def test_max_entries(self): - callcount = [0] - - class A: - @cached(max_entries=10) - def func(self, key): - callcount[0] += 1 - return key - - a = A() - - for k in range(0, 12): - yield a.func(k) - - self.assertEquals(callcount[0], 12) - - # There must have been at least 2 evictions, meaning if we calculate - # all 12 values again, we must get called at least 2 more times - for k in range(0, 12): - yield a.func(k) - - self.assertTrue( - callcount[0] >= 14, msg="Expected callcount >= 14, got %d" % (callcount[0]) - ) - - def test_prefill(self): - callcount = [0] - - d = defer.succeed(123) - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return d - - a = A() - - a.func.prefill(("foo",), ObservableDeferred(d)) - - self.assertEquals(a.func("foo").result, d.result) - self.assertEquals(callcount[0], 0) - - @defer.inlineCallbacks - def test_invalidate_context(self): - callcount = [0] - callcount2 = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - @cached(cache_context=True) - def func2(self, key, cache_context): - callcount2[0] += 1 - return 
self.func(key, on_invalidate=cache_context.invalidate) - - a = A() - yield a.func2("foo") - - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 1) - - a.func.invalidate(("foo",)) - yield a.func("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 1) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - @defer.inlineCallbacks - def test_eviction_context(self): - callcount = [0] - callcount2 = [0] - - class A: - @cached(max_entries=2) - def func(self, key): - callcount[0] += 1 - return key - - @cached(cache_context=True) - def func2(self, key, cache_context): - callcount2[0] += 1 - return self.func(key, on_invalidate=cache_context.invalidate) - - a = A() - yield a.func2("foo") - yield a.func2("foo2") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - yield a.func2("foo") - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - yield a.func("foo3") - - self.assertEquals(callcount[0], 3) - self.assertEquals(callcount2[0], 2) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 4) - self.assertEquals(callcount2[0], 3) - - @defer.inlineCallbacks - def test_double_get(self): - callcount = [0] - callcount2 = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - @cached(cache_context=True) - def func2(self, key, cache_context): - callcount2[0] += 1 - return self.func(key, on_invalidate=cache_context.invalidate) - - a = A() - a.func2.cache.cache = Mock(wraps=a.func2.cache.cache) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 1) - - a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 1) - - yield a.func2("foo") - a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 2) - - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 2) - - a.func.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 3) - yield a.func("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 3) - - class UpsertManyTests(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.storage = hs.get_datastore() diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 3d1f960869..3d738afa7f 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -27,6 +27,7 @@ from synapse.logging.context import ( current_context, make_deferred_yieldable, ) +from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches import descriptors from synapse.util.caches.descriptors import cached @@ -311,6 +312,235 @@ class DescriptorTestCase(unittest.TestCase): self.failureResultOf(d, SynapseError) +class CacheDecoratorTestCase(unittest.HomeserverTestCase): + """More tests for @cached + + The following is a set of tests that got lost in a different file for a while. + + There are probably duplicates of the tests in DescriptorTestCase. Ideally the + duplicates would be removed and the two sets of classes combined. 
+ """ + + @defer.inlineCallbacks + def test_passthrough(self): + class A: + @cached() + def func(self, key): + return key + + a = A() + + self.assertEquals((yield a.func("foo")), "foo") + self.assertEquals((yield a.func("bar")), "bar") + + @defer.inlineCallbacks + def test_hit(self): + callcount = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + a = A() + yield a.func("foo") + + self.assertEquals(callcount[0], 1) + + self.assertEquals((yield a.func("foo")), "foo") + self.assertEquals(callcount[0], 1) + + @defer.inlineCallbacks + def test_invalidate(self): + callcount = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + a = A() + yield a.func("foo") + + self.assertEquals(callcount[0], 1) + + a.func.invalidate(("foo",)) + + yield a.func("foo") + + self.assertEquals(callcount[0], 2) + + def test_invalidate_missing(self): + class A: + @cached() + def func(self, key): + return key + + A().func.invalidate(("what",)) + + @defer.inlineCallbacks + def test_max_entries(self): + callcount = [0] + + class A: + @cached(max_entries=10) + def func(self, key): + callcount[0] += 1 + return key + + a = A() + + for k in range(0, 12): + yield a.func(k) + + self.assertEquals(callcount[0], 12) + + # There must have been at least 2 evictions, meaning if we calculate + # all 12 values again, we must get called at least 2 more times + for k in range(0, 12): + yield a.func(k) + + self.assertTrue( + callcount[0] >= 14, msg="Expected callcount >= 14, got %d" % (callcount[0]) + ) + + def test_prefill(self): + callcount = [0] + + d = defer.succeed(123) + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return d + + a = A() + + a.func.prefill(("foo",), ObservableDeferred(d)) + + self.assertEquals(a.func("foo").result, d.result) + self.assertEquals(callcount[0], 0) + + @defer.inlineCallbacks + def test_invalidate_context(self): + callcount = [0] + callcount2 = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + @cached(cache_context=True) + def func2(self, key, cache_context): + callcount2[0] += 1 + return self.func(key, on_invalidate=cache_context.invalidate) + + a = A() + yield a.func2("foo") + + self.assertEquals(callcount[0], 1) + self.assertEquals(callcount2[0], 1) + + a.func.invalidate(("foo",)) + yield a.func("foo") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 1) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 2) + + @defer.inlineCallbacks + def test_eviction_context(self): + callcount = [0] + callcount2 = [0] + + class A: + @cached(max_entries=2) + def func(self, key): + callcount[0] += 1 + return key + + @cached(cache_context=True) + def func2(self, key, cache_context): + callcount2[0] += 1 + return self.func(key, on_invalidate=cache_context.invalidate) + + a = A() + yield a.func2("foo") + yield a.func2("foo2") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 2) + + yield a.func2("foo") + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 2) + + yield a.func("foo3") + + self.assertEquals(callcount[0], 3) + self.assertEquals(callcount2[0], 2) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 4) + self.assertEquals(callcount2[0], 3) + + @defer.inlineCallbacks + def test_double_get(self): + callcount = [0] + callcount2 = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + @cached(cache_context=True) + def 
func2(self, key, cache_context): + callcount2[0] += 1 + return self.func(key, on_invalidate=cache_context.invalidate) + + a = A() + a.func2.cache.cache = mock.Mock(wraps=a.func2.cache.cache) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 1) + self.assertEquals(callcount2[0], 1) + + a.func2.invalidate(("foo",)) + self.assertEquals(a.func2.cache.cache.pop.call_count, 1) + + yield a.func2("foo") + a.func2.invalidate(("foo",)) + self.assertEquals(a.func2.cache.cache.pop.call_count, 2) + + self.assertEquals(callcount[0], 1) + self.assertEquals(callcount2[0], 2) + + a.func.invalidate(("foo",)) + self.assertEquals(a.func2.cache.cache.pop.call_count, 3) + yield a.func("foo") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 2) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 3) + + class CachedListDescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks def test_cache(self): -- cgit 1.5.1 From 1f4269700c2353263a605856f28ded28501368e1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 16 Oct 2020 12:34:55 +0100 Subject: Push some deferred wrangling down into DeferredCache --- changelog.d/8572.misc | 1 + synapse/util/caches/deferred_cache.py | 57 +++++++++++++++++++++++++++----- synapse/util/caches/descriptors.py | 32 ++++-------------- tests/util/caches/test_deferred_cache.py | 18 +++++----- tests/util/caches/test_descriptors.py | 5 ++- 5 files changed, 67 insertions(+), 46 deletions(-) create mode 100644 changelog.d/8572.misc (limited to 'tests') diff --git a/changelog.d/8572.misc b/changelog.d/8572.misc new file mode 100644 index 0000000000..ea2a6d340d --- /dev/null +++ b/changelog.d/8572.misc @@ -0,0 +1 @@ +Modify `DeferredCache.get()` to return `Deferred`s instead of `ObservableDeferred`s. diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index faeef75506..6c162e9f34 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -57,7 +57,7 @@ class DeferredCache(Generic[KT, VT]): """Wraps an LruCache, adding support for Deferred results. It expects that each entry added with set() will be a Deferred; likewise get() - may return an ObservableDeferred. + will return a Deferred. """ __slots__ = ( @@ -130,16 +130,22 @@ class DeferredCache(Generic[KT, VT]): key: KT, callback: Optional[Callable[[], None]] = None, update_metrics: bool = True, - ) -> Union[ObservableDeferred, VT]: + ) -> defer.Deferred: """Looks the key up in the caches. + For symmetry with set(), this method does *not* follow the synapse logcontext + rules: the logcontext will not be cleared on return, and the Deferred will run + its callbacks in the sentinel context. In other words: wrap the result with + make_deferred_yieldable() before `await`ing it. + Args: - key(tuple) - callback(fn): Gets called when the entry in the cache is invalidated + key: + callback: Gets called when the entry in the cache is invalidated update_metrics (bool): whether to update the cache hit rate metrics Returns: - Either an ObservableDeferred or the result itself + A Deferred which completes with the result. Note that this may later fail + if there is an ongoing set() operation which later completes with a failure. 
        Raises:
            KeyError if the key is not found in the cache
@@ -152,7 +158,7 @@ class DeferredCache(Generic[KT, VT]):
             m = self.cache.metrics
             assert m  # we always have a name, so should always have metrics
             m.inc_hits()
-            return val.deferred
+            return val.deferred.observe()
 
         val2 = self.cache.get(
             key, _Sentinel.sentinel, callbacks=callbacks, update_metrics=update_metrics
@@ -160,7 +166,7 @@ class DeferredCache(Generic[KT, VT]):
         if val2 is _Sentinel.sentinel:
             raise KeyError()
         else:
-            return val2
+            return defer.succeed(val2)
 
     def get_immediate(
         self, key: KT, default: T, update_metrics: bool = True
@@ -173,7 +179,36 @@ class DeferredCache(Generic[KT, VT]):
         key: KT,
         value: defer.Deferred,
         callback: Optional[Callable[[], None]] = None,
-    ) -> ObservableDeferred:
+    ) -> defer.Deferred:
+        """Adds a new entry to the cache (or updates an existing one).
+
+        The given `value` *must* be a Deferred.
+
+        First any existing entry for the same key is invalidated. Then a new entry
+        is added to the cache for the given key.
+
+        Until the `value` completes, calls to `get()` for the key will also result in an
+        incomplete Deferred, which will ultimately complete with the same result as
+        `value`.
+
+        If `value` completes successfully, subsequent calls to `get()` will then return
+        a completed deferred with the same result. If it *fails*, the cache is
+        invalidated and subsequent calls to `get()` will raise a KeyError.
+
+        If another call to `set()` happens before `value` completes, then (a) any
+        invalidation callbacks registered in the interim will be called, (b) any
+        `get()`s in the interim will continue to complete with the result from the
+        *original* `value`, (c) any future calls to `get()` will complete with the
+        result from the *new* `value`.
+
+        It is expected that `value` does *not* follow the synapse logcontext rules - ie,
+        if it is incomplete, it runs its callbacks in the sentinel context.
+
+        Args:
+            key: Key to be set
+            value: a deferred which will complete with a result to add to the cache
+            callback: An optional callback to be called when the entry is invalidated
+        """
         if not isinstance(value, defer.Deferred):
             raise TypeError("not a Deferred")
 
@@ -187,6 +222,8 @@ class DeferredCache(Generic[KT, VT]):
         if existing_entry:
             existing_entry.invalidate()
 
+        # XXX: why don't we invalidate the entry in `self.cache` yet?
+
         self._pending_deferred_cache[key] = entry
 
         def compare_and_pop():
@@ -230,7 +267,9 @@ class DeferredCache(Generic[KT, VT]):
         # _pending_deferred_cache to the real cache.
         #
         observer.addCallbacks(cb, eb)
-        return observable
+
+        # we return a new Deferred which will be called before any subsequent observers.
+ return observable.observe() def prefill(self, key: KT, value: VT, callback: Callable[[], None] = None): callbacks = [callback] if callback else [] diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 1f43886804..a4172345ef 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -23,7 +23,6 @@ from twisted.internet import defer from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.util import unwrapFirstError -from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.deferred_cache import DeferredCache logger = logging.getLogger(__name__) @@ -156,7 +155,7 @@ class CacheDescriptor(_CacheDescriptorBase): keylen=self.num_args, tree=self.tree, iterable=self.iterable, - ) # type: DeferredCache[Tuple, Any] + ) # type: DeferredCache[CacheKey, Any] def get_cache_key_gen(args, kwargs): """Given some args/kwargs return a generator that resolves into @@ -208,26 +207,12 @@ class CacheDescriptor(_CacheDescriptorBase): kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key) try: - cached_result_d = cache.get(cache_key, callback=invalidate_callback) - - if isinstance(cached_result_d, ObservableDeferred): - observer = cached_result_d.observe() - else: - observer = defer.succeed(cached_result_d) - + ret = cache.get(cache_key, callback=invalidate_callback) except KeyError: ret = defer.maybeDeferred(preserve_fn(self.orig), obj, *args, **kwargs) + ret = cache.set(cache_key, ret, callback=invalidate_callback) - def onErr(f): - cache.invalidate(cache_key) - return f - - ret.addErrback(onErr) - - result_d = cache.set(cache_key, ret, callback=invalidate_callback) - observer = result_d.observe() - - return make_deferred_yieldable(observer) + return make_deferred_yieldable(ret) wrapped = cast(_CachedFunction, _wrapped) @@ -286,7 +271,7 @@ class CacheListDescriptor(_CacheDescriptorBase): def __get__(self, obj, objtype=None): cached_method = getattr(obj, self.cached_method_name) - cache = cached_method.cache + cache = cached_method.cache # type: DeferredCache[CacheKey, Any] num_args = cached_method.num_args @functools.wraps(self.orig) @@ -326,14 +311,11 @@ class CacheListDescriptor(_CacheDescriptorBase): for arg in list_args: try: res = cache.get(arg_to_cache_key(arg), callback=invalidate_callback) - if not isinstance(res, ObservableDeferred): - results[arg] = res - elif not res.has_succeeded(): - res = res.observe() + if not res.called: res.addCallback(update_results_dict, arg) cached_defers.append(res) else: - results[arg] = res.get_result() + results[arg] = res.result except KeyError: missing.add(arg) diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index 8a08ab6661..68d26128c1 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -13,15 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import unittest from functools import partial from twisted.internet import defer from synapse.util.caches.deferred_cache import DeferredCache +from tests.unittest import TestCase -class DeferredCacheTestCase(unittest.TestCase): + +class DeferredCacheTestCase(TestCase): def test_empty(self): cache = DeferredCache("test") failed = False @@ -36,7 +37,7 @@ class DeferredCacheTestCase(unittest.TestCase): cache = DeferredCache("test") cache.prefill("foo", 123) - self.assertEquals(cache.get("foo"), 123) + self.assertEquals(self.successResultOf(cache.get("foo")), 123) def test_get_immediate(self): cache = DeferredCache("test") @@ -82,16 +83,15 @@ class DeferredCacheTestCase(unittest.TestCase): d2 = defer.Deferred() cache.set("key2", d2, partial(record_callback, 1)) - # lookup should return observable deferreds - self.assertFalse(cache.get("key1").has_called()) - self.assertFalse(cache.get("key2").has_called()) + # lookup should return pending deferreds + self.assertFalse(cache.get("key1").called) + self.assertFalse(cache.get("key2").called) # let one of the lookups complete d2.callback("result2") - # for now at least, the cache will return real results rather than an - # observabledeferred - self.assertEqual(cache.get("key2"), "result2") + # now the cache will return a completed deferred + self.assertEqual(self.successResultOf(cache.get("key2")), "result2") # now do the invalidation cache.invalidate_all() diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 3d738afa7f..fc2663c02d 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -27,7 +27,6 @@ from synapse.logging.context import ( current_context, make_deferred_yieldable, ) -from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches import descriptors from synapse.util.caches.descriptors import cached @@ -419,9 +418,9 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): a = A() - a.func.prefill(("foo",), ObservableDeferred(d)) + a.func.prefill(("foo",), 456) - self.assertEquals(a.func("foo").result, d.result) + self.assertEquals(a.func("foo").result, 456) self.assertEquals(callcount[0], 0) @defer.inlineCallbacks -- cgit 1.5.1 From 6d3905c7c7a53eed7a856aa013f6a9bf9292eb7a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 16 Oct 2020 21:32:52 +0100 Subject: Add some more tests --- tests/util/caches/test_deferred_cache.py | 95 ++++++++++++++++++++++++++++++++ tests/util/caches/test_descriptors.py | 52 +++++++++++++++++ 2 files changed, 147 insertions(+) (limited to 'tests') diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index 68d26128c1..dadfabd46d 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -39,6 +39,101 @@ class DeferredCacheTestCase(TestCase): self.assertEquals(self.successResultOf(cache.get("foo")), 123) + def test_hit_deferred(self): + cache = DeferredCache("test") + origin_d = defer.Deferred() + set_d = cache.set("k1", origin_d) + + # get should return an incomplete deferred + get_d = cache.get("k1") + self.assertFalse(get_d.called) + + # add a callback that will make sure that the set_d gets called before the get_d + def check1(r): + self.assertTrue(set_d.called) + return r + + # TODO: Actually ObservableDeferred *doesn't* run its tests in order on py3.8. + # maybe we should fix that? 
+ # get_d.addCallback(check1) + + # now fire off all the deferreds + origin_d.callback(99) + self.assertEqual(self.successResultOf(origin_d), 99) + self.assertEqual(self.successResultOf(set_d), 99) + self.assertEqual(self.successResultOf(get_d), 99) + + def test_callbacks(self): + """Invalidation callbacks are called at the right time""" + cache = DeferredCache("test") + callbacks = set() + + # start with an entry, with a callback + cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill")) + + # now replace that entry with a pending result + origin_d = defer.Deferred() + set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set")) + + # ... and also make a get request + get_d = cache.get("k1", callback=lambda: callbacks.add("get")) + + # we don't expect the invalidation callback for the original value to have + # been called yet, even though get() will now return a different result. + # I'm not sure if that is by design or not. + self.assertEqual(callbacks, set()) + + # now fire off all the deferreds + origin_d.callback(20) + self.assertEqual(self.successResultOf(set_d), 20) + self.assertEqual(self.successResultOf(get_d), 20) + + # now the original invalidation callback should have been called, but none of + # the others + self.assertEqual(callbacks, {"prefill"}) + callbacks.clear() + + # another update should invalidate both the previous results + cache.prefill("k1", 30) + self.assertEqual(callbacks, {"set", "get"}) + + def test_set_fail(self): + cache = DeferredCache("test") + callbacks = set() + + # start with an entry, with a callback + cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill")) + + # now replace that entry with a pending result + origin_d = defer.Deferred() + set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set")) + + # ... and also make a get request + get_d = cache.get("k1", callback=lambda: callbacks.add("get")) + + # none of the callbacks should have been called yet + self.assertEqual(callbacks, set()) + + # oh noes! fails! + e = Exception("oops") + origin_d.errback(e) + self.assertIs(self.failureResultOf(set_d, Exception).value, e) + self.assertIs(self.failureResultOf(get_d, Exception).value, e) + + # the callbacks for the failed requests should have been called. + # I'm not sure if this is deliberate or not. + self.assertEqual(callbacks, {"get", "set"}) + callbacks.clear() + + # the old value should still be returned now? + get_d2 = cache.get("k1", callback=lambda: callbacks.add("get2")) + self.assertEqual(self.successResultOf(get_d2), 10) + + # replacing the value now should run the callbacks for those requests + # which got the original result + cache.prefill("k1", 30) + self.assertEqual(callbacks, {"prefill", "get2"}) + def test_get_immediate(self): cache = DeferredCache("test") d1 = defer.Deferred() diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index fc2663c02d..2ad08f541b 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging
+from typing import Set
 
 import mock
 
@@ -130,6 +131,57 @@ class DescriptorTestCase(unittest.TestCase):
         d = obj.fn(1)
         self.failureResultOf(d, SynapseError)
 
+    def test_cache_with_async_exception(self):
+        """The wrapped function returns a failure
+        """
+
+        class Cls:
+            result = None
+            call_count = 0
+
+            @cached()
+            def fn(self, arg1):
+                self.call_count += 1
+                return self.result
+
+        obj = Cls()
+        callbacks = set()  # type: Set[str]
+
+        # set off an asynchronous request
+        obj.result = origin_d = defer.Deferred()
+
+        d1 = obj.fn(1, on_invalidate=lambda: callbacks.add("d1"))
+        self.assertFalse(d1.called)
+
+        # a second request should also return a deferred, but should not call the
+        # function itself.
+        d2 = obj.fn(1, on_invalidate=lambda: callbacks.add("d2"))
+        self.assertFalse(d2.called)
+        self.assertEqual(obj.call_count, 1)
+
+        # no callbacks yet
+        self.assertEqual(callbacks, set())
+
+        # the original request fails
+        e = Exception("bzz")
+        origin_d.errback(e)
+
+        # ... which should cause the lookups to fail similarly
+        self.assertIs(self.failureResultOf(d1, Exception).value, e)
+        self.assertIs(self.failureResultOf(d2, Exception).value, e)
+
+        # ... and the callbacks to have been, uh, called.
+        self.assertEqual(callbacks, {"d1", "d2"})
+
+        # ... leaving the cache empty
+        self.assertEqual(len(obj.fn.cache.cache), 0)
+
+        # and a second call should work as normal
+        obj.result = defer.succeed(100)
+        d3 = obj.fn(1)
+        self.assertEqual(self.successResultOf(d3), 100)
+        self.assertEqual(obj.call_count, 2)
+
     def test_cache_logcontexts(self):
         """Check that logcontexts are set and restored correctly when using the
         cache."""
-- cgit 1.5.1


From 2ac908f377ca7522019e1560c115f893e52d79a1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 22 Oct 2020 10:11:06 +0100
Subject: Don't instantiate Requester directly (#8614)

---
 changelog.d/8614.misc                 | 1 +
 tests/storage/test_cleanup_extrems.py | 6 +++---
 tests/storage/test_event_metrics.py   | 4 ++--
 tests/storage/test_roommember.py      | 4 ++--
 tests/test_federation.py              | 4 ++--
 tests/unittest.py                     | 4 ++--
 6 files changed, 12 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/8614.misc

(limited to 'tests')

diff --git a/changelog.d/8614.misc b/changelog.d/8614.misc
new file mode 100644
index 0000000000..1bf9ea08f0
--- /dev/null
+++ b/changelog.d/8614.misc
@@ -0,0 +1 @@
+Don't instantiate Requester directly.
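In outline, this commit replaces positional construction of `Requester` with the `create_requester` factory throughout the tests, so call sites no longer break when `Requester` grows a field. A minimal sketch of the pattern, using only the `synapse.types` names visible in the diffs below (the `@alice:test` user is hypothetical):

```python
# Minimal sketch of the pattern this commit enforces; the user ID below is
# hypothetical, and create_requester is the factory used in the diffs here.
from synapse.types import UserID, create_requester

user = UserID.from_string("@alice:test")

# Before: positional construction, which every call site must update whenever
# Requester changes shape:
#     requester = Requester(user, None, False, False, None, None)

# After: only the user is required; the factory fills in the defaults.
requester = create_requester(user)
```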
diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 080761d1d2..5a1e5c4e66 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -22,7 +22,7 @@ import synapse.rest.admin from synapse.api.constants import EventTypes from synapse.rest.client.v1 import login, room from synapse.storage import prepare_database -from synapse.types import Requester, UserID +from synapse.types import UserID, create_requester from tests.unittest import HomeserverTestCase @@ -38,7 +38,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): # Create a test user and room self.user = UserID("alice", "test") - self.requester = Requester(self.user, None, False, False, None, None) + self.requester = create_requester(self.user) info, _ = self.get_success(self.room_creator.create_room(self.requester, {})) self.room_id = info["room_id"] @@ -260,7 +260,7 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase): # Create a test user and room self.user = UserID.from_string(self.register_user("user1", "password")) self.token1 = self.login("user1", "password") - self.requester = Requester(self.user, None, False, False, None, None) + self.requester = create_requester(self.user) info, _ = self.get_success(self.room_creator.create_room(self.requester, {})) self.room_id = info["room_id"] self.event_creator = homeserver.get_event_creation_handler() diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index 3957471f3f..7691f2d790 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -14,7 +14,7 @@ # limitations under the License. from synapse.metrics import REGISTRY, generate_latest -from synapse.types import Requester, UserID +from synapse.types import UserID, create_requester from tests.unittest import HomeserverTestCase @@ -27,7 +27,7 @@ class ExtremStatisticsTestCase(HomeserverTestCase): room_creator = self.hs.get_room_creation_handler() user = UserID("alice", "test") - requester = Requester(user, None, False, False, None, None) + requester = create_requester(user) # Real events, forward extremities events = [(3, 2), (6, 2), (4, 6)] diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 12ccc1f53e..ff972daeaa 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -19,7 +19,7 @@ from unittest.mock import Mock from synapse.api.constants import Membership from synapse.rest.admin import register_servlets_for_client_rest_resource from synapse.rest.client.v1 import login, room -from synapse.types import Requester, UserID +from synapse.types import UserID, create_requester from tests import unittest from tests.test_utils import event_injection @@ -187,7 +187,7 @@ class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase): # Now let's create a room, which will insert a membership user = UserID("alice", "test") - requester = Requester(user, None, False, False, None, None) + requester = create_requester(user) self.get_success(self.room_creator.create_room(requester, {})) # Register the background update to run again. 
diff --git a/tests/test_federation.py b/tests/test_federation.py index d39e792580..1ce4ea3a01 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -20,7 +20,7 @@ from twisted.internet.defer import succeed from synapse.api.errors import FederationError from synapse.events import make_event_from_dict from synapse.logging.context import LoggingContext -from synapse.types import Requester, UserID +from synapse.types import UserID, create_requester from synapse.util import Clock from synapse.util.retryutils import NotRetryingDestination @@ -43,7 +43,7 @@ class MessageAcceptTests(unittest.HomeserverTestCase): ) user_id = UserID("us", "test") - our_user = Requester(user_id, None, False, False, None, None) + our_user = create_requester(user_id) room_creator = self.homeserver.get_room_creation_handler() self.room_id = self.get_success( room_creator.create_room( diff --git a/tests/unittest.py b/tests/unittest.py index 040b126a27..257f465897 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -44,7 +44,7 @@ from synapse.logging.context import ( set_current_context, ) from synapse.server import HomeServer -from synapse.types import Requester, UserID, create_requester +from synapse.types import UserID, create_requester from synapse.util.ratelimitutils import FederationRateLimiter from tests.server import ( @@ -627,7 +627,7 @@ class HomeserverTestCase(TestCase): """ event_creator = self.hs.get_event_creation_handler() secrets = self.hs.get_secrets() - requester = Requester(user, None, False, False, None, None) + requester = create_requester(user) event, context = self.get_success( event_creator.create_event( -- cgit 1.5.1 From db9ef792f03aafa9f15d796a4004b0e0e551646d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 23 Oct 2020 10:41:32 +0100 Subject: Fix email notifications for invites without local state. (#8627) This can happen if e.g. the room invited into is no longer on the server (or if all users left the room). --- changelog.d/8627.bugfix | 1 + synapse/push/mailer.py | 41 ++++++++++++++++++++++++++++------------- tests/push/test_email.py | 29 +++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 13 deletions(-) create mode 100644 changelog.d/8627.bugfix (limited to 'tests') diff --git a/changelog.d/8627.bugfix b/changelog.d/8627.bugfix new file mode 100644 index 0000000000..143cf95f92 --- /dev/null +++ b/changelog.d/8627.bugfix @@ -0,0 +1 @@ +Fix email notifications for invites without local state. diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 155791b754..38195c8eea 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -24,7 +24,7 @@ from typing import Iterable, List, TypeVar import bleach import jinja2 -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, Membership from synapse.api.errors import StoreError from synapse.config.emailconfig import EmailSubjectConfig from synapse.logging.context import make_deferred_yieldable @@ -317,9 +317,14 @@ class Mailer: async def get_room_vars( self, room_id, user_id, notifs, notif_events, room_state_ids ): - my_member_event_id = room_state_ids[("m.room.member", user_id)] - my_member_event = await self.store.get_event(my_member_event_id) - is_invite = my_member_event.content["membership"] == "invite" + # Check if one of the notifs is an invite event for the user. 
+ is_invite = False + for n in notifs: + ev = notif_events[n["event_id"]] + if ev.type == EventTypes.Member and ev.state_key == user_id: + if ev.content.get("membership") == Membership.INVITE: + is_invite = True + break room_name = await calculate_room_name(self.store, room_state_ids, user_id) @@ -461,16 +466,26 @@ class Mailer: self.store, room_state_ids[room_id], user_id, fallback_to_members=False ) - my_member_event_id = room_state_ids[room_id][("m.room.member", user_id)] - my_member_event = await self.store.get_event(my_member_event_id) - if my_member_event.content["membership"] == "invite": - inviter_member_event_id = room_state_ids[room_id][ - ("m.room.member", my_member_event.sender) - ] - inviter_member_event = await self.store.get_event( - inviter_member_event_id + # See if one of the notifs is an invite event for the user + invite_event = None + for n in notifs_by_room[room_id]: + ev = notif_events[n["event_id"]] + if ev.type == EventTypes.Member and ev.state_key == user_id: + if ev.content.get("membership") == Membership.INVITE: + invite_event = ev + break + + if invite_event: + inviter_member_event_id = room_state_ids[room_id].get( + ("m.room.member", invite_event.sender) ) - inviter_name = name_from_member_event(inviter_member_event) + inviter_name = invite_event.sender + if inviter_member_event_id: + inviter_member_event = await self.store.get_event( + inviter_member_event_id, allow_none=True + ) + if inviter_member_event: + inviter_name = name_from_member_event(inviter_member_event) if room_name is None: return self.email_subjects.invite_from_person % { diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 55545d9341..d9993e6245 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -131,6 +131,35 @@ class EmailPusherTests(HomeserverTestCase): # We should get emailed about that message self._check_for_mail() + def test_invite_sends_email(self): + # Create a room and invite the user to it + room = self.helper.create_room_as(self.others[0].id, tok=self.others[0].token) + self.helper.invite( + room=room, + src=self.others[0].id, + tok=self.others[0].token, + targ=self.user_id, + ) + + # We should get emailed about the invite + self._check_for_mail() + + def test_invite_to_empty_room_sends_email(self): + # Create a room and invite the user to it + room = self.helper.create_room_as(self.others[0].id, tok=self.others[0].token) + self.helper.invite( + room=room, + src=self.others[0].id, + tok=self.others[0].token, + targ=self.user_id, + ) + + # Then have the original user leave + self.helper.leave(room, self.others[0].id, tok=self.others[0].token) + + # We should get emailed about the invite + self._check_for_mail() + def test_multiple_members_email(self): # We want to test multiple notifications, so we pause processing of push # while we send messages. -- cgit 1.5.1 From c850dd9a8e4e4f78fbe0b44686f3824b901236f6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 23 Oct 2020 17:12:59 +0100 Subject: Fix handling of User-Agent headers with bad utf-8. 
(#8632)

---
 changelog.d/8632.bugfix          |  1 +
 synapse/api/auth.py              |  4 +---
 synapse/handlers/auth.py         |  4 +---
 synapse/handlers/cas_handler.py  |  4 +---
 synapse/handlers/oidc_handler.py |  4 +---
 synapse/handlers/saml_handler.py |  4 +---
 synapse/http/site.py             | 16 +++++++++-------
 tests/handlers/test_oidc.py      | 24 ++++++++++++++++++------
 8 files changed, 33 insertions(+), 28 deletions(-)
 create mode 100644 changelog.d/8632.bugfix

(limited to 'tests')

diff --git a/changelog.d/8632.bugfix b/changelog.d/8632.bugfix
new file mode 100644
index 0000000000..7d834aa2e2
--- /dev/null
+++ b/changelog.d/8632.bugfix
@@ -0,0 +1 @@
+Fix handling of User-Agent headers that are invalid UTF-8, which caused users' user agents to not be recorded correctly.
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index bff87fabde..526cb58c5f 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -184,9 +184,7 @@ class Auth:
         """
         try:
             ip_addr = self.hs.get_ip_from_request(request)
-            user_agent = request.requestHeaders.getRawHeaders(
-                b"User-Agent", default=[b""]
-            )[0].decode("ascii", "surrogateescape")
+            user_agent = request.get_user_agent("")
 
             access_token = self.get_access_token_from_request(request)
 
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 8619fbb982..48d60feaab 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -470,9 +470,7 @@ class AuthHandler(BaseHandler):
         # authentication flow.
         await self.store.set_ui_auth_clientdict(sid, clientdict)
 
-        user_agent = request.requestHeaders.getRawHeaders(b"User-Agent", default=[b""])[
-            0
-        ].decode("ascii", "surrogateescape")
+        user_agent = request.get_user_agent("")
 
         await self.store.add_user_agent_ip_to_ui_auth_session(
             session.session_id, user_agent, clientip
diff --git a/synapse/handlers/cas_handler.py b/synapse/handlers/cas_handler.py
index a4cc4b9a5a..048a3b3c0b 100644
--- a/synapse/handlers/cas_handler.py
+++ b/synapse/handlers/cas_handler.py
@@ -212,9 +212,7 @@ class CasHandler:
         else:
             if not registered_user_id:
                 # Pull out the user-agent and IP from the request.
-                user_agent = request.requestHeaders.getRawHeaders(
-                    b"User-Agent", default=[b""]
-                )[0].decode("ascii", "surrogateescape")
+                user_agent = request.get_user_agent("")
                 ip_address = self.hs.get_ip_from_request(request)
 
                 registered_user_id = await self._registration_handler.register_user(
diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py
index 05ac86e697..a312610635 100644
--- a/synapse/handlers/oidc_handler.py
+++ b/synapse/handlers/oidc_handler.py
@@ -695,9 +695,7 @@ class OidcHandler:
             return
 
         # Pull out the user-agent and IP from the request.
-        user_agent = request.requestHeaders.getRawHeaders(b"User-Agent", default=[b""])[
-            0
-        ].decode("ascii", "surrogateescape")
+        user_agent = request.get_user_agent("")
         ip_address = self.hs.get_ip_from_request(request)
 
         # Call the mapper to register/login the user
diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py
index 285c481a96..fd6c5e9ea8 100644
--- a/synapse/handlers/saml_handler.py
+++ b/synapse/handlers/saml_handler.py
@@ -216,9 +216,7 @@ class SamlHandler:
             return
 
         # Pull out the user-agent and IP from the request.
- user_agent = request.requestHeaders.getRawHeaders(b"User-Agent", default=[b""])[ - 0 - ].decode("ascii", "surrogateescape") + user_agent = request.get_user_agent("") ip_address = self.hs.get_ip_from_request(request) # Call the mapper to register/login the user diff --git a/synapse/http/site.py b/synapse/http/site.py index 6e79b47828..ca673028e4 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -109,8 +109,14 @@ class SynapseRequest(Request): method = self.method.decode("ascii") return method - def get_user_agent(self): - return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] + def get_user_agent(self, default: str) -> str: + """Return the last User-Agent header, or the given default. + """ + user_agent = self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] + if user_agent is None: + return default + + return user_agent.decode("ascii", "replace") def render(self, resrc): # this is called once a Resource has been found to serve the request; in our @@ -274,11 +280,7 @@ class SynapseRequest(Request): # with maximum recursion trying to log errors about # the charset problem. # c.f. https://github.com/matrix-org/synapse/issues/3471 - user_agent = self.get_user_agent() - if user_agent is not None: - user_agent = user_agent.decode("utf-8", "replace") - else: - user_agent = "-" + user_agent = self.get_user_agent("-") code = str(self.code) if not self.finished: diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index b6f436c016..0d51705849 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -394,7 +394,14 @@ class OidcHandlerTestCase(HomeserverTestCase): self.handler._map_userinfo_to_user = simple_async_mock(return_value=user_id) self.handler._auth_handler.complete_sso_login = simple_async_mock() request = Mock( - spec=["args", "getCookie", "addCookie", "requestHeaders", "getClientIP"] + spec=[ + "args", + "getCookie", + "addCookie", + "requestHeaders", + "getClientIP", + "get_user_agent", + ] ) code = "code" @@ -414,9 +421,8 @@ class OidcHandlerTestCase(HomeserverTestCase): request.args[b"code"] = [code.encode("utf-8")] request.args[b"state"] = [state.encode("utf-8")] - request.requestHeaders = Mock(spec=["getRawHeaders"]) - request.requestHeaders.getRawHeaders.return_value = [user_agent.encode("ascii")] request.getClientIP.return_value = ip_address + request.get_user_agent.return_value = user_agent self.get_success(self.handler.handle_oidc_callback(request)) @@ -621,7 +627,14 @@ class OidcHandlerTestCase(HomeserverTestCase): self.handler._map_userinfo_to_user = simple_async_mock(return_value=user_id) self.handler._auth_handler.complete_sso_login = simple_async_mock() request = Mock( - spec=["args", "getCookie", "addCookie", "requestHeaders", "getClientIP"] + spec=[ + "args", + "getCookie", + "addCookie", + "requestHeaders", + "getClientIP", + "get_user_agent", + ] ) state = "state" @@ -637,9 +650,8 @@ class OidcHandlerTestCase(HomeserverTestCase): request.args[b"code"] = [b"code"] request.args[b"state"] = [state.encode("utf-8")] - request.requestHeaders = Mock(spec=["getRawHeaders"]) - request.requestHeaders.getRawHeaders.return_value = [b"Browser"] request.getClientIP.return_value = "10.0.0.1" + request.get_user_agent.return_value = "Browser" self.get_success(self.handler.handle_oidc_callback(request)) -- cgit 1.5.1 From 2b7c180879e5d62145feed88375ba55f18fc2ae5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 26 Oct 2020 09:30:19 +0000 Subject: Start fewer opentracing spans (#8640) #8567 started a span for 
every background process. This is good as it means all Synapse code that gets run
should be in a span (unless in the sentinel logging context), but it means we
generate about 15x the number of spans as we did previously.

This PR attempts to reduce that number by a) not starting one for sending commands
to Redis, and b) deferring starting background processes until after we're sure
they're necessary. I don't really know how much this will help.
---
 changelog.d/8640.misc                         |  1 +
 synapse/handlers/appservice.py                | 50 +++++++++++++++++++++++----
 synapse/logging/opentracing.py                | 10 +++---
 synapse/metrics/background_process_metrics.py | 12 +++++--
 synapse/notifier.py                           | 34 ++++++------------
 synapse/push/pusherpool.py                    | 18 ++++++++--
 synapse/replication/tcp/redis.py              |  4 ++-
 tests/handlers/test_appservice.py             | 20 +++++------
 8 files changed, 96 insertions(+), 53 deletions(-)
 create mode 100644 changelog.d/8640.misc

(limited to 'tests')

diff --git a/changelog.d/8640.misc b/changelog.d/8640.misc
new file mode 100644
index 0000000000..cf6023f783
--- /dev/null
+++ b/changelog.d/8640.misc
@@ -0,0 +1 @@
+Reduce number of OpenTracing spans started.
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 07240d3a14..7826387e53 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 import logging
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Union
 
 from prometheus_client import Counter
 
@@ -30,7 +30,10 @@ from synapse.metrics import (
     event_processing_loop_counter,
     event_processing_loop_room_count,
 )
-from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.metrics.background_process_metrics import (
+    run_as_background_process,
+    wrap_as_background_process,
+)
 from synapse.types import Collection, JsonDict, RoomStreamToken, UserID
 from synapse.util.metrics import Measure
 
@@ -53,7 +56,7 @@ class ApplicationServicesHandler:
         self.current_max = 0
         self.is_processing = False
 
-    async def notify_interested_services(self, max_token: RoomStreamToken):
+    def notify_interested_services(self, max_token: RoomStreamToken):
         """Notifies (pushes) all application services interested in this event.
 
         Pushing is done asynchronously, so this method won't block for any
@@ -72,6 +75,12 @@
         if self.is_processing:
             return
 
+        # We only start a new background process if necessary rather than
+        # optimistically (to cut down on overhead).
+        self._notify_interested_services(max_token)
+
+    @wrap_as_background_process("notify_interested_services")
+    async def _notify_interested_services(self, max_token: RoomStreamToken):
         with Measure(self.clock, "notify_interested_services"):
             self.is_processing = True
             try:
@@ -166,8 +175,11 @@
             finally:
                 self.is_processing = False
 
-    async def notify_interested_services_ephemeral(
-        self, stream_key: str, new_token: Optional[int], users: Collection[UserID] = [],
+    def notify_interested_services_ephemeral(
+        self,
+        stream_key: str,
+        new_token: Optional[int],
+        users: Collection[Union[str, UserID]] = [],
     ):
         """This is called by the notifier in the background when an ephemeral
         event is handled by the homeserver.
@@ -183,13 +195,34 @@
             new_token: The latest stream token
             users: The user(s) involved with the event.
""" + if not self.notify_appservices: + return + + if stream_key not in ("typing_key", "receipt_key", "presence_key"): + return + services = [ service for service in self.store.get_app_services() if service.supports_ephemeral ] - if not services or not self.notify_appservices: + if not services: return + + # We only start a new background process if necessary rather than + # optimistically (to cut down on overhead). + self._notify_interested_services_ephemeral( + services, stream_key, new_token, users + ) + + @wrap_as_background_process("notify_interested_services_ephemeral") + async def _notify_interested_services_ephemeral( + self, + services: List[ApplicationService], + stream_key: str, + new_token: Optional[int], + users: Collection[Union[str, UserID]], + ): logger.info("Checking interested services for %s" % (stream_key)) with Measure(self.clock, "notify_interested_services_ephemeral"): for service in services: @@ -237,7 +270,7 @@ class ApplicationServicesHandler: return receipts async def _handle_presence( - self, service: ApplicationService, users: Collection[UserID] + self, service: ApplicationService, users: Collection[Union[str, UserID]] ): events = [] # type: List[JsonDict] presence_source = self.event_sources.sources["presence"] @@ -245,6 +278,9 @@ class ApplicationServicesHandler: service, "presence" ) for user in users: + if isinstance(user, str): + user = UserID.from_string(user) + interested = await service.is_interested_in_presence(user, self.store) if not interested: continue diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index e58850faff..ab586c318c 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -317,7 +317,7 @@ def ensure_active_span(message, ret=None): @contextlib.contextmanager -def _noop_context_manager(*args, **kwargs): +def noop_context_manager(*args, **kwargs): """Does exactly what it says on the tin""" yield @@ -413,7 +413,7 @@ def start_active_span( """ if opentracing is None: - return _noop_context_manager() + return noop_context_manager() return opentracing.tracer.start_active_span( operation_name, @@ -428,7 +428,7 @@ def start_active_span( def start_active_span_follows_from(operation_name, contexts): if opentracing is None: - return _noop_context_manager() + return noop_context_manager() references = [opentracing.follows_from(context) for context in contexts] scope = start_active_span(operation_name, references=references) @@ -459,7 +459,7 @@ def start_active_span_from_request( # Also, twisted uses byte arrays while opentracing expects strings. 
if opentracing is None: - return _noop_context_manager() + return noop_context_manager() header_dict = { k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders() @@ -497,7 +497,7 @@ def start_active_span_from_edu( """ if opentracing is None: - return _noop_context_manager() + return noop_context_manager() carrier = json_decoder.decode(edu_content.get("context", "{}")).get( "opentracing", {} diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 08fbf78eee..658f6ecd72 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -24,7 +24,7 @@ from prometheus_client.core import REGISTRY, Counter, Gauge from twisted.internet import defer from synapse.logging.context import LoggingContext, PreserveLoggingContext -from synapse.logging.opentracing import start_active_span +from synapse.logging.opentracing import noop_context_manager, start_active_span if TYPE_CHECKING: import resource @@ -167,7 +167,7 @@ class _BackgroundProcess: ) -def run_as_background_process(desc: str, func, *args, **kwargs): +def run_as_background_process(desc: str, func, *args, bg_start_span=True, **kwargs): """Run the given function in its own logcontext, with resource metrics This should be used to wrap processes which are fired off to run in the @@ -181,6 +181,9 @@ def run_as_background_process(desc: str, func, *args, **kwargs): Args: desc: a description for this background process type func: a function, which may return a Deferred or a coroutine + bg_start_span: Whether to start an opentracing span. Defaults to True. + Should only be disabled for processes that will not log to or tag + a span. args: positional args for func kwargs: keyword args for func @@ -199,7 +202,10 @@ def run_as_background_process(desc: str, func, *args, **kwargs): with BackgroundProcessLoggingContext(desc) as context: context.request = "%s-%i" % (desc, count) try: - with start_active_span(desc, tags={"request_id": context.request}): + ctx = noop_context_manager() + if bg_start_span: + ctx = start_active_span(desc, tags={"request_id": context.request}) + with ctx: result = func(*args, **kwargs) if inspect.isawaitable(result): diff --git a/synapse/notifier.py b/synapse/notifier.py index 858b487bec..eb56b26f21 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -40,7 +40,6 @@ from synapse.handlers.presence import format_user_presence_state from synapse.logging.context import PreserveLoggingContext from synapse.logging.utils import log_function from synapse.metrics import LaterGauge -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.streams.config import PaginationConfig from synapse.types import ( Collection, @@ -310,44 +309,37 @@ class Notifier: """ # poke any interested application service. 
-        run_as_background_process(
-            "_notify_app_services", self._notify_app_services, max_room_stream_token
-        )
-
-        run_as_background_process(
-            "_notify_pusher_pool", self._notify_pusher_pool, max_room_stream_token
-        )
+        self._notify_app_services(max_room_stream_token)
+        self._notify_pusher_pool(max_room_stream_token)
 
         if self.federation_sender:
             self.federation_sender.notify_new_events(max_room_stream_token)
 
-    async def _notify_app_services(self, max_room_stream_token: RoomStreamToken):
+    def _notify_app_services(self, max_room_stream_token: RoomStreamToken):
         try:
-            await self.appservice_handler.notify_interested_services(
-                max_room_stream_token
-            )
+            self.appservice_handler.notify_interested_services(max_room_stream_token)
         except Exception:
             logger.exception("Error notifying application services of event")
 
-    async def _notify_app_services_ephemeral(
+    def _notify_app_services_ephemeral(
         self,
         stream_key: str,
         new_token: Union[int, RoomStreamToken],
-        users: Collection[UserID] = [],
+        users: Collection[Union[str, UserID]] = [],
     ):
         try:
             stream_token = None
             if isinstance(new_token, int):
                 stream_token = new_token
-            await self.appservice_handler.notify_interested_services_ephemeral(
+            self.appservice_handler.notify_interested_services_ephemeral(
                 stream_key, stream_token, users
             )
         except Exception:
             logger.exception("Error notifying application services of event")
 
-    async def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken):
+    def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken):
         try:
-            await self._pusher_pool.on_new_notifications(max_room_stream_token)
+            self._pusher_pool.on_new_notifications(max_room_stream_token)
         except Exception:
             logger.exception("Error notifying pusher pool of event")
 
@@ -384,12 +376,8 @@ class Notifier:
         self.notify_replication()
 
         # Notify appservices
-        run_as_background_process(
-            "_notify_app_services_ephemeral",
-            self._notify_app_services_ephemeral,
-            stream_key,
-            new_token,
-            users,
+        self._notify_app_services_ephemeral(
+            stream_key, new_token, users,
         )
 
     def on_new_replication_data(self) -> None:
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 0080c68ce2..f325964983 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -19,7 +19,10 @@ from typing import TYPE_CHECKING, Dict, Union
 
 from prometheus_client import Gauge
 
-from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.metrics.background_process_metrics import (
+    run_as_background_process,
+    wrap_as_background_process,
+)
 from synapse.push import PusherConfigException
 from synapse.push.emailpusher import EmailPusher
 from synapse.push.httppusher import HttpPusher
@@ -187,7 +190,7 @@ class PusherPool:
             )
             await self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"])
 
-    async def on_new_notifications(self, max_token: RoomStreamToken):
+    def on_new_notifications(self, max_token: RoomStreamToken):
         if not self.pushers:
             # nothing to do here.
             return
@@ -201,6 +204,17 @@ class PusherPool:
             # Nothing to do
             return
 
+        # We only start a new background process if necessary rather than
+        # optimistically (to cut down on overhead).
+        self._on_new_notifications(max_token)
+
+    @wrap_as_background_process("on_new_notifications")
+    async def _on_new_notifications(self, max_token: RoomStreamToken):
+        # We just use the minimum stream ordering and ignore the vector clock
+        # component. This is safe to do as long as we *always* ignore the vector
+        # clock components.
+ max_stream_id = max_token.stream + prev_stream_id = self._last_room_stream_id_seen self._last_room_stream_id_seen = max_stream_id diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index de19705c1f..bc6ba709a7 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -166,7 +166,9 @@ class RedisSubscriber(txredisapi.SubscriberProtocol, AbstractConnection): Args: cmd (Command) """ - run_as_background_process("send-cmd", self._async_send_command, cmd) + run_as_background_process( + "send-cmd", self._async_send_command, cmd, bg_start_span=False + ) async def _async_send_command(self, cmd: Command): """Encode a replication command and send it over our outbound connection""" diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index ee4f3da31c..53763cd0f9 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -42,7 +42,6 @@ class AppServiceHandlerTestCase(unittest.TestCase): hs.get_clock.return_value = MockClock() self.handler = ApplicationServicesHandler(hs) - @defer.inlineCallbacks def test_notify_interested_services(self): interested_service = self._mkservice(is_interested=True) services = [ @@ -62,14 +61,12 @@ class AppServiceHandlerTestCase(unittest.TestCase): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred( - self.handler.notify_interested_services(RoomStreamToken(None, 0)) - ) + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + self.mock_scheduler.submit_event_for_as.assert_called_once_with( interested_service, event ) - @defer.inlineCallbacks def test_query_user_exists_unknown_user(self): user_id = "@someone:anywhere" services = [self._mkservice(is_interested=True)] @@ -83,12 +80,11 @@ class AppServiceHandlerTestCase(unittest.TestCase): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred( - self.handler.notify_interested_services(RoomStreamToken(None, 0)) - ) + + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + self.mock_as_api.query_user.assert_called_once_with(services[0], user_id) - @defer.inlineCallbacks def test_query_user_exists_known_user(self): user_id = "@someone:anywhere" services = [self._mkservice(is_interested=True)] @@ -102,9 +98,9 @@ class AppServiceHandlerTestCase(unittest.TestCase): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred( - self.handler.notify_interested_services(RoomStreamToken(None, 0)) - ) + + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + self.assertFalse( self.mock_as_api.query_user.called, "query_user called when it shouldn't have been.", -- cgit 1.5.1 From 913f8a06e43da760ccef329d43c139ec3c7fd7d5 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 26 Oct 2020 15:07:51 +0100 Subject: Add field `total` to device list in admin API (#8644) --- changelog.d/8644.misc | 1 + docs/admin_api/user_admin_api.rst | 5 ++++- synapse/rest/admin/devices.py | 2 +- tests/rest/admin/test_device.py | 17 +++++++++++++++++ 4 files changed, 23 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8644.misc (limited to 'tests') diff --git a/changelog.d/8644.misc b/changelog.d/8644.misc new file mode 100644 index 0000000000..87f2b72924 --- /dev/null +++ b/changelog.d/8644.misc @@ -0,0 +1 @@ +Add field `total` to device list in admin API. 
\ No newline at end of file
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 7ca902faba..0f3d99c826 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -375,7 +375,8 @@ A response body like the following is returned:
       "last_seen_ts": 1474491775025,
       "user_id": "<user_id>"
     }
-  ]
+  ],
+  "total": 2
 }
 
 **Parameters**
@@ -400,6 +401,8 @@ The following fields are returned in the JSON response body:
   devices was last seen. (May be a few minutes out of date, for efficiency
   reasons).
 - ``user_id`` - Owner of device.
+- ``total`` - Total number of user's devices.
+
 Delete multiple devices
 ------------------
 Deletes the given devices for a specific ``user_id``, and invalidates
diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py
index a163863322..ffd3aa38f7 100644
--- a/synapse/rest/admin/devices.py
+++ b/synapse/rest/admin/devices.py
@@ -119,7 +119,7 @@ class DevicesRestServlet(RestServlet):
             raise NotFoundError("Unknown user")
 
         devices = await self.device_handler.get_devices_by_user(target_user.to_string())
-        return 200, {"devices": devices}
+        return 200, {"devices": devices, "total": len(devices)}
 
 
 class DeleteDevicesRestServlet(RestServlet):
diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py
index 92c9058887..d89eb90cfe 100644
--- a/tests/rest/admin/test_device.py
+++ b/tests/rest/admin/test_device.py
@@ -393,6 +393,22 @@ class DevicesRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(400, channel.code, msg=channel.json_body)
         self.assertEqual("Can only lookup local users", channel.json_body["error"])
 
+    def test_user_has_no_devices(self):
+        """
+        Tests that a normal lookup for devices is successful
+        if the user has no devices
+        """
+
+        # Get devices
+        request, channel = self.make_request(
+            "GET", self.url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+        self.assertEqual(0, channel.json_body["total"])
+        self.assertEqual(0, len(channel.json_body["devices"]))
+
     def test_get_devices(self):
         """
         Tests that a normal lookup for devices is successful
@@ -409,6 +425,7 @@
         self.render(request)
 
         self.assertEqual(200, channel.code, msg=channel.json_body)
+        self.assertEqual(number_devices, channel.json_body["total"])
         self.assertEqual(number_devices, len(channel.json_body["devices"]))
         self.assertEqual(self.other_user, channel.json_body["devices"][0]["user_id"])
         # Check that all fields are available
-- cgit 1.5.1


From e8dbbcb64c2725933a5bcefeeeaff9efbd0c2261 Mon Sep 17 00:00:00 2001
From: Will Hunt
Date: Mon, 26 Oct 2020 14:51:33 +0000
Subject: Fix get|set_type_stream_id_for_appservice store functions (#8648)

---
 changelog.d/8648.bugfix                      |  1 +
 synapse/handlers/appservice.py               | 12 +++---
 synapse/storage/databases/main/appservice.py | 29 ++++++++++----
 tests/storage/test_appservice.py             | 56 ++++++++++++++++++++++++++++
 4 files changed, 85 insertions(+), 13 deletions(-)
 create mode 100644 changelog.d/8648.bugfix

(limited to 'tests')

diff --git a/changelog.d/8648.bugfix b/changelog.d/8648.bugfix
new file mode 100644
index 0000000000..aa71ad0ff2
--- /dev/null
+++ b/changelog.d/8648.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in v1.22.0rc1 which would cause ephemeral events to not be sent to appservices.
\ No newline at end of file diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 7826387e53..03e9ec4d4e 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -236,16 +236,16 @@ class ApplicationServicesHandler: events = await self._handle_receipts(service) if events: self.scheduler.submit_ephemeral_events_for_as(service, events) - await self.store.set_type_stream_id_for_appservice( - service, "read_receipt", new_token - ) + await self.store.set_type_stream_id_for_appservice( + service, "read_receipt", new_token + ) elif stream_key == "presence_key": events = await self._handle_presence(service, users) if events: self.scheduler.submit_ephemeral_events_for_as(service, events) - await self.store.set_type_stream_id_for_appservice( - service, "presence", new_token - ) + await self.store.set_type_stream_id_for_appservice( + service, "presence", new_token + ) async def _handle_typing(self, service: ApplicationService, new_token: int): typing_source = self.event_sources.sources["typing"] diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 43bf0f649a..637a938bac 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -369,17 +369,25 @@ class ApplicationServiceTransactionWorkerStore( async def get_type_stream_id_for_appservice( self, service: ApplicationService, type: str ) -> int: + if type not in ("read_receipt", "presence"): + raise ValueError( + "Expected type to be a valid application stream id type, got %s" + % (type,) + ) + def get_type_stream_id_for_appservice_txn(txn): stream_id_type = "%s_stream_id" % type txn.execute( - "SELECT ? FROM application_services_state WHERE as_id=?", - (stream_id_type, service.id,), + # We do NOT want to escape `stream_id_type`. + "SELECT %s FROM application_services_state WHERE as_id=?" + % stream_id_type, + (service.id,), ) - last_txn_id = txn.fetchone() - if last_txn_id is None or last_txn_id[0] is None: # no row exists + last_stream_id = txn.fetchone() + if last_stream_id is None or last_stream_id[0] is None: # no row exists return 0 else: - return int(last_txn_id[0]) + return int(last_stream_id[0]) return await self.db_pool.runInteraction( "get_type_stream_id_for_appservice", get_type_stream_id_for_appservice_txn @@ -388,11 +396,18 @@ class ApplicationServiceTransactionWorkerStore( async def set_type_stream_id_for_appservice( self, service: ApplicationService, type: str, pos: int ) -> None: + if type not in ("read_receipt", "presence"): + raise ValueError( + "Expected type to be a valid application stream id type, got %s" + % (type,) + ) + def set_type_stream_id_for_appservice_txn(txn): stream_id_type = "%s_stream_id" % type txn.execute( - "UPDATE ? SET device_list_stream_id = ? WHERE as_id=?", - (stream_id_type, pos, service.id), + "UPDATE application_services_state SET %s = ? WHERE as_id=?" 
+ % stream_id_type, + (pos, service.id), ) await self.db_pool.runInteraction( diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index c5c7987349..1ce29af5fd 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -410,6 +410,62 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): ) +class ApplicationServiceStoreTypeStreamIds(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + return hs + + def prepare(self, hs, reactor, clock): + self.service = Mock(id="foo") + self.store = self.hs.get_datastore() + self.get_success(self.store.set_appservice_state(self.service, "up")) + + def test_get_type_stream_id_for_appservice_no_value(self): + value = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "read_receipt") + ) + self.assertEquals(value, 0) + + value = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "presence") + ) + self.assertEquals(value, 0) + + def test_get_type_stream_id_for_appservice_invalid_type(self): + self.get_failure( + self.store.get_type_stream_id_for_appservice(self.service, "foobar"), + ValueError, + ) + + def test_set_type_stream_id_for_appservice(self): + read_receipt_value = 1024 + self.get_success( + self.store.set_type_stream_id_for_appservice( + self.service, "read_receipt", read_receipt_value + ) + ) + result = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "read_receipt") + ) + self.assertEqual(result, read_receipt_value) + + self.get_success( + self.store.set_type_stream_id_for_appservice( + self.service, "presence", read_receipt_value + ) + ) + result = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "presence") + ) + self.assertEqual(result, read_receipt_value) + + def test_set_type_stream_id_for_appservice_invalid_type(self): + self.get_failure( + self.store.set_type_stream_id_for_appservice(self.service, "foobar", 1024), + ValueError, + ) + + # required for ApplicationServiceTransactionStoreTestCase tests class TestTransactionStore(ApplicationServiceTransactionStore, ApplicationServiceStore): def __init__(self, database: DatabasePool, db_conn, hs): -- cgit 1.5.1 From 4ac3a8c5dcec00e8aa7a796b5bf05402133b49f0 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 26 Oct 2020 17:25:48 +0100 Subject: Fix a bug in the joined_rooms admin API (#8643) If the user was not in any rooms then the API returned the same error as if the user did not exist. --- changelog.d/8643.bugfix | 1 + synapse/rest/admin/users.py | 7 ++++--- tests/rest/admin/test_user.py | 16 +++++++++++++++- 3 files changed, 20 insertions(+), 4 deletions(-) create mode 100644 changelog.d/8643.bugfix (limited to 'tests') diff --git a/changelog.d/8643.bugfix b/changelog.d/8643.bugfix new file mode 100644 index 0000000000..fcda1ca871 --- /dev/null +++ b/changelog.d/8643.bugfix @@ -0,0 +1 @@ +Fix a bug in the `joined_rooms` admin API if the user has never joined any rooms. The bug was introduced, along with the API, in v1.21.0. 
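With this fix, looking up the joined rooms of an existing user who is in no rooms returns an empty list rather than a 404, as the diffs below show. A hedged sketch of exercising the endpoint (the homeserver URL, user ID and admin token are placeholders, not values from the patch):

```python
# Hedged sketch of calling the fixed admin endpoint; the base URL, user ID
# and token below are placeholders.
import requests

resp = requests.get(
    "http://localhost:8008/_synapse/admin/v1/users/@alice:example.com/joined_rooms",
    headers={"Authorization": "Bearer <admin_access_token>"},
)

# A user with no memberships now yields 200 with an empty list (previously
# this path incorrectly raised "User not found"):
#     {"joined_rooms": [], "total": 0}
print(resp.status_code, resp.json())
```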
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 8efefbc0a0..e71d9b0e1c 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -702,9 +702,10 @@ class UserMembershipRestServlet(RestServlet):
         if not self.is_mine(UserID.from_string(user_id)):
             raise SynapseError(400, "Can only lookup local users")
 
-        room_ids = await self.store.get_rooms_for_user(user_id)
-        if not room_ids:
-            raise NotFoundError("User not found")
+        user = await self.store.get_user_by_id(user_id)
+        if user is None:
+            raise NotFoundError("Unknown user")
 
+        room_ids = await self.store.get_rooms_for_user(user_id)
         ret = {"joined_rooms": list(room_ids), "total": len(room_ids)}
         return 200, ret
 
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 98d0623734..d4b7ae21d1 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -1016,7 +1016,6 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets,
         login.register_servlets,
-        sync.register_servlets,
         room.register_servlets,
     ]
 
@@ -1082,6 +1081,21 @@
         self.assertEqual(400, channel.code, msg=channel.json_body)
         self.assertEqual("Can only lookup local users", channel.json_body["error"])
 
+    def test_no_memberships(self):
+        """
+        Tests that a normal lookup for rooms is successful
+        if the user has no memberships
+        """
+        # Get rooms
+        request, channel = self.make_request(
+            "GET", self.url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+        self.assertEqual(0, channel.json_body["total"])
+        self.assertEqual(0, len(channel.json_body["joined_rooms"]))
+
     def test_get_rooms(self):
         """
         Tests that a normal lookup for rooms is successful
-- cgit 1.5.1


From 49d72dea2a6804e4795fb9e1cbc1f1bb1354f08f Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Mon, 26 Oct 2020 18:02:28 +0100
Subject: Add an admin api to delete local media. (#8519)

Related to: #6459, #3479

Add `DELETE /_synapse/admin/v1/media/<server_name>/<media_id>`
to delete a single file from the server.
---
 changelog.d/8519.feature                           |   1 +
 docs/admin_api/media_admin_api.md                  |  79 +++
 synapse/rest/admin/media.py                        |  81 ++-
 synapse/rest/media/v1/filepath.py                  |  17 +
 synapse/rest/media/v1/media_repository.py          |  72 ++-
 synapse/storage/databases/main/media_repository.py |  53 ++
 tests/rest/admin/test_media.py                     | 568 +++++++++++++++++++++
 7 files changed, 868 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/8519.feature
 create mode 100644 tests/rest/admin/test_media.py

(limited to 'tests')

diff --git a/changelog.d/8519.feature b/changelog.d/8519.feature
new file mode 100644
index 0000000000..e2ab548681
--- /dev/null
+++ b/changelog.d/8519.feature
@@ -0,0 +1 @@
+Add an admin API to delete a single file, or files that were not used for a defined time, from the server. Contributed by @dklimpel.
\ No newline at end of file
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index 26948770d8..3994e1f1a9 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -100,3 +100,82 @@ Response:
     "num_quarantined": 10  # The number of media items successfully quarantined
 }
 ```
+
+# Delete local media
+This API deletes the *local* media from the disk of your own server.
+This includes any local thumbnails and copies of media downloaded from
+remote homeservers.
+This API will not affect media that has been uploaded to external
+media repositories (e.g. https://github.com/turt2live/matrix-media-repo/).
+See also [purge_remote_media.rst](purge_remote_media.rst).
+
+## Delete a specific local media
+Delete a specific `media_id`.
+
+Request:
+
+```
+DELETE /_synapse/admin/v1/media/<server_name>/<media_id>
+
+{}
+```
+
+URL Parameters
+
+* `server_name`: string - The name of your local server (e.g. `matrix.org`)
+* `media_id`: string - The ID of the media (e.g. `abcdefghijklmnopqrstuvwx`)
+
+Response:
+
+```json
+    {
+        "deleted_media": [
+            "abcdefghijklmnopqrstuvwx"
+        ],
+        "total": 1
+    }
+```
+
+The following fields are returned in the JSON response body:
+
+* `deleted_media`: an array of strings - List of deleted `media_id`
+* `total`: integer - Total number of deleted `media_id`
+
+## Delete local media by date or size
+
+Request:
+
+```
+POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
+
+{}
+```
+
+URL Parameters
+
+* `server_name`: string - The name of your local server (e.g. `matrix.org`).
+* `before_ts`: string representing a positive integer - Unix timestamp in ms.
+Files that were last used before this timestamp will be deleted. It is the timestamp of
+last access, not the timestamp of creation.
+* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
+Files that are larger will be deleted. Defaults to `0`.
+* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files
+that are still used in image data (e.g. user profile, room avatar).
+If `false`, these files will be deleted. Defaults to `true`.
+
+Response:
+
+```json
+    {
+        "deleted_media": [
+            "abcdefghijklmnopqrstuvwx",
+            "abcdefghijklmnopqrstuvwz"
+        ],
+        "total": 2
+    }
+```
+
+The following fields are returned in the JSON response body:
+
+* `deleted_media`: an array of strings - List of deleted `media_id`
+* `total`: integer - Total number of deleted `media_id`
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index ee75095c0e..ba50cb876d 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -16,9 +16,10 @@
 
 import logging
 
-from synapse.api.errors import AuthError
-from synapse.http.servlet import RestServlet, parse_integer
+from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
+from synapse.http.servlet import RestServlet, parse_boolean, parse_integer
 from synapse.rest.admin._base import (
+    admin_patterns,
     assert_requester_is_admin,
     assert_user_is_admin,
     historical_admin_path_patterns,
@@ -150,6 +151,80 @@ class PurgeMediaCacheRestServlet(RestServlet):
 
         return 200, ret
 
 
+class DeleteMediaByID(RestServlet):
+    """Delete local media by a given ID. Removes it from this server.
+    """
+
+    PATTERNS = admin_patterns("/media/(?P<server_name>[^/]+)/(?P<media_id>[^/]+)")
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+        self.server_name = hs.hostname
+        self.media_repository = hs.get_media_repository()
+
+    async def on_DELETE(self, request, server_name: str, media_id: str):
+        await assert_requester_is_admin(self.auth, request)
+
+        if self.server_name != server_name:
+            raise SynapseError(400, "Can only delete local media")
+
+        if await self.store.get_local_media(media_id) is None:
+            raise NotFoundError("Unknown media")
+
+        logging.info("Deleting local media by ID: %s", media_id)
+
+        deleted_media, total = await self.media_repository.delete_local_media(media_id)
+        return 200, {"deleted_media": deleted_media, "total": total}
+
+
+class DeleteMediaByDateSize(RestServlet):
+    """Delete local media and local copies of remote media by
+    timestamp and size.
+    """
+
+    PATTERNS = admin_patterns("/media/(?P<server_name>[^/]+)/delete")
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+        self.server_name = hs.hostname
+        self.media_repository = hs.get_media_repository()
+
+    async def on_POST(self, request, server_name: str):
+        await assert_requester_is_admin(self.auth, request)
+
+        before_ts = parse_integer(request, "before_ts", required=True)
+        size_gt = parse_integer(request, "size_gt", default=0)
+        keep_profiles = parse_boolean(request, "keep_profiles", default=True)
+
+        if before_ts < 0:
+            raise SynapseError(
+                400,
+                "Query parameter before_ts must be a string representing a positive integer.",
+                errcode=Codes.INVALID_PARAM,
+            )
+        if size_gt < 0:
+            raise SynapseError(
+                400,
+                "Query parameter size_gt must be a string representing a positive integer.",
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        if self.server_name != server_name:
+            raise SynapseError(400, "Can only delete local media")
+
+        logging.info(
+            "Deleting local media by timestamp: %s, size larger than: %s, keep profile media: %s"
+            % (before_ts, size_gt, keep_profiles)
+        )
+
+        deleted_media, total = await self.media_repository.delete_old_local_media(
+            before_ts, size_gt, keep_profiles
+        )
+        return 200, {"deleted_media": deleted_media, "total": total}
+
+
 def register_servlets_for_media_repo(hs, http_server):
     """
     Media repo specific APIs.
@@ -159,3 +234,5 @@ def register_servlets_for_media_repo(hs, http_server):
     QuarantineMediaByID(hs).register(http_server)
     QuarantineMediaByUser(hs).register(http_server)
     ListMediaInRoom(hs).register(http_server)
+    DeleteMediaByID(hs).register(http_server)
+    DeleteMediaByDateSize(hs).register(http_server)
diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py
index 7447eeaebe..9e079f672f 100644
--- a/synapse/rest/media/v1/filepath.py
+++ b/synapse/rest/media/v1/filepath.py
@@ -69,6 +69,23 @@ class MediaFilePaths:
 
     local_media_thumbnail = _wrap_in_base_path(local_media_thumbnail_rel)
 
+    def local_media_thumbnail_dir(self, media_id: str) -> str:
+        """
+        Retrieve the local store path of thumbnails of a given media_id
+
+        Args:
+            media_id: The media ID to query.
+ Returns: + Path of local_thumbnails from media_id + """ + return os.path.join( + self.base_path, + "local_thumbnails", + media_id[0:2], + media_id[2:4], + media_id[4:], + ) + def remote_media_filepath_rel(self, server_name, file_id): return os.path.join( "remote_content", server_name, file_id[0:2], file_id[2:4], file_id[4:] diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index e1192b47cd..5cce7237a0 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -18,7 +18,7 @@ import errno import logging import os import shutil -from typing import IO, Dict, Optional, Tuple +from typing import IO, Dict, List, Optional, Tuple import twisted.internet.error import twisted.web.http @@ -767,6 +767,76 @@ class MediaRepository: return {"deleted": deleted} + async def delete_local_media(self, media_id: str) -> Tuple[List[str], int]: + """ + Delete the given local or remote media ID from this server + + Args: + media_id: The media ID to delete. + Returns: + A tuple of (list of deleted media IDs, total deleted media IDs). + """ + return await self._remove_local_media_from_disk([media_id]) + + async def delete_old_local_media( + self, before_ts: int, size_gt: int = 0, keep_profiles: bool = True, + ) -> Tuple[List[str], int]: + """ + Delete local or remote media from this server by size and timestamp. Removes + media files, any thumbnails and cached URLs. + + Args: + before_ts: Unix timestamp in ms. + Files that were last used before this timestamp will be deleted + size_gt: Size of the media in bytes. Files that are larger will be deleted + keep_profiles: Switch to delete also files that are still used in image data + (e.g user profile, room avatar) + If false these files will be deleted + Returns: + A tuple of (list of deleted media IDs, total deleted media IDs). + """ + old_media = await self.store.get_local_media_before( + before_ts, size_gt, keep_profiles, + ) + return await self._remove_local_media_from_disk(old_media) + + async def _remove_local_media_from_disk( + self, media_ids: List[str] + ) -> Tuple[List[str], int]: + """ + Delete local or remote media from this server. Removes media files, + any thumbnails and cached URLs. + + Args: + media_ids: List of media_id to delete + Returns: + A tuple of (list of deleted media IDs, total deleted media IDs). + """ + removed_media = [] + for media_id in media_ids: + logger.info("Deleting media with ID '%s'", media_id) + full_path = self.filepaths.local_media_filepath(media_id) + try: + os.remove(full_path) + except OSError as e: + logger.warning("Failed to remove file: %r: %s", full_path, e) + if e.errno == errno.ENOENT: + pass + else: + continue + + thumbnail_dir = self.filepaths.local_media_thumbnail_dir(media_id) + shutil.rmtree(thumbnail_dir, ignore_errors=True) + + await self.store.delete_remote_media(self.server_name, media_id) + + await self.store.delete_url_cache((media_id,)) + await self.store.delete_url_cache_media((media_id,)) + + removed_media.append(media_id) + + return removed_media, len(removed_media) + class MediaRepositoryResource(Resource): """File uploading and downloading. 
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index cc538c5c10..7ef5f1bf2b 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -93,6 +93,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) + self.server_name = hs.hostname async def get_local_media(self, media_id: str) -> Optional[Dict[str, Any]]: """Get the metadata for a local piece of media @@ -115,6 +116,58 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): desc="get_local_media", ) + async def get_local_media_before( + self, before_ts: int, size_gt: int, keep_profiles: bool, + ) -> Optional[List[str]]: + + # to find files that have never been accessed (last_access_ts IS NULL) + # compare with `created_ts` + sql = """ + SELECT media_id + FROM local_media_repository AS lmr + WHERE + ( last_access_ts < ? + OR ( created_ts < ? AND last_access_ts IS NULL ) ) + AND media_length > ? + """ + + if keep_profiles: + sql_keep = """ + AND ( + NOT EXISTS + (SELECT 1 + FROM profiles + WHERE profiles.avatar_url = '{media_prefix}' || lmr.media_id) + AND NOT EXISTS + (SELECT 1 + FROM groups + WHERE groups.avatar_url = '{media_prefix}' || lmr.media_id) + AND NOT EXISTS + (SELECT 1 + FROM room_memberships + WHERE room_memberships.avatar_url = '{media_prefix}' || lmr.media_id) + AND NOT EXISTS + (SELECT 1 + FROM user_directory + WHERE user_directory.avatar_url = '{media_prefix}' || lmr.media_id) + AND NOT EXISTS + (SELECT 1 + FROM room_stats_state + WHERE room_stats_state.avatar = '{media_prefix}' || lmr.media_id) + ) + """.format( + media_prefix="mxc://%s/" % (self.server_name,), + ) + sql += sql_keep + + def _get_local_media_before_txn(txn): + txn.execute(sql, (before_ts, before_ts, size_gt)) + return [row[0] for row in txn] + + return await self.db_pool.runInteraction( + "get_local_media_before", _get_local_media_before_txn + ) + async def store_local_media( self, media_id, diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py new file mode 100644 index 0000000000..721fa1ed51 --- /dev/null +++ b/tests/rest/admin/test_media.py @@ -0,0 +1,568 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Dirk Klimpel +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import os +from binascii import unhexlify + +import synapse.rest.admin +from synapse.api.errors import Codes +from synapse.rest.client.v1 import login, profile, room +from synapse.rest.media.v1.filepath import MediaFilePaths + +from tests import unittest + + +class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_media_repo, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.handler = hs.get_device_handler() + self.media_repo = hs.get_media_repository_resource() + self.server_name = hs.hostname + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.filepaths = MediaFilePaths(hs.config.media_store_path) + + def test_no_auth(self): + """ + Try to delete media without authentication. + """ + url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, "12345") + + request, channel = self.make_request("DELETE", url, b"{}") + self.render(request) + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self): + """ + If the user is not a server admin, an error is returned. + """ + self.other_user = self.register_user("user", "pass") + self.other_user_token = self.login("user", "pass") + + url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, "12345") + + request, channel = self.make_request( + "DELETE", url, access_token=self.other_user_token, + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_media_does_not_exist(self): + """ + Tests that a lookup for a media that does not exist returns a 404 + """ + url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, "12345") + + request, channel = self.make_request( + "DELETE", url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_media_is_not_local(self): + """ + Tests that a lookup for a media that is not a local returns a 400 + """ + url = "/_synapse/admin/v1/media/%s/%s" % ("unknown_domain", "12345") + + request, channel = self.make_request( + "DELETE", url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual("Can only delete local media", channel.json_body["error"]) + + def test_delete_media(self): + """ + Tests that delete a media is successfully + """ + + download_resource = self.media_repo.children[b"download"] + upload_resource = self.media_repo.children[b"upload"] + image_data = unhexlify( + b"89504e470d0a1a0a0000000d4948445200000001000000010806" + b"0000001f15c4890000000a49444154789c63000100000500010d" + b"0a2db40000000049454e44ae426082" + ) + + # Upload some media into the room + response = self.helper.upload_media( + upload_resource, image_data, tok=self.admin_user_tok, expect_code=200 + ) + # Extract media ID from the response + server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' + server_name, media_id = server_and_media_id.split("/") + + self.assertEqual(server_name, self.server_name) + + # Attempt to access media + request, channel = self.make_request( + "GET", + server_and_media_id, + 
shorthand=False, + access_token=self.admin_user_tok, + ) + request.render(download_resource) + self.pump(1.0) + + # Should be successful + self.assertEqual( + 200, + channel.code, + msg=( + "Expected to receive a 200 on accessing media: %s" % server_and_media_id + ), + ) + + # Test if the file exists + local_path = self.filepaths.local_media_filepath(media_id) + self.assertTrue(os.path.exists(local_path)) + + url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, media_id) + + # Delete media + request, channel = self.make_request( + "DELETE", url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(1, channel.json_body["total"]) + self.assertEqual( + media_id, channel.json_body["deleted_media"][0], + ) + + # Attempt to access media + request, channel = self.make_request( + "GET", + server_and_media_id, + shorthand=False, + access_token=self.admin_user_tok, + ) + request.render(download_resource) + self.pump(1.0) + self.assertEqual( + 404, + channel.code, + msg=( + "Expected to receive a 404 on accessing deleted media: %s" + % server_and_media_id + ), + ) + + # Test if the file is deleted + self.assertFalse(os.path.exists(local_path)) + + +class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_media_repo, + login.register_servlets, + profile.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.handler = hs.get_device_handler() + self.media_repo = hs.get_media_repository_resource() + self.server_name = hs.hostname + self.clock = hs.clock + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.filepaths = MediaFilePaths(hs.config.media_store_path) + self.url = "/_synapse/admin/v1/media/%s/delete" % self.server_name + + def test_no_auth(self): + """ + Try to delete media without authentication. + """ + + request, channel = self.make_request("POST", self.url, b"{}") + self.render(request) + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self): + """ + If the user is not a server admin, an error is returned. + """ + self.other_user = self.register_user("user", "pass") + self.other_user_token = self.login("user", "pass") + + request, channel = self.make_request( + "POST", self.url, access_token=self.other_user_token, + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_media_is_not_local(self): + """ + Tests that a lookup for media that is not local returns a 400 + """ + url = "/_synapse/admin/v1/media/%s/delete" % "unknown_domain" + + request, channel = self.make_request( + "POST", url + "?before_ts=1234", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual("Can only delete local media", channel.json_body["error"]) + + def test_missing_parameter(self): + """ + If the parameter `before_ts` is missing, an error is returned. 
+ """ + request, channel = self.make_request( + "POST", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "Missing integer query parameter b'before_ts'", channel.json_body["error"] + ) + + def test_invalid_parameter(self): + """ + If parameters are invalid, an error is returned. + """ + request, channel = self.make_request( + "POST", self.url + "?before_ts=-1234", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "Query parameter before_ts must be a string representing a positive integer.", + channel.json_body["error"], + ) + + request, channel = self.make_request( + "POST", + self.url + "?before_ts=1234&size_gt=-1234", + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "Query parameter size_gt must be a string representing a positive integer.", + channel.json_body["error"], + ) + + request, channel = self.make_request( + "POST", + self.url + "?before_ts=1234&keep_profiles=not_bool", + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) + self.assertEqual( + "Boolean query parameter b'keep_profiles' must be one of ['true', 'false']", + channel.json_body["error"], + ) + + def test_delete_media_never_accessed(self): + """ + Tests that media deleted if it is older than `before_ts` and never accessed + `last_access_ts` is `NULL` and `created_ts` < `before_ts` + """ + + # upload and do not access + server_and_media_id = self._create_media() + self.pump(1.0) + + # test that the file exists + media_id = server_and_media_id.split("/")[1] + local_path = self.filepaths.local_media_filepath(media_id) + self.assertTrue(os.path.exists(local_path)) + + # timestamp after upload/create + now_ms = self.clock.time_msec() + request, channel = self.make_request( + "POST", + self.url + "?before_ts=" + str(now_ms), + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(1, channel.json_body["total"]) + self.assertEqual( + media_id, channel.json_body["deleted_media"][0], + ) + + self._access_media(server_and_media_id, False) + + def test_keep_media_by_date(self): + """ + Tests that media is not deleted if it is newer than `before_ts` + """ + + # timestamp before upload + now_ms = self.clock.time_msec() + server_and_media_id = self._create_media() + + self._access_media(server_and_media_id) + + request, channel = self.make_request( + "POST", + self.url + "?before_ts=" + str(now_ms), + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(0, channel.json_body["total"]) + + self._access_media(server_and_media_id) + + # timestamp after upload + now_ms = self.clock.time_msec() + request, channel = self.make_request( + "POST", + self.url + "?before_ts=" + str(now_ms), + access_token=self.admin_user_tok, + ) + self.render(request) + 
self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(1, channel.json_body["total"]) + self.assertEqual( + server_and_media_id.split("/")[1], channel.json_body["deleted_media"][0], + ) + + self._access_media(server_and_media_id, False) + + def test_keep_media_by_size(self): + """ + Tests that media is not deleted if its size is smaller than or equal + to `size_gt` + """ + server_and_media_id = self._create_media() + + self._access_media(server_and_media_id) + + now_ms = self.clock.time_msec() + request, channel = self.make_request( + "POST", + self.url + "?before_ts=" + str(now_ms) + "&size_gt=67", + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(0, channel.json_body["total"]) + + self._access_media(server_and_media_id) + + now_ms = self.clock.time_msec() + request, channel = self.make_request( + "POST", + self.url + "?before_ts=" + str(now_ms) + "&size_gt=66", + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(1, channel.json_body["total"]) + self.assertEqual( + server_and_media_id.split("/")[1], channel.json_body["deleted_media"][0], + ) + + self._access_media(server_and_media_id, False) + + def test_keep_media_by_user_avatar(self): + """ + Tests that we do not delete media if is used as a user avatar + Tests parameter `keep_profiles` + """ + server_and_media_id = self._create_media() + + self._access_media(server_and_media_id) + + # set media as avatar + request, channel = self.make_request( + "PUT", + "/profile/%s/avatar_url" % (self.admin_user,), + content=json.dumps({"avatar_url": "mxc://%s" % (server_and_media_id,)}), + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, channel.code, msg=channel.json_body) + + now_ms = self.clock.time_msec() + request, channel = self.make_request( + "POST", + self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=true", + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(0, channel.json_body["total"]) + + self._access_media(server_and_media_id) + + now_ms = self.clock.time_msec() + request, channel = self.make_request( + "POST", + self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=false", + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(1, channel.json_body["total"]) + self.assertEqual( + server_and_media_id.split("/")[1], channel.json_body["deleted_media"][0], + ) + + self._access_media(server_and_media_id, False) + + def test_keep_media_by_room_avatar(self): + """ + Tests that we do not delete media if it is used as a room avatar + Tests parameter `keep_profiles` + """ + server_and_media_id = self._create_media() + + self._access_media(server_and_media_id) + + # set media as room avatar + room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + request, channel = self.make_request( + "PUT", + "/rooms/%s/state/m.room.avatar" % (room_id,), + content=json.dumps({"url": "mxc://%s" % (server_and_media_id,)}), + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, channel.code, msg=channel.json_body) + + now_ms = self.clock.time_msec() + request, channel = self.make_request( + "POST", + self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=true", + 
access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+        self.assertEqual(0, channel.json_body["total"])
+
+        self._access_media(server_and_media_id)
+
+        now_ms = self.clock.time_msec()
+        request, channel = self.make_request(
+            "POST",
+            self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=false",
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+        self.assertEqual(1, channel.json_body["total"])
+        self.assertEqual(
+            server_and_media_id.split("/")[1], channel.json_body["deleted_media"][0],
+        )
+
+        self._access_media(server_and_media_id, False)
+
+    def _create_media(self):
+        """
+        Create a media item and return its server_and_media_id
+        """
+        upload_resource = self.media_repo.children[b"upload"]
+        # file size is 67 bytes
+        image_data = unhexlify(
+            b"89504e470d0a1a0a0000000d4948445200000001000000010806"
+            b"0000001f15c4890000000a49444154789c63000100000500010d"
+            b"0a2db40000000049454e44ae426082"
+        )
+
+        # Upload some media into the room
+        response = self.helper.upload_media(
+            upload_resource, image_data, tok=self.admin_user_tok, expect_code=200
+        )
+        # Extract media ID from the response
+        server_and_media_id = response["content_uri"][6:]  # Cut off 'mxc://'
+        server_name = server_and_media_id.split("/")[0]
+
+        # Check that the new media is local, not remote
+        self.assertEqual(server_name, self.server_name)
+
+        return server_and_media_id
+
+    def _access_media(self, server_and_media_id, expect_success=True):
+        """
+        Try to access a media item and check the result
+        """
+        download_resource = self.media_repo.children[b"download"]
+
+        media_id = server_and_media_id.split("/")[1]
+        local_path = self.filepaths.local_media_filepath(media_id)
+
+        request, channel = self.make_request(
+            "GET",
+            server_and_media_id,
+            shorthand=False,
+            access_token=self.admin_user_tok,
+        )
+        request.render(download_resource)
+        self.pump(1.0)
+
+        if expect_success:
+            self.assertEqual(
+                200,
+                channel.code,
+                msg=(
+                    "Expected to receive a 200 on accessing media: %s"
+                    % server_and_media_id
+                ),
+            )
+            # Test that the file exists
+            self.assertTrue(os.path.exists(local_path))
+        else:
+            self.assertEqual(
+                404,
+                channel.code,
+                msg=(
+                    "Expected to receive a 404 on accessing deleted media: %s"
+                    % (server_and_media_id)
+                ),
+            )
+            # Test that the file is deleted
+            self.assertFalse(os.path.exists(local_path))
-- 
cgit 1.5.1


From 66e6801c3e6e8a0e8c6370079e72a4230e08b3eb Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Mon, 26 Oct 2020 19:16:37 +0100
Subject: Split admin API for reported events into a detail and a list view (#8539)

Split the admin API for reported events into a detail and a list view.
The API was introduced with #8217 in Synapse v1.21.0.

It makes the list (`GET /_synapse/admin/v1/event_reports`) less complex
and provides a better overview. The details can be queried with:
`GET /_synapse/admin/v1/event_reports/<report_id>`. It is similar to the
room and user APIs.

It is a kind of regression in `GET /_synapse/admin/v1/event_reports`:
`event_json` was removed. But the API was introduced only one version
earlier, and it is an admin API (not under spec).
Signed-off-by: Dirk Klimpel dirk@klimpel.org --- changelog.d/8539.feature | 1 + docs/admin_api/event_reports.rst | 144 +++++++++++++++--------- synapse/rest/admin/__init__.py | 6 +- synapse/rest/admin/event_reports.py | 46 +++++++- synapse/storage/databases/main/room.py | 104 ++++++++++++++--- tests/rest/admin/test_event_reports.py | 196 ++++++++++++++++++++++++++++++--- 6 files changed, 411 insertions(+), 86 deletions(-) create mode 100644 changelog.d/8539.feature (limited to 'tests') diff --git a/changelog.d/8539.feature b/changelog.d/8539.feature new file mode 100644 index 0000000000..15ce02fb86 --- /dev/null +++ b/changelog.d/8539.feature @@ -0,0 +1 @@ +Split admin API for reported events (`GET /_synapse/admin/v1/event_reports`) into detail and list endpoints. This is a breaking change to #8217 which was introduced in Synapse v1.21.0. Those who already use this API should check their scripts. Contributed by @dklimpel. \ No newline at end of file diff --git a/docs/admin_api/event_reports.rst b/docs/admin_api/event_reports.rst index 461be01230..5f7b0fa6bb 100644 --- a/docs/admin_api/event_reports.rst +++ b/docs/admin_api/event_reports.rst @@ -17,67 +17,26 @@ It returns a JSON body like the following: { "event_reports": [ { - "content": { - "reason": "foo", - "score": -100 - }, "event_id": "$bNUFCwGzWca1meCGkjp-zwslF-GfVcXukvRLI1_FaVY", - "event_json": { - "auth_events": [ - "$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M", - "$oggsNXxzPFRE3y53SUNd7nsj69-QzKv03a1RucHu-ws" - ], - "content": { - "body": "matrix.org: This Week in Matrix", - "format": "org.matrix.custom.html", - "formatted_body": "matrix.org:
This Week in Matrix",
-                        "msgtype": "m.notice"
-                    },
-                    "depth": 546,
-                    "hashes": {
-                        "sha256": "xK1//xnmvHJIOvbgXlkI8eEqdvoMmihVDJ9J4SNlsAw"
-                    },
-                    "origin": "matrix.org",
-                    "origin_server_ts": 1592291711430,
-                    "prev_events": [
-                        "$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M"
-                    ],
-                    "prev_state": [],
-                    "room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org",
-                    "sender": "@foobar:matrix.org",
-                    "signatures": {
-                        "matrix.org": {
-                            "ed25519:a_JaEG": "cs+OUKW/iHx5pEidbWxh0UiNNHwe46Ai9LwNz+Ah16aWDNszVIe2gaAcVZfvNsBhakQTew51tlKmL2kspXk/Dg"
-                        }
-                    },
-                    "type": "m.room.message",
-                    "unsigned": {
-                        "age_ts": 1592291711430,
-                    }
-                },
                 "id": 2,
                 "reason": "foo",
+                "score": -100,
                 "received_ts": 1570897107409,
-                "room_alias": "#alias1:matrix.org",
+                "canonical_alias": "#alias1:matrix.org",
                 "room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org",
+                "name": "Matrix HQ",
                 "sender": "@foobar:matrix.org",
                 "user_id": "@foo:matrix.org"
             },
             {
-                "content": {
-                    "reason": "bar",
-                    "score": -100
-                },
                 "event_id": "$3IcdZsDaN_En-S1DF4EMCy3v4gNRKeOJs8W5qTOKj4I",
-                "event_json": {
-                    // hidden items
-                    // see above
-                },
                 "id": 3,
                 "reason": "bar",
+                "score": -100,
                 "received_ts": 1598889612059,
-                "room_alias": "#alias2:matrix.org",
+                "canonical_alias": "#alias2:matrix.org",
                 "room_id": "!eGvUQuTCkHGVwNMOjv:matrix.org",
+                "name": "Your room name here",
                 "sender": "@foobar:matrix.org",
                 "user_id": "@bar:matrix.org"
             }
@@ -113,17 +72,94 @@ The following fields are returned in the JSON response body:
 - ``id``: integer - ID of event report.
 - ``received_ts``: integer - The timestamp (in milliseconds since the unix epoch) when this report was sent.
 - ``room_id``: string - The ID of the room in which the event being reported is located.
+- ``name``: string - The name of the room.
 - ``event_id``: string - The ID of the reported event.
 - ``user_id``: string - This is the user who reported the event and wrote the reason.
 - ``reason``: string - Comment made by the ``user_id`` in this report. May be blank.
-- ``content``: object - Content of reported event.
-
-  - ``reason``: string - Comment made by the ``user_id`` in this report. May be blank.
-  - ``score``: integer - Content is reported based upon a negative score, where -100 is "most offensive" and 0 is "inoffensive".
-
+- ``score``: integer - Content is reported based upon a negative score, where -100 is "most offensive" and 0 is "inoffensive".
 - ``sender``: string - This is the ID of the user who sent the original message/event that was reported.
-- ``room_alias``: string - The alias of the room. ``null`` if the room does not have a canonical alias set.
-- ``event_json``: object - Details of the original event that was reported.
+- ``canonical_alias``: string - The canonical alias of the room. ``null`` if the room does not have a canonical alias set.
 - ``next_token``: integer - Indication for pagination. See above.
 - ``total``: integer - Total number of event reports related to the query (``user_id`` and ``room_id``).
 
+Show details of a specific event report
+=======================================
+
+This API returns information about a specific event report.
+
+The API is::
+
+    GET /_synapse/admin/v1/event_reports/<report_id>
+
+To use it, you will need to authenticate by providing an ``access_token`` for a
+server admin: see `README.rst <README.rst>`_.
+
+It returns a JSON body like the following:
+
+.. code:: jsonc
+
+    {
+        "event_id": "$bNUFCwGzWca1meCGkjp-zwslF-GfVcXukvRLI1_FaVY",
+        "event_json": {
+            "auth_events": [
+                "$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M",
+                "$oggsNXxzPFRE3y53SUNd7nsj69-QzKv03a1RucHu-ws"
+            ],
+            "content": {
+                "body": "matrix.org: This Week in Matrix",
+                "format": "org.matrix.custom.html",
+                "formatted_body": "matrix.org: This Week in Matrix",
+                "msgtype": "m.notice"
+            },
+            "depth": 546,
+            "hashes": {
+                "sha256": "xK1//xnmvHJIOvbgXlkI8eEqdvoMmihVDJ9J4SNlsAw"
+            },
+            "origin": "matrix.org",
+            "origin_server_ts": 1592291711430,
+            "prev_events": [
+                "$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M"
+            ],
+            "prev_state": [],
+            "room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org",
+            "sender": "@foobar:matrix.org",
+            "signatures": {
+                "matrix.org": {
+                    "ed25519:a_JaEG": "cs+OUKW/iHx5pEidbWxh0UiNNHwe46Ai9LwNz+Ah16aWDNszVIe2gaAcVZfvNsBhakQTew51tlKmL2kspXk/Dg"
+                }
+            },
+            "type": "m.room.message",
+            "unsigned": {
+                "age_ts": 1592291711430,
+            }
+        },
+        "id": <report_id>,
+        "reason": "foo",
+        "score": -100,
+        "received_ts": 1570897107409,
+        "canonical_alias": "#alias1:matrix.org",
+        "room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org",
+        "name": "Matrix HQ",
+        "sender": "@foobar:matrix.org",
+        "user_id": "@foo:matrix.org"
+    }
+
+**URL parameters:**
+
+- ``report_id``: string - The ID of the event report.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- ``id``: integer - ID of event report.
+- ``received_ts``: integer - The timestamp (in milliseconds since the unix epoch) when this report was sent.
+- ``room_id``: string - The ID of the room in which the event being reported is located.
+- ``name``: string - The name of the room.
+- ``event_id``: string - The ID of the reported event.
+- ``user_id``: string - This is the user who reported the event and wrote the reason.
+- ``reason``: string - Comment made by the ``user_id`` in this report. May be blank.
+- ``score``: integer - Content is reported based upon a negative score, where -100 is "most offensive" and 0 is "inoffensive".
+- ``sender``: string - This is the ID of the user who sent the original message/event that was reported.
+- ``canonical_alias``: string - The canonical alias of the room. ``null`` if the room does not have a canonical alias set.
+- ``event_json``: object - Details of the original event that was reported.
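The list and detail endpoints documented above compose naturally: page through the slimmed-down list, then fetch `event_json` per report from the detail view. A rough, hypothetical sketch follows; the base URL and token are placeholders, and the `requests` package is assumed:

```python
import requests

BASE = "http://localhost:8008"  # placeholder homeserver address
ADMIN_TOKEN = "..."             # placeholder admin access token
HEADERS = {"Authorization": "Bearer %s" % ADMIN_TOKEN}

# Fetch the (slimmed-down) list view, then pull full details -- including
# `event_json` -- for each report via the new detail endpoint.
reports = requests.get(
    "%s/_synapse/admin/v1/event_reports?limit=10" % BASE, headers=HEADERS
).json()

for report in reports["event_reports"]:
    detail = requests.get(
        "%s/_synapse/admin/v1/event_reports/%s" % (BASE, report["id"]),
        headers=HEADERS,
    ).json()
    print(report["id"], detail["event_json"]["type"])
```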
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 789431ef25..df14bdf26e 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -31,7 +31,10 @@ from synapse.rest.admin.devices import ( DeviceRestServlet, DevicesRestServlet, ) -from synapse.rest.admin.event_reports import EventReportsRestServlet +from synapse.rest.admin.event_reports import ( + EventReportDetailRestServlet, + EventReportsRestServlet, +) from synapse.rest.admin.groups import DeleteGroupAdminRestServlet from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet @@ -222,6 +225,7 @@ def register_servlets(hs, http_server): DevicesRestServlet(hs).register(http_server) DeleteDevicesRestServlet(hs).register(http_server) EventReportsRestServlet(hs).register(http_server) + EventReportDetailRestServlet(hs).register(http_server) def register_servlets_for_client_rest_resource(hs, http_server): diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py index 5b8d0594cd..fd482f0e32 100644 --- a/synapse/rest/admin/event_reports.py +++ b/synapse/rest/admin/event_reports.py @@ -15,7 +15,7 @@ import logging -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin @@ -86,3 +86,47 @@ class EventReportsRestServlet(RestServlet): ret["next_token"] = start + len(event_reports) return 200, ret + + +class EventReportDetailRestServlet(RestServlet): + """ + Get a specific reported event that is known to the homeserver. Results are returned + in a dictionary containing report information. + The requester must have administrator access in Synapse. + + GET /_synapse/admin/v1/event_reports/ + returns: + 200 OK with details report if success otherwise an error. + + Args: + The parameter `report_id` is the ID of the event report in the database. + Returns: + JSON blob of information about the event report + """ + + PATTERNS = admin_patterns("/event_reports/(?P[^/]*)$") + + def __init__(self, hs): + self.hs = hs + self.auth = hs.get_auth() + self.store = hs.get_datastore() + + async def on_GET(self, request, report_id): + await assert_requester_is_admin(self.auth, request) + + message = ( + "The report_id parameter must be a string representing a positive integer." 
+ ) + try: + report_id = int(report_id) + except ValueError: + raise SynapseError(400, message, errcode=Codes.INVALID_PARAM) + + if report_id < 0: + raise SynapseError(400, message, errcode=Codes.INVALID_PARAM) + + ret = await self.store.get_event_report(report_id) + if not ret: + raise NotFoundError("Event report not found") + + return 200, ret diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index e83d961c20..dc0c4b5499 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1411,6 +1411,65 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): desc="add_event_report", ) + async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]: + """Retrieve an event report + + Args: + report_id: ID of reported event in database + Returns: + event_report: json list of information from event report + """ + + def _get_event_report_txn(txn, report_id): + + sql = """ + SELECT + er.id, + er.received_ts, + er.room_id, + er.event_id, + er.user_id, + er.content, + events.sender, + room_stats_state.canonical_alias, + room_stats_state.name, + event_json.json AS event_json + FROM event_reports AS er + LEFT JOIN events + ON events.event_id = er.event_id + JOIN event_json + ON event_json.event_id = er.event_id + JOIN room_stats_state + ON room_stats_state.room_id = er.room_id + WHERE er.id = ? + """ + + txn.execute(sql, [report_id]) + row = txn.fetchone() + + if not row: + return None + + event_report = { + "id": row[0], + "received_ts": row[1], + "room_id": row[2], + "event_id": row[3], + "user_id": row[4], + "score": db_to_json(row[5]).get("score"), + "reason": db_to_json(row[5]).get("reason"), + "sender": row[6], + "canonical_alias": row[7], + "name": row[8], + "event_json": db_to_json(row[9]), + } + + return event_report + + return await self.db_pool.runInteraction( + "get_event_report", _get_event_report_txn, report_id + ) + async def get_event_reports_paginate( self, start: int, @@ -1468,18 +1527,15 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): er.room_id, er.event_id, er.user_id, - er.reason, er.content, events.sender, - room_aliases.room_alias, - event_json.json AS event_json + room_stats_state.canonical_alias, + room_stats_state.name FROM event_reports AS er - LEFT JOIN room_aliases - ON room_aliases.room_id = er.room_id - JOIN events + LEFT JOIN events ON events.event_id = er.event_id - JOIN event_json - ON event_json.event_id = er.event_id + JOIN room_stats_state + ON room_stats_state.room_id = er.room_id {where_clause} ORDER BY er.received_ts {order} LIMIT ? 
@@ -1490,15 +1546,29 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): args += [limit, start] txn.execute(sql, args) - event_reports = self.db_pool.cursor_to_dict(txn) - - if count > 0: - for row in event_reports: - try: - row["content"] = db_to_json(row["content"]) - row["event_json"] = db_to_json(row["event_json"]) - except Exception: - continue + + event_reports = [] + for row in txn: + try: + s = db_to_json(row[5]).get("score") + r = db_to_json(row[5]).get("reason") + except Exception: + logger.error("Unable to parse json from event_reports: %s", row[0]) + continue + event_reports.append( + { + "id": row[0], + "received_ts": row[1], + "room_id": row[2], + "event_id": row[3], + "user_id": row[4], + "score": s, + "reason": r, + "sender": row[6], + "canonical_alias": row[7], + "name": row[8], + } + ) return event_reports, count diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index bf79086f78..303622217f 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -70,6 +70,16 @@ class EventReportsTestCase(unittest.HomeserverTestCase): self.url = "/_synapse/admin/v1/event_reports" + def test_no_auth(self): + """ + Try to get an event report without authentication. + """ + request, channel = self.make_request("GET", self.url, b"{}") + self.render(request) + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + def test_requester_is_no_admin(self): """ If the user is not a server admin, an error 403 is returned. @@ -266,7 +276,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): def test_limit_is_negative(self): """ - Testing that a negative list parameter returns a 400 + Testing that a negative limit parameter returns a 400 """ request, channel = self.make_request( @@ -360,7 +370,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) def _check_fields(self, content): - """Checks that all attributes are present in a event report + """Checks that all attributes are present in an event report """ for c in content: self.assertIn("id", c) @@ -368,15 +378,175 @@ class EventReportsTestCase(unittest.HomeserverTestCase): self.assertIn("room_id", c) self.assertIn("event_id", c) self.assertIn("user_id", c) - self.assertIn("reason", c) - self.assertIn("content", c) self.assertIn("sender", c) - self.assertIn("room_alias", c) - self.assertIn("event_json", c) - self.assertIn("score", c["content"]) - self.assertIn("reason", c["content"]) - self.assertIn("auth_events", c["event_json"]) - self.assertIn("type", c["event_json"]) - self.assertIn("room_id", c["event_json"]) - self.assertIn("sender", c["event_json"]) - self.assertIn("content", c["event_json"]) + self.assertIn("canonical_alias", c) + self.assertIn("name", c) + self.assertIn("score", c) + self.assertIn("reason", c) + + +class EventReportDetailTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + report_event.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + + self.room_id1 = 
self.helper.create_room_as( + self.other_user, tok=self.other_user_tok, is_public=True + ) + self.helper.join(self.room_id1, user=self.admin_user, tok=self.admin_user_tok) + + self._create_event_and_report( + room_id=self.room_id1, user_tok=self.other_user_tok, + ) + + # first created event report gets `id`=2 + self.url = "/_synapse/admin/v1/event_reports/2" + + def test_no_auth(self): + """ + Try to get event report without authentication. + """ + request, channel = self.make_request("GET", self.url, b"{}") + self.render(request) + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self): + """ + If the user is not a server admin, an error 403 is returned. + """ + + request, channel = self.make_request( + "GET", self.url, access_token=self.other_user_tok, + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_default_success(self): + """ + Testing get a reported event + """ + + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self._check_fields(channel.json_body) + + def test_invalid_report_id(self): + """ + Testing that an invalid `report_id` returns a 400. + """ + + # `report_id` is negative + request, channel = self.make_request( + "GET", + "/_synapse/admin/v1/event_reports/-123", + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "The report_id parameter must be a string representing a positive integer.", + channel.json_body["error"], + ) + + # `report_id` is a non-numerical string + request, channel = self.make_request( + "GET", + "/_synapse/admin/v1/event_reports/abcdef", + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "The report_id parameter must be a string representing a positive integer.", + channel.json_body["error"], + ) + + # `report_id` is undefined + request, channel = self.make_request( + "GET", + "/_synapse/admin/v1/event_reports/", + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "The report_id parameter must be a string representing a positive integer.", + channel.json_body["error"], + ) + + def test_report_id_not_found(self): + """ + Testing that a not existing `report_id` returns a 404. 
+        """
+
+        request, channel = self.make_request(
+            "GET",
+            "/_synapse/admin/v1/event_reports/123",
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+        self.assertEqual("Event report not found", channel.json_body["error"])
+
+    def _create_event_and_report(self, room_id, user_tok):
+        """Create and report events
+        """
+        resp = self.helper.send(room_id, tok=user_tok)
+        event_id = resp["event_id"]
+
+        request, channel = self.make_request(
+            "POST",
+            "rooms/%s/report/%s" % (room_id, event_id),
+            json.dumps({"score": -100, "reason": "this makes me sad"}),
+            access_token=user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+    def _check_fields(self, content):
+        """Checks that all attributes are present in an event report
+        """
+        self.assertIn("id", content)
+        self.assertIn("received_ts", content)
+        self.assertIn("room_id", content)
+        self.assertIn("event_id", content)
+        self.assertIn("user_id", content)
+        self.assertIn("sender", content)
+        self.assertIn("canonical_alias", content)
+        self.assertIn("name", content)
+        self.assertIn("event_json", content)
+        self.assertIn("score", content)
+        self.assertIn("reason", content)
+        self.assertIn("auth_events", content["event_json"])
+        self.assertIn("type", content["event_json"])
+        self.assertIn("room_id", content["event_json"])
+        self.assertIn("sender", content["event_json"])
+        self.assertIn("content", content["event_json"])
-- 
cgit 1.5.1


From 9b7c28283abf62378cecbde3523b5709448e4140 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Tue, 27 Oct 2020 15:12:31 +0100
Subject: Add admin API to list users' local media (#8647)

Add admin API `GET /_synapse/admin/v1/users/<user_id>/media`
to get information about users' uploaded files.
---
 changelog.d/8647.feature                           |   1 +
 docs/admin_api/user_admin_api.rst                  |  83 ++++++
 synapse/rest/admin/__init__.py                     |   2 +
 synapse/rest/admin/users.py                        |  67 ++++-
 .../storage/databases/main/events_bg_updates.py    |   7 +
 synapse/storage/databases/main/media_repository.py |  51 ++++
 .../schema/delta/58/22users_have_local_media.sql   |   2 +
 tests/rest/admin/test_user.py                      | 282 +++++++++++++++++++++
 8 files changed, 494 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/8647.feature
 create mode 100644 synapse/storage/databases/main/schema/delta/58/22users_have_local_media.sql
(limited to 'tests')

diff --git a/changelog.d/8647.feature b/changelog.d/8647.feature
new file mode 100644
index 0000000000..79e98f6e90
--- /dev/null
+++ b/changelog.d/8647.feature
@@ -0,0 +1 @@
+Add an admin API `GET /_synapse/admin/v1/users/<user_id>/media` to get information about uploaded media. Contributed by @dklimpel.
\ No newline at end of file
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 0f3d99c826..636fc284e4 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -341,6 +341,89 @@ The following fields are returned in the JSON response body:
 
 - ``total`` - Number of rooms.
 
+List media of a user
+================================
+Gets a list of all local media that a specific ``user_id`` has created.
+The response is ordered by creation date descending and media ID descending.
+The newest media is on top.
+
+The API is::
+
+    GET /_synapse/admin/v1/users/<user_id>/media
+
+To use it, you will need to authenticate by providing an ``access_token`` for a
+server admin: see `README.rst <README.rst>`_.
+
+A response body like the following is returned:
+
+.. code:: json
+
+    {
+      "media": [
+        {
+          "created_ts": 100400,
+          "last_access_ts": null,
+          "media_id": "qXhyRzulkwLsNHTbpHreuEgo",
+          "media_length": 67,
+          "media_type": "image/png",
+          "quarantined_by": null,
+          "safe_from_quarantine": false,
+          "upload_name": "test1.png"
+        },
+        {
+          "created_ts": 200400,
+          "last_access_ts": null,
+          "media_id": "FHfiSnzoINDatrXHQIXBtahw",
+          "media_length": 67,
+          "media_type": "image/png",
+          "quarantined_by": null,
+          "safe_from_quarantine": false,
+          "upload_name": "test2.png"
+        }
+      ],
+      "next_token": 3,
+      "total": 2
+    }
+
+To paginate, check for ``next_token`` and if present, call the endpoint again
+with ``from`` set to the value of ``next_token``. This will return a new page.
+
+If the endpoint does not return a ``next_token`` then there are no more
+media to paginate through.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- ``user_id`` - string - fully qualified: for example, ``@user:server.com``.
+- ``limit``: string representing a positive integer - Is optional but is used for pagination,
+  denoting the maximum number of items to return in this call. Defaults to ``100``.
+- ``from``: string representing a positive integer - Is optional but used for pagination,
+  denoting the offset in the returned results. This should be treated as an opaque value and
+  not explicitly set to anything other than the return value of ``next_token`` from a previous call.
+  Defaults to ``0``.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- ``media`` - An array of objects, each containing information about a media item.
+  Media objects contain the following fields:
+
+  - ``created_ts`` - integer - Timestamp when the content was uploaded in ms.
+  - ``last_access_ts`` - integer - Timestamp when the content was last accessed in ms.
+  - ``media_id`` - string - The id used to refer to the media.
+  - ``media_length`` - integer - Length of the media in bytes.
+  - ``media_type`` - string - The MIME-type of the media.
+  - ``quarantined_by`` - string - The user ID that initiated the quarantine request
+    for this media.
+
+  - ``safe_from_quarantine`` - bool - Status if this media is safe from quarantining.
+  - ``upload_name`` - string - The name the media was uploaded with.
+
+- ``next_token``: integer - Indication for pagination. See above.
+- ``total`` - integer - Total number of media.
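As a sketch of the pagination contract described above, a hypothetical helper can keep requesting pages until ``next_token`` disappears. The base URL and token are placeholders and the `requests` package is assumed:

```python
import requests

BASE = "http://localhost:8008"  # placeholder homeserver address
ADMIN_TOKEN = "..."             # placeholder admin access token

def all_media(user_id):
    """Collect every media item for a user by following `next_token`."""
    headers = {"Authorization": "Bearer %s" % ADMIN_TOKEN}
    url = "%s/_synapse/admin/v1/users/%s/media" % (BASE, user_id)
    media, token = [], 0
    while token is not None:
        page = requests.get(
            url, params={"from": token, "limit": 100}, headers=headers
        ).json()
        media.extend(page["media"])
        token = page.get("next_token")  # absent on the last page
    return media

print(len(all_media("@user:server.com")))
```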
+ User devices ============ diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index df14bdf26e..a79996cae1 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -53,6 +53,7 @@ from synapse.rest.admin.users import ( ResetPasswordRestServlet, SearchUsersRestServlet, UserAdminServlet, + UserMediaRestServlet, UserMembershipRestServlet, UserRegisterServlet, UserRestServletV2, @@ -218,6 +219,7 @@ def register_servlets(hs, http_server): SendServerNoticeServlet(hs).register(http_server) VersionServlet(hs).register(http_server) UserAdminServlet(hs).register(http_server) + UserMediaRestServlet(hs).register(http_server) UserMembershipRestServlet(hs).register(http_server) UserRestServletV2(hs).register(http_server) UsersRestServletV2(hs).register(http_server) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index e71d9b0e1c..933bb45346 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -16,6 +16,7 @@ import hashlib import hmac import logging from http import HTTPStatus +from typing import Tuple from synapse.api.constants import UserTypes from synapse.api.errors import Codes, NotFoundError, SynapseError @@ -27,13 +28,14 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.http.site import SynapseRequest from synapse.rest.admin._base import ( admin_patterns, assert_requester_is_admin, assert_user_is_admin, historical_admin_path_patterns, ) -from synapse.types import UserID +from synapse.types import JsonDict, UserID logger = logging.getLogger(__name__) @@ -709,3 +711,66 @@ class UserMembershipRestServlet(RestServlet): room_ids = await self.store.get_rooms_for_user(user_id) ret = {"joined_rooms": list(room_ids), "total": len(room_ids)} return 200, ret + + +class UserMediaRestServlet(RestServlet): + """ + Gets information about all uploaded local media for a specific `user_id`. + + Example: + http://localhost:8008/_synapse/admin/v1/users/ + @user:server/media + + Args: + The parameters `from` and `limit` are required for pagination. + By default, a `limit` of 100 is used. 
+    Returns:
+        A list of media and an integer representing the total number of
+        media that exist for this user
+    """
+
+    PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)/media$")
+
+    def __init__(self, hs):
+        self.is_mine = hs.is_mine
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastore()
+
+    async def on_GET(
+        self, request: SynapseRequest, user_id: str
+    ) -> Tuple[int, JsonDict]:
+        await assert_requester_is_admin(self.auth, request)
+
+        if not self.is_mine(UserID.from_string(user_id)):
+            raise SynapseError(400, "Can only lookup local users")
+
+        user = await self.store.get_user_by_id(user_id)
+        if user is None:
+            raise NotFoundError("Unknown user")
+
+        start = parse_integer(request, "from", default=0)
+        limit = parse_integer(request, "limit", default=100)
+
+        if start < 0:
+            raise SynapseError(
+                400,
+                "Query parameter from must be a string representing a positive integer.",
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        if limit < 0:
+            raise SynapseError(
+                400,
+                "Query parameter limit must be a string representing a positive integer.",
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        media, total = await self.store.get_local_media_by_user_paginate(
+            start, limit, user_id
+        )
+
+        ret = {"media": media, "total": total}
+        if (start + limit) < total:
+            ret["next_token"] = start + len(media)
+
+        return 200, ret
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 5e4af2eb51..97b6754846 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -92,6 +92,13 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
             where_clause="NOT have_censored",
         )
 
+        self.db_pool.updates.register_background_index_update(
+            "users_have_local_media",
+            index_name="users_have_local_media",
+            table="local_media_repository",
+            columns=["user_id", "created_ts"],
+        )
+
     async def _background_reindex_fields_sender(self, progress, batch_size):
         target_min_stream_id = progress["target_min_stream_id_inclusive"]
         max_stream_id = progress["max_stream_id_exclusive"]
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py
index 7ef5f1bf2b..daf57675d8 100644
--- a/synapse/storage/databases/main/media_repository.py
+++ b/synapse/storage/databases/main/media_repository.py
@@ -116,6 +116,57 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             desc="get_local_media",
         )
 
+    async def get_local_media_by_user_paginate(
+        self, start: int, limit: int, user_id: str
+    ) -> Tuple[List[Dict[str, Any]], int]:
+        """Get a paginated list of metadata for a local piece of media
+        which a user_id has uploaded
+
+        Args:
+            start: offset in the list
+            limit: maximum amount of media_ids to retrieve
+            user_id: fully-qualified user id
+        Returns:
+            A paginated list of all metadata of user's media,
+            plus the total count of all the user's media
+        """
+
+        def get_local_media_by_user_paginate_txn(txn):
+
+            args = [user_id]
+            sql = """
+                SELECT COUNT(*) as total_media
+                FROM local_media_repository
+                WHERE user_id = ?
+            """
+            txn.execute(sql, args)
+            count = txn.fetchone()[0]
+
+            sql = """
+                SELECT
+                    "media_id",
+                    "media_type",
+                    "media_length",
+                    "upload_name",
+                    "created_ts",
+                    "last_access_ts",
+                    "quarantined_by",
+                    "safe_from_quarantine"
+                FROM local_media_repository
+                WHERE user_id = ?
+                ORDER BY created_ts DESC, media_id DESC
+                LIMIT ? OFFSET ?
+ """ + + args += [limit, start] + txn.execute(sql, args) + media = self.db_pool.cursor_to_dict(txn) + return media, count + + return await self.db_pool.runInteraction( + "get_local_media_by_user_paginate_txn", get_local_media_by_user_paginate_txn + ) + async def get_local_media_before( self, before_ts: int, size_gt: int, keep_profiles: bool, ) -> Optional[List[str]]: diff --git a/synapse/storage/databases/main/schema/delta/58/22users_have_local_media.sql b/synapse/storage/databases/main/schema/delta/58/22users_have_local_media.sql new file mode 100644 index 0000000000..a2842687f1 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/22users_have_local_media.sql @@ -0,0 +1,2 @@ +INSERT INTO background_updates (update_name, progress_json) VALUES + ('users_have_local_media', '{}'); \ No newline at end of file diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index d4b7ae21d1..e815b92329 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -17,6 +17,7 @@ import hashlib import hmac import json import urllib.parse +from binascii import unhexlify from mock import Mock @@ -1115,3 +1116,284 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(number_rooms, channel.json_body["total"]) self.assertEqual(number_rooms, len(channel.json_body["joined_rooms"])) + + +class UserMediaRestTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + self.media_repo = hs.get_media_repository_resource() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.url = "/_synapse/admin/v1/users/%s/media" % urllib.parse.quote( + self.other_user + ) + + def test_no_auth(self): + """ + Try to list media of an user without authentication. + """ + request, channel = self.make_request("GET", self.url, b"{}") + self.render(request) + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self): + """ + If the user is not a server admin, an error is returned. 
+ """ + other_user_token = self.login("user", "pass") + + request, channel = self.make_request( + "GET", self.url, access_token=other_user_token, + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_user_does_not_exist(self): + """ + Tests that a lookup for a user that does not exist returns a 404 + """ + url = "/_synapse/admin/v1/users/@unknown_person:test/media" + request, channel = self.make_request( + "GET", url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_user_is_not_local(self): + """ + Tests that a lookup for a user that is not a local returns a 400 + """ + url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/media" + + request, channel = self.make_request( + "GET", url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual("Can only lookup local users", channel.json_body["error"]) + + def test_limit(self): + """ + Testing list of media with limit + """ + + number_media = 20 + other_user_tok = self.login("user", "pass") + self._create_media(other_user_tok, number_media) + + request, channel = self.make_request( + "GET", self.url + "?limit=5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], number_media) + self.assertEqual(len(channel.json_body["media"]), 5) + self.assertEqual(channel.json_body["next_token"], 5) + self._check_fields(channel.json_body["media"]) + + def test_from(self): + """ + Testing list of media with a defined starting point (from) + """ + + number_media = 20 + other_user_tok = self.login("user", "pass") + self._create_media(other_user_tok, number_media) + + request, channel = self.make_request( + "GET", self.url + "?from=5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], number_media) + self.assertEqual(len(channel.json_body["media"]), 15) + self.assertNotIn("next_token", channel.json_body) + self._check_fields(channel.json_body["media"]) + + def test_limit_and_from(self): + """ + Testing list of media with a defined starting point and limit + """ + + number_media = 20 + other_user_tok = self.login("user", "pass") + self._create_media(other_user_tok, number_media) + + request, channel = self.make_request( + "GET", self.url + "?from=5&limit=10", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], number_media) + self.assertEqual(channel.json_body["next_token"], 15) + self.assertEqual(len(channel.json_body["media"]), 10) + self._check_fields(channel.json_body["media"]) + + def test_limit_is_negative(self): + """ + Testing that a negative limit parameter returns a 400 + """ + + request, channel = self.make_request( + "GET", self.url + "?limit=-5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + 
def test_from_is_negative(self): + """ + Testing that a negative from parameter returns a 400 + """ + + request, channel = self.make_request( + "GET", self.url + "?from=-5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + def test_next_token(self): + """ + Testing that `next_token` appears at the right place + """ + + number_media = 20 + other_user_tok = self.login("user", "pass") + self._create_media(other_user_tok, number_media) + + # `next_token` does not appear + # Number of results is the number of entries + request, channel = self.make_request( + "GET", self.url + "?limit=20", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], number_media) + self.assertEqual(len(channel.json_body["media"]), number_media) + self.assertNotIn("next_token", channel.json_body) + + # `next_token` does not appear + # Number of max results is larger than the number of entries + request, channel = self.make_request( + "GET", self.url + "?limit=21", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], number_media) + self.assertEqual(len(channel.json_body["media"]), number_media) + self.assertNotIn("next_token", channel.json_body) + + # `next_token` does appear + # Number of max results is smaller than the number of entries + request, channel = self.make_request( + "GET", self.url + "?limit=19", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], number_media) + self.assertEqual(len(channel.json_body["media"]), 19) + self.assertEqual(channel.json_body["next_token"], 19) + + # Check + # Set `from` to value of `next_token` for request remaining entries + # `next_token` does not appear + request, channel = self.make_request( + "GET", self.url + "?from=19", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], number_media) + self.assertEqual(len(channel.json_body["media"]), 1) + self.assertNotIn("next_token", channel.json_body) + + def test_user_has_no_media(self): + """ + Tests that a normal lookup for media is successfully + if user has no media created + """ + + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(0, channel.json_body["total"]) + self.assertEqual(0, len(channel.json_body["media"])) + + def test_get_media(self): + """ + Tests that a normal lookup for media is successfully + """ + + number_media = 5 + other_user_tok = self.login("user", "pass") + self._create_media(other_user_tok, number_media) + + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(number_media, channel.json_body["total"]) + self.assertEqual(number_media, len(channel.json_body["media"])) + self.assertNotIn("next_token", 
channel.json_body) + self._check_fields(channel.json_body["media"]) + + def _create_media(self, user_token, number_media): + """ + Create a number of media for a specific user + """ + upload_resource = self.media_repo.children[b"upload"] + for i in range(number_media): + # file size is 67 Byte + image_data = unhexlify( + b"89504e470d0a1a0a0000000d4948445200000001000000010806" + b"0000001f15c4890000000a49444154789c63000100000500010d" + b"0a2db40000000049454e44ae426082" + ) + + # Upload some media into the room + self.helper.upload_media( + upload_resource, image_data, tok=user_token, expect_code=200 + ) + + def _check_fields(self, content): + """Checks that all attributes are present in content + """ + for m in content: + self.assertIn("media_id", m) + self.assertIn("media_type", m) + self.assertIn("media_length", m) + self.assertIn("upload_name", m) + self.assertIn("created_ts", m) + self.assertIn("last_access_ts", m) + self.assertIn("quarantined_by", m) + self.assertIn("safe_from_quarantine", m) -- cgit 1.5.1 From aff1eb7c671b0a3813407321d2702ec46c71fa56 Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Tue, 27 Oct 2020 23:26:36 +0000 Subject: Tell Black to format code for Python 3.5 (#8664) This allows trailing commas in multi-line arg lists. Minor, but we might as well keep our formatting current with regard to our minimum supported Python version. Signed-off-by: Dan Callahan --- changelog.d/8664.misc | 1 + pyproject.toml | 2 +- synapse/http/client.py | 2 +- synapse/storage/database.py | 4 ++-- synapse/util/retryutils.py | 2 +- tests/replication/_base.py | 2 +- tests/replication/tcp/streams/test_events.py | 2 +- tests/server.py | 4 ++-- tests/storage/test_client_ips.py | 2 +- tests/test_utils/event_injection.py | 2 +- 10 files changed, 12 insertions(+), 11 deletions(-) create mode 100644 changelog.d/8664.misc (limited to 'tests') diff --git a/changelog.d/8664.misc b/changelog.d/8664.misc new file mode 100644 index 0000000000..278cf53adc --- /dev/null +++ b/changelog.d/8664.misc @@ -0,0 +1 @@ +Tell Black to format code for Python 3.5. 
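To make the change concrete before the diffs: with the old `target-version = ['py34']`, Black could not leave a trailing comma after `*args` or `**kwargs` in a call, because that syntax only became valid in Python 3.5. Bumping the target lets the "magic trailing comma" appear in those spots, which is all the hunks below do. A minimal sketch (the call and its arguments are invented for illustration):

    # target-version = ['py34']: Black must drop the comma after **kwargs.
    connect(
        host,
        *args,
        **kwargs
    )

    # target-version = ['py35']: the trailing comma is allowed and kept, so
    # appending another argument later touches only a single line in a diff.
    connect(
        host,
        *args,
        **kwargs,
    )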
diff --git a/pyproject.toml b/pyproject.toml index db4a2e41e4..cd880d4e39 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ showcontent = true [tool.black] -target-version = ['py34'] +target-version = ['py35'] exclude = ''' ( diff --git a/synapse/http/client.py b/synapse/http/client.py index 8324632cb6..f409368802 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -359,7 +359,7 @@ class SimpleHttpClient: agent=self.agent, data=body_producer, headers=headers, - **self._extra_treq_args + **self._extra_treq_args, ) # type: defer.Deferred # we use our own timeout mechanism rather than treq's as a workaround diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 0217e63108..a0572b2952 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -94,7 +94,7 @@ def make_pool( cp_openfun=lambda conn: engine.on_new_connection( LoggingDatabaseConnection(conn, engine, "on_new_connection") ), - **db_config.config.get("args", {}) + **db_config.config.get("args", {}), ) @@ -632,7 +632,7 @@ class DatabasePool: func, *args, db_autocommit=db_autocommit, - **kwargs + **kwargs, ) for after_callback, after_args, after_kwargs in after_callbacks: diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index a5cc9d0551..4ab379e429 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -110,7 +110,7 @@ async def get_retry_limiter(destination, clock, store, ignore_backoff=False, **k failure_ts, retry_interval, backoff_on_failure=backoff_on_failure, - **kwargs + **kwargs, ) diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 093e2faac7..f1e53f33cd 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -269,7 +269,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): homeserver_to_use=GenericWorkerServer, config=config, reactor=self.reactor, - **kwargs + **kwargs, ) # If the instance is in the `instance_map` config then workers may try diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index c9998e88e6..bad0df08cf 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -449,7 +449,7 @@ class EventsStreamTestCase(BaseStreamTestCase): sender=sender, type="test_event", content={"body": body}, - **kwargs + **kwargs, ) ) diff --git a/tests/server.py b/tests/server.py index 4d33b84097..ea9c22bc51 100644 --- a/tests/server.py +++ b/tests/server.py @@ -380,7 +380,7 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs): pool._runWithConnection, func, *args, - **kwargs + **kwargs, ) def runInteraction(interaction, *args, **kwargs): @@ -390,7 +390,7 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs): pool._runInteraction, interaction, *args, - **kwargs + **kwargs, ) pool.runWithConnection = runWithConnection diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 755c70db31..e96ca1c8ca 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -412,7 +412,7 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): "GET", "/_matrix/client/r0/admin/users/" + self.user_id, access_token=access_token, - **make_request_args + **make_request_args, ) request.requestHeaders.addRawHeader(b"User-Agent", b"Mozzila pizza") diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py index e93aa84405..c3c4a93e1f 100644 --- a/tests/test_utils/event_injection.py +++ 
b/tests/test_utils/event_injection.py @@ -50,7 +50,7 @@ async def inject_member_event( sender=sender, state_key=target, content=content, - **kwargs + **kwargs, ) -- cgit 1.5.1 From 22398132785b9f87930d06fbff28e572166beec9 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 28 Oct 2020 16:02:42 +0100 Subject: Add an admin APIs to allow server admins to list users' pushers (#8610) Add an admin API `GET /_synapse/admin/v1/users//pushers` like https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers --- changelog.d/8610.feature | 1 + docs/admin_api/user_admin_api.rst | 79 ++++++++++++++++++++++++ synapse/rest/admin/__init__.py | 4 +- synapse/rest/admin/users.py | 52 ++++++++++++++++ tests/rest/admin/test_user.py | 124 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 259 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8610.feature (limited to 'tests') diff --git a/changelog.d/8610.feature b/changelog.d/8610.feature new file mode 100644 index 0000000000..ed8d926964 --- /dev/null +++ b/changelog.d/8610.feature @@ -0,0 +1 @@ +Add an admin APIs to allow server admins to list users' pushers. Contributed by @dklimpel. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index 636fc284e4..d4051d0257 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -611,3 +611,82 @@ The following parameters should be set in the URL: - ``user_id`` - fully qualified: for example, ``@user:server.com``. - ``device_id`` - The device to delete. + +List all pushers +================ +Gets information about all pushers for a specific ``user_id``. + +The API is:: + + GET /_synapse/admin/v1/users//pushers + +To use it, you will need to authenticate by providing an ``access_token`` for a +server admin: see `README.rst `_. + +A response body like the following is returned: + +.. code:: json + + { + "pushers": [ + { + "app_display_name":"HTTP Push Notifications", + "app_id":"m.http", + "data": { + "url":"example.com" + }, + "device_display_name":"pushy push", + "kind":"http", + "lang":"None", + "profile_tag":"", + "pushkey":"a@example.com" + } + ], + "total": 1 + } + +**Parameters** + +The following parameters should be set in the URL: + +- ``user_id`` - fully qualified: for example, ``@user:server.com``. + +**Response** + +The following fields are returned in the JSON response body: + +- ``pushers`` - An array containing the current pushers for the user + + - ``app_display_name`` - string - A string that will allow the user to identify + what application owns this pusher. + + - ``app_id`` - string - This is a reverse-DNS style identifier for the application. + Max length, 64 chars. + + - ``data`` - A dictionary of information for the pusher implementation itself. + + - ``url`` - string - Required if ``kind`` is ``http``. The URL to use to send + notifications to. + + - ``format`` - string - The format to use when sending notifications to the + Push Gateway. + + - ``device_display_name`` - string - A string that will allow the user to identify + what device owns this pusher. + + - ``profile_tag`` - string - This string determines which set of device specific rules + this pusher executes. + + - ``kind`` - string - The kind of pusher. "http" is a pusher that sends HTTP pokes. + - ``lang`` - string - The preferred language for receiving notifications + (e.g. 
'en' or 'en-US')
+
+  - ``pushkey`` - string - This is a unique identifier for this pusher.
+    Max length, 512 bytes.
+
+- ``total`` - integer - Number of pushers.
+
+See also `Client-Server API Spec <https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers>`_
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index a79996cae1..fa7e9e4043 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -50,6 +50,7 @@ from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
 from synapse.rest.admin.users import (
     AccountValidityRenewServlet,
     DeactivateAccountRestServlet,
+    PushersRestServlet,
     ResetPasswordRestServlet,
     SearchUsersRestServlet,
     UserAdminServlet,
@@ -226,8 +227,9 @@ def register_servlets(hs, http_server):
     DeviceRestServlet(hs).register(http_server)
     DevicesRestServlet(hs).register(http_server)
     DeleteDevicesRestServlet(hs).register(http_server)
-    EventReportsRestServlet(hs).register(http_server)
     EventReportDetailRestServlet(hs).register(http_server)
+    EventReportsRestServlet(hs).register(http_server)
+    PushersRestServlet(hs).register(http_server)
 
 
 def register_servlets_for_client_rest_resource(hs, http_server):
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 933bb45346..b337311a37 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -39,6 +39,17 @@ from synapse.types import JsonDict, UserID
 
 logger = logging.getLogger(__name__)
 
+_GET_PUSHERS_ALLOWED_KEYS = {
+    "app_display_name",
+    "app_id",
+    "data",
+    "device_display_name",
+    "kind",
+    "lang",
+    "profile_tag",
+    "pushkey",
+}
+
 
 class UsersRestServlet(RestServlet):
     PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)$")
@@ -713,6 +724,47 @@ class UserMembershipRestServlet(RestServlet):
         return 200, ret
 
 
+class PushersRestServlet(RestServlet):
+    """
+    Gets information about all pushers for a specific `user_id`.
+
+    Example:
+        http://localhost:8008/_synapse/admin/v1/users/
+        @user:server/pushers
+
+    Returns:
+        pushers: A list containing the user's pushers.
+        total: Number of pushers in the list `pushers`.
+    """
+
+    PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/pushers$")
+
+    def __init__(self, hs):
+        self.is_mine = hs.is_mine
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+
+    async def on_GET(
+        self, request: SynapseRequest, user_id: str
+    ) -> Tuple[int, JsonDict]:
+        await assert_requester_is_admin(self.auth, request)
+
+        if not self.is_mine(UserID.from_string(user_id)):
+            raise SynapseError(400, "Can only lookup local users")
+
+        if not await self.store.get_user_by_id(user_id):
+            raise NotFoundError("User not found")
+
+        pushers = await self.store.get_pushers_by_user_id(user_id)
+
+        filtered_pushers = [
+            {k: v for k, v in p.items() if k in _GET_PUSHERS_ALLOWED_KEYS}
+            for p in pushers
+        ]
+
+        return 200, {"pushers": filtered_pushers, "total": len(filtered_pushers)}
+
+
 class UserMediaRestServlet(RestServlet):
     """
     Gets information about all uploaded local media for a specific `user_id`.
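Together with the media-listing servlet added earlier in this series, this gives a server admin two read-only views of a user's account. A rough client-side sketch of how the two endpoints compose, assuming the `requests` library and placeholder values for the homeserver address and admin access token (none of this is part of the change itself):

    import requests

    BASE_URL = "http://localhost:8008/_synapse/admin/v1"  # placeholder homeserver
    HEADERS = {"Authorization": "Bearer <admin_access_token>"}  # placeholder token


    def get_pushers(user_id):
        """Return the filtered pusher list for a local user."""
        resp = requests.get(f"{BASE_URL}/users/{user_id}/pushers", headers=HEADERS)
        resp.raise_for_status()
        body = resp.json()
        # "total" always equals len(body["pushers"]) for this endpoint.
        return body["pushers"]


    def get_all_media(user_id, page_size=100):
        """Page through a user's uploaded media via from/limit/next_token."""
        media = []
        start = 0
        while True:
            resp = requests.get(
                f"{BASE_URL}/users/{user_id}/media",
                headers=HEADERS,
                params={"from": start, "limit": page_size},
            )
            resp.raise_for_status()
            body = resp.json()
            media.extend(body["media"])
            # next_token is only present while more rows remain.
            if "next_token" not in body:
                return media
            start = body["next_token"]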
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index e815b92329..0b728e7ffc 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1118,6 +1118,130 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): self.assertEqual(number_rooms, len(channel.json_body["joined_rooms"])) +class PushersRestTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.url = "/_synapse/admin/v1/users/%s/pushers" % urllib.parse.quote( + self.other_user + ) + + def test_no_auth(self): + """ + Try to list pushers of an user without authentication. + """ + request, channel = self.make_request("GET", self.url, b"{}") + self.render(request) + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self): + """ + If the user is not a server admin, an error is returned. + """ + other_user_token = self.login("user", "pass") + + request, channel = self.make_request( + "GET", self.url, access_token=other_user_token, + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_user_does_not_exist(self): + """ + Tests that a lookup for a user that does not exist returns a 404 + """ + url = "/_synapse/admin/v1/users/@unknown_person:test/pushers" + request, channel = self.make_request( + "GET", url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_user_is_not_local(self): + """ + Tests that a lookup for a user that is not a local returns a 400 + """ + url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/pushers" + + request, channel = self.make_request( + "GET", url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual("Can only lookup local users", channel.json_body["error"]) + + def test_get_pushers(self): + """ + Tests that a normal lookup for pushers is successfully + """ + + # Get pushers + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(0, channel.json_body["total"]) + + # Register the pusher + other_user_token = self.login("user", "pass") + user_tuple = self.get_success( + self.store.get_user_by_access_token(other_user_token) + ) + token_id = user_tuple["token_id"] + + self.get_success( + self.hs.get_pusherpool().add_pusher( + user_id=self.other_user, + access_token=token_id, + kind="http", + app_id="m.http", + app_display_name="HTTP Push Notifications", + device_display_name="pushy push", + pushkey="a@example.com", + lang=None, + data={"url": "example.com"}, + ) + ) + + # Get pushers + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, channel.code, msg=channel.json_body) + 
self.assertEqual(1, channel.json_body["total"]) + + for p in channel.json_body["pushers"]: + self.assertIn("pushkey", p) + self.assertIn("kind", p) + self.assertIn("app_id", p) + self.assertIn("app_display_name", p) + self.assertIn("device_display_name", p) + self.assertIn("profile_tag", p) + self.assertIn("lang", p) + self.assertIn("url", p["data"]) + + class UserMediaRestTestCase(unittest.HomeserverTestCase): servlets = [ -- cgit 1.5.1 From 9a7e0d2ea675278510b79fd9b87260c13cc27a32 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Oct 2020 11:17:35 +0000 Subject: Don't require hiredis to run unit tests (#8680) --- changelog.d/8680.misc | 1 + tests/replication/_base.py | 11 ++++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8680.misc (limited to 'tests') diff --git a/changelog.d/8680.misc b/changelog.d/8680.misc new file mode 100644 index 0000000000..2ca2975464 --- /dev/null +++ b/changelog.d/8680.misc @@ -0,0 +1 @@ +Don't require `hiredis` package to be installed to run unit tests. diff --git a/tests/replication/_base.py b/tests/replication/_base.py index f1e53f33cd..5c633ac6df 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -16,7 +16,6 @@ import logging from typing import Any, Callable, List, Optional, Tuple import attr -import hiredis from twisted.internet.interfaces import IConsumer, IPullProducer, IReactorTime from twisted.internet.protocol import Protocol @@ -39,12 +38,22 @@ from synapse.util import Clock from tests import unittest from tests.server import FakeTransport, render +try: + import hiredis +except ImportError: + hiredis = None + logger = logging.getLogger(__name__) class BaseStreamTestCase(unittest.HomeserverTestCase): """Base class for tests of the replication streams""" + # hiredis is an optional dependency so we don't want to require it for running + # the tests. + if not hiredis: + skip = "Requires hiredis" + servlets = [ streams.register_servlets, ] -- cgit 1.5.1 From 00b24aa545091395f9a92d531836f6bf7b4460e0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 29 Oct 2020 07:27:37 -0400 Subject: Support generating structured logs in addition to standard logs. (#8607) This modifies the configuration of structured logging to be usable from the standard Python logging configuration. This also separates the formatting of logs from the transport allowing JSON logs to files or standard logs to sockets. 
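Because the new formatters and handlers are plain standard-library `logging` components, the YAML examples in the updated documentation below translate directly into `logging.config.dictConfig` calls. A minimal sketch, reusing the class names introduced by this change and the documentation's placeholder aggregator address (`10.1.2.3:9999`):

    import logging.config

    logging.config.dictConfig({
        "version": 1,
        "formatters": {
            # TerseJsonFormatter emits one JSON object per line, including a
            # timestamp; JsonFormatter is the variant without one.
            "structured": {"class": "synapse.logging.TerseJsonFormatter"},
        },
        "handlers": {
            # JSON records to a local file...
            "file": {
                "class": "logging.FileHandler",
                "formatter": "structured",
                "filename": "homeserver.log",
            },
            # ...or the same JSON over TCP to a log aggregator (placeholder
            # address); extra keys are passed to RemoteHandler's constructor.
            "remote": {
                "class": "synapse.logging.RemoteHandler",
                "formatter": "structured",
                "host": "10.1.2.3",
                "port": 9999,
            },
        },
        "loggers": {"synapse": {"level": "INFO", "handlers": ["file", "remote"]}},
    })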
---
 UPGRADE.rst                          |  16 ++
 changelog.d/8607.misc                |   1 +
 docs/sample_log_config.yaml          |   4 +
 docs/structured_logging.md           | 164 ++++++++++++-----
 scripts-dev/lint.sh                  |   2 +-
 synapse/config/logger.py             |  96 +++++-----
 synapse/logging/__init__.py          |  20 +++
 synapse/logging/_remote.py           |  97 ++++++-----
 synapse/logging/_structured.py       | 329 ++++++-----------------------------
 synapse/logging/_terse_json.py       | 192 ++++++--------------
 synapse/logging/filter.py            |  33 ++++
 synmark/__init__.py                  |  39 -----
 synmark/__main__.py                  |   6 +-
 synmark/suites/logging.py            |  60 ++++---
 tests/logging/__init__.py            |  34 ++++
 tests/logging/test_remote_handler.py | 153 ++++++++++++++++
 tests/logging/test_structured.py     | 214 -----------------------
 tests/logging/test_terse_json.py     | 253 ++++++++------------------
 tests/server.py                      |   4 +-
 19 files changed, 706 insertions(+), 1011 deletions(-)
 create mode 100644 changelog.d/8607.misc
 create mode 100644 synapse/logging/filter.py
 create mode 100644 tests/logging/test_remote_handler.py
 delete mode 100644 tests/logging/test_structured.py
(limited to 'tests')

diff --git a/UPGRADE.rst b/UPGRADE.rst
index 5a68312217..960c2aeb2b 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -75,6 +75,22 @@ for example:
     wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+Upgrading to v1.23.0
+====================
+
+Structured logging configuration breaking changes
+-------------------------------------------------
+
+This release deprecates use of the ``structured: true`` logging configuration for
+structured logging. If your logging configuration contains ``structured: true``
+then it should be modified based on the `structured logging documentation
+<https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md>`_.
+
+The ``structured`` and ``drains`` logging options are now deprecated and should
+be replaced by standard logging configuration of ``handlers`` and ``formatters``.
+
+A future release of Synapse will make using ``structured: true`` an error.
+
 Upgrading to v1.22.0
 ====================
 
diff --git a/changelog.d/8607.misc b/changelog.d/8607.misc
new file mode 100644
index 0000000000..9e56551a34
--- /dev/null
+++ b/changelog.d/8607.misc
@@ -0,0 +1 @@
+Re-organize the structured logging code to separate the TCP transport handling from the JSON formatting.
diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml
index e26657f9fe..ff3c747180 100644
--- a/docs/sample_log_config.yaml
+++ b/docs/sample_log_config.yaml
@@ -3,7 +3,11 @@
 # This is a YAML file containing a standard Python logging configuration
 # dictionary. See [1] for details on the valid settings.
 #
+# Synapse also supports structured logging for machine readable logs which can
+# be ingested by ELK stacks. See [2] for details.
+#
 # [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [2]: https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md
 
 version: 1
 
diff --git a/docs/structured_logging.md b/docs/structured_logging.md
index decec9b8fa..b1281667e0 100644
--- a/docs/structured_logging.md
+++ b/docs/structured_logging.md
@@ -1,83 +1,161 @@
 # Structured Logging
 
-A structured logging system can be useful when your logs are destined for a machine to parse and process. By maintaining its machine-readable characteristics, it enables more efficient searching and aggregations when consumed by software such as the "ELK stack".
+A structured logging system can be useful when your logs are destined for a
+machine to parse and process. By maintaining its machine-readable characteristics,
+it enables more efficient searching and aggregations when consumed by software
+such as the "ELK stack".
 
-Synapse's structured logging system is configured via the file that Synapse's `log_config` config option points to. The file must be YAML and contain `structured: true`. It must contain a list of "drains" (places where logs go to).
+Synapse's structured logging system is configured via the file that Synapse's
+`log_config` config option points to. The file should include a formatter which
+uses the `synapse.logging.TerseJsonFormatter` class included with Synapse and a
+handler which uses the above formatter.
+
+There is also a `synapse.logging.JsonFormatter` option which does not include
+a timestamp in the resulting JSON. This is useful if the log ingester adds its
+own timestamp.
 
 A structured logging configuration looks similar to the following:
 
 ```yaml
-structured: true
+version: 1
+
+formatters:
+    structured:
+        class: synapse.logging.TerseJsonFormatter
+
+handlers:
+    file:
+        class: logging.handlers.TimedRotatingFileHandler
+        formatter: structured
+        filename: /path/to/my/logs/homeserver.log
+        when: midnight
+        backupCount: 3  # Does not include the current log file.
+        encoding: utf8
 
 loggers:
     synapse:
         level: INFO
+        handlers: [file]
     synapse.storage.SQL:
         level: WARNING
-
-drains:
-    console:
-        type: console
-        location: stdout
-    file:
-        type: file_json
-        location: homeserver.log
 ```
 
-The above logging config will set Synapse as 'INFO' logging level by default, with the SQL layer at 'WARNING', and will have two logging drains (to the console and to a file, stored as JSON).
-
-## Drain Types
+The above logging config will set Synapse as 'INFO' logging level by default,
+with the SQL layer at 'WARNING', and will log to a file, stored as JSON.
 
-Drain types can be specified by the `type` key.
+It is also possible to configure Synapse to log to a remote endpoint by using the
+`synapse.logging.RemoteHandler` class included with Synapse. It takes the
+following arguments:
 
-### `console`
+- `host`: Hostname or IP address of the log aggregator.
+- `port`: Numerical port to contact on the host.
+- `maximum_buffer`: (Optional, defaults to 1000) The maximum buffer size to allow.
 
-Outputs human-readable logs to the console.
+A remote structured logging configuration looks similar to the following:
 
-Arguments:
+```yaml
+version: 1
 
-- `location`: Either `stdout` or `stderr`.
+formatters:
+    structured:
+        class: synapse.logging.TerseJsonFormatter
 
-### `console_json`
+handlers:
+    remote:
+        class: synapse.logging.RemoteHandler
+        formatter: structured
+        host: 10.1.2.3
+        port: 9999
 
-Outputs machine-readable JSON logs to the console.
+loggers:
+    synapse:
+        level: INFO
+        handlers: [remote]
+    synapse.storage.SQL:
+        level: WARNING
+```
 
-Arguments:
+The above logging config will set Synapse as 'INFO' logging level by default,
+with the SQL layer at 'WARNING', and will log JSON formatted messages to a
+remote endpoint at 10.1.2.3:9999.
 
-- `location`: Either `stdout` or `stderr`.
+## Upgrading from legacy structured logging configuration
 
-### `console_json_terse`
+Versions of Synapse prior to v1.23.0 included a custom structured logging
+configuration which is deprecated. It used a `structured: true` flag and
+configured `drains` instead of `handlers` and `formatters`.
 
-Outputs machine-readable JSON logs to the console, separated by newlines.
This -format is not designed to be read and re-formatted into human-readable text, but -is optimal for a logging aggregation system. +Synapse currently automatically converts the old configuration to the new +configuration, but this will be removed in a future version of Synapse. The +following reference can be used to update your configuration. Based on the drain +`type`, we can pick a new handler: -Arguments: +1. For a type of `console`, `console_json`, or `console_json_terse`: a handler + with a class of `logging.StreamHandler` and a `stream` of `ext://sys.stdout` + or `ext://sys.stderr` should be used. +2. For a type of `file` or `file_json`: a handler of `logging.FileHandler` with + a location of the file path should be used. +3. For a type of `network_json_terse`: a handler of `synapse.logging.RemoteHandler` + with the host and port should be used. -- `location`: Either `stdout` or `stderr`. +Then based on the drain `type` we can pick a new formatter: -### `file` +1. For a type of `console` or `file` no formatter is necessary. +2. For a type of `console_json` or `file_json`: a formatter of + `synapse.logging.JsonFormatter` should be used. +3. For a type of `console_json_terse` or `network_json_terse`: a formatter of + `synapse.logging.TerseJsonFormatter` should be used. -Outputs human-readable logs to a file. +For each new handler and formatter they should be added to the logging configuration +and then assigned to either a logger or the root logger. -Arguments: +An example legacy configuration: -- `location`: An absolute path to the file to log to. +```yaml +structured: true -### `file_json` +loggers: + synapse: + level: INFO + synapse.storage.SQL: + level: WARNING -Outputs machine-readable logs to a file. +drains: + console: + type: console + location: stdout + file: + type: file_json + location: homeserver.log +``` -Arguments: +Would be converted into a new configuration: -- `location`: An absolute path to the file to log to. +```yaml +version: 1 -### `network_json_terse` +formatters: + json: + class: synapse.logging.JsonFormatter -Delivers machine-readable JSON logs to a log aggregator over TCP. This is -compatible with LogStash's TCP input with the codec set to `json_lines`. +handlers: + console: + class: logging.StreamHandler + location: ext://sys.stdout + file: + class: logging.FileHandler + formatter: json + filename: homeserver.log -Arguments: +loggers: + synapse: + level: INFO + handlers: [console, file] + synapse.storage.SQL: + level: WARNING +``` -- `host`: Hostname or IP address of the log aggregator. -- `port`: Numerical port to contact on the host. \ No newline at end of file +The new logging configuration is a bit more verbose, but significantly more +flexible. It allows for configuration that were not previously possible, such as +sending plain logs over the network, or using different handlers for different +modules. diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index f141805519..f328ab57d5 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -80,7 +80,7 @@ else # then lint everything! 
if [[ -z ${files+x} ]]; then # Lint all source code files and directories - files=("synapse" "tests" "scripts-dev" "scripts" "contrib" "synctl" "setup.py") + files=("synapse" "tests" "scripts-dev" "scripts" "contrib" "synctl" "setup.py" "synmark") fi fi diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 6b7be28aee..d4e887a3e0 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -23,7 +23,6 @@ from string import Template import yaml from twisted.logger import ( - ILogObserver, LogBeginner, STDLibLogObserver, eventAsText, @@ -32,11 +31,9 @@ from twisted.logger import ( import synapse from synapse.app import _base as appbase -from synapse.logging._structured import ( - reload_structured_logging, - setup_structured_logging, -) +from synapse.logging._structured import setup_structured_logging from synapse.logging.context import LoggingContextFilter +from synapse.logging.filter import MetadataFilter from synapse.util.versionstring import get_version_string from ._base import Config, ConfigError @@ -48,7 +45,11 @@ DEFAULT_LOG_CONFIG = Template( # This is a YAML file containing a standard Python logging configuration # dictionary. See [1] for details on the valid settings. # +# Synapse also supports structured logging for machine readable logs which can +# be ingested by ELK stacks. See [2] for details. +# # [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema +# [2]: https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md version: 1 @@ -176,11 +177,11 @@ class LoggingConfig(Config): log_config_file.write(DEFAULT_LOG_CONFIG.substitute(log_file=log_file)) -def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner): +def _setup_stdlib_logging(config, log_config_path, logBeginner: LogBeginner) -> None: """ - Set up Python stdlib logging. + Set up Python standard library logging. """ - if log_config is None: + if log_config_path is None: log_format = ( "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" " - %(message)s" @@ -196,7 +197,8 @@ def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner): handler.setFormatter(formatter) logger.addHandler(handler) else: - logging.config.dictConfig(log_config) + # Load the logging configuration. + _load_logging_config(log_config_path) # We add a log record factory that runs all messages through the # LoggingContextFilter so that we get the context *at the time we log* @@ -204,12 +206,14 @@ def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner): # filter options, but care must when using e.g. MemoryHandler to buffer # writes. - log_filter = LoggingContextFilter(request="") + log_context_filter = LoggingContextFilter(request="") + log_metadata_filter = MetadataFilter({"server_name": config.server_name}) old_factory = logging.getLogRecordFactory() def factory(*args, **kwargs): record = old_factory(*args, **kwargs) - log_filter.filter(record) + log_context_filter.filter(record) + log_metadata_filter.filter(record) return record logging.setLogRecordFactory(factory) @@ -255,21 +259,40 @@ def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner): if not config.no_redirect_stdio: print("Redirected stdout/stderr to logs") - return observer - -def _reload_stdlib_logging(*args, log_config=None): - logger = logging.getLogger("") +def _load_logging_config(log_config_path: str) -> None: + """ + Configure logging from a log config path. 
+ """ + with open(log_config_path, "rb") as f: + log_config = yaml.safe_load(f.read()) if not log_config: - logger.warning("Reloaded a blank config?") + logging.warning("Loaded a blank logging config?") + + # If the old structured logging configuration is being used, convert it to + # the new style configuration. + if "structured" in log_config and log_config.get("structured"): + log_config = setup_structured_logging(log_config) logging.config.dictConfig(log_config) +def _reload_logging_config(log_config_path): + """ + Reload the log configuration from the file and apply it. + """ + # If no log config path was given, it cannot be reloaded. + if log_config_path is None: + return + + _load_logging_config(log_config_path) + logging.info("Reloaded log config from %s due to SIGHUP", log_config_path) + + def setup_logging( hs, config, use_worker_options=False, logBeginner: LogBeginner = globalLogBeginner -) -> ILogObserver: +) -> None: """ Set up the logging subsystem. @@ -282,41 +305,18 @@ def setup_logging( logBeginner: The Twisted logBeginner to use. - Returns: - The "root" Twisted Logger observer, suitable for sending logs to from a - Logger instance. """ - log_config = config.worker_log_config if use_worker_options else config.log_config - - def read_config(*args, callback=None): - if log_config is None: - return None - - with open(log_config, "rb") as f: - log_config_body = yaml.safe_load(f.read()) - - if callback: - callback(log_config=log_config_body) - logging.info("Reloaded log config from %s due to SIGHUP", log_config) - - return log_config_body + log_config_path = ( + config.worker_log_config if use_worker_options else config.log_config + ) - log_config_body = read_config() + # Perform one-time logging configuration. + _setup_stdlib_logging(config, log_config_path, logBeginner=logBeginner) + # Add a SIGHUP handler to reload the logging configuration, if one is available. + appbase.register_sighup(_reload_logging_config, log_config_path) - if log_config_body and log_config_body.get("structured") is True: - logger = setup_structured_logging( - hs, config, log_config_body, logBeginner=logBeginner - ) - appbase.register_sighup(read_config, callback=reload_structured_logging) - else: - logger = _setup_stdlib_logging(config, log_config_body, logBeginner=logBeginner) - appbase.register_sighup(read_config, callback=_reload_stdlib_logging) - - # make sure that the first thing we log is a thing we can grep backwards - # for + # Log immediately so we can grep backwards. logging.warning("***** STARTING SERVER *****") logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse)) logging.info("Server hostname: %s", config.server_name) logging.info("Instance name: %s", hs.get_instance_name()) - - return logger diff --git a/synapse/logging/__init__.py b/synapse/logging/__init__.py index e69de29bb2..b28b7b2ef7 100644 --- a/synapse/logging/__init__.py +++ b/synapse/logging/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# These are imported to allow for nicer logging configuration files. +from synapse.logging._remote import RemoteHandler +from synapse.logging._terse_json import JsonFormatter, TerseJsonFormatter + +__all__ = ["RemoteHandler", "JsonFormatter", "TerseJsonFormatter"] diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py index 0caf325916..ba45424f02 100644 --- a/synapse/logging/_remote.py +++ b/synapse/logging/_remote.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging import sys import traceback from collections import deque @@ -21,6 +22,7 @@ from math import floor from typing import Callable, Optional import attr +from typing_extensions import Deque from zope.interface import implementer from twisted.application.internet import ClientService @@ -32,7 +34,8 @@ from twisted.internet.endpoints import ( ) from twisted.internet.interfaces import IPushProducer, ITransport from twisted.internet.protocol import Factory, Protocol -from twisted.logger import ILogObserver, Logger, LogLevel + +logger = logging.getLogger(__name__) @attr.s @@ -45,11 +48,11 @@ class LogProducer: Args: buffer: Log buffer to read logs from. transport: Transport to write to. - format_event: A callable to format the log entry to a string. + format: A callable to format the log record to a string. """ transport = attr.ib(type=ITransport) - format_event = attr.ib(type=Callable[[dict], str]) + _format = attr.ib(type=Callable[[logging.LogRecord], str]) _buffer = attr.ib(type=deque) _paused = attr.ib(default=False, type=bool, init=False) @@ -61,16 +64,19 @@ class LogProducer: self._buffer = deque() def resumeProducing(self): + # If we're already producing, nothing to do. self._paused = False + # Loop until paused. while self._paused is False and (self._buffer and self.transport.connected): try: - # Request the next event and format it. - event = self._buffer.popleft() - msg = self.format_event(event) + # Request the next record and format it. + record = self._buffer.popleft() + msg = self._format(record) # Send it as a new line over the transport. self.transport.write(msg.encode("utf8")) + self.transport.write(b"\n") except Exception: # Something has gone wrong writing to the transport -- log it # and break out of the while. @@ -78,60 +84,63 @@ class LogProducer: break -@attr.s -@implementer(ILogObserver) -class TCPLogObserver: +class RemoteHandler(logging.Handler): """ - An IObserver that writes JSON logs to a TCP target. + An logging handler that writes logs to a TCP target. Args: - hs (HomeServer): The homeserver that is being logged for. host: The host of the logging target. port: The logging target's port. - format_event: A callable to format the log entry to a string. maximum_buffer: The maximum buffer size. 
""" - hs = attr.ib() - host = attr.ib(type=str) - port = attr.ib(type=int) - format_event = attr.ib(type=Callable[[dict], str]) - maximum_buffer = attr.ib(type=int) - _buffer = attr.ib(default=attr.Factory(deque), type=deque) - _connection_waiter = attr.ib(default=None, type=Optional[Deferred]) - _logger = attr.ib(default=attr.Factory(Logger)) - _producer = attr.ib(default=None, type=Optional[LogProducer]) - - def start(self) -> None: + def __init__( + self, + host: str, + port: int, + maximum_buffer: int = 1000, + level=logging.NOTSET, + _reactor=None, + ): + super().__init__(level=level) + self.host = host + self.port = port + self.maximum_buffer = maximum_buffer + + self._buffer = deque() # type: Deque[logging.LogRecord] + self._connection_waiter = None # type: Optional[Deferred] + self._producer = None # type: Optional[LogProducer] # Connect without DNS lookups if it's a direct IP. + if _reactor is None: + from twisted.internet import reactor + + _reactor = reactor + try: ip = ip_address(self.host) if isinstance(ip, IPv4Address): - endpoint = TCP4ClientEndpoint( - self.hs.get_reactor(), self.host, self.port - ) + endpoint = TCP4ClientEndpoint(_reactor, self.host, self.port) elif isinstance(ip, IPv6Address): - endpoint = TCP6ClientEndpoint( - self.hs.get_reactor(), self.host, self.port - ) + endpoint = TCP6ClientEndpoint(_reactor, self.host, self.port) else: raise ValueError("Unknown IP address provided: %s" % (self.host,)) except ValueError: - endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port) + endpoint = HostnameEndpoint(_reactor, self.host, self.port) factory = Factory.forProtocol(Protocol) - self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor()) + self._service = ClientService(endpoint, factory, clock=_reactor) self._service.startService() self._connect() - def stop(self): + def close(self): self._service.stopService() def _connect(self) -> None: """ Triggers an attempt to connect then write to the remote if not already writing. """ + # Do not attempt to open multiple connections. if self._connection_waiter: return @@ -158,9 +167,7 @@ class TCPLogObserver: # Make a new producer and start it. self._producer = LogProducer( - buffer=self._buffer, - transport=r.transport, - format_event=self.format_event, + buffer=self._buffer, transport=r.transport, format=self.format, ) r.transport.registerProducer(self._producer, True) self._producer.resumeProducing() @@ -168,19 +175,19 @@ class TCPLogObserver: def _handle_pressure(self) -> None: """ - Handle backpressure by shedding events. + Handle backpressure by shedding records. The buffer will, in this order, until the buffer is below the maximum: - - Shed DEBUG events - - Shed INFO events - - Shed the middle 50% of the events. + - Shed DEBUG records. + - Shed INFO records. + - Shed the middle 50% of the records. 
""" if len(self._buffer) <= self.maximum_buffer: return # Strip out DEBUGs self._buffer = deque( - filter(lambda event: event["log_level"] != LogLevel.debug, self._buffer) + filter(lambda record: record.levelno > logging.DEBUG, self._buffer) ) if len(self._buffer) <= self.maximum_buffer: @@ -188,7 +195,7 @@ class TCPLogObserver: # Strip out INFOs self._buffer = deque( - filter(lambda event: event["log_level"] != LogLevel.info, self._buffer) + filter(lambda record: record.levelno > logging.INFO, self._buffer) ) if len(self._buffer) <= self.maximum_buffer: @@ -209,17 +216,17 @@ class TCPLogObserver: self._buffer.extend(reversed(end_buffer)) - def __call__(self, event: dict) -> None: - self._buffer.append(event) + def emit(self, record: logging.LogRecord) -> None: + self._buffer.append(record) # Handle backpressure, if it exists. try: self._handle_pressure() except Exception: - # If handling backpressure fails,clear the buffer and log the + # If handling backpressure fails, clear the buffer and log the # exception. self._buffer.clear() - self._logger.failure("Failed clearing backpressure") + logger.warning("Failed clearing backpressure") # Try and write immediately. self._connect() diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py index 0fc2ea609e..14d9c104c2 100644 --- a/synapse/logging/_structured.py +++ b/synapse/logging/_structured.py @@ -12,138 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import logging import os.path -import sys -import typing -import warnings -from typing import List +from typing import Any, Dict, Generator, Optional, Tuple -import attr -from constantly import NamedConstant, Names, ValueConstant, Values -from zope.interface import implementer - -from twisted.logger import ( - FileLogObserver, - FilteringLogObserver, - ILogObserver, - LogBeginner, - Logger, - LogLevel, - LogLevelFilterPredicate, - LogPublisher, - eventAsText, - jsonFileLogObserver, -) +from constantly import NamedConstant, Names from synapse.config._base import ConfigError -from synapse.logging._terse_json import ( - TerseJSONToConsoleLogObserver, - TerseJSONToTCPLogObserver, -) -from synapse.logging.context import current_context - - -def stdlib_log_level_to_twisted(level: str) -> LogLevel: - """ - Convert a stdlib log level to Twisted's log level. - """ - lvl = level.lower().replace("warning", "warn") - return LogLevel.levelWithName(lvl) - - -@attr.s -@implementer(ILogObserver) -class LogContextObserver: - """ - An ILogObserver which adds Synapse-specific log context information. - - Attributes: - observer (ILogObserver): The target parent observer. - """ - - observer = attr.ib() - - def __call__(self, event: dict) -> None: - """ - Consume a log event and emit it to the parent observer after filtering - and adding log context information. - - Args: - event (dict) - """ - # Filter out some useless events that Twisted outputs - if "log_text" in event: - if event["log_text"].startswith("DNSDatagramProtocol starting on "): - return - - if event["log_text"].startswith("(UDP Port "): - return - - if event["log_text"].startswith("Timing out client") or event[ - "log_format" - ].startswith("Timing out client"): - return - - context = current_context() - - # Copy the context information to the log event. 
- context.copy_to_twisted_log_entry(event) - - self.observer(event) - - -class PythonStdlibToTwistedLogger(logging.Handler): - """ - Transform a Python stdlib log message into a Twisted one. - """ - - def __init__(self, observer, *args, **kwargs): - """ - Args: - observer (ILogObserver): A Twisted logging observer. - *args, **kwargs: Args/kwargs to be passed to logging.Handler. - """ - self.observer = observer - super().__init__(*args, **kwargs) - - def emit(self, record: logging.LogRecord) -> None: - """ - Emit a record to Twisted's observer. - - Args: - record (logging.LogRecord) - """ - - self.observer( - { - "log_time": record.created, - "log_text": record.getMessage(), - "log_format": "{log_text}", - "log_namespace": record.name, - "log_level": stdlib_log_level_to_twisted(record.levelname), - } - ) - - -def SynapseFileLogObserver(outFile: typing.IO[str]) -> FileLogObserver: - """ - A log observer that formats events like the traditional log formatter and - sends them to `outFile`. - - Args: - outFile (file object): The file object to write to. - """ - - def formatEvent(_event: dict) -> str: - event = dict(_event) - event["log_level"] = event["log_level"].name.upper() - event["log_format"] = "- {log_namespace} - {log_level} - {request} - " + ( - event.get("log_format", "{log_text}") or "{log_text}" - ) - return eventAsText(event, includeSystem=False) + "\n" - - return FileLogObserver(outFile, formatEvent) class DrainType(Names): @@ -155,30 +29,12 @@ class DrainType(Names): NETWORK_JSON_TERSE = NamedConstant() -class OutputPipeType(Values): - stdout = ValueConstant(sys.__stdout__) - stderr = ValueConstant(sys.__stderr__) - - -@attr.s -class DrainConfiguration: - name = attr.ib() - type = attr.ib() - location = attr.ib() - options = attr.ib(default=None) - - -@attr.s -class NetworkJSONTerseOptions: - maximum_buffer = attr.ib(type=int) - - -DEFAULT_LOGGERS = {"synapse": {"level": "INFO"}} +DEFAULT_LOGGERS = {"synapse": {"level": "info"}} def parse_drain_configs( drains: dict, -) -> typing.Generator[DrainConfiguration, None, None]: +) -> Generator[Tuple[str, Dict[str, Any]], None, None]: """ Parse the drain configurations. @@ -186,11 +42,12 @@ def parse_drain_configs( drains (dict): A list of drain configurations. Yields: - DrainConfiguration instances. + dict instances representing a logging handler. Raises: ConfigError: If any of the drain configuration items are invalid. """ + for name, config in drains.items(): if "type" not in config: raise ConfigError("Logging drains require a 'type' key.") @@ -202,6 +59,18 @@ def parse_drain_configs( "%s is not a known logging drain type." % (config["type"],) ) + # Either use the default formatter or the tersejson one. + if logging_type in (DrainType.CONSOLE_JSON, DrainType.FILE_JSON,): + formatter = "json" # type: Optional[str] + elif logging_type in ( + DrainType.CONSOLE_JSON_TERSE, + DrainType.NETWORK_JSON_TERSE, + ): + formatter = "tersejson" + else: + # A formatter of None implies using the default formatter. + formatter = None + if logging_type in [ DrainType.CONSOLE, DrainType.CONSOLE_JSON, @@ -217,9 +86,11 @@ def parse_drain_configs( % (logging_type,) ) - pipe = OutputPipeType.lookupByName(location).value - - yield DrainConfiguration(name=name, type=logging_type, location=pipe) + yield name, { + "class": "logging.StreamHandler", + "formatter": formatter, + "stream": "ext://sys." 
+ location, + } elif logging_type in [DrainType.FILE, DrainType.FILE_JSON]: if "location" not in config: @@ -233,18 +104,25 @@ def parse_drain_configs( "File paths need to be absolute, '%s' is a relative path" % (location,) ) - yield DrainConfiguration(name=name, type=logging_type, location=location) + + yield name, { + "class": "logging.FileHandler", + "formatter": formatter, + "filename": location, + } elif logging_type in [DrainType.NETWORK_JSON_TERSE]: host = config.get("host") port = config.get("port") maximum_buffer = config.get("maximum_buffer", 1000) - yield DrainConfiguration( - name=name, - type=logging_type, - location=(host, port), - options=NetworkJSONTerseOptions(maximum_buffer=maximum_buffer), - ) + + yield name, { + "class": "synapse.logging.RemoteHandler", + "formatter": formatter, + "host": host, + "port": port, + "maximum_buffer": maximum_buffer, + } else: raise ConfigError( @@ -253,126 +131,29 @@ def parse_drain_configs( ) -class StoppableLogPublisher(LogPublisher): +def setup_structured_logging(log_config: dict,) -> dict: """ - A log publisher that can tell its observers to shut down any external - communications. - """ - - def stop(self): - for obs in self._observers: - if hasattr(obs, "stop"): - obs.stop() - - -def setup_structured_logging( - hs, - config, - log_config: dict, - logBeginner: LogBeginner, - redirect_stdlib_logging: bool = True, -) -> LogPublisher: - """ - Set up Twisted's structured logging system. - - Args: - hs: The homeserver to use. - config (HomeserverConfig): The configuration of the Synapse homeserver. - log_config (dict): The log configuration to use. + Convert a legacy structured logging configuration (from Synapse < v1.23.0) + to one compatible with the new standard library handlers. """ - if config.no_redirect_stdio: - raise ConfigError( - "no_redirect_stdio cannot be defined using structured logging." 
- ) - - logger = Logger() - if "drains" not in log_config: raise ConfigError("The logging configuration requires a list of drains.") - observers = [] # type: List[ILogObserver] - - for observer in parse_drain_configs(log_config["drains"]): - # Pipe drains - if observer.type == DrainType.CONSOLE: - logger.debug( - "Starting up the {name} console logger drain", name=observer.name - ) - observers.append(SynapseFileLogObserver(observer.location)) - elif observer.type == DrainType.CONSOLE_JSON: - logger.debug( - "Starting up the {name} JSON console logger drain", name=observer.name - ) - observers.append(jsonFileLogObserver(observer.location)) - elif observer.type == DrainType.CONSOLE_JSON_TERSE: - logger.debug( - "Starting up the {name} terse JSON console logger drain", - name=observer.name, - ) - observers.append( - TerseJSONToConsoleLogObserver(observer.location, metadata={}) - ) - - # File drains - elif observer.type == DrainType.FILE: - logger.debug("Starting up the {name} file logger drain", name=observer.name) - log_file = open(observer.location, "at", buffering=1, encoding="utf8") - observers.append(SynapseFileLogObserver(log_file)) - elif observer.type == DrainType.FILE_JSON: - logger.debug( - "Starting up the {name} JSON file logger drain", name=observer.name - ) - log_file = open(observer.location, "at", buffering=1, encoding="utf8") - observers.append(jsonFileLogObserver(log_file)) - - elif observer.type == DrainType.NETWORK_JSON_TERSE: - metadata = {"server_name": hs.config.server_name} - log_observer = TerseJSONToTCPLogObserver( - hs=hs, - host=observer.location[0], - port=observer.location[1], - metadata=metadata, - maximum_buffer=observer.options.maximum_buffer, - ) - log_observer.start() - observers.append(log_observer) - else: - # We should never get here, but, just in case, throw an error. - raise ConfigError("%s drain type cannot be configured" % (observer.type,)) - - publisher = StoppableLogPublisher(*observers) - log_filter = LogLevelFilterPredicate() - - for namespace, namespace_config in log_config.get( - "loggers", DEFAULT_LOGGERS - ).items(): - # Set the log level for twisted.logger.Logger namespaces - log_filter.setLogLevelForNamespace( - namespace, - stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")), - ) - - # Also set the log levels for the stdlib logger namespaces, to prevent - # them getting to PythonStdlibToTwistedLogger and having to be formatted - if "level" in namespace_config: - logging.getLogger(namespace).setLevel(namespace_config.get("level")) - - f = FilteringLogObserver(publisher, [log_filter]) - lco = LogContextObserver(f) - - if redirect_stdlib_logging: - stuff_into_twisted = PythonStdlibToTwistedLogger(lco) - stdliblogger = logging.getLogger() - stdliblogger.addHandler(stuff_into_twisted) - - # Always redirect standard I/O, otherwise other logging outputs might miss - # it. - logBeginner.beginLoggingTo([lco], redirectStandardIO=True) + new_config = { + "version": 1, + "formatters": { + "json": {"class": "synapse.logging.JsonFormatter"}, + "tersejson": {"class": "synapse.logging.TerseJsonFormatter"}, + }, + "handlers": {}, + "loggers": log_config.get("loggers", DEFAULT_LOGGERS), + "root": {"handlers": []}, + } - return publisher + for handler_name, handler in parse_drain_configs(log_config["drains"]): + new_config["handlers"][handler_name] = handler + # Add each handler to the root logger. 
+ new_config["root"]["handlers"].append(handler_name) -def reload_structured_logging(*args, log_config=None) -> None: - warnings.warn( - "Currently the structured logging system can not be reloaded, doing nothing" - ) + return new_config diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index 9b46956ca9..2fbf5549a1 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -16,141 +16,65 @@ """ Log formatters that output terse JSON. """ - import json -from typing import IO - -from twisted.logger import FileLogObserver - -from synapse.logging._remote import TCPLogObserver +import logging _encoder = json.JSONEncoder(ensure_ascii=False, separators=(",", ":")) - -def flatten_event(event: dict, metadata: dict, include_time: bool = False): - """ - Flatten a Twisted logging event to an dictionary capable of being sent - as a log event to a logging aggregation system. - - The format is vastly simplified and is not designed to be a "human readable - string" in the sense that traditional logs are. Instead, the structure is - optimised for searchability and filtering, with human-understandable log - keys. - - Args: - event (dict): The Twisted logging event we are flattening. - metadata (dict): Additional data to include with each log message. This - can be information like the server name. Since the target log - consumer does not know who we are other than by host IP, this - allows us to forward through static information. - include_time (bool): Should we include the `time` key? If False, the - event time is stripped from the event. - """ - new_event = {} - - # If it's a failure, make the new event's log_failure be the traceback text. - if "log_failure" in event: - new_event["log_failure"] = event["log_failure"].getTraceback() - - # If it's a warning, copy over a string representation of the warning. - if "warning" in event: - new_event["warning"] = str(event["warning"]) - - # Stdlib logging events have "log_text" as their human-readable portion, - # Twisted ones have "log_format". For now, include the log_format, so that - # context only given in the log format (e.g. what is being logged) is - # available. - if "log_text" in event: - new_event["log"] = event["log_text"] - else: - new_event["log"] = event["log_format"] - - # We want to include the timestamp when forwarding over the network, but - # exclude it when we are writing to stdout. This is because the log ingester - # (e.g. logstash, fluentd) can add its own timestamp. - if include_time: - new_event["time"] = round(event["log_time"], 2) - - # Convert the log level to a textual representation. - new_event["level"] = event["log_level"].name.upper() - - # Ignore these keys, and do not transfer them over to the new log object. - # They are either useless (isError), transferred manually above (log_time, - # log_level, etc), or contain Python objects which are not useful for output - # (log_logger, log_source). - keys_to_delete = [ - "isError", - "log_failure", - "log_format", - "log_level", - "log_logger", - "log_source", - "log_system", - "log_time", - "log_text", - "observer", - "warning", - ] - - # If it's from the Twisted legacy logger (twisted.python.log), it adds some - # more keys we want to purge. - if event.get("log_namespace") == "log_legacy": - keys_to_delete.extend(["message", "system", "time"]) - - # Rather than modify the dictionary in place, construct a new one with only - # the content we want. The original event should be considered 'frozen'. 
- for key in event.keys(): - - if key in keys_to_delete: - continue - - if isinstance(event[key], (str, int, bool, float)) or event[key] is None: - # If it's a plain type, include it as is. - new_event[key] = event[key] - else: - # If it's not one of those basic types, write out a string - # representation. This should probably be a warning in development, - # so that we are sure we are only outputting useful data. - new_event[key] = str(event[key]) - - # Add the metadata information to the event (e.g. the server_name). - new_event.update(metadata) - - return new_event - - -def TerseJSONToConsoleLogObserver(outFile: IO[str], metadata: dict) -> FileLogObserver: - """ - A log observer that formats events to a flattened JSON representation. - - Args: - outFile: The file object to write to. - metadata: Metadata to be added to each log object. - """ - - def formatEvent(_event: dict) -> str: - flattened = flatten_event(_event, metadata) - return _encoder.encode(flattened) + "\n" - - return FileLogObserver(outFile, formatEvent) - - -def TerseJSONToTCPLogObserver( - hs, host: str, port: int, metadata: dict, maximum_buffer: int -) -> FileLogObserver: - """ - A log observer that formats events to a flattened JSON representation. - - Args: - hs (HomeServer): The homeserver that is being logged for. - host: The host of the logging target. - port: The logging target's port. - metadata: Metadata to be added to each log object. - maximum_buffer: The maximum buffer size. - """ - - def formatEvent(_event: dict) -> str: - flattened = flatten_event(_event, metadata, include_time=True) - return _encoder.encode(flattened) + "\n" - - return TCPLogObserver(hs, host, port, formatEvent, maximum_buffer) +# The properties of a standard LogRecord. +_LOG_RECORD_ATTRIBUTES = { + "args", + "asctime", + "created", + "exc_info", + # exc_text isn't a public attribute, but is used to cache the result of formatException. + "exc_text", + "filename", + "funcName", + "levelname", + "levelno", + "lineno", + "message", + "module", + "msecs", + "msg", + "name", + "pathname", + "process", + "processName", + "relativeCreated", + "stack_info", + "thread", + "threadName", +} + + +class JsonFormatter(logging.Formatter): + def format(self, record: logging.LogRecord) -> str: + event = { + "log": record.getMessage(), + "namespace": record.name, + "level": record.levelname, + } + + return self._format(record, event) + + def _format(self, record: logging.LogRecord, event: dict) -> str: + # Add any extra attributes to the event. + for key, value in record.__dict__.items(): + if key not in _LOG_RECORD_ATTRIBUTES: + event[key] = value + + return _encoder.encode(event) + + +class TerseJsonFormatter(JsonFormatter): + def format(self, record: logging.LogRecord) -> str: + event = { + "log": record.getMessage(), + "namespace": record.name, + "level": record.levelname, + "time": round(record.created, 2), + } + + return self._format(record, event) diff --git a/synapse/logging/filter.py b/synapse/logging/filter.py new file mode 100644 index 0000000000..1baf8dd679 --- /dev/null +++ b/synapse/logging/filter.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging + +from typing_extensions import Literal + + +class MetadataFilter(logging.Filter): + """Logging filter that adds constant values to each record. + + Args: + metadata: Key-value pairs to add to each record. + """ + + def __init__(self, metadata: dict): + self._metadata = metadata + + def filter(self, record: logging.LogRecord) -> Literal[True]: + for key, value in self._metadata.items(): + setattr(record, key, value) + return True diff --git a/synmark/__init__.py b/synmark/__init__.py index 09bc7e7927..3d4ec3e184 100644 --- a/synmark/__init__.py +++ b/synmark/__init__.py @@ -21,45 +21,6 @@ except ImportError: from twisted.internet.pollreactor import PollReactor as Reactor from twisted.internet.main import installReactor -from synapse.config.homeserver import HomeServerConfig -from synapse.util import Clock - -from tests.utils import default_config, setup_test_homeserver - - -async def make_homeserver(reactor, config=None): - """ - Make a Homeserver suitable for running benchmarks against. - - Args: - reactor: A Twisted reactor to run under. - config: A HomeServerConfig to use, or None. - """ - cleanup_tasks = [] - clock = Clock(reactor) - - if not config: - config = default_config("test") - - config_obj = HomeServerConfig() - config_obj.parse_config_dict(config, "", "") - - hs = setup_test_homeserver( - cleanup_tasks.append, config=config_obj, reactor=reactor, clock=clock - ) - stor = hs.get_datastore() - - # Run the database background updates. - if hasattr(stor.db_pool.updates, "do_next_background_update"): - while not await stor.db_pool.updates.has_completed_background_updates(): - await stor.db_pool.updates.do_next_background_update(1) - - def cleanup(): - for i in cleanup_tasks: - i() - - return hs, clock.sleep, cleanup - def make_reactor(): """ diff --git a/synmark/__main__.py b/synmark/__main__.py index 17df9ddeb7..de13c1a909 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py @@ -12,20 +12,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import sys from argparse import REMAINDER from contextlib import redirect_stderr from io import StringIO import pyperf -from synmark import make_reactor -from synmark.suites import SUITES from twisted.internet.defer import Deferred, ensureDeferred from twisted.logger import globalLogBeginner, textFileLogObserver from twisted.python.failure import Failure +from synmark import make_reactor +from synmark.suites import SUITES + from tests.utils import setupdb diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index d8e4c7d58f..c9d9cf761e 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -13,20 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging import warnings from io import StringIO from mock import Mock from pyperf import perf_counter -from synmark import make_homeserver from twisted.internet.defer import Deferred from twisted.internet.protocol import ServerFactory -from twisted.logger import LogBeginner, Logger, LogPublisher +from twisted.logger import LogBeginner, LogPublisher from twisted.protocols.basic import LineOnlyReceiver -from synapse.logging._structured import setup_structured_logging +from synapse.config.logger import _setup_stdlib_logging +from synapse.logging import RemoteHandler +from synapse.util import Clock class LineCounter(LineOnlyReceiver): @@ -62,7 +64,15 @@ async def main(reactor, loops): logger_factory.on_done = Deferred() port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1") - hs, wait, cleanup = await make_homeserver(reactor) + # A fake homeserver config. + class Config: + server_name = "synmark-" + str(loops) + no_redirect_stdio = True + + hs_config = Config() + + # To be able to sleep. + clock = Clock(reactor) errors = StringIO() publisher = LogPublisher() @@ -72,47 +82,49 @@ async def main(reactor, loops): ) log_config = { - "loggers": {"synapse": {"level": "DEBUG"}}, - "drains": { + "version": 1, + "loggers": {"synapse": {"level": "DEBUG", "handlers": ["tersejson"]}}, + "formatters": {"tersejson": {"class": "synapse.logging.TerseJsonFormatter"}}, + "handlers": { "tersejson": { - "type": "network_json_terse", + "class": "synapse.logging.RemoteHandler", "host": "127.0.0.1", "port": port.getHost().port, "maximum_buffer": 100, + "_reactor": reactor, } }, } - logger = Logger(namespace="synapse.logging.test_terse_json", observer=publisher) - logging_system = setup_structured_logging( - hs, hs.config, log_config, logBeginner=beginner, redirect_stdlib_logging=False + logger = logging.getLogger("synapse.logging.test_terse_json") + _setup_stdlib_logging( + hs_config, log_config, logBeginner=beginner, ) # Wait for it to connect... - await logging_system._observers[0]._service.whenConnected() + for handler in logging.getLogger("synapse").handlers: + if isinstance(handler, RemoteHandler): + break + else: + raise RuntimeError("Improperly configured: no RemoteHandler found.") + + await handler._service.whenConnected() start = perf_counter() # Send a bunch of useful messages for i in range(0, loops): - logger.info("test message %s" % (i,)) - - if ( - len(logging_system._observers[0]._buffer) - == logging_system._observers[0].maximum_buffer - ): - while ( - len(logging_system._observers[0]._buffer) - > logging_system._observers[0].maximum_buffer / 2 - ): - await wait(0.01) + logger.info("test message %s", i) + + if len(handler._buffer) == handler.maximum_buffer: + while len(handler._buffer) > handler.maximum_buffer / 2: + await clock.sleep(0.01) await logger_factory.on_done end = perf_counter() - start - logging_system.stop() + handler.close() port.stopListening() - cleanup() return end diff --git a/tests/logging/__init__.py b/tests/logging/__init__.py index e69de29bb2..a58d51441c 100644 --- a/tests/logging/__init__.py +++ b/tests/logging/__init__.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+
+class LoggerCleanupMixin:
+    def get_logger(self, handler):
+        """
+        Attach a handler to a logger and add clean-ups to revert this.
+        """
+        # Create a logger and add the handler to it.
+        logger = logging.getLogger(__name__)
+        logger.addHandler(handler)
+
+        # Ensure the logger actually logs something.
+        logger.setLevel(logging.INFO)
+
+        # Ensure the logger gets cleaned-up appropriately.
+        self.addCleanup(logger.removeHandler, handler)
+        self.addCleanup(logger.setLevel, logging.NOTSET)
+
+        return logger
diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py
new file mode 100644
index 0000000000..58ee1f2f3c
--- /dev/null
+++ b/tests/logging/test_remote_handler.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.test.proto_helpers import AccumulatingProtocol
+
+from synapse.logging import RemoteHandler
+
+from tests.logging import LoggerCleanupMixin
+from tests.server import FakeTransport, get_clock
+from tests.unittest import TestCase
+
+
+def connect_logging_client(reactor, client_id):
+    # This is essentially tests.server.connect_client, but disabling autoflush on
+    # the client transport. This is necessary to avoid an infinite loop due to
+    # sending of data via the logging transport causing additional logs to be
+    # written.
+    factory = reactor.tcpClients.pop(client_id)[2]
+    client = factory.buildProtocol(None)
+    server = AccumulatingProtocol()
+    server.makeConnection(FakeTransport(client, reactor))
+    client.makeConnection(FakeTransport(server, reactor, autoflush=False))
+
+    return client, server
+
+
+class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase):
+    def setUp(self):
+        self.reactor, _ = get_clock()
+
+    def test_log_output(self):
+        """
+        The remote handler delivers logs over TCP.
+        """
+        handler = RemoteHandler("127.0.0.1", 9000, _reactor=self.reactor)
+        logger = self.get_logger(handler)
+
+        logger.info("Hello there, %s!", "wally")
+
+        # Trigger the connection
+        client, server = connect_logging_client(self.reactor, 0)
+
+        # Trigger data being sent
+        client.transport.flush()
+
+        # One log message, with a single trailing newline
+        logs = server.data.decode("utf8").splitlines()
+        self.assertEqual(len(logs), 1)
+        self.assertEqual(server.data.count(b"\n"), 1)
+
+        # Ensure the data passed through properly.
+        self.assertEqual(logs[0], "Hello there, wally!")
+
+    def test_log_backpressure_debug(self):
+        """
+        When backpressure is hit, DEBUG logs will be shed.
+        """
+        handler = RemoteHandler(
+            "127.0.0.1", 9000, maximum_buffer=10, _reactor=self.reactor
+        )
+        logger = self.get_logger(handler)
+
+        # Send some debug messages
+        for i in range(0, 3):
+            logger.debug("debug %s" % (i,))
+
+        # Send a bunch of useful messages
+        for i in range(0, 7):
+            logger.info("info %s" % (i,))
+
+        # The last debug message pushes it past the maximum buffer
+        logger.debug("too much debug")
+
+        # Allow the reconnection
+        client, server = connect_logging_client(self.reactor, 0)
+        client.transport.flush()
+
+        # Only the 7 infos made it through, the debugs were elided
+        logs = server.data.splitlines()
+        self.assertEqual(len(logs), 7)
+        self.assertNotIn(b"debug", server.data)
+
+    def test_log_backpressure_info(self):
+        """
+        When backpressure is hit, DEBUG and INFO logs will be shed.
+        """
+        handler = RemoteHandler(
+            "127.0.0.1", 9000, maximum_buffer=10, _reactor=self.reactor
+        )
+        logger = self.get_logger(handler)
+
+        # Send some debug messages
+        for i in range(0, 3):
+            logger.debug("debug %s" % (i,))
+
+        # Send a bunch of useful messages
+        for i in range(0, 10):
+            logger.warning("warn %s" % (i,))
+
+        # Send a bunch of info messages
+        for i in range(0, 3):
+            logger.info("info %s" % (i,))
+
+        # The last debug message pushes it past the maximum buffer
+        logger.debug("too much debug")
+
+        # Allow the reconnection
+        client, server = connect_logging_client(self.reactor, 0)
+        client.transport.flush()
+
+        # The 10 warnings made it through, the debugs and infos were elided
+        logs = server.data.splitlines()
+        self.assertEqual(len(logs), 10)
+        self.assertNotIn(b"debug", server.data)
+        self.assertNotIn(b"info", server.data)
+
+    def test_log_backpressure_cut_middle(self):
+        """
+        When backpressure is hit and no more DEBUG or INFO messages can be culled,
+        it will cut the middle messages out.
+        """
+        handler = RemoteHandler(
+            "127.0.0.1", 9000, maximum_buffer=10, _reactor=self.reactor
+        )
+        logger = self.get_logger(handler)
+
+        # Send a bunch of useful messages
+        for i in range(0, 20):
+            logger.warning("warn %s" % (i,))
+
+        # Allow the reconnection
+        client, server = connect_logging_client(self.reactor, 0)
+        client.transport.flush()
+
+        # The first five and last five warnings made it through; the middle
+        # ones were elided
+        logs = server.data.decode("utf8").splitlines()
+        self.assertEqual(
+            ["warn %s" % (i,) for i in range(5)]
+            + ["warn %s" % (i,) for i in range(15, 20)],
+            logs,
+        )
diff --git a/tests/logging/test_structured.py b/tests/logging/test_structured.py
deleted file mode 100644
index d36f5f426c..0000000000
--- a/tests/logging/test_structured.py
+++ /dev/null
@@ -1,214 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2019 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import logging -import os -import os.path -import shutil -import sys -import textwrap - -from twisted.logger import Logger, eventAsText, eventsFromJSONLogFile - -from synapse.config.logger import setup_logging -from synapse.logging._structured import setup_structured_logging -from synapse.logging.context import LoggingContext - -from tests.unittest import DEBUG, HomeserverTestCase - - -class FakeBeginner: - def beginLoggingTo(self, observers, **kwargs): - self.observers = observers - - -class StructuredLoggingTestBase: - """ - Test base that registers a cleanup handler to reset the stdlib log handler - to 'unset'. - """ - - def prepare(self, reactor, clock, hs): - def _cleanup(): - logging.getLogger("synapse").setLevel(logging.NOTSET) - - self.addCleanup(_cleanup) - - -class StructuredLoggingTestCase(StructuredLoggingTestBase, HomeserverTestCase): - """ - Tests for Synapse's structured logging support. - """ - - def test_output_to_json_round_trip(self): - """ - Synapse logs can be outputted to JSON and then read back again. - """ - temp_dir = self.mktemp() - os.mkdir(temp_dir) - self.addCleanup(shutil.rmtree, temp_dir) - - json_log_file = os.path.abspath(os.path.join(temp_dir, "out.json")) - - log_config = { - "drains": {"jsonfile": {"type": "file_json", "location": json_log_file}} - } - - # Begin the logger with our config - beginner = FakeBeginner() - setup_structured_logging( - self.hs, self.hs.config, log_config, logBeginner=beginner - ) - - # Make a logger and send an event - logger = Logger( - namespace="tests.logging.test_structured", observer=beginner.observers[0] - ) - logger.info("Hello there, {name}!", name="wally") - - # Read the log file and check it has the event we sent - with open(json_log_file, "r") as f: - logged_events = list(eventsFromJSONLogFile(f)) - self.assertEqual(len(logged_events), 1) - - # The event pulled from the file should render fine - self.assertEqual( - eventAsText(logged_events[0], includeTimestamp=False), - "[tests.logging.test_structured#info] Hello there, wally!", - ) - - def test_output_to_text(self): - """ - Synapse logs can be outputted to text. - """ - temp_dir = self.mktemp() - os.mkdir(temp_dir) - self.addCleanup(shutil.rmtree, temp_dir) - - log_file = os.path.abspath(os.path.join(temp_dir, "out.log")) - - log_config = {"drains": {"file": {"type": "file", "location": log_file}}} - - # Begin the logger with our config - beginner = FakeBeginner() - setup_structured_logging( - self.hs, self.hs.config, log_config, logBeginner=beginner - ) - - # Make a logger and send an event - logger = Logger( - namespace="tests.logging.test_structured", observer=beginner.observers[0] - ) - logger.info("Hello there, {name}!", name="wally") - - # Read the log file and check it has the event we sent - with open(log_file, "r") as f: - logged_events = f.read().strip().split("\n") - self.assertEqual(len(logged_events), 1) - - # The event pulled from the file should render fine - self.assertTrue( - logged_events[0].endswith( - " - tests.logging.test_structured - INFO - None - Hello there, wally!" - ) - ) - - def test_collects_logcontext(self): - """ - Test that log outputs have the attached logging context. 
- """ - log_config = {"drains": {}} - - # Begin the logger with our config - beginner = FakeBeginner() - publisher = setup_structured_logging( - self.hs, self.hs.config, log_config, logBeginner=beginner - ) - - logs = [] - - publisher.addObserver(logs.append) - - # Make a logger and send an event - logger = Logger( - namespace="tests.logging.test_structured", observer=beginner.observers[0] - ) - - with LoggingContext("testcontext", request="somereq"): - logger.info("Hello there, {name}!", name="steve") - - self.assertEqual(len(logs), 1) - self.assertEqual(logs[0]["request"], "somereq") - - -class StructuredLoggingConfigurationFileTestCase( - StructuredLoggingTestBase, HomeserverTestCase -): - def make_homeserver(self, reactor, clock): - - tempdir = self.mktemp() - os.mkdir(tempdir) - log_config_file = os.path.abspath(os.path.join(tempdir, "log.config.yaml")) - self.homeserver_log = os.path.abspath(os.path.join(tempdir, "homeserver.log")) - - config = self.default_config() - config["log_config"] = log_config_file - - with open(log_config_file, "w") as f: - f.write( - textwrap.dedent( - """\ - structured: true - - drains: - file: - type: file_json - location: %s - """ - % (self.homeserver_log,) - ) - ) - - self.addCleanup(self._sys_cleanup) - - return self.setup_test_homeserver(config=config) - - def _sys_cleanup(self): - sys.stdout = sys.__stdout__ - sys.stderr = sys.__stderr__ - - # Do not remove! We need the logging system to be set other than WARNING. - @DEBUG - def test_log_output(self): - """ - When a structured logging config is given, Synapse will use it. - """ - beginner = FakeBeginner() - publisher = setup_logging(self.hs, self.hs.config, logBeginner=beginner) - - # Make a logger and send an event - logger = Logger(namespace="tests.logging.test_structured", observer=publisher) - - with LoggingContext("testcontext", request="somereq"): - logger.info("Hello there, {name}!", name="steve") - - with open(self.homeserver_log, "r") as f: - logged_events = [ - eventAsText(x, includeTimestamp=False) for x in eventsFromJSONLogFile(f) - ] - - logs = "\n".join(logged_events) - self.assertTrue("***** STARTING SERVER *****" in logs) - self.assertTrue("Hello there, steve!" in logs) diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py index fd128b88e0..73f469b802 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py @@ -14,57 +14,33 @@ # limitations under the License. import json -from collections import Counter +import logging +from io import StringIO -from twisted.logger import Logger +from synapse.logging._terse_json import JsonFormatter, TerseJsonFormatter -from synapse.logging._structured import setup_structured_logging +from tests.logging import LoggerCleanupMixin +from tests.unittest import TestCase -from tests.server import connect_client -from tests.unittest import HomeserverTestCase -from .test_structured import FakeBeginner, StructuredLoggingTestBase - - -class TerseJSONTCPTestCase(StructuredLoggingTestBase, HomeserverTestCase): - def test_log_output(self): +class TerseJsonTestCase(LoggerCleanupMixin, TestCase): + def test_terse_json_output(self): """ - The Terse JSON outputter delivers simplified structured logs over TCP. + The Terse JSON formatter converts log messages to JSON. 
""" - log_config = { - "drains": { - "tersejson": { - "type": "network_json_terse", - "host": "127.0.0.1", - "port": 8000, - } - } - } - - # Begin the logger with our config - beginner = FakeBeginner() - setup_structured_logging( - self.hs, self.hs.config, log_config, logBeginner=beginner - ) - - logger = Logger( - namespace="tests.logging.test_terse_json", observer=beginner.observers[0] - ) - logger.info("Hello there, {name}!", name="wally") - - # Trigger the connection - self.pump() + output = StringIO() - _, server = connect_client(self.reactor, 0) + handler = logging.StreamHandler(output) + handler.setFormatter(TerseJsonFormatter()) + logger = self.get_logger(handler) - # Trigger data being sent - self.pump() + logger.info("Hello there, %s!", "wally") - # One log message, with a single trailing newline - logs = server.data.decode("utf8").splitlines() + # One log message, with a single trailing newline. + data = output.getvalue() + logs = data.splitlines() self.assertEqual(len(logs), 1) - self.assertEqual(server.data.count(b"\n"), 1) - + self.assertEqual(data.count("\n"), 1) log = json.loads(logs[0]) # The terse logger should give us these keys. @@ -72,163 +48,74 @@ class TerseJSONTCPTestCase(StructuredLoggingTestBase, HomeserverTestCase): "log", "time", "level", - "log_namespace", - "request", - "scope", - "server_name", - "name", + "namespace", ] self.assertCountEqual(log.keys(), expected_log_keys) + self.assertEqual(log["log"], "Hello there, wally!") - # It contains the data we expect. - self.assertEqual(log["name"], "wally") - - def test_log_backpressure_debug(self): + def test_extra_data(self): """ - When backpressure is hit, DEBUG logs will be shed. + Additional information can be included in the structured logging. """ - log_config = { - "loggers": {"synapse": {"level": "DEBUG"}}, - "drains": { - "tersejson": { - "type": "network_json_terse", - "host": "127.0.0.1", - "port": 8000, - "maximum_buffer": 10, - } - }, - } - - # Begin the logger with our config - beginner = FakeBeginner() - setup_structured_logging( - self.hs, - self.hs.config, - log_config, - logBeginner=beginner, - redirect_stdlib_logging=False, - ) - - logger = Logger( - namespace="synapse.logging.test_terse_json", observer=beginner.observers[0] - ) + output = StringIO() - # Send some debug messages - for i in range(0, 3): - logger.debug("debug %s" % (i,)) + handler = logging.StreamHandler(output) + handler.setFormatter(TerseJsonFormatter()) + logger = self.get_logger(handler) - # Send a bunch of useful messages - for i in range(0, 7): - logger.info("test message %s" % (i,)) - - # The last debug message pushes it past the maximum buffer - logger.debug("too much debug") - - # Allow the reconnection - _, server = connect_client(self.reactor, 0) - self.pump() - - # Only the 7 infos made it through, the debugs were elided - logs = server.data.splitlines() - self.assertEqual(len(logs), 7) - - def test_log_backpressure_info(self): - """ - When backpressure is hit, DEBUG and INFO logs will be shed. 
-        """
-        log_config = {
-            "loggers": {"synapse": {"level": "DEBUG"}},
-            "drains": {
-                "tersejson": {
-                    "type": "network_json_terse",
-                    "host": "127.0.0.1",
-                    "port": 8000,
-                    "maximum_buffer": 10,
-                }
-            },
-        }
-
-        # Begin the logger with our config
-        beginner = FakeBeginner()
-        setup_structured_logging(
-            self.hs,
-            self.hs.config,
-            log_config,
-            logBeginner=beginner,
-            redirect_stdlib_logging=False,
-        )
-
-        logger = Logger(
-            namespace="synapse.logging.test_terse_json", observer=beginner.observers[0]
+        logger.info(
+            "Hello there, %s!", "wally", extra={"foo": "bar", "int": 3, "bool": True}
         )

-        # Send some debug messages
-        for i in range(0, 3):
-            logger.debug("debug %s" % (i,))
-
-        # Send a bunch of useful messages
-        for i in range(0, 10):
-            logger.warn("test warn %s" % (i,))
-
-        # Send a bunch of info messages
-        for i in range(0, 3):
-            logger.info("test message %s" % (i,))
-
-        # The last debug message pushes it past the maximum buffer
-        logger.debug("too much debug")
-
-        # Allow the reconnection
-        client, server = connect_client(self.reactor, 0)
-        self.pump()
+        # One log message, with a single trailing newline.
+        data = output.getvalue()
+        logs = data.splitlines()
+        self.assertEqual(len(logs), 1)
+        self.assertEqual(data.count("\n"), 1)
+        log = json.loads(logs[0])

-        # The 10 warnings made it through, the debugs and infos were elided
-        logs = list(map(json.loads, server.data.decode("utf8").splitlines()))
-        self.assertEqual(len(logs), 10)
+        # The terse logger should give us these keys.
+        expected_log_keys = [
+            "log",
+            "time",
+            "level",
+            "namespace",
+            # The additional keys given via extra.
+            "foo",
+            "int",
+            "bool",
+        ]
+        self.assertCountEqual(log.keys(), expected_log_keys)

-        self.assertEqual(Counter([x["level"] for x in logs]), {"WARN": 10})
+        # Check the values of the extra fields.
+        self.assertEqual(log["foo"], "bar")
+        self.assertEqual(log["int"], 3)
+        self.assertIs(log["bool"], True)

-    def test_log_backpressure_cut_middle(self):
+    def test_json_output(self):
         """
-        When backpressure is hit, and no more DEBUG and INFOs cannot be culled,
-        it will cut the middle messages out.
+        The JSON formatter converts log messages to JSON.
         """
-        log_config = {
-            "loggers": {"synapse": {"level": "DEBUG"}},
-            "drains": {
-                "tersejson": {
-                    "type": "network_json_terse",
-                    "host": "127.0.0.1",
-                    "port": 8000,
-                    "maximum_buffer": 10,
-                }
-            },
-        }
-
-        # Begin the logger with our config
-        beginner = FakeBeginner()
-        setup_structured_logging(
-            self.hs,
-            self.hs.config,
-            log_config,
-            logBeginner=beginner,
-            redirect_stdlib_logging=False,
-        )
+        output = StringIO()

-        logger = Logger(
-            namespace="synapse.logging.test_terse_json", observer=beginner.observers[0]
-        )
+        handler = logging.StreamHandler(output)
+        handler.setFormatter(JsonFormatter())
+        logger = self.get_logger(handler)

-        # Send a bunch of useful messages
-        for i in range(0, 20):
-            logger.warn("test warn", num=i)
+        logger.info("Hello there, %s!", "wally")

-        # Allow the reconnection
-        client, server = connect_client(self.reactor, 0)
-        self.pump()
+        # One log message, with a single trailing newline.
+ data = output.getvalue() + logs = data.splitlines() + self.assertEqual(len(logs), 1) + self.assertEqual(data.count("\n"), 1) + log = json.loads(logs[0]) - # The first five and last five warnings made it through, the debugs and - # infos were elided - logs = list(map(json.loads, server.data.decode("utf8").splitlines())) - self.assertEqual(len(logs), 10) - self.assertEqual(Counter([x["level"] for x in logs]), {"WARN": 10}) - self.assertEqual([0, 1, 2, 3, 4, 15, 16, 17, 18, 19], [x["num"] for x in logs]) + # The terse logger should give us these keys. + expected_log_keys = [ + "log", + "level", + "namespace", + ] + self.assertCountEqual(log.keys(), expected_log_keys) + self.assertEqual(log["log"], "Hello there, wally!") diff --git a/tests/server.py b/tests/server.py index ea9c22bc51..b97003fa5a 100644 --- a/tests/server.py +++ b/tests/server.py @@ -571,12 +571,10 @@ def connect_client(reactor: IReactorTCP, client_id: int) -> AccumulatingProtocol reactor factory: The connecting factory to build. """ - factory = reactor.tcpClients[client_id][2] + factory = reactor.tcpClients.pop(client_id)[2] client = factory.buildProtocol(None) server = AccumulatingProtocol() server.makeConnection(FakeTransport(client, reactor)) client.makeConnection(FakeTransport(server, reactor)) - reactor.tcpClients.pop(client_id) - return client, server -- cgit 1.5.1 From f21e24ffc22a5eb01f242f47fa30979321cf20fc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Oct 2020 15:58:44 +0000 Subject: Add ability for access tokens to belong to one user but grant access to another user. (#8616) We do it this way round so that only the "owner" can delete the access token (i.e. `/logout/all` by the "owner" also deletes that token, but `/logout/all` by the "target user" doesn't). A future PR will add an API for creating such a token. When the target user and authenticated entity are different the `Processed request` log line will be logged with a: `{@admin:server as @bob:server} ...`. I'm not convinced by that format (especially since it adds spaces in there, making it harder to use `cut -d ' '` to chop off the start of log lines). Suggestions welcome. --- changelog.d/8616.misc | 1 + synapse/api/auth.py | 113 +++++++++------------ synapse/appservice/__init__.py | 4 +- synapse/federation/transport/server.py | 2 +- synapse/handlers/auth.py | 8 +- synapse/handlers/register.py | 7 +- synapse/http/site.py | 30 ++++-- synapse/replication/http/membership.py | 6 +- synapse/replication/http/send_event.py | 3 +- synapse/storage/databases/main/registration.py | 48 +++++++-- .../main/schema/delta/58/22puppet_token.sql | 17 ++++ synapse/types.py | 33 ++++-- tests/api/test_auth.py | 29 +++--- tests/api/test_ratelimiting.py | 4 +- tests/appservice/test_appservice.py | 1 + tests/handlers/test_device.py | 2 +- tests/handlers/test_message.py | 2 +- tests/push/test_email.py | 2 +- tests/push/test_http.py | 10 +- tests/replication/test_pusher_shard.py | 2 +- tests/rest/client/v2_alpha/test_register.py | 1 + tests/storage/test_registration.py | 10 +- 22 files changed, 197 insertions(+), 138 deletions(-) create mode 100644 changelog.d/8616.misc create mode 100644 synapse/storage/databases/main/schema/delta/58/22puppet_token.sql (limited to 'tests') diff --git a/changelog.d/8616.misc b/changelog.d/8616.misc new file mode 100644 index 0000000000..385b14063e --- /dev/null +++ b/changelog.d/8616.misc @@ -0,0 +1 @@ +Change schema to support access tokens belonging to one user but granting access to another. 
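
The semantics that the diffs below implement can be summarised in a short
standalone sketch (illustrative only; `TokenRow` and `resolve_token` are
hypothetical names, not Synapse's API): the new `puppets_user_id` column
decides who a request authenticates as, while ownership, and therefore
revocation, stays with the user who created the token.

    from typing import NamedTuple, Optional, Tuple

    class TokenRow(NamedTuple):
        user_id: str                    # creator/owner; their /logout/all revokes it
        puppets_user_id: Optional[str]  # target user the token acts as, if any

    def resolve_token(row: TokenRow) -> Tuple[str, str]:
        # Mirrors the COALESCE(puppets_user_id, user_id) lookup added below.
        return (row.puppets_user_id or row.user_id, row.user_id)

    # An admin token puppeting @bob:server:
    acting_user, token_owner = resolve_token(TokenRow("@admin:server", "@bob:server"))
    assert acting_user == "@bob:server" and token_owner == "@admin:server"

    # The "Processed request" log line records both entities when they differ:
    print("{},{}".format(token_owner, acting_user))  # @admin:server,@bob:server
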
diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 526cb58c5f..bfcaf68b2a 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -33,6 +33,7 @@ from synapse.api.errors import ( from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase from synapse.logging import opentracing as opentracing +from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import StateMap, UserID from synapse.util.caches.lrucache import LruCache from synapse.util.metrics import Measure @@ -190,10 +191,6 @@ class Auth: user_id, app_service = await self._get_appservice_user_id(request) if user_id: - request.authenticated_entity = user_id - opentracing.set_tag("authenticated_entity", user_id) - opentracing.set_tag("appservice_id", app_service.id) - if ip_addr and self._track_appservice_user_ips: await self.store.insert_client_ip( user_id=user_id, @@ -203,31 +200,38 @@ class Auth: device_id="dummy-device", # stubbed ) - return synapse.types.create_requester(user_id, app_service=app_service) + requester = synapse.types.create_requester( + user_id, app_service=app_service + ) + + request.requester = user_id + opentracing.set_tag("authenticated_entity", user_id) + opentracing.set_tag("user_id", user_id) + opentracing.set_tag("appservice_id", app_service.id) + + return requester user_info = await self.get_user_by_access_token( access_token, rights, allow_expired=allow_expired ) - user = user_info["user"] - token_id = user_info["token_id"] - is_guest = user_info["is_guest"] - shadow_banned = user_info["shadow_banned"] + token_id = user_info.token_id + is_guest = user_info.is_guest + shadow_banned = user_info.shadow_banned # Deny the request if the user account has expired. if self._account_validity.enabled and not allow_expired: - user_id = user.to_string() - if await self.store.is_account_expired(user_id, self.clock.time_msec()): + if await self.store.is_account_expired( + user_info.user_id, self.clock.time_msec() + ): raise AuthError( 403, "User account has expired", errcode=Codes.EXPIRED_ACCOUNT ) - # device_id may not be present if get_user_by_access_token has been - # stubbed out. 
- device_id = user_info.get("device_id") + device_id = user_info.device_id - if user and access_token and ip_addr: + if access_token and ip_addr: await self.store.insert_client_ip( - user_id=user.to_string(), + user_id=user_info.token_owner, access_token=access_token, ip=ip_addr, user_agent=user_agent, @@ -241,19 +245,23 @@ class Auth: errcode=Codes.GUEST_ACCESS_FORBIDDEN, ) - request.authenticated_entity = user.to_string() - opentracing.set_tag("authenticated_entity", user.to_string()) - if device_id: - opentracing.set_tag("device_id", device_id) - - return synapse.types.create_requester( - user, + requester = synapse.types.create_requester( + user_info.user_id, token_id, is_guest, shadow_banned, device_id, app_service=app_service, + authenticated_entity=user_info.token_owner, ) + + request.requester = requester + opentracing.set_tag("authenticated_entity", user_info.token_owner) + opentracing.set_tag("user_id", user_info.user_id) + if device_id: + opentracing.set_tag("device_id", device_id) + + return requester except KeyError: raise MissingClientTokenError() @@ -284,7 +292,7 @@ class Auth: async def get_user_by_access_token( self, token: str, rights: str = "access", allow_expired: bool = False, - ) -> dict: + ) -> TokenLookupResult: """ Validate access token and get user_id from it Args: @@ -293,13 +301,7 @@ class Auth: allow this allow_expired: If False, raises an InvalidClientTokenError if the token is expired - Returns: - dict that includes: - `user` (UserID) - `is_guest` (bool) - `shadow_banned` (bool) - `token_id` (int|None): access token id. May be None if guest - `device_id` (str|None): device corresponding to access token + Raises: InvalidClientTokenError if a user by that token exists, but the token is expired @@ -309,9 +311,9 @@ class Auth: if rights == "access": # first look in the database - r = await self._look_up_user_by_access_token(token) + r = await self.store.get_user_by_access_token(token) if r: - valid_until_ms = r["valid_until_ms"] + valid_until_ms = r.valid_until_ms if ( not allow_expired and valid_until_ms is not None @@ -328,7 +330,6 @@ class Auth: # otherwise it needs to be a valid macaroon try: user_id, guest = self._parse_and_validate_macaroon(token, rights) - user = UserID.from_string(user_id) if rights == "access": if not guest: @@ -354,23 +355,17 @@ class Auth: raise InvalidClientTokenError( "Guest access token used for regular user" ) - ret = { - "user": user, - "is_guest": True, - "shadow_banned": False, - "token_id": None, + + ret = TokenLookupResult( + user_id=user_id, + is_guest=True, # all guests get the same device id - "device_id": GUEST_DEVICE_ID, - } + device_id=GUEST_DEVICE_ID, + ) elif rights == "delete_pusher": # We don't store these tokens in the database - ret = { - "user": user, - "is_guest": False, - "shadow_banned": False, - "token_id": None, - "device_id": None, - } + + ret = TokenLookupResult(user_id=user_id, is_guest=False) else: raise RuntimeError("Unknown rights setting %s", rights) return ret @@ -479,31 +474,15 @@ class Auth: now = self.hs.get_clock().time_msec() return now < expiry - async def _look_up_user_by_access_token(self, token): - ret = await self.store.get_user_by_access_token(token) - if not ret: - return None - - # we use ret.get() below because *lots* of unit tests stub out - # get_user_by_access_token in a way where it only returns a couple of - # the fields. 
- user_info = { - "user": UserID.from_string(ret.get("name")), - "token_id": ret.get("token_id", None), - "is_guest": False, - "shadow_banned": ret.get("shadow_banned"), - "device_id": ret.get("device_id"), - "valid_until_ms": ret.get("valid_until_ms"), - } - return user_info - def get_appservice_by_req(self, request): token = self.get_access_token_from_request(request) service = self.store.get_app_service_by_token(token) if not service: logger.warning("Unrecognised appservice access token.") raise InvalidClientTokenError() - request.authenticated_entity = service.sender + request.requester = synapse.types.create_requester( + service.sender, app_service=service + ) return service async def is_server_admin(self, user: UserID) -> bool: diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 3862d9c08f..66d008d2f4 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -52,11 +52,11 @@ class ApplicationService: self, token, hostname, + id, + sender, url=None, namespaces=None, hs_token=None, - sender=None, - id=None, protocols=None, rate_limited=True, ip_range_whitelist=None, diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 3a6b95631e..a0933fae88 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -154,7 +154,7 @@ class Authenticator: ) logger.debug("Request from %s", origin) - request.authenticated_entity = origin + request.requester = origin # If we get a valid signed request from the other side, its probably # alive diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 276594f3d9..ff103cbb92 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -991,17 +991,17 @@ class AuthHandler(BaseHandler): # This might return an awaitable, if it does block the log out # until it completes. result = provider.on_logged_out( - user_id=str(user_info["user"]), - device_id=user_info["device_id"], + user_id=user_info.user_id, + device_id=user_info.device_id, access_token=access_token, ) if inspect.isawaitable(result): await result # delete pushers associated with this access token - if user_info["token_id"] is not None: + if user_info.token_id is not None: await self.hs.get_pusherpool().remove_pushers_by_access_token( - str(user_info["user"]), (user_info["token_id"],) + user_info.user_id, (user_info.token_id,) ) async def delete_access_tokens_for_user( diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index a6f1d21674..ed1ff62599 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -115,7 +115,10 @@ class RegistrationHandler(BaseHandler): 400, "User ID already taken.", errcode=Codes.USER_IN_USE ) user_data = await self.auth.get_user_by_access_token(guest_access_token) - if not user_data["is_guest"] or user_data["user"].localpart != localpart: + if ( + not user_data.is_guest + or UserID.from_string(user_data.user_id).localpart != localpart + ): raise AuthError( 403, "Cannot register taken user ID without valid guest " @@ -741,7 +744,7 @@ class RegistrationHandler(BaseHandler): # up when the access token is saved, but that's quite an # invasive change I'd rather do separately. 
user_tuple = await self.store.get_user_by_access_token(token) - token_id = user_tuple["token_id"] + token_id = user_tuple.token_id await self.pusher_pool.add_pusher( user_id=user_id, diff --git a/synapse/http/site.py b/synapse/http/site.py index ddb1770b09..5f0581dc3f 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -14,7 +14,7 @@ import contextlib import logging import time -from typing import Optional +from typing import Optional, Union from twisted.python.failure import Failure from twisted.web.server import Request, Site @@ -23,6 +23,7 @@ from synapse.config.server import ListenerConfig from synapse.http import redact_uri from synapse.http.request_metrics import RequestMetrics, requests_counter from synapse.logging.context import LoggingContext, PreserveLoggingContext +from synapse.types import Requester logger = logging.getLogger(__name__) @@ -54,9 +55,12 @@ class SynapseRequest(Request): Request.__init__(self, channel, *args, **kw) self.site = channel.site self._channel = channel # this is used by the tests - self.authenticated_entity = None self.start_time = 0.0 + # The requester, if authenticated. For federation requests this is the + # server name, for client requests this is the Requester object. + self.requester = None # type: Optional[Union[Requester, str]] + # we can't yet create the logcontext, as we don't know the method. self.logcontext = None # type: Optional[LoggingContext] @@ -271,11 +275,23 @@ class SynapseRequest(Request): # to the client (nb may be negative) response_send_time = self.finish_time - self._processing_finished_time - # need to decode as it could be raw utf-8 bytes - # from a IDN servname in an auth header - authenticated_entity = self.authenticated_entity - if authenticated_entity is not None and isinstance(authenticated_entity, bytes): - authenticated_entity = authenticated_entity.decode("utf-8", "replace") + # Convert the requester into a string that we can log + authenticated_entity = None + if isinstance(self.requester, str): + authenticated_entity = self.requester + elif isinstance(self.requester, Requester): + authenticated_entity = self.requester.authenticated_entity + + # If this is a request where the target user doesn't match the user who + # authenticated (e.g. and admin is puppetting a user) then we log both. + if self.requester.user.to_string() != authenticated_entity: + authenticated_entity = "{},{}".format( + authenticated_entity, self.requester.user.to_string(), + ) + elif self.requester is not None: + # This shouldn't happen, but we log it so we don't lose information + # and can see that we're doing something wrong. + authenticated_entity = repr(self.requester) # type: ignore[unreachable] # ...or could be raw utf-8 bytes in the User-Agent header. # N.B. 
if you don't do this, the logger explodes cryptically diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index e7cc74a5d2..f0c37eaf5e 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -77,8 +77,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint): requester = Requester.deserialize(self.store, content["requester"]) - if requester.user: - request.authenticated_entity = requester.user.to_string() + request.requester = requester logger.info("remote_join: %s into room: %s", user_id, room_id) @@ -142,8 +141,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): requester = Requester.deserialize(self.store, content["requester"]) - if requester.user: - request.authenticated_entity = requester.user.to_string() + request.requester = requester # hopefully we're now on the master, so this won't recurse! event_id, stream_id = await self.member_handler.remote_reject_invite( diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index fc129dbaa7..8fa104c8d3 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -115,8 +115,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): ratelimit = content["ratelimit"] extra_users = [UserID.from_string(u) for u in content["extra_users"]] - if requester.user: - request.authenticated_entity = requester.user.to_string() + request.requester = requester logger.info( "Got event to send with ID: %s into room: %s", event.event_id, event.room_id diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index e7b17a7385..e5d07ce72a 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -18,6 +18,8 @@ import logging import re from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple +import attr + from synapse.api.constants import UserTypes from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -38,6 +40,35 @@ THIRTY_MINUTES_IN_MS = 30 * 60 * 1000 logger = logging.getLogger(__name__) +@attr.s(frozen=True, slots=True) +class TokenLookupResult: + """Result of looking up an access token. + + Attributes: + user_id: The user that this token authenticates as + is_guest + shadow_banned + token_id: The ID of the access token looked up + device_id: The device associated with the token, if any. + valid_until_ms: The timestamp the token expires, if any. + token_owner: The "owner" of the token. This is either the same as the + user, or a server admin who is logged in as the user. + """ + + user_id = attr.ib(type=str) + is_guest = attr.ib(type=bool, default=False) + shadow_banned = attr.ib(type=bool, default=False) + token_id = attr.ib(type=Optional[int], default=None) + device_id = attr.ib(type=Optional[str], default=None) + valid_until_ms = attr.ib(type=Optional[int], default=None) + token_owner = attr.ib(type=str) + + # Make the token owner default to the user ID, which is the common case. 
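+    # (The `@token_owner.default` decorator below registers the method with
+    # attrs as a default factory: it runs at construction time whenever no
+    # explicit token_owner is passed, so token_owner falls back to user_id.)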
+ @token_owner.default + def _default_token_owner(self): + return self.user_id + + class RegistrationWorkerStore(CacheInvalidationWorkerStore): def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"): super().__init__(database, db_conn, hs) @@ -102,15 +133,13 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): return is_trial @cached() - async def get_user_by_access_token(self, token: str) -> Optional[dict]: + async def get_user_by_access_token(self, token: str) -> Optional[TokenLookupResult]: """Get a user from the given access token. Args: token: The access token of a user. Returns: - None, if the token did not match, otherwise dict - including the keys `name`, `is_guest`, `device_id`, `token_id`, - `valid_until_ms`. + None, if the token did not match, otherwise a `TokenLookupResult` """ return await self.db_pool.runInteraction( "get_user_by_access_token", self._query_for_auth, token @@ -331,23 +360,24 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): await self.db_pool.runInteraction("set_server_admin", set_server_admin_txn) - def _query_for_auth(self, txn, token): + def _query_for_auth(self, txn, token: str) -> Optional[TokenLookupResult]: sql = """ - SELECT users.name, + SELECT users.name as user_id, users.is_guest, users.shadow_banned, access_tokens.id as token_id, access_tokens.device_id, - access_tokens.valid_until_ms + access_tokens.valid_until_ms, + access_tokens.user_id as token_owner FROM users - INNER JOIN access_tokens on users.name = access_tokens.user_id + INNER JOIN access_tokens on users.name = COALESCE(puppets_user_id, access_tokens.user_id) WHERE token = ? """ txn.execute(sql, (token,)) rows = self.db_pool.cursor_to_dict(txn) if rows: - return rows[0] + return TokenLookupResult(**rows[0]) return None diff --git a/synapse/storage/databases/main/schema/delta/58/22puppet_token.sql b/synapse/storage/databases/main/schema/delta/58/22puppet_token.sql new file mode 100644 index 0000000000..00a9431a97 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/22puppet_token.sql @@ -0,0 +1,17 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Whether the access token is an admin token for controlling another user. 
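+-- NULL for ordinary tokens. When set, requests made with the token
+-- authenticate as this user, while the token itself still belongs to (and
+-- is revoked together with) the user in the user_id column.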
+ALTER TABLE access_tokens ADD COLUMN puppets_user_id TEXT; diff --git a/synapse/types.py b/synapse/types.py index 5bde67cc07..66bb5bac8d 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -29,6 +29,7 @@ from typing import ( Tuple, Type, TypeVar, + Union, ) import attr @@ -38,6 +39,7 @@ from unpaddedbase64 import decode_base64 from synapse.api.errors import Codes, SynapseError if TYPE_CHECKING: + from synapse.appservice.api import ApplicationService from synapse.storage.databases.main import DataStore # define a version of typing.Collection that works on python 3.5 @@ -74,6 +76,7 @@ class Requester( "shadow_banned", "device_id", "app_service", + "authenticated_entity", ], ) ): @@ -104,6 +107,7 @@ class Requester( "shadow_banned": self.shadow_banned, "device_id": self.device_id, "app_server_id": self.app_service.id if self.app_service else None, + "authenticated_entity": self.authenticated_entity, } @staticmethod @@ -129,16 +133,18 @@ class Requester( shadow_banned=input["shadow_banned"], device_id=input["device_id"], app_service=appservice, + authenticated_entity=input["authenticated_entity"], ) def create_requester( - user_id, - access_token_id=None, - is_guest=False, - shadow_banned=False, - device_id=None, - app_service=None, + user_id: Union[str, "UserID"], + access_token_id: Optional[int] = None, + is_guest: Optional[bool] = False, + shadow_banned: Optional[bool] = False, + device_id: Optional[str] = None, + app_service: Optional["ApplicationService"] = None, + authenticated_entity: Optional[str] = None, ): """ Create a new ``Requester`` object @@ -151,14 +157,27 @@ def create_requester( shadow_banned (bool): True if the user making this request is shadow-banned. device_id (str|None): device_id which was set at authentication time app_service (ApplicationService|None): the AS requesting on behalf of the user + authenticated_entity: The entity that authenticated when making the request. + This is different to the user_id when an admin user or the server is + "puppeting" the user. 
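+ For example, an admin who puppets @alice:example.com would get a Requester
+ whose user_id is @alice:example.com but whose authenticated_entity is the
+ admin's own user ID.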
Returns: Requester """ if not isinstance(user_id, UserID): user_id = UserID.from_string(user_id) + + if authenticated_entity is None: + authenticated_entity = user_id.to_string() + return Requester( - user_id, access_token_id, is_guest, shadow_banned, device_id, app_service + user_id, + access_token_id, + is_guest, + shadow_banned, + device_id, + app_service, + authenticated_entity, ) diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index cb6f29d670..0fd55f428a 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -29,6 +29,7 @@ from synapse.api.errors import ( MissingClientTokenError, ResourceLimitError, ) +from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import UserID from tests import unittest @@ -61,7 +62,9 @@ class AuthTestCase(unittest.TestCase): @defer.inlineCallbacks def test_get_user_by_req_user_valid_token(self): - user_info = {"name": self.test_user, "token_id": "ditto", "device_id": "device"} + user_info = TokenLookupResult( + user_id=self.test_user, token_id=5, device_id="device" + ) self.store.get_user_by_access_token = Mock( return_value=defer.succeed(user_info) ) @@ -84,7 +87,7 @@ class AuthTestCase(unittest.TestCase): self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN") def test_get_user_by_req_user_missing_token(self): - user_info = {"name": self.test_user, "token_id": "ditto"} + user_info = TokenLookupResult(user_id=self.test_user, token_id=5) self.store.get_user_by_access_token = Mock( return_value=defer.succeed(user_info) ) @@ -221,7 +224,7 @@ class AuthTestCase(unittest.TestCase): def test_get_user_from_macaroon(self): self.store.get_user_by_access_token = Mock( return_value=defer.succeed( - {"name": "@baldrick:matrix.org", "device_id": "device"} + TokenLookupResult(user_id="@baldrick:matrix.org", device_id="device") ) ) @@ -237,12 +240,11 @@ class AuthTestCase(unittest.TestCase): user_info = yield defer.ensureDeferred( self.auth.get_user_by_access_token(macaroon.serialize()) ) - user = user_info["user"] - self.assertEqual(UserID.from_string(user_id), user) + self.assertEqual(user_id, user_info.user_id) # TODO: device_id should come from the macaroon, but currently comes # from the db. 
- self.assertEqual(user_info["device_id"], "device") + self.assertEqual(user_info.device_id, "device") @defer.inlineCallbacks def test_get_guest_user_from_macaroon(self): @@ -264,10 +266,8 @@ class AuthTestCase(unittest.TestCase): user_info = yield defer.ensureDeferred( self.auth.get_user_by_access_token(serialized) ) - user = user_info["user"] - is_guest = user_info["is_guest"] - self.assertEqual(UserID.from_string(user_id), user) - self.assertTrue(is_guest) + self.assertEqual(user_id, user_info.user_id) + self.assertTrue(user_info.is_guest) self.store.get_user_by_id.assert_called_with(user_id) @defer.inlineCallbacks @@ -289,12 +289,9 @@ class AuthTestCase(unittest.TestCase): if token != tok: return defer.succeed(None) return defer.succeed( - { - "name": USER_ID, - "is_guest": False, - "token_id": 1234, - "device_id": "DEVICE", - } + TokenLookupResult( + user_id=USER_ID, is_guest=False, token_id=1234, device_id="DEVICE", + ) ) self.store.get_user_by_access_token = get_user diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index 1e1f30d790..fe504d0869 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -43,7 +43,7 @@ class TestRatelimiter(unittest.TestCase): def test_allowed_appservice_ratelimited_via_can_requester_do_action(self): appservice = ApplicationService( - None, "example.com", id="foo", rate_limited=True, + None, "example.com", id="foo", rate_limited=True, sender="@as:example.com", ) as_requester = create_requester("@user:example.com", app_service=appservice) @@ -68,7 +68,7 @@ class TestRatelimiter(unittest.TestCase): def test_allowed_appservice_via_can_requester_do_action(self): appservice = ApplicationService( - None, "example.com", id="foo", rate_limited=False, + None, "example.com", id="foo", rate_limited=False, sender="@as:example.com", ) as_requester = create_requester("@user:example.com", app_service=appservice) diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 236b608d58..0bffeb1150 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -31,6 +31,7 @@ class ApplicationServiceTestCase(unittest.TestCase): def setUp(self): self.service = ApplicationService( id="unique_identifier", + sender="@as:test", url="some_url", token="some_token", hostname="matrix.org", # only used by get_groups_for_user diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 4512c51311..875aaec2c6 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -289,7 +289,7 @@ class DehydrationTestCase(unittest.HomeserverTestCase): # make sure that our device ID has changed user_info = self.get_success(self.auth.get_user_by_access_token(access_token)) - self.assertEqual(user_info["device_id"], retrieved_device_id) + self.assertEqual(user_info.device_id, retrieved_device_id) # make sure the device has the display name that was set from the login res = self.get_success(self.handler.get_device(user_id, retrieved_device_id)) diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py index 9f6f21a6e2..2e0fea04af 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -46,7 +46,7 @@ class EventCreationTestCase(unittest.HomeserverTestCase): self.info = self.get_success( self.hs.get_datastore().get_user_by_access_token(self.access_token,) ) - self.token_id = self.info["token_id"] + self.token_id = self.info.token_id self.requester = create_requester(self.user_id, 
access_token_id=self.token_id) diff --git a/tests/push/test_email.py b/tests/push/test_email.py index d9993e6245..bcdcafa5a9 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -100,7 +100,7 @@ class EmailPusherTests(HomeserverTestCase): user_tuple = self.get_success( self.hs.get_datastore().get_user_by_access_token(self.access_token) ) - token_id = user_tuple["token_id"] + token_id = user_tuple.token_id self.pusher = self.get_success( self.hs.get_pusherpool().add_pusher( diff --git a/tests/push/test_http.py b/tests/push/test_http.py index b567868b02..8571924b29 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -69,7 +69,7 @@ class HTTPPusherTests(HomeserverTestCase): user_tuple = self.get_success( self.hs.get_datastore().get_user_by_access_token(access_token) ) - token_id = user_tuple["token_id"] + token_id = user_tuple.token_id self.get_success( self.hs.get_pusherpool().add_pusher( @@ -181,7 +181,7 @@ class HTTPPusherTests(HomeserverTestCase): user_tuple = self.get_success( self.hs.get_datastore().get_user_by_access_token(access_token) ) - token_id = user_tuple["token_id"] + token_id = user_tuple.token_id self.get_success( self.hs.get_pusherpool().add_pusher( @@ -297,7 +297,7 @@ class HTTPPusherTests(HomeserverTestCase): user_tuple = self.get_success( self.hs.get_datastore().get_user_by_access_token(access_token) ) - token_id = user_tuple["token_id"] + token_id = user_tuple.token_id self.get_success( self.hs.get_pusherpool().add_pusher( @@ -379,7 +379,7 @@ class HTTPPusherTests(HomeserverTestCase): user_tuple = self.get_success( self.hs.get_datastore().get_user_by_access_token(access_token) ) - token_id = user_tuple["token_id"] + token_id = user_tuple.token_id self.get_success( self.hs.get_pusherpool().add_pusher( @@ -452,7 +452,7 @@ class HTTPPusherTests(HomeserverTestCase): user_tuple = self.get_success( self.hs.get_datastore().get_user_by_access_token(access_token) ) - token_id = user_tuple["token_id"] + token_id = user_tuple.token_id self.get_success( self.hs.get_pusherpool().add_pusher( diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py index 2bdc6edbb1..67c27a089f 100644 --- a/tests/replication/test_pusher_shard.py +++ b/tests/replication/test_pusher_shard.py @@ -55,7 +55,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase): user_dict = self.get_success( self.hs.get_datastore().get_user_by_access_token(access_token) ) - token_id = user_dict["token_id"] + token_id = user_dict.token_id self.get_success( self.hs.get_pusherpool().add_pusher( diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 2fc3a60fc5..98c3887bbf 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -55,6 +55,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.hs.config.server_name, id="1234", namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, + sender="@as:test", ) self.hs.get_datastore().services_cache.append(appservice) diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 6b582771fe..c8c7a90e5d 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -69,11 +69,9 @@ class RegistrationStoreTestCase(unittest.TestCase): self.store.get_user_by_access_token(self.tokens[1]) ) - self.assertDictContainsSubset( - {"name": self.user_id, "device_id": self.device_id}, result - ) - - self.assertTrue("token_id" in 
result) + self.assertEqual(result.user_id, self.user_id) + self.assertEqual(result.device_id, self.device_id) + self.assertIsNotNone(result.token_id) @defer.inlineCallbacks def test_user_delete_access_tokens(self): @@ -105,7 +103,7 @@ class RegistrationStoreTestCase(unittest.TestCase): user = yield defer.ensureDeferred( self.store.get_user_by_access_token(self.tokens[0]) ) - self.assertEqual(self.user_id, user["name"]) + self.assertEqual(self.user_id, user.user_id) # now delete the rest yield defer.ensureDeferred(self.store.user_delete_access_tokens(self.user_id)) -- cgit 1.5.1 From 8b42a4eefda409dfef9a3d10c9f55a778cae46a1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 29 Oct 2020 12:53:57 -0400 Subject: Gracefully handle a pending logging connection during shutdown. (#8685) --- changelog.d/8607.feature | 1 + changelog.d/8607.misc | 1 - changelog.d/8685.feature | 1 + synapse/logging/_remote.py | 27 ++++++++++++++++++--------- tests/logging/test_remote_handler.py | 16 ++++++++++++++++ 5 files changed, 36 insertions(+), 10 deletions(-) create mode 100644 changelog.d/8607.feature delete mode 100644 changelog.d/8607.misc create mode 100644 changelog.d/8685.feature (limited to 'tests') diff --git a/changelog.d/8607.feature b/changelog.d/8607.feature new file mode 100644 index 0000000000..fef1eccb92 --- /dev/null +++ b/changelog.d/8607.feature @@ -0,0 +1 @@ +Support generating structured logs via the standard logging configuration. diff --git a/changelog.d/8607.misc b/changelog.d/8607.misc deleted file mode 100644 index 9e56551a34..0000000000 --- a/changelog.d/8607.misc +++ /dev/null @@ -1 +0,0 @@ -Re-organize the structured logging code to separate the TCP transport handling from the JSON formatting. diff --git a/changelog.d/8685.feature b/changelog.d/8685.feature new file mode 100644 index 0000000000..fef1eccb92 --- /dev/null +++ b/changelog.d/8685.feature @@ -0,0 +1 @@ +Support generating structured logs via the standard logging configuration. diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py index ba45424f02..fb937b3f28 100644 --- a/synapse/logging/_remote.py +++ b/synapse/logging/_remote.py @@ -26,7 +26,7 @@ from typing_extensions import Deque from zope.interface import implementer from twisted.application.internet import ClientService -from twisted.internet.defer import Deferred +from twisted.internet.defer import CancelledError, Deferred from twisted.internet.endpoints import ( HostnameEndpoint, TCP4ClientEndpoint, @@ -34,6 +34,7 @@ from twisted.internet.endpoints import ( ) from twisted.internet.interfaces import IPushProducer, ITransport from twisted.internet.protocol import Factory, Protocol +from twisted.python.failure import Failure logger = logging.getLogger(__name__) @@ -131,9 +132,11 @@ class RemoteHandler(logging.Handler): factory = Factory.forProtocol(Protocol) self._service = ClientService(endpoint, factory, clock=_reactor) self._service.startService() + self._stopping = False self._connect() def close(self): + self._stopping = True self._service.stopService() def _connect(self) -> None: @@ -146,17 +149,21 @@ class RemoteHandler(logging.Handler): self._connection_waiter = self._service.whenConnected(failAfterFailures=1) - @self._connection_waiter.addErrback - def fail(r): - r.printTraceback(file=sys.__stderr__) + def fail(failure: Failure) -> None: + # If the Deferred was cancelled (e.g. during shutdown) do not try to + # reconnect (this will cause an infinite loop of errors). 
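+ # (self._stopping is set by close() above, so a cancellation that arrives
+ # during shutdown is expected and can safely be ignored.)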
+ if failure.check(CancelledError) and self._stopping: + return + + # For a different error, print the traceback and re-connect. + failure.printTraceback(file=sys.__stderr__) self._connection_waiter = None self._connect() - @self._connection_waiter.addCallback - def writer(r): + def writer(result: Protocol) -> None: # We have a connection. If we already have a producer, and its # transport is the same, just trigger a resumeProducing. - if self._producer and r.transport is self._producer.transport: + if self._producer and result.transport is self._producer.transport: self._producer.resumeProducing() self._connection_waiter = None return @@ -167,12 +174,14 @@ class RemoteHandler(logging.Handler): # Make a new producer and start it. self._producer = LogProducer( - buffer=self._buffer, transport=r.transport, format=self.format, + buffer=self._buffer, transport=result.transport, format=self.format, ) - r.transport.registerProducer(self._producer, True) + result.transport.registerProducer(self._producer, True) self._producer.resumeProducing() self._connection_waiter = None + self._connection_waiter.addCallbacks(writer, fail) + def _handle_pressure(self) -> None: """ Handle backpressure by shedding records. diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py index 58ee1f2f3c..4bc27a1d7d 100644 --- a/tests/logging/test_remote_handler.py +++ b/tests/logging/test_remote_handler.py @@ -151,3 +151,19 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase): + ["warn %s" % (i,) for i in range(15, 20)], logs, ) + + def test_cancel_connection(self): + """ + Gracefully handle the connection being cancelled. + """ + handler = RemoteHandler( + "127.0.0.1", 9000, maximum_buffer=10, _reactor=self.reactor + ) + logger = self.get_logger(handler) + + # Send a message. + logger.info("Hello there, %s!", "wally") + + # Do not accept the connection and shut down. This causes the pending + # connection to be cancelled (and should not raise any exceptions). + handler.close() -- cgit 1.5.1 From ef2d62701573098c1e4aa25196043cc6fea7675c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Oct 2020 18:21:49 +0000 Subject: Fix unit tests (#8689) * Fix unit tests * Newsfile --- changelog.d/8689.feature | 1 + tests/rest/admin/test_user.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8689.feature (limited to 'tests') diff --git a/changelog.d/8689.feature b/changelog.d/8689.feature new file mode 100644 index 0000000000..ed8d926964 --- /dev/null +++ b/changelog.d/8689.feature @@ -0,0 +1 @@ +Add an admin API to allow server admins to list users' pushers. Contributed by @dklimpel. \ No newline at end of file diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 0b728e7ffc..7df32e5093 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1206,7 +1206,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase): user_tuple = self.get_success( self.store.get_user_by_access_token(other_user_token) ) - token_id = user_tuple["token_id"] + token_id = user_tuple.token_id self.get_success( self.hs.get_pusherpool().add_pusher( -- cgit 1.5.1 From 46f4be94b410776ef3f922af2f437eb17631d2fa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Oct 2020 10:55:24 +0000 Subject: Fix race for concurrent downloads of remote media.
(#8682) Fixes #6755 --- changelog.d/8682.bugfix | 1 + synapse/rest/media/v1/media_repository.py | 165 +++++++----- synapse/rest/media/v1/media_storage.py | 30 ++- synapse/storage/databases/main/media_repository.py | 27 ++ tests/replication/test_multi_media_repo.py | 277 +++++++++++++++++++++ tests/server.py | 2 +- 6 files changed, 431 insertions(+), 71 deletions(-) create mode 100644 changelog.d/8682.bugfix create mode 100644 tests/replication/test_multi_media_repo.py (limited to 'tests') diff --git a/changelog.d/8682.bugfix b/changelog.d/8682.bugfix new file mode 100644 index 0000000000..e61276aa05 --- /dev/null +++ b/changelog.d/8682.bugfix @@ -0,0 +1 @@ +Fix exception during handling multiple concurrent requests for remote media when using multiple media repositories. diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 5cce7237a0..9cac74ebd8 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -305,15 +305,12 @@ class MediaRepository: # file_id is the ID we use to track the file locally. If we've already # seen the file then reuse the existing ID, otherwise genereate a new # one. - if media_info: - file_id = media_info["filesystem_id"] - else: - file_id = random_string(24) - - file_info = FileInfo(server_name, file_id) # If we have an entry in the DB, try and look for it if media_info: + file_id = media_info["filesystem_id"] + file_info = FileInfo(server_name, file_id) + if media_info["quarantined_by"]: logger.info("Media is quarantined") raise NotFoundError() @@ -324,14 +321,34 @@ class MediaRepository: # Failed to find the file anywhere, lets download it. - media_info = await self._download_remote_file(server_name, media_id, file_id) + try: + media_info = await self._download_remote_file(server_name, media_id,) + except SynapseError: + raise + except Exception as e: + # An exception may be because we downloaded media in another + # process, so let's check if we magically have the media. + media_info = await self.store.get_cached_remote_media(server_name, media_id) + if not media_info: + raise e + + file_id = media_info["filesystem_id"] + file_info = FileInfo(server_name, file_id) + + # We generate thumbnails even if another process downloaded the media + # as a) it's conceivable that the other download request dies before it + # generates thumbnails, but mainly b) we want to be sure the thumbnails + # have finished being generated before responding to the client, + # otherwise they'll request thumbnails and get a 404 if they're not + # ready yet. + await self._generate_thumbnails( + server_name, media_id, file_id, media_info["media_type"] + ) responder = await self.media_storage.fetch_media(file_info) return responder, media_info - async def _download_remote_file( - self, server_name: str, media_id: str, file_id: str - ) -> dict: + async def _download_remote_file(self, server_name: str, media_id: str,) -> dict: """Attempt to download the remote file from the given server name, using the given file_id as the local id. @@ -346,6 +363,8 @@ class MediaRepository: The media info of the file. 
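+ The local file_id under which the media is stored is now generated inside
+ this function (see random_string(24) below) rather than being passed in by
+ the caller.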
""" + file_id = random_string(24) + file_info = FileInfo(server_name=server_name, file_id=file_id) with self.media_storage.store_into_file(file_info) as (f, fname, finish): @@ -401,22 +420,32 @@ class MediaRepository: await finish() - media_type = headers[b"Content-Type"][0].decode("ascii") - upload_name = get_filename_from_headers(headers) - time_now_ms = self.clock.time_msec() + media_type = headers[b"Content-Type"][0].decode("ascii") + upload_name = get_filename_from_headers(headers) + time_now_ms = self.clock.time_msec() + + # Multiple remote media download requests can race (when using + # multiple media repos), so this may throw a violation constraint + # exception. If it does we'll delete the newly downloaded file from + # disk (as we're in the ctx manager). + # + # However: we've already called `finish()` so we may have also + # written to the storage providers. This is preferable to the + # alternative where we call `finish()` *after* this, where we could + # end up having an entry in the DB but fail to write the files to + # the storage providers. + await self.store.store_cached_remote_media( + origin=server_name, + media_id=media_id, + media_type=media_type, + time_now_ms=self.clock.time_msec(), + upload_name=upload_name, + media_length=length, + filesystem_id=file_id, + ) logger.info("Stored remote media in file %r", fname) - await self.store.store_cached_remote_media( - origin=server_name, - media_id=media_id, - media_type=media_type, - time_now_ms=self.clock.time_msec(), - upload_name=upload_name, - media_length=length, - filesystem_id=file_id, - ) - media_info = { "media_type": media_type, "media_length": length, @@ -425,8 +454,6 @@ class MediaRepository: "filesystem_id": file_id, } - await self._generate_thumbnails(server_name, media_id, file_id, media_type) - return media_info def _get_thumbnail_requirements(self, media_type): @@ -692,42 +719,60 @@ class MediaRepository: if not t_byte_source: continue - try: - file_info = FileInfo( - server_name=server_name, - file_id=file_id, - thumbnail=True, - thumbnail_width=t_width, - thumbnail_height=t_height, - thumbnail_method=t_method, - thumbnail_type=t_type, - url_cache=url_cache, - ) - - output_path = await self.media_storage.store_file( - t_byte_source, file_info - ) - finally: - t_byte_source.close() - - t_len = os.path.getsize(output_path) + file_info = FileInfo( + server_name=server_name, + file_id=file_id, + thumbnail=True, + thumbnail_width=t_width, + thumbnail_height=t_height, + thumbnail_method=t_method, + thumbnail_type=t_type, + url_cache=url_cache, + ) - # Write to database - if server_name: - await self.store.store_remote_media_thumbnail( - server_name, - media_id, - file_id, - t_width, - t_height, - t_type, - t_method, - t_len, - ) - else: - await self.store.store_local_thumbnail( - media_id, t_width, t_height, t_type, t_method, t_len - ) + with self.media_storage.store_into_file(file_info) as (f, fname, finish): + try: + await self.media_storage.write_to_file(t_byte_source, f) + await finish() + finally: + t_byte_source.close() + + t_len = os.path.getsize(fname) + + # Write to database + if server_name: + # Multiple remote media download requests can race (when + # using multiple media repos), so this may throw a violation + # constraint exception. If it does we'll delete the newly + # generated thumbnail from disk (as we're in the ctx + # manager). + # + # However: we've already called `finish()` so we may have + # also written to the storage providers. 
This is preferable + # to the alternative where we call `finish()` *after* this, + # where we could end up having an entry in the DB but fail + # to write the files to the storage providers. + try: + await self.store.store_remote_media_thumbnail( + server_name, + media_id, + file_id, + t_width, + t_height, + t_type, + t_method, + t_len, + ) + except Exception as e: + thumbnail_exists = await self.store.get_remote_media_thumbnail( + server_name, media_id, t_width, t_height, t_type, + ) + if not thumbnail_exists: + raise e + else: + await self.store.store_local_thumbnail( + media_id, t_width, t_height, t_type, t_method, t_len + ) return {"width": m_width, "height": m_height} diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index a9586fb0b7..268e0c8f50 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -52,6 +52,7 @@ class MediaStorage: storage_providers: Sequence["StorageProviderWrapper"], ): self.hs = hs + self.reactor = hs.get_reactor() self.local_media_directory = local_media_directory self.filepaths = filepaths self.storage_providers = storage_providers @@ -70,13 +71,16 @@ class MediaStorage: with self.store_into_file(file_info) as (f, fname, finish_cb): # Write to the main repository - await defer_to_thread( - self.hs.get_reactor(), _write_file_synchronously, source, f - ) + await self.write_to_file(source, f) await finish_cb() return fname + async def write_to_file(self, source: IO, output: IO): + """Asynchronously write the `source` to `output`. + """ + await defer_to_thread(self.reactor, _write_file_synchronously, source, output) + @contextlib.contextmanager def store_into_file(self, file_info: FileInfo): """Context manager used to get a file like object to write into, as @@ -112,14 +116,20 @@ class MediaStorage: finished_called = [False] - async def finish(): - for provider in self.storage_providers: - await provider.store_file(path, file_info) - - finished_called[0] = True - try: with open(fname, "wb") as f: + + async def finish(): + # Ensure that all writes have been flushed and close the + # file. + f.flush() + f.close() + + for provider in self.storage_providers: + await provider.store_file(path, file_info) + + finished_called[0] = True + yield f, fname, finish except Exception: try: @@ -210,7 +220,7 @@ class MediaStorage: if res: with res: consumer = BackgroundFileConsumer( - open(local_path, "wb"), self.hs.get_reactor() + open(local_path, "wb"), self.reactor ) await res.write_to_consumer(consumer) await consumer.wait() diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index daf57675d8..4b2f224718 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -452,6 +452,33 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): desc="get_remote_media_thumbnails", ) + async def get_remote_media_thumbnail( + self, origin: str, media_id: str, t_width: int, t_height: int, t_type: str, + ) -> Optional[Dict[str, Any]]: + """Fetch the thumbnail info of given width, height and type. 
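+ Returns:
+ None if no such thumbnail is stored, otherwise a dict of the thumbnail's
+ metadata (dimensions, method, type, length and filesystem_id).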
+ """ + + return await self.db_pool.simple_select_one( + table="remote_media_cache_thumbnails", + keyvalues={ + "media_origin": origin, + "media_id": media_id, + "thumbnail_width": t_width, + "thumbnail_height": t_height, + "thumbnail_type": t_type, + }, + retcols=( + "thumbnail_width", + "thumbnail_height", + "thumbnail_method", + "thumbnail_type", + "thumbnail_length", + "filesystem_id", + ), + allow_none=True, + desc="get_remote_media_thumbnail", + ) + async def store_remote_media_thumbnail( self, origin, diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py new file mode 100644 index 0000000000..77c261dbf7 --- /dev/null +++ b/tests/replication/test_multi_media_repo.py @@ -0,0 +1,277 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +import os +from binascii import unhexlify +from typing import Tuple + +from twisted.internet.protocol import Factory +from twisted.protocols.tls import TLSMemoryBIOFactory +from twisted.web.http import HTTPChannel +from twisted.web.server import Request + +from synapse.rest import admin +from synapse.rest.client.v1 import login +from synapse.server import HomeServer + +from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file +from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.server import FakeChannel, FakeTransport + +logger = logging.getLogger(__name__) + +test_server_connection_factory = None + + +class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): + """Checks running multiple media repos work correctly. + """ + + servlets = [ + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.user_id = self.register_user("user", "pass") + self.access_token = self.login("user", "pass") + + self.reactor.lookups["example.com"] = "127.0.0.2" + + def default_config(self): + conf = super().default_config() + conf["federation_custom_ca_list"] = [get_test_ca_cert_file()] + return conf + + def _get_media_req( + self, hs: HomeServer, target: str, media_id: str + ) -> Tuple[FakeChannel, Request]: + """Request some remote media from the given HS by calling the download + API. + + This then triggers an outbound request from the HS to the target. + + Returns: + The channel for the *client* request and the *outbound* request for + the media which the caller should respond to. 
+ """ + + request, channel = self.make_request( + "GET", + "/{}/{}".format(target, media_id), + shorthand=False, + access_token=self.access_token, + ) + request.render(hs.get_media_repository_resource().children[b"download"]) + self.pump() + + clients = self.reactor.tcpClients + self.assertGreaterEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients.pop() + + # build the test server + server_tls_protocol = _build_test_server(get_connection_factory()) + + # now, tell the client protocol factory to build the client protocol (it will be a + # _WrappingProtocol, around a TLSMemoryBIOProtocol, around an + # HTTP11ClientProtocol) and wire the output of said protocol up to the server via + # a FakeTransport. + # + # Normally this would be done by the TCP socket code in Twisted, but we are + # stubbing that out here. + client_protocol = client_factory.buildProtocol(None) + client_protocol.makeConnection( + FakeTransport(server_tls_protocol, self.reactor, client_protocol) + ) + + # tell the server tls protocol to send its stuff back to the client, too + server_tls_protocol.makeConnection( + FakeTransport(client_protocol, self.reactor, server_tls_protocol) + ) + + # fish the test server back out of the server-side TLS protocol. + http_server = server_tls_protocol.wrappedProtocol + + # give the reactor a pump to get the TLS juices flowing. + self.reactor.pump((0.1,)) + + self.assertEqual(len(http_server.requests), 1) + request = http_server.requests[0] + + self.assertEqual(request.method, b"GET") + self.assertEqual( + request.path, + "/_matrix/media/r0/download/{}/{}".format(target, media_id).encode("utf-8"), + ) + self.assertEqual( + request.requestHeaders.getRawHeaders(b"host"), [target.encode("utf-8")] + ) + + return channel, request + + def test_basic(self): + """Test basic fetching of remote media from a single worker. + """ + hs1 = self.make_worker_hs("synapse.app.generic_worker") + + channel, request = self._get_media_req(hs1, "example.com:443", "ABC123") + + request.setResponseCode(200) + request.responseHeaders.setRawHeaders(b"Content-Type", [b"text/plain"]) + request.write(b"Hello!") + request.finish() + + self.pump(0.1) + + self.assertEqual(channel.code, 200) + self.assertEqual(channel.result["body"], b"Hello!") + + def test_download_simple_file_race(self): + """Test that fetching remote media from two different processes at the + same time works. + """ + hs1 = self.make_worker_hs("synapse.app.generic_worker") + hs2 = self.make_worker_hs("synapse.app.generic_worker") + + start_count = self._count_remote_media() + + # Make two requests without responding to the outbound media requests. + channel1, request1 = self._get_media_req(hs1, "example.com:443", "ABC123") + channel2, request2 = self._get_media_req(hs2, "example.com:443", "ABC123") + + # Respond to the first outbound media request and check that the client + # request is successful + request1.setResponseCode(200) + request1.responseHeaders.setRawHeaders(b"Content-Type", [b"text/plain"]) + request1.write(b"Hello!") + request1.finish() + + self.pump(0.1) + + self.assertEqual(channel1.code, 200, channel1.result["body"]) + self.assertEqual(channel1.result["body"], b"Hello!") + + # Now respond to the second with the same content. 
+ request2.setResponseCode(200) + request2.responseHeaders.setRawHeaders(b"Content-Type", [b"text/plain"]) + request2.write(b"Hello!") + request2.finish() + + self.pump(0.1) + + self.assertEqual(channel2.code, 200, channel2.result["body"]) + self.assertEqual(channel2.result["body"], b"Hello!") + + # We expect only one new file to have been persisted. + self.assertEqual(start_count + 1, self._count_remote_media()) + + def test_download_image_race(self): + """Test that fetching remote *images* from two different processes at + the same time works. + + This checks that races generating thumbnails are handled correctly. + """ + hs1 = self.make_worker_hs("synapse.app.generic_worker") + hs2 = self.make_worker_hs("synapse.app.generic_worker") + + start_count = self._count_remote_thumbnails() + + channel1, request1 = self._get_media_req(hs1, "example.com:443", "PIC1") + channel2, request2 = self._get_media_req(hs2, "example.com:443", "PIC1") + + png_data = unhexlify( + b"89504e470d0a1a0a0000000d4948445200000001000000010806" + b"0000001f15c4890000000a49444154789c63000100000500010d" + b"0a2db40000000049454e44ae426082" + ) + + request1.setResponseCode(200) + request1.responseHeaders.setRawHeaders(b"Content-Type", [b"image/png"]) + request1.write(png_data) + request1.finish() + + self.pump(0.1) + + self.assertEqual(channel1.code, 200, channel1.result["body"]) + self.assertEqual(channel1.result["body"], png_data) + + request2.setResponseCode(200) + request2.responseHeaders.setRawHeaders(b"Content-Type", [b"image/png"]) + request2.write(png_data) + request2.finish() + + self.pump(0.1) + + self.assertEqual(channel2.code, 200, channel2.result["body"]) + self.assertEqual(channel2.result["body"], png_data) + + # We expect only three new thumbnails to have been persisted. + self.assertEqual(start_count + 3, self._count_remote_thumbnails()) + + def _count_remote_media(self) -> int: + """Count the number of files in our remote media directory. + """ + path = os.path.join( + self.hs.get_media_repository().primary_base_path, "remote_content" + ) + return sum(len(files) for _, _, files in os.walk(path)) + + def _count_remote_thumbnails(self) -> int: + """Count the number of files in our remote thumbnails directory. + """ + path = os.path.join( + self.hs.get_media_repository().primary_base_path, "remote_thumbnail" + ) + return sum(len(files) for _, _, files in os.walk(path)) + + +def get_connection_factory(): + # this needs to happen once, but not until we are ready to run the first test + global test_server_connection_factory + if test_server_connection_factory is None: + test_server_connection_factory = TestServerTLSConnectionFactory( + sanlist=[b"DNS:example.com"] + ) + return test_server_connection_factory + + +def _build_test_server(connection_creator): + """Construct a test server + + This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol + + Args: + connection_creator (IOpenSSLServerConnectionCreator): thing to build + SSL connections + sanlist (list[bytes]): list of the SAN entries for the cert returned + by the server + + Returns: + TLSMemoryBIOProtocol + """ + server_factory = Factory.forProtocol(HTTPChannel) + # Request.finish expects the factory to have a 'log' method. 
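+ # (The _log_request helper defined below supplies it.)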
+ server_factory.log = _log_request + + server_tls_factory = TLSMemoryBIOFactory( + connection_creator, isClient=False, wrappedFactory=server_factory + ) + + return server_tls_factory.buildProtocol(None) + + +def _log_request(request): + """Implements Factory.log, which is expected by Request.finish""" + logger.info("Completed request %s", request) diff --git a/tests/server.py b/tests/server.py index b97003fa5a..3dd2cfc072 100644 --- a/tests/server.py +++ b/tests/server.py @@ -46,7 +46,7 @@ class FakeChannel: site = attr.ib(type=Site) _reactor = attr.ib() - result = attr.ib(default=attr.Factory(dict)) + result = attr.ib(type=dict, default=attr.Factory(dict)) _producer = None @property -- cgit 1.5.1 From fd7c7434457e215d73873748604f430c52586498 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 30 Oct 2020 07:15:07 -0400 Subject: Fail test cases if they fail to await all awaitables (#8690) --- changelog.d/8690.misc | 1 + tests/test_utils/__init__.py | 34 +++++++++++++++++++++++++++++++++- tests/unittest.py | 6 +++++- 3 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8690.misc (limited to 'tests') diff --git a/changelog.d/8690.misc b/changelog.d/8690.misc new file mode 100644 index 0000000000..0f38ba1f5d --- /dev/null +++ b/changelog.d/8690.misc @@ -0,0 +1 @@ +Fail tests if they do not await coroutines. diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index a298cc0fd3..d232b72264 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -17,8 +17,10 @@ """ Utilities for running the unit tests """ +import sys +import warnings from asyncio import Future -from typing import Any, Awaitable, TypeVar +from typing import Any, Awaitable, Callable, TypeVar TV = TypeVar("TV") @@ -48,3 +50,33 @@ def make_awaitable(result: Any) -> Awaitable[Any]: future = Future() # type: ignore future.set_result(result) return future + + +def setup_awaitable_errors() -> Callable[[], None]: + """ + Convert warnings from non-awaited coroutines into errors. + """ + warnings.simplefilter("error", RuntimeWarning) + + # unraisablehook was added in Python 3.8. + if not hasattr(sys, "unraisablehook"): + return lambda: None + + # State shared between unraisablehook and check_for_unraisable_exceptions. + unraisable_exceptions = [] + orig_unraisablehook = sys.unraisablehook # type: ignore + + def unraisablehook(unraisable): + unraisable_exceptions.append(unraisable.exc_value) + + def cleanup(): + """ + A method to be used as a clean-up that fails a test case if there are any new unraisable exceptions. + """ + sys.unraisablehook = orig_unraisablehook # type: ignore + if unraisable_exceptions: + raise unraisable_exceptions.pop() + + sys.unraisablehook = unraisablehook # type: ignore + + return cleanup diff --git a/tests/unittest.py b/tests/unittest.py index 257f465897..08cf9b10c5 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -54,7 +54,7 @@ from tests.server import ( render, setup_test_homeserver, ) -from tests.test_utils import event_injection +from tests.test_utils import event_injection, setup_awaitable_errors from tests.test_utils.logging_setup import setup_logging from tests.utils import default_config, setupdb @@ -119,6 +119,10 @@ class TestCase(unittest.TestCase): logging.getLogger().setLevel(level) + # Trial messes with the warnings configuration, thus this has to be + # done in the context of an individual TestCase.
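+ # setup_awaitable_errors() installs the hook and returns a cleanup callback
+ # which re-raises anything it captured, failing the test if a coroutine was
+ # never awaited.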
+ self.addCleanup(setup_awaitable_errors()) + + return orig() @around(self) -- cgit 1.5.1 From cbc82aa09faa59acc20865e8b5c36561acb9a570 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 30 Oct 2020 11:43:17 +0000 Subject: Implement and use an @lru_cache decorator (#8595) We don't always need the full power of a DeferredCache. --- changelog.d/8595.misc | 1 + synapse/push/bulk_push_rule_evaluator.py | 37 +++-- synapse/util/caches/descriptors.py | 235 ++++++++++++++++++++++++------- tests/util/caches/test_descriptors.py | 60 +++++++- 4 files changed, 272 insertions(+), 61 deletions(-) create mode 100644 changelog.d/8595.misc (limited to 'tests') diff --git a/changelog.d/8595.misc b/changelog.d/8595.misc new file mode 100644 index 0000000000..24fab65cda --- /dev/null +++ b/changelog.d/8595.misc @@ -0,0 +1 @@ +Implement and use an @lru_cache decorator. diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index d9b5478b53..82a72dc34f 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -15,8 +15,8 @@ # limitations under the License. import logging -from collections import namedtuple +import attr from prometheus_client import Counter from synapse.api.constants import EventTypes, Membership, RelationTypes @@ -26,7 +26,8 @@ from synapse.events.snapshot import EventContext from synapse.state import POWER_KEY from synapse.util.async_helpers import Linearizer from synapse.util.caches import register_cache -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import lru_cache +from synapse.util.caches.lrucache import LruCache from .push_rule_evaluator import PushRuleEvaluatorForEvent @@ -120,7 +121,7 @@ class BulkPushRuleEvaluator: dict of user_id -> push_rules """ room_id = event.room_id - rules_for_room = await self._get_rules_for_room(room_id) + rules_for_room = self._get_rules_for_room(room_id) rules_by_user = await rules_for_room.get_rules(event, context) @@ -138,7 +139,7 @@ class BulkPushRuleEvaluator: return rules_by_user - @cached() + @lru_cache() def _get_rules_for_room(self, room_id): """Get the current RulesForRoom object for the given room id @@ -275,12 +276,14 @@ class RulesForRoom: the entire cache for the room. """ - def __init__(self, hs, room_id, rules_for_room_cache, room_push_rule_cache_metrics): + def __init__( + self, hs, room_id, rules_for_room_cache: LruCache, room_push_rule_cache_metrics + ): """ Args: hs (HomeServer) room_id (str) - rules_for_room_cache(Cache): The cache object that caches these + rules_for_room_cache: The cache object that caches these RoomsForUser objects. room_push_rule_cache_metrics (CacheMetric) """ @@ -489,13 +492,21 @@ class RulesForRoom: self.state_group = state_group -class _Invalidation(namedtuple("_Invalidation", ("cache", "room_id"))): - # We rely on _CacheContext implementing __eq__ and __hash__ sensibly, - # which namedtuple does for us (i.e. two _CacheContext are the same if - # their caches and keys match). This is important in particular to - # dedupe when we add callbacks to lru cache nodes, otherwise the number - # of callbacks would grow. +@attr.attrs(slots=True, frozen=True) +class _Invalidation: + # _Invalidation is passed as an `on_invalidate` callback to bulk_get_push_rules, + # which means that it is stored on the bulk_get_push_rules cache entry.
In order + # to ensure that we don't accumulate lots of redundant callbacks on the cache entry, + # we need to ensure that two _Invalidation objects are "equal" if they refer to the + # same `cache` and `room_id`. + # + # attrs provides suitable __hash__ and __eq__ methods, provided we remember to + # set `frozen=True`. + + cache = attr.ib(type=LruCache) + room_id = attr.ib(type=str) + def __call__(self): - rules = self.cache.get_immediate(self.room_id, None, update_metrics=False) + rules = self.cache.get(self.room_id, None, update_metrics=False) if rules: rules.invalidate_all() diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 5d7fffee66..a924140cdf 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -13,10 +13,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import enum import functools import inspect import logging -from typing import Any, Callable, Generic, Optional, Tuple, TypeVar, Union, cast +from typing import ( + Any, + Callable, + Generic, + Iterable, + Mapping, + Optional, + Sequence, + Tuple, + TypeVar, + Union, + cast, +) from weakref import WeakValueDictionary from twisted.internet import defer @@ -24,6 +37,7 @@ from twisted.internet import defer from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.util import unwrapFirstError from synapse.util.caches.deferred_cache import DeferredCache +from synapse.util.caches.lrucache import LruCache logger = logging.getLogger(__name__) @@ -48,7 +62,7 @@ class _CachedFunction(Generic[F]): class _CacheDescriptorBase: - def __init__(self, orig: _CachedFunction, num_args, cache_context=False): + def __init__(self, orig: Callable[..., Any], num_args, cache_context=False): self.orig = orig arg_spec = inspect.getfullargspec(orig) @@ -97,8 +111,107 @@ class _CacheDescriptorBase: self.add_cache_context = cache_context + self.cache_key_builder = get_cache_key_builder( + self.arg_names, self.arg_defaults + ) + + +class _LruCachedFunction(Generic[F]): + cache = None # type: LruCache[CacheKey, Any] + __call__ = None # type: F + + +def lru_cache( + max_entries: int = 1000, cache_context: bool = False, +) -> Callable[[F], _LruCachedFunction[F]]: + """A method decorator that applies a memoizing cache around the function. + + This is more-or-less a drop-in equivalent to functools.lru_cache, although note + that the signature is slightly different. + + The main differences with functools.lru_cache are: + (a) the size of the cache can be controlled via the cache_factor mechanism + (b) the wrapped function can request a "cache_context" which provides a + callback mechanism to indicate that the result is no longer valid + (c) prometheus metrics are exposed automatically. + + The function should take zero or more arguments, which are used as the key for the + cache. Single-argument functions use that argument as the cache key; otherwise the + arguments are built into a tuple. + + Cached functions can be "chained" (i.e. a cached function can call other cached + functions and get appropriately invalidated when the caches they call are + invalidated) by adding a special "cache_context" argument to the function + and passing that as a kwarg to all caches called.
For example: + + @lru_cache(cache_context=True) + def foo(self, key, cache_context): + r1 = self.bar1(key, on_invalidate=cache_context.invalidate) + r2 = self.bar2(key, on_invalidate=cache_context.invalidate) + return r1 + r2 + + The wrapped function also has a 'cache' property which offers direct access to the + underlying LruCache. + """ + + def func(orig: F) -> _LruCachedFunction[F]: + desc = LruCacheDescriptor( + orig, max_entries=max_entries, cache_context=cache_context, + ) + return cast(_LruCachedFunction[F], desc) + + return func + + +class LruCacheDescriptor(_CacheDescriptorBase): + """Helper for @lru_cache""" + + class _Sentinel(enum.Enum): + sentinel = object() + + def __init__( + self, orig, max_entries: int = 1000, cache_context: bool = False, + ): + super().__init__(orig, num_args=None, cache_context=cache_context) + self.max_entries = max_entries + + def __get__(self, obj, owner): + cache = LruCache( + cache_name=self.orig.__name__, max_size=self.max_entries, + ) # type: LruCache[CacheKey, Any] + + get_cache_key = self.cache_key_builder + sentinel = LruCacheDescriptor._Sentinel.sentinel + + @functools.wraps(self.orig) + def _wrapped(*args, **kwargs): + invalidate_callback = kwargs.pop("on_invalidate", None) + callbacks = (invalidate_callback,) if invalidate_callback else () + + cache_key = get_cache_key(args, kwargs) -class CacheDescriptor(_CacheDescriptorBase): + ret = cache.get(cache_key, default=sentinel, callbacks=callbacks) + if ret != sentinel: + return ret + + # Add our own `cache_context` to argument list if the wrapped function + # has asked for one + if self.add_cache_context: + kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key) + + ret2 = self.orig(obj, *args, **kwargs) + cache.set(cache_key, ret2, callbacks=callbacks) + + return ret2 + + wrapped = cast(_CachedFunction, _wrapped) + wrapped.cache = cache + obj.__dict__[self.orig.__name__] = wrapped + + return wrapped + + +class DeferredCacheDescriptor(_CacheDescriptorBase): """ A method decorator that applies a memoizing cache around the function. This caches deferreds, rather than the results themselves. Deferreds that @@ -141,7 +254,6 @@ class CacheDescriptor(_CacheDescriptorBase): cache_context=False, iterable=False, ): - super().__init__(orig, num_args=num_args, cache_context=cache_context) self.max_entries = max_entries @@ -157,41 +269,7 @@ class CacheDescriptor(_CacheDescriptorBase): iterable=self.iterable, ) # type: DeferredCache[CacheKey, Any] - def get_cache_key_gen(args, kwargs): - """Given some args/kwargs return a generator that resolves into - the cache_key. - - We loop through each arg name, looking up if its in the `kwargs`, - otherwise using the next argument in `args`. If there are no more - args then we try looking the arg name up in the defaults - """ - pos = 0 - for nm in self.arg_names: - if nm in kwargs: - yield kwargs[nm] - elif pos < len(args): - yield args[pos] - pos += 1 - else: - yield self.arg_defaults[nm] - - # By default our cache key is a tuple, but if there is only one item - # then don't bother wrapping in a tuple. This is to save memory. 
- if self.num_args == 1: - nm = self.arg_names[0] - - def get_cache_key(args, kwargs): - if nm in kwargs: - return kwargs[nm] - elif len(args): - return args[0] - else: - return self.arg_defaults[nm] - - else: - - def get_cache_key(args, kwargs): - return tuple(get_cache_key_gen(args, kwargs)) + get_cache_key = self.cache_key_builder @functools.wraps(self.orig) def _wrapped(*args, **kwargs): @@ -223,7 +301,6 @@ class CacheDescriptor(_CacheDescriptorBase): wrapped.prefill = lambda key, val: cache.prefill(key[0], val) else: wrapped.invalidate = cache.invalidate - wrapped.invalidate_all = cache.invalidate_all wrapped.invalidate_many = cache.invalidate_many wrapped.prefill = cache.prefill @@ -236,7 +313,7 @@ class CacheDescriptor(_CacheDescriptorBase): return wrapped -class CacheListDescriptor(_CacheDescriptorBase): +class DeferredCacheListDescriptor(_CacheDescriptorBase): """Wraps an existing cache to support bulk fetching of keys. Given a list of keys it looks in the cache to find any hits, then passes @@ -382,11 +459,13 @@ class _CacheContext: on a lower level. """ + Cache = Union[DeferredCache, LruCache] + _cache_context_objects = ( WeakValueDictionary() - ) # type: WeakValueDictionary[Tuple[DeferredCache, CacheKey], _CacheContext] + ) # type: WeakValueDictionary[Tuple[_CacheContext.Cache, CacheKey], _CacheContext] - def __init__(self, cache, cache_key): # type: (DeferredCache, CacheKey) -> None + def __init__(self, cache: "_CacheContext.Cache", cache_key: CacheKey) -> None: self._cache = cache self._cache_key = cache_key @@ -396,8 +475,8 @@ class _CacheContext: @classmethod def get_instance( - cls, cache, cache_key - ): # type: (DeferredCache, CacheKey) -> _CacheContext + cls, cache: "_CacheContext.Cache", cache_key: CacheKey + ) -> "_CacheContext": """Returns an instance constructed with the given arguments. A new instance is only created if none already exists. @@ -418,7 +497,7 @@ def cached( cache_context: bool = False, iterable: bool = False, ) -> Callable[[F], _CachedFunction[F]]: - func = lambda orig: CacheDescriptor( + func = lambda orig: DeferredCacheDescriptor( orig, max_entries=max_entries, num_args=num_args, @@ -460,7 +539,7 @@ def cachedList( def batch_do_something(self, first_arg, second_args): ... """ - func = lambda orig: CacheListDescriptor( + func = lambda orig: DeferredCacheListDescriptor( orig, cached_method_name=cached_method_name, list_name=list_name, @@ -468,3 +547,65 @@ def cachedList( ) return cast(Callable[[F], _CachedFunction[F]], func) + + +def get_cache_key_builder( + param_names: Sequence[str], param_defaults: Mapping[str, Any] +) -> Callable[[Sequence[Any], Mapping[str, Any]], CacheKey]: + """Construct a function which will build cache keys suitable for a cached function + + Args: + param_names: list of formal parameter names for the cached function + param_defaults: a mapping from parameter name to default value for that param + + Returns: + A function which will take an (args, kwargs) pair and return a cache key + """ + + # By default our cache key is a tuple, but if there is only one item + # then don't bother wrapping in a tuple. This is to save memory. 
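+ # (e.g. a one-parameter function called as fn("key") is cached under "key"
+ # itself, while a two-parameter call fn("key", 2) is cached under the
+ # tuple ("key", 2).)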
+ + if len(param_names) == 1: + nm = param_names[0] + + def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey: + if nm in kwargs: + return kwargs[nm] + elif len(args): + return args[0] + else: + return param_defaults[nm] + + else: + + def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey: + return tuple(_get_cache_key_gen(param_names, param_defaults, args, kwargs)) + + return get_cache_key + + +def _get_cache_key_gen( + param_names: Iterable[str], + param_defaults: Mapping[str, Any], + args: Sequence[Any], + kwargs: Mapping[str, Any], +) -> Iterable[Any]: + """Given some args/kwargs return a generator that resolves into + the cache_key. + + This is essentially the same operation as `inspect.getcallargs`, but optimised so + that we don't need to inspect the target function for each call. + """ + + # We loop through each arg name, looking up if its in the `kwargs`, + # otherwise using the next argument in `args`. If there are no more + # args then we try looking the arg name up in the defaults. + pos = 0 + for nm in param_names: + if nm in kwargs: + yield kwargs[nm] + elif pos < len(args): + yield args[pos] + pos += 1 + else: + yield param_defaults[nm] diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 2ad08f541b..cf1e3203a4 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -29,13 +29,46 @@ from synapse.logging.context import ( make_deferred_yieldable, ) from synapse.util.caches import descriptors -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import cached, lru_cache from tests import unittest +from tests.test_utils import get_awaitable_result logger = logging.getLogger(__name__) +class LruCacheDecoratorTestCase(unittest.TestCase): + def test_base(self): + class Cls: + def __init__(self): + self.mock = mock.Mock() + + @lru_cache() + def fn(self, arg1, arg2): + return self.mock(arg1, arg2) + + obj = Cls() + obj.mock.return_value = "fish" + r = obj.fn(1, 2) + self.assertEqual(r, "fish") + obj.mock.assert_called_once_with(1, 2) + obj.mock.reset_mock() + + # a call with different params should call the mock again + obj.mock.return_value = "chips" + r = obj.fn(1, 3) + self.assertEqual(r, "chips") + obj.mock.assert_called_once_with(1, 3) + obj.mock.reset_mock() + + # the two values should now be cached + r = obj.fn(1, 2) + self.assertEqual(r, "fish") + r = obj.fn(1, 3) + self.assertEqual(r, "chips") + obj.mock.assert_not_called() + + def run_on_reactor(): d = defer.Deferred() reactor.callLater(0, d.callback, 0) @@ -362,6 +395,31 @@ class DescriptorTestCase(unittest.TestCase): d = obj.fn(1) self.failureResultOf(d, SynapseError) + def test_invalidate_cascade(self): + """Invalidations should cascade up through cache contexts""" + + class Cls: + @cached(cache_context=True) + async def func1(self, key, cache_context): + return await self.func2(key, on_invalidate=cache_context.invalidate) + + @cached(cache_context=True) + async def func2(self, key, cache_context): + return self.func3(key, on_invalidate=cache_context.invalidate) + + @lru_cache(cache_context=True) + def func3(self, key, cache_context): + self.invalidate = cache_context.invalidate + return 42 + + obj = Cls() + + top_invalidate = mock.Mock() + r = get_awaitable_result(obj.func1("k1", on_invalidate=top_invalidate)) + self.assertEqual(r, 42) + obj.invalidate() + top_invalidate.assert_called_once() + class CacheDecoratorTestCase(unittest.HomeserverTestCase): 
"""More tests for @cached -- cgit 1.5.1 From 243d427fbcb24c78c2df143767cd4636844fc82e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 3 Nov 2020 12:13:48 +0000 Subject: Block clients from sending server ACLs that lock the local server out. (#8708) Fixes #4042 --- changelog.d/8708.misc | 1 + mypy.ini | 1 + synapse/events/validator.py | 27 +++++++++++++------- synapse/handlers/message.py | 3 +++ tests/handlers/test_message.py | 57 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 80 insertions(+), 9 deletions(-) create mode 100644 changelog.d/8708.misc (limited to 'tests') diff --git a/changelog.d/8708.misc b/changelog.d/8708.misc new file mode 100644 index 0000000000..be679fb0f8 --- /dev/null +++ b/changelog.d/8708.misc @@ -0,0 +1 @@ +Block attempts by clients to send server ACLs, or redactions of server ACLs, that would result in the local server being blocked from the room. diff --git a/mypy.ini b/mypy.ini index 1ece2ba082..fc9f8d8050 100644 --- a/mypy.ini +++ b/mypy.ini @@ -13,6 +13,7 @@ files = synapse/config, synapse/event_auth.py, synapse/events/builder.py, + synapse/events/validator.py, synapse/events/spamcheck.py, synapse/federation, synapse/handlers/_base.py, diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 5f9af8529b..f8f3b1a31e 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -13,20 +13,26 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Union + from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import EventFormatVersions +from synapse.config.homeserver import HomeServerConfig +from synapse.events import EventBase +from synapse.events.builder import EventBuilder from synapse.events.utils import validate_canonicaljson +from synapse.federation.federation_server import server_matches_acl_event from synapse.types import EventID, RoomID, UserID class EventValidator: - def validate_new(self, event, config): + def validate_new(self, event: EventBase, config: HomeServerConfig): """Validates the event has roughly the right format Args: - event (FrozenEvent): The event to validate. - config (Config): The homeserver's configuration. + event: The event to validate. + config: The homeserver's configuration. """ self.validate_builder(event) @@ -76,12 +82,18 @@ class EventValidator: if event.type == EventTypes.Retention: self._validate_retention(event) - def _validate_retention(self, event): + if event.type == EventTypes.ServerACL: + if not server_matches_acl_event(config.server_name, event): + raise SynapseError( + 400, "Can't create an ACL event that denies the local server" + ) + + def _validate_retention(self, event: EventBase): """Checks that an event that defines the retention policy for a room respects the format enforced by the spec. Args: - event (FrozenEvent): The event to validate. + event: The event to validate. """ if not event.is_state(): raise SynapseError(code=400, msg="must be a state event") @@ -116,13 +128,10 @@ class EventValidator: errcode=Codes.BAD_JSON, ) - def validate_builder(self, event): + def validate_builder(self, event: Union[EventBase, EventBuilder]): """Validates that the builder/event has roughly the right format. 
Only checks values that we expect a proto event to have, rather than all the fields an event would have - - Args: - event (EventBuilder|FrozenEvent) """ strings = ["room_id", "sender", "type"] diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ca5602c13e..c6791fb912 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1138,6 +1138,9 @@ class EventCreationHandler: if original_event.room_id != event.room_id: raise SynapseError(400, "Cannot redact event from a different room") + if original_event.type == EventTypes.ServerACL: + raise AuthError(403, "Redacting server ACL events is not permitted") + prev_state_ids = await context.get_prev_state_ids() auth_events_ids = self.auth.compute_auth_events( event, prev_state_ids, for_verification=True diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py index 2e0fea04af..8b57081cbe 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -154,3 +154,60 @@ class EventCreationTestCase(unittest.HomeserverTestCase): # Check that we've deduplicated the events. self.assertEqual(len(events), 2) self.assertEqual(events[0].event_id, events[1].event_id) + + +class ServerAclValidationTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.user_id = self.register_user("tester", "foobar") + self.access_token = self.login("tester", "foobar") + self.room_id = self.helper.create_room_as(self.user_id, tok=self.access_token) + + def test_allow_server_acl(self): + """Test that sending an ACL that blocks everyone but ourselves works. + """ + + self.helper.send_state( + self.room_id, + EventTypes.ServerACL, + body={"allow": [self.hs.hostname]}, + tok=self.access_token, + expect_code=200, + ) + + def test_deny_server_acl_block_outselves(self): + """Test that sending an ACL that blocks ourselves does not work. + """ + self.helper.send_state( + self.room_id, + EventTypes.ServerACL, + body={}, + tok=self.access_token, + expect_code=400, + ) + + def test_deny_redact_server_acl(self): + """Test that attempting to redact an ACL is blocked. + """ + + body = self.helper.send_state( + self.room_id, + EventTypes.ServerACL, + body={"allow": [self.hs.hostname]}, + tok=self.access_token, + expect_code=200, + ) + event_id = body["event_id"] + + # Redaction of event should fail. + path = "/_matrix/client/r0/rooms/%s/redact/%s" % (self.room_id, event_id) + request, channel = self.make_request( + "POST", path, content={}, access_token=self.access_token + ) + self.render(request) + self.assertEqual(int(channel.result["code"]), 403) -- cgit 1.5.1 From 6abb1ad0be1cde7cd414eee0e0a1f195462eb1bd Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 4 Nov 2020 11:26:05 +0000 Subject: Consolidate purge table lists to prevent desyncronisation (#8713) I idly noticed that these lists were out of sync with each other, causing us to miss a table in a test case (`local_invites`). Let's consolidate this list instead to prevent this from happening in the future. 
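Stepping back to the server-ACL validation added above, the event shapes the new check accepts and rejects reduce to the following hedged sketch (the hostname is a placeholder):

```python
# Accepted by the validator: the local server is explicitly allowed, so
# the sender cannot lock us out of the room.
good_acl_content = {"allow": ["myserver.example"]}

# Rejected with HTTP 400: empty content matches no server at all,
# including our own.
bad_acl_content = {}
```

Redacting an existing ACL event is refused separately, with a 403 from the message handler, since the redacted event would no longer carry an `allow` list.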
--- changelog.d/8713.misc | 1 + tests/rest/admin/test_room.py | 105 +++++++++++++++--------------------------- 2 files changed, 39 insertions(+), 67 deletions(-) create mode 100644 changelog.d/8713.misc (limited to 'tests') diff --git a/changelog.d/8713.misc b/changelog.d/8713.misc new file mode 100644 index 0000000000..c5d3f3216b --- /dev/null +++ b/changelog.d/8713.misc @@ -0,0 +1 @@ +Consolidate duplicated lists of purged tables that are checked in tests. \ No newline at end of file diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 6dfc709dc5..535d68f284 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -531,40 +531,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): def _is_purged(self, room_id): """Test that the following tables have been purged of all rows related to the room. """ - for table in ( - "current_state_events", - "event_backward_extremities", - "event_forward_extremities", - "event_json", - "event_push_actions", - "event_search", - "events", - "group_rooms", - "public_room_list_stream", - "receipts_graph", - "receipts_linearized", - "room_aliases", - "room_depth", - "room_memberships", - "room_stats_state", - "room_stats_current", - "room_stats_historical", - "room_stats_earliest_token", - "rooms", - "stream_ordering_to_exterm", - "users_in_public_rooms", - "users_who_share_private_rooms", - "appservice_room_list", - "e2e_room_keys", - "event_push_summary", - "pusher_throttle", - "group_summary_rooms", - "local_invites", - "room_account_data", - "room_tags", - # "state_groups", # Current impl leaves orphaned state groups around. - "state_groups_state", - ): + for table in PURGE_TABLES: count = self.get_success( self.store.db_pool.simple_select_one_onecol( table=table, @@ -633,39 +600,7 @@ class PurgeRoomTestCase(unittest.HomeserverTestCase): self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Test that the following tables have been purged of all rows related to the room. - for table in ( - "current_state_events", - "event_backward_extremities", - "event_forward_extremities", - "event_json", - "event_push_actions", - "event_search", - "events", - "group_rooms", - "public_room_list_stream", - "receipts_graph", - "receipts_linearized", - "room_aliases", - "room_depth", - "room_memberships", - "room_stats_state", - "room_stats_current", - "room_stats_historical", - "room_stats_earliest_token", - "rooms", - "stream_ordering_to_exterm", - "users_in_public_rooms", - "users_who_share_private_rooms", - "appservice_room_list", - "e2e_room_keys", - "event_push_summary", - "pusher_throttle", - "group_summary_rooms", - "room_account_data", - "room_tags", - # "state_groups", # Current impl leaves orphaned state groups around. 
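# (Illustrative aside; the keyword arguments here are assumptions, as the
# surrounding context lines are truncated.) Every table in the list is
# checked the same way: count the rows still referencing the room and
# assert the count is zero.
#
#     count = self.get_success(
#         self.store.db_pool.simple_select_one_onecol(
#             table=table, keyvalues={"room_id": room_id}, retcol="COUNT(*)",
#         )
#     )
#     self.assertEqual(count, 0, msg="Rows not purged in %s" % (table,))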
- "state_groups_state", - ): + for table in PURGE_TABLES: count = self.get_success( self.store.db_pool.simple_select_one_onecol( table=table, @@ -1500,3 +1435,39 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0]) + + +PURGE_TABLES = [ + "current_state_events", + "event_backward_extremities", + "event_forward_extremities", + "event_json", + "event_push_actions", + "event_search", + "events", + "group_rooms", + "public_room_list_stream", + "receipts_graph", + "receipts_linearized", + "room_aliases", + "room_depth", + "room_memberships", + "room_stats_state", + "room_stats_current", + "room_stats_historical", + "room_stats_earliest_token", + "rooms", + "stream_ordering_to_exterm", + "users_in_public_rooms", + "users_who_share_private_rooms", + "appservice_room_list", + "e2e_room_keys", + "event_push_summary", + "pusher_throttle", + "group_summary_rooms", + "local_invites", + "room_account_data", + "room_tags", + # "state_groups", # Current impl leaves orphaned state groups around. + "state_groups_state", +] -- cgit 1.5.1 From e4676bd8772275833857c803f8fe4025744cec01 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 5 Nov 2020 14:55:45 +0100 Subject: Add `displayname` to Shared-Secret Registration for admins (#8722) Add `displayname` to Shared-Secret Registration for admins to `POST /_synapse/admin/v1/register` --- changelog.d/8722.feature | 1 + docs/admin_api/register_api.rst | 4 +- synapse/rest/admin/users.py | 2 + tests/rest/admin/test_user.py | 121 +++++++++++++++++++++++++++++++++++++++- tests/unittest.py | 19 +++++-- 5 files changed, 138 insertions(+), 9 deletions(-) create mode 100644 changelog.d/8722.feature (limited to 'tests') diff --git a/changelog.d/8722.feature b/changelog.d/8722.feature new file mode 100644 index 0000000000..0413d8838b --- /dev/null +++ b/changelog.d/8722.feature @@ -0,0 +1 @@ +Add `displayname` to Shared-Secret Registration for admins. \ No newline at end of file diff --git a/docs/admin_api/register_api.rst b/docs/admin_api/register_api.rst index 3a63109aa0..c3057b204b 100644 --- a/docs/admin_api/register_api.rst +++ b/docs/admin_api/register_api.rst @@ -18,7 +18,8 @@ To fetch the nonce, you need to request one from the API:: Once you have the nonce, you can make a ``POST`` to the same URL with a JSON body containing the nonce, username, password, whether they are an admin -(optional, False by default), and a HMAC digest of the content. +(optional, False by default), and a HMAC digest of the content. Also you can +set the displayname (optional, ``username`` by default). 
As an example:: @@ -26,6 +27,7 @@ As an example:: > { "nonce": "thisisanonce", "username": "pepper_roni", + "displayname": "Pepper Roni", "password": "pizza", "admin": true, "mac": "mac_digest_here" diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index b337311a37..3638e219f2 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -412,6 +412,7 @@ class UserRegisterServlet(RestServlet): admin = body.get("admin", None) user_type = body.get("user_type", None) + displayname = body.get("displayname", None) if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES: raise SynapseError(400, "Invalid user type") @@ -448,6 +449,7 @@ class UserRegisterServlet(RestServlet): password_hash=password_hash, admin=bool(admin), user_type=user_type, + default_display_name=displayname, by_admin=True, ) diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 7df32e5093..d74efede06 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -24,7 +24,7 @@ from mock import Mock import synapse.rest.admin from synapse.api.constants import UserTypes from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError -from synapse.rest.client.v1 import login, room +from synapse.rest.client.v1 import login, profile, room from synapse.rest.client.v2_alpha import sync from tests import unittest @@ -34,7 +34,10 @@ from tests.unittest import override_config class UserRegisterTestCase(unittest.HomeserverTestCase): - servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource] + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + profile.register_servlets, + ] def make_homeserver(self, reactor, clock): @@ -325,6 +328,120 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("Invalid user type", channel.json_body["error"]) + def test_displayname(self): + """ + Test that displayname of new user is set + """ + + # set no displayname + request, channel = self.make_request("GET", self.url) + self.render(request) + nonce = channel.json_body["nonce"] + + want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) + want_mac.update(nonce.encode("ascii") + b"\x00bob1\x00abc123\x00notadmin") + want_mac = want_mac.hexdigest() + + body = json.dumps( + {"nonce": nonce, "username": "bob1", "password": "abc123", "mac": want_mac} + ) + request, channel = self.make_request("POST", self.url, body.encode("utf8")) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@bob1:test", channel.json_body["user_id"]) + + request, channel = self.make_request("GET", "/profile/@bob1:test/displayname") + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("bob1", channel.json_body["displayname"]) + + # displayname is None + request, channel = self.make_request("GET", self.url) + self.render(request) + nonce = channel.json_body["nonce"] + + want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) + want_mac.update(nonce.encode("ascii") + b"\x00bob2\x00abc123\x00notadmin") + want_mac = want_mac.hexdigest() + + body = json.dumps( + { + "nonce": nonce, + "username": "bob2", + "displayname": None, + "password": "abc123", + "mac": want_mac, + } + ) + request, channel = self.make_request("POST", self.url, body.encode("utf8")) + self.render(request) + + 
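# (Illustrative aside, not part of the patch.) The MAC in these requests
# is an HMAC-SHA1, keyed with the registration shared secret, over the
# NUL-separated nonce, username, password and admin flag:
#
#     mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
#     mac.update(b"\x00".join([nonce.encode("ascii"), b"bob2", b"abc123", b"notadmin"]))
#     mac_digest = mac.hexdigest()
#
# The displayname is deliberately not part of the MAC, which is why these
# cases can vary it freely while reusing the same recipe.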
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@bob2:test", channel.json_body["user_id"]) + + request, channel = self.make_request("GET", "/profile/@bob2:test/displayname") + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("bob2", channel.json_body["displayname"]) + + # displayname is empty + request, channel = self.make_request("GET", self.url) + self.render(request) + nonce = channel.json_body["nonce"] + + want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) + want_mac.update(nonce.encode("ascii") + b"\x00bob3\x00abc123\x00notadmin") + want_mac = want_mac.hexdigest() + + body = json.dumps( + { + "nonce": nonce, + "username": "bob3", + "displayname": "", + "password": "abc123", + "mac": want_mac, + } + ) + request, channel = self.make_request("POST", self.url, body.encode("utf8")) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@bob3:test", channel.json_body["user_id"]) + + request, channel = self.make_request("GET", "/profile/@bob3:test/displayname") + self.render(request) + self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"]) + + # set displayname + request, channel = self.make_request("GET", self.url) + self.render(request) + nonce = channel.json_body["nonce"] + + want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) + want_mac.update(nonce.encode("ascii") + b"\x00bob4\x00abc123\x00notadmin") + want_mac = want_mac.hexdigest() + + body = json.dumps( + { + "nonce": nonce, + "username": "bob4", + "displayname": "Bob's Name", + "password": "abc123", + "mac": want_mac, + } + ) + request, channel = self.make_request("POST", self.url, body.encode("utf8")) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@bob4:test", channel.json_body["user_id"]) + + request, channel = self.make_request("GET", "/profile/@bob4:test/displayname") + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("Bob's Name", channel.json_body["displayname"]) + @override_config( {"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0} ) diff --git a/tests/unittest.py b/tests/unittest.py index 08cf9b10c5..e36ac89196 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -546,18 +546,24 @@ class HomeserverTestCase(TestCase): return result - def register_user(self, username, password, admin=False): + def register_user( + self, + username: str, + password: str, + admin: Optional[bool] = False, + displayname: Optional[str] = None, + ) -> str: """ Register a user. Requires the Admin API be registered. Args: - username (bytes/unicode): The user part of the new user. - password (bytes/unicode): The password of the new user. - admin (bool): Whether the user should be created as an admin - or not. + username: The user part of the new user. + password: The password of the new user. + admin: Whether the user should be created as an admin or not. + displayname: The displayname of the new user. Returns: - The MXID of the new user (unicode). + The MXID of the new user. 
""" self.hs.config.registration_shared_secret = "shared" @@ -581,6 +587,7 @@ class HomeserverTestCase(TestCase): { "nonce": nonce, "username": username, + "displayname": displayname, "password": password, "admin": admin, "mac": want_mac, -- cgit 1.5.1 From c3119d1536582c639bf67bf7e3c914935e3bbd7e Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 5 Nov 2020 19:59:12 +0100 Subject: Add an admin API for users' media statistics (#8700) Add `GET /_synapse/admin/v1/statistics/users/media` to get statisics about local media usage by users. Related to #6094 It is the first API for statistics. Goal is to avoid/reduce usage of sql queries like [Wiki analyzing Synapse](https://github.com/matrix-org/synapse/wiki/SQL-for-analyzing-Synapse-PostgreSQL-database-stats) Signed-off-by: Dirk Klimpel dirk@klimpel.org --- changelog.d/8700.feature | 1 + docs/admin_api/statistics.md | 83 ++++++ synapse/rest/admin/__init__.py | 2 + synapse/rest/admin/statistics.py | 122 ++++++++ synapse/storage/databases/main/stats.py | 127 +++++++++ tests/rest/admin/test_statistics.py | 485 ++++++++++++++++++++++++++++++++ 6 files changed, 820 insertions(+) create mode 100644 changelog.d/8700.feature create mode 100644 docs/admin_api/statistics.md create mode 100644 synapse/rest/admin/statistics.py create mode 100644 tests/rest/admin/test_statistics.py (limited to 'tests') diff --git a/changelog.d/8700.feature b/changelog.d/8700.feature new file mode 100644 index 0000000000..47d63dce02 --- /dev/null +++ b/changelog.d/8700.feature @@ -0,0 +1 @@ +Add an admin API for local user media statistics. Contributed by @dklimpel. diff --git a/docs/admin_api/statistics.md b/docs/admin_api/statistics.md new file mode 100644 index 0000000000..d398a120fb --- /dev/null +++ b/docs/admin_api/statistics.md @@ -0,0 +1,83 @@ +# Users' media usage statistics + +Returns information about all local media usage of users. Gives the +possibility to filter them by time and user. + +The API is: + +``` +GET /_synapse/admin/v1/statistics/users/media +``` + +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [README.rst](README.rst). + +A response body like the following is returned: + +```json +{ + "users": [ + { + "displayname": "foo_user_0", + "media_count": 2, + "media_length": 134, + "user_id": "@foo_user_0:test" + }, + { + "displayname": "foo_user_1", + "media_count": 2, + "media_length": 134, + "user_id": "@foo_user_1:test" + } + ], + "next_token": 3, + "total": 10 +} +``` + +To paginate, check for `next_token` and if present, call the endpoint +again with `from` set to the value of `next_token`. This will return a new page. + +If the endpoint does not return a `next_token` then there are no more +reports to paginate through. + +**Parameters** + +The following parameters should be set in the URL: + +* `limit`: string representing a positive integer - Is optional but is + used for pagination, denoting the maximum number of items to return + in this call. Defaults to `100`. +* `from`: string representing a positive integer - Is optional but used for pagination, + denoting the offset in the returned results. This should be treated as an opaque value + and not explicitly set to anything other than the return value of `next_token` from a + previous call. Defaults to `0`. +* `order_by` - string - The method in which to sort the returned list of users. Valid values are: + - `user_id` - Users are ordered alphabetically by `user_id`. This is the default. 
+ - `displayname` - Users are ordered alphabetically by `displayname`. + - `media_length` - Users are ordered by the total size of uploaded media in bytes. + Smallest to largest. + - `media_count` - Users are ordered by number of uploaded media. Smallest to largest. +* `from_ts` - string representing a positive integer - Considers only + files created at this timestamp or later. Unix timestamp in ms. +* `until_ts` - string representing a positive integer - Considers only + files created at this timestamp or earlier. Unix timestamp in ms. +* `search_term` - string - Filter users by their user ID localpart **or** displayname. + The search term can be found in any part of the string. + Defaults to no filtering. +* `dir` - string - Direction of order. Either `f` for forwards or `b` for backwards. + Setting this value to `b` will reverse the above sort order. Defaults to `f`. + + +**Response** + +The following fields are returned in the JSON response body: + +* `users` - An array of objects, each containing information + about the user and their local media. Objects contain the following fields: + - `displayname` - string - Displayname of this user. + - `media_count` - integer - Number of uploaded media by this user. + - `media_length` - integer - Size of uploaded media in bytes by this user. + - `user_id` - string - Fully-qualified user ID (ex. `@user:server.com`). +* `next_token` - integer - Opaque value used for pagination. See above. +* `total` - integer - Total number of users after filtering. diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index fa7e9e4043..2a4f7a1740 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -47,6 +47,7 @@ from synapse.rest.admin.rooms import ( ShutdownRoomRestServlet, ) from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet +from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet from synapse.rest.admin.users import ( AccountValidityRenewServlet, DeactivateAccountRestServlet, @@ -227,6 +228,7 @@ def register_servlets(hs, http_server): DeviceRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server) DeleteDevicesRestServlet(hs).register(http_server) + UserMediaStatisticsRestServlet(hs).register(http_server) EventReportDetailRestServlet(hs).register(http_server) EventReportsRestServlet(hs).register(http_server) PushersRestServlet(hs).register(http_server) diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py new file mode 100644 index 0000000000..f2490e382d --- /dev/null +++ b/synapse/rest/admin/statistics.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Dirk Klimpel +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
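# (Usage sketch, not part of the module; the hostname and token are
# placeholders, and the query parameters are those documented in
# docs/admin_api/statistics.md above.)
#
#     import requests
#
#     resp = requests.get(
#         "https://synapse.example.com/_synapse/admin/v1/statistics/users/media",
#         params={"order_by": "media_length", "dir": "b", "limit": 10},
#         headers={"Authorization": "Bearer <admin access token>"},
#     )
#     data = resp.json()  # {"users": [...], "total": N} plus "next_token" if more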
+ +import logging +from typing import TYPE_CHECKING, Tuple + +from synapse.api.errors import Codes, SynapseError +from synapse.http.servlet import RestServlet, parse_integer, parse_string +from synapse.http.site import SynapseRequest +from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin +from synapse.storage.databases.main.stats import UserSortOrder +from synapse.types import JsonDict + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class UserMediaStatisticsRestServlet(RestServlet): + """ + Get statistics about uploaded media by users. + """ + + PATTERNS = admin_patterns("/statistics/users/media$") + + def __init__(self, hs: "HomeServer"): + self.hs = hs + self.auth = hs.get_auth() + self.store = hs.get_datastore() + + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self.auth, request) + + order_by = parse_string( + request, "order_by", default=UserSortOrder.USER_ID.value + ) + if order_by not in ( + UserSortOrder.MEDIA_LENGTH.value, + UserSortOrder.MEDIA_COUNT.value, + UserSortOrder.USER_ID.value, + UserSortOrder.DISPLAYNAME.value, + ): + raise SynapseError( + 400, + "Unknown value for order_by: %s" % (order_by,), + errcode=Codes.INVALID_PARAM, + ) + + start = parse_integer(request, "from", default=0) + if start < 0: + raise SynapseError( + 400, + "Query parameter from must be a string representing a positive integer.", + errcode=Codes.INVALID_PARAM, + ) + + limit = parse_integer(request, "limit", default=100) + if limit < 0: + raise SynapseError( + 400, + "Query parameter limit must be a string representing a positive integer.", + errcode=Codes.INVALID_PARAM, + ) + + from_ts = parse_integer(request, "from_ts", default=0) + if from_ts < 0: + raise SynapseError( + 400, + "Query parameter from_ts must be a string representing a positive integer.", + errcode=Codes.INVALID_PARAM, + ) + + until_ts = parse_integer(request, "until_ts") + if until_ts is not None: + if until_ts < 0: + raise SynapseError( + 400, + "Query parameter until_ts must be a string representing a positive integer.", + errcode=Codes.INVALID_PARAM, + ) + if until_ts <= from_ts: + raise SynapseError( + 400, + "Query parameter until_ts must be greater than from_ts.", + errcode=Codes.INVALID_PARAM, + ) + + search_term = parse_string(request, "search_term") + if search_term == "": + raise SynapseError( + 400, + "Query parameter search_term cannot be an empty string.", + errcode=Codes.INVALID_PARAM, + ) + + direction = parse_string(request, "dir", default="f") + if direction not in ("f", "b"): + raise SynapseError( + 400, "Unknown direction: %s" % (direction,), errcode=Codes.INVALID_PARAM + ) + + users_media, total = await self.store.get_users_media_usage_paginate( + start, limit, from_ts, until_ts, order_by, direction, search_term + ) + ret = {"users": users_media, "total": total} + if (start + limit) < total: + ret["next_token"] = start + len(users_media) + + return 200, ret diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 5beb302be3..0cdb3ec1f7 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -16,15 +16,18 @@ import logging from collections import Counter +from enum import Enum from itertools import chain from typing import Any, Dict, List, Optional, Tuple from twisted.internet.defer import DeferredLock from synapse.api.constants import EventTypes, Membership +from synapse.api.errors 
import StoreError from synapse.storage.database import DatabasePool from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.storage.engines import PostgresEngine +from synapse.types import JsonDict from synapse.util.caches.descriptors import cached logger = logging.getLogger(__name__) @@ -59,6 +62,23 @@ TYPE_TO_TABLE = {"room": ("room_stats", "room_id"), "user": ("user_stats", "user TYPE_TO_ORIGIN_TABLE = {"room": ("rooms", "room_id"), "user": ("users", "name")} +class UserSortOrder(Enum): + """ + Enum to define the sorting method used when returning users + with get_users_media_usage_paginate + + MEDIA_LENGTH = ordered by size of uploaded media. Smallest to largest. + MEDIA_COUNT = ordered by number of uploaded media. Smallest to largest. + USER_ID = ordered alphabetically by `user_id`. + DISPLAYNAME = ordered alphabetically by `displayname` + """ + + MEDIA_LENGTH = "media_length" + MEDIA_COUNT = "media_count" + USER_ID = "user_id" + DISPLAYNAME = "displayname" + + class StatsStore(StateDeltasStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) @@ -882,3 +902,110 @@ class StatsStore(StateDeltasStore): complete_with_stream_id=pos, absolute_field_overrides={"joined_rooms": joined_rooms}, ) + + async def get_users_media_usage_paginate( + self, + start: int, + limit: int, + from_ts: Optional[int] = None, + until_ts: Optional[int] = None, + order_by: Optional[UserSortOrder] = UserSortOrder.USER_ID.value, + direction: Optional[str] = "f", + search_term: Optional[str] = None, + ) -> Tuple[List[JsonDict], Dict[str, int]]: + """Function to retrieve a paginated list of users and their uploaded local media + (size and number). This will return a json list of users and the + total number of users matching the filter criteria. + + Args: + start: offset to begin the query from + limit: number of rows to retrieve + from_ts: request only media that are created later than this timestamp (ms) + until_ts: request only media that are created earlier than this timestamp (ms) + order_by: the sort order of the returned list + direction: sort ascending or descending + search_term: a string to filter user names by + Returns: + A list of user dicts and an integer representing the total number of + users that exist given this query + """ + + def get_users_media_usage_paginate_txn(txn): + filters = [] + args = [self.hs.config.server_name] + + if search_term: + filters.append("(lmr.user_id LIKE ? OR displayname LIKE ?)") + args.extend(["@%" + search_term + "%:%", "%" + search_term + "%"]) + + if from_ts: + filters.append("created_ts >= ?") + args.extend([from_ts]) + if until_ts: + filters.append("created_ts <= ?") + args.extend([until_ts]) + + # Set ordering + if UserSortOrder(order_by) == UserSortOrder.MEDIA_LENGTH: + order_by_column = "media_length" + elif UserSortOrder(order_by) == UserSortOrder.MEDIA_COUNT: + order_by_column = "media_count" + elif UserSortOrder(order_by) == UserSortOrder.USER_ID: + order_by_column = "lmr.user_id" + elif UserSortOrder(order_by) == UserSortOrder.DISPLAYNAME: + order_by_column = "displayname" + else: + raise StoreError( + 500, "Incorrect value for order_by provided: %s" % order_by + ) + + if direction == "b": + order = "DESC" + else: + order = "ASC" + + where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else "" + + sql_base = """ + FROM local_media_repository as lmr + LEFT JOIN profiles AS p ON lmr.user_id = '@' || p.user_id || ':' || ? 
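-- (Aside, illustrative.) The '@' || p.user_id || ':' || ? expression
-- rebuilds a fully-qualified user ID from the profiles table, which
-- stores only the localpart; the bound parameter is this server's name,
-- which is why args is seeded with self.hs.config.server_name above.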
+ {} + GROUP BY lmr.user_id, displayname + """.format( + where_clause + ) + + # SQLite does not support SELECT COUNT(*) OVER() + sql = """ + SELECT COUNT(*) FROM ( + SELECT lmr.user_id + {sql_base} + ) AS count_user_ids + """.format( + sql_base=sql_base, + ) + txn.execute(sql, args) + count = txn.fetchone()[0] + + sql = """ + SELECT + lmr.user_id, + displayname, + COUNT(lmr.user_id) as media_count, + SUM(media_length) as media_length + {sql_base} + ORDER BY {order_by_column} {order} + LIMIT ? OFFSET ? + """.format( + sql_base=sql_base, order_by_column=order_by_column, order=order, + ) + + args += [limit, start] + txn.execute(sql, args) + users = self.db_pool.cursor_to_dict(txn) + + return users, count + + return await self.db_pool.runInteraction( + "get_users_media_usage_paginate_txn", get_users_media_usage_paginate_txn + ) diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py new file mode 100644 index 0000000000..816683a612 --- /dev/null +++ b/tests/rest/admin/test_statistics.py @@ -0,0 +1,485 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Dirk Klimpel +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from binascii import unhexlify +from typing import Any, Dict, List, Optional + +import synapse.rest.admin +from synapse.api.errors import Codes +from synapse.rest.client.v1 import login + +from tests import unittest + + +class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + self.media_repo = hs.get_media_repository_resource() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + + self.url = "/_synapse/admin/v1/statistics/users/media" + + def test_no_auth(self): + """ + Try to list users without authentication. + """ + request, channel = self.make_request("GET", self.url, b"{}") + self.render(request) + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self): + """ + If the user is not a server admin, an error 403 is returned. + """ + request, channel = self.make_request( + "GET", self.url, json.dumps({}), access_token=self.other_user_tok, + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_invalid_parameter(self): + """ + If parameters are invalid, an error is returned. 
+ """ + # unkown order_by + request, channel = self.make_request( + "GET", self.url + "?order_by=bar", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # negative from + request, channel = self.make_request( + "GET", self.url + "?from=-5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # negative limit + request, channel = self.make_request( + "GET", self.url + "?limit=-5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # negative from_ts + request, channel = self.make_request( + "GET", self.url + "?from_ts=-1234", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # negative until_ts + request, channel = self.make_request( + "GET", self.url + "?until_ts=-1234", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # until_ts smaller from_ts + request, channel = self.make_request( + "GET", + self.url + "?from_ts=10&until_ts=5", + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # empty search term + request, channel = self.make_request( + "GET", self.url + "?search_term=", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # invalid search order + request, channel = self.make_request( + "GET", self.url + "?dir=bar", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + def test_limit(self): + """ + Testing list of media with limit + """ + self._create_users_with_media(10, 2) + + request, channel = self.make_request( + "GET", self.url + "?limit=5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 10) + self.assertEqual(len(channel.json_body["users"]), 5) + self.assertEqual(channel.json_body["next_token"], 5) + self._check_fields(channel.json_body["users"]) + + def test_from(self): + """ + Testing list of media with a defined starting point (from) + """ + self._create_users_with_media(20, 2) + + request, channel = self.make_request( + "GET", self.url + "?from=5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["users"]), 15) + 
self.assertNotIn("next_token", channel.json_body) + self._check_fields(channel.json_body["users"]) + + def test_limit_and_from(self): + """ + Testing list of media with a defined starting point and limit + """ + self._create_users_with_media(20, 2) + + request, channel = self.make_request( + "GET", self.url + "?from=5&limit=10", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(channel.json_body["next_token"], 15) + self.assertEqual(len(channel.json_body["users"]), 10) + self._check_fields(channel.json_body["users"]) + + def test_next_token(self): + """ + Testing that `next_token` appears at the right place + """ + + number_users = 20 + self._create_users_with_media(number_users, 3) + + # `next_token` does not appear + # Number of results is the number of entries + request, channel = self.make_request( + "GET", self.url + "?limit=20", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], number_users) + self.assertEqual(len(channel.json_body["users"]), number_users) + self.assertNotIn("next_token", channel.json_body) + + # `next_token` does not appear + # Number of max results is larger than the number of entries + request, channel = self.make_request( + "GET", self.url + "?limit=21", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], number_users) + self.assertEqual(len(channel.json_body["users"]), number_users) + self.assertNotIn("next_token", channel.json_body) + + # `next_token` does appear + # Number of max results is smaller than the number of entries + request, channel = self.make_request( + "GET", self.url + "?limit=19", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], number_users) + self.assertEqual(len(channel.json_body["users"]), 19) + self.assertEqual(channel.json_body["next_token"], 19) + + # Set `from` to value of `next_token` for request remaining entries + # Check `next_token` does not appear + request, channel = self.make_request( + "GET", self.url + "?from=19", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], number_users) + self.assertEqual(len(channel.json_body["users"]), 1) + self.assertNotIn("next_token", channel.json_body) + + def test_no_media(self): + """ + Tests that a normal lookup for statistics is successfully + if users have no media created + """ + + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(0, channel.json_body["total"]) + self.assertEqual(0, len(channel.json_body["users"])) + + def test_order_by(self): + """ + Testing order list with parameter `order_by` + """ + + # create users + self.register_user("user_a", "pass", displayname="UserZ") + userA_tok = self.login("user_a", "pass") + self._create_media(userA_tok, 1) + + self.register_user("user_b", "pass", displayname="UserY") + userB_tok = 
self.login("user_b", "pass") + self._create_media(userB_tok, 3) + + self.register_user("user_c", "pass", displayname="UserX") + userC_tok = self.login("user_c", "pass") + self._create_media(userC_tok, 2) + + # order by user_id + self._order_test("user_id", ["@user_a:test", "@user_b:test", "@user_c:test"]) + self._order_test( + "user_id", ["@user_a:test", "@user_b:test", "@user_c:test"], "f", + ) + self._order_test( + "user_id", ["@user_c:test", "@user_b:test", "@user_a:test"], "b", + ) + + # order by displayname + self._order_test( + "displayname", ["@user_c:test", "@user_b:test", "@user_a:test"] + ) + self._order_test( + "displayname", ["@user_c:test", "@user_b:test", "@user_a:test"], "f", + ) + self._order_test( + "displayname", ["@user_a:test", "@user_b:test", "@user_c:test"], "b", + ) + + # order by media_length + self._order_test( + "media_length", ["@user_a:test", "@user_c:test", "@user_b:test"], + ) + self._order_test( + "media_length", ["@user_a:test", "@user_c:test", "@user_b:test"], "f", + ) + self._order_test( + "media_length", ["@user_b:test", "@user_c:test", "@user_a:test"], "b", + ) + + # order by media_count + self._order_test( + "media_count", ["@user_a:test", "@user_c:test", "@user_b:test"], + ) + self._order_test( + "media_count", ["@user_a:test", "@user_c:test", "@user_b:test"], "f", + ) + self._order_test( + "media_count", ["@user_b:test", "@user_c:test", "@user_a:test"], "b", + ) + + def test_from_until_ts(self): + """ + Testing filter by time with parameters `from_ts` and `until_ts` + """ + # create media earlier than `ts1` to ensure that `from_ts` is working + self._create_media(self.other_user_tok, 3) + self.pump(1) + ts1 = self.clock.time_msec() + + # list all media when filter is not set + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["users"][0]["media_count"], 3) + + # filter media starting at `ts1` after creating first media + # result is 0 + request, channel = self.make_request( + "GET", self.url + "?from_ts=%s" % (ts1,), access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 0) + + self._create_media(self.other_user_tok, 3) + self.pump(1) + ts2 = self.clock.time_msec() + # create media after `ts2` to ensure that `until_ts` is working + self._create_media(self.other_user_tok, 3) + + # filter media between `ts1` and `ts2` + request, channel = self.make_request( + "GET", + self.url + "?from_ts=%s&until_ts=%s" % (ts1, ts2), + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["users"][0]["media_count"], 3) + + # filter media until `ts2` and earlier + request, channel = self.make_request( + "GET", self.url + "?until_ts=%s" % (ts2,), access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["users"][0]["media_count"], 6) + + def test_search_term(self): + self._create_users_with_media(20, 1) + + # check without filter get all users + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), 
msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + + # filter user 1 and 10-19 by `user_id` + request, channel = self.make_request( + "GET", + self.url + "?search_term=foo_user_1", + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 11) + + # filter on this user in `displayname` + request, channel = self.make_request( + "GET", + self.url + "?search_term=bar_user_10", + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["users"][0]["displayname"], "bar_user_10") + self.assertEqual(channel.json_body["total"], 1) + + # filter and get empty result + request, channel = self.make_request( + "GET", self.url + "?search_term=foobar", access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 0) + + def _create_users_with_media(self, number_users: int, media_per_user: int): + """ + Create a number of users with a number of media + Args: + number_users: Number of users to be created + media_per_user: Number of media to be created for each user + """ + for i in range(number_users): + self.register_user("foo_user_%s" % i, "pass", displayname="bar_user_%s" % i) + user_tok = self.login("foo_user_%s" % i, "pass") + self._create_media(user_tok, media_per_user) + + def _create_media(self, user_token: str, number_media: int): + """ + Create a number of media for a specific user + Args: + user_token: Access token of the user + number_media: Number of media to be created for the user + """ + upload_resource = self.media_repo.children[b"upload"] + for i in range(number_media): + # file size is 67 Byte + image_data = unhexlify( + b"89504e470d0a1a0a0000000d4948445200000001000000010806" + b"0000001f15c4890000000a49444154789c63000100000500010d" + b"0a2db40000000049454e44ae426082" + ) + + # Upload some media into the room + self.helper.upload_media( + upload_resource, image_data, tok=user_token, expect_code=200 + ) + + def _check_fields(self, content: List[Dict[str, Any]]): + """Checks that all attributes are present in content + Args: + content: List that is checked for content + """ + for c in content: + self.assertIn("user_id", c) + self.assertIn("displayname", c) + self.assertIn("media_count", c) + self.assertIn("media_length", c) + + def _order_test( + self, order_type: str, expected_user_list: List[str], dir: Optional[str] = None + ): + """Request the list of users in a certain order. 
Assert that order is what + we expect + Args: + order_type: The type of ordering to give the server + expected_user_list: The list of user_ids in the order we expect to get + back from the server + dir: The direction of ordering to give the server + """ + + url = self.url + "?order_by=%s" % (order_type,) + if dir is not None and dir in ("b", "f"): + url += "&dir=%s" % (dir,) + request, channel = self.make_request( + "GET", url.encode("ascii"), access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(channel.json_body["total"], len(expected_user_list)) + + returned_order = [row["user_id"] for row in channel.json_body["users"]] + self.assertListEqual(expected_user_list, returned_order) + self._check_fields(channel.json_body["users"]) -- cgit 1.5.1 From d3523e3e9727f0a81f88e9aa58a8f0fc2b3ee260 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 13 Nov 2020 22:34:08 +0000 Subject: pass a Site into RestHelper --- tests/rest/client/v1/utils.py | 11 ++++++----- tests/unittest.py | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'tests') diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index afaf9f7b85..dc789fbdaa 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -23,6 +23,7 @@ from typing import Any, Dict, Optional import attr from twisted.web.resource import Resource +from twisted.web.server import Site from synapse.api.constants import Membership @@ -36,7 +37,7 @@ class RestHelper: """ hs = attr.ib() - resource = attr.ib() + site = attr.ib(type=Site) auth_user_id = attr.ib() def create_room_as( @@ -54,7 +55,7 @@ class RestHelper: request, channel = make_request( self.hs.get_reactor(), "POST", path, json.dumps(content).encode("utf8") ) - render(request, self.resource, self.hs.get_reactor()) + render(request, self.site.resource, self.hs.get_reactor()) assert channel.result["code"] == b"%d" % expect_code, channel.result self.auth_user_id = temp_id @@ -128,7 +129,7 @@ class RestHelper: self.hs.get_reactor(), "PUT", path, json.dumps(data).encode("utf8") ) - render(request, self.resource, self.hs.get_reactor()) + render(request, self.site.resource, self.hs.get_reactor()) assert int(channel.result["code"]) == expect_code, ( "Expected: %d, got: %d, resp: %r" @@ -160,7 +161,7 @@ class RestHelper: request, channel = make_request( self.hs.get_reactor(), "PUT", path, json.dumps(content).encode("utf8") ) - render(request, self.resource, self.hs.get_reactor()) + render(request, self.site.resource, self.hs.get_reactor()) assert int(channel.result["code"]) == expect_code, ( "Expected: %d, got: %d, resp: %r" @@ -212,7 +213,7 @@ class RestHelper: request, channel = make_request(self.hs.get_reactor(), method, path, content) - render(request, self.resource, self.hs.get_reactor()) + render(request, self.site.resource, self.hs.get_reactor()) assert int(channel.result["code"]) == expect_code, ( "Expected: %d, got: %d, resp: %r" diff --git a/tests/unittest.py b/tests/unittest.py index e36ac89196..0a24c2f6b2 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -253,7 +253,7 @@ class HomeserverTestCase(TestCase): from tests.rest.client.v1.utils import RestHelper - self.helper = RestHelper(self.hs, self.resource, getattr(self, "user_id", None)) + self.helper = RestHelper(self.hs, self.site, getattr(self, "user_id", None)) if hasattr(self, "user_id"): if self.hijack_auth: -- cgit 1.5.1 From 9debe657a39a234d574e949ae8faf3f5ed027c09 Mon Sep 17 
00:00:00 2001 From: Richard van der Hoff Date: Fri, 13 Nov 2020 22:39:09 +0000 Subject: pass a Site into make_request --- tests/rest/client/v1/utils.py | 31 +++++++++++++++++++++++++------ tests/server.py | 16 +++++++++++++++- tests/test_server.py | 40 +++++++++++++++++++++++++++------------- tests/unittest.py | 1 + 4 files changed, 68 insertions(+), 20 deletions(-) (limited to 'tests') diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index dc789fbdaa..60e4b9b846 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -27,7 +27,7 @@ from twisted.web.server import Site from synapse.api.constants import Membership -from tests.server import make_request, render +from tests.server import FakeSite, make_request, render @attr.s @@ -53,7 +53,11 @@ class RestHelper: path = path + "?access_token=%s" % tok request, channel = make_request( - self.hs.get_reactor(), "POST", path, json.dumps(content).encode("utf8") + self.hs.get_reactor(), + self.site, + "POST", + path, + json.dumps(content).encode("utf8"), ) render(request, self.site.resource, self.hs.get_reactor()) @@ -126,7 +130,11 @@ class RestHelper: data.update(extra_data) request, channel = make_request( - self.hs.get_reactor(), "PUT", path, json.dumps(data).encode("utf8") + self.hs.get_reactor(), + self.site, + "PUT", + path, + json.dumps(data).encode("utf8"), ) render(request, self.site.resource, self.hs.get_reactor()) @@ -159,7 +167,11 @@ class RestHelper: path = path + "?access_token=%s" % tok request, channel = make_request( - self.hs.get_reactor(), "PUT", path, json.dumps(content).encode("utf8") + self.hs.get_reactor(), + self.site, + "PUT", + path, + json.dumps(content).encode("utf8"), ) render(request, self.site.resource, self.hs.get_reactor()) @@ -211,7 +223,9 @@ class RestHelper: if body is not None: content = json.dumps(body).encode("utf8") - request, channel = make_request(self.hs.get_reactor(), method, path, content) + request, channel = make_request( + self.hs.get_reactor(), self.site, method, path, content + ) render(request, self.site.resource, self.hs.get_reactor()) @@ -297,7 +311,12 @@ class RestHelper: image_length = len(image_data) path = "/_matrix/media/r0/upload?filename=%s" % (filename,) request, channel = make_request( - self.hs.get_reactor(), "POST", path, content=image_data, access_token=tok + self.hs.get_reactor(), + FakeSite(resource), + "POST", + path, + content=image_data, + access_token=tok, ) request.requestHeaders.addRawHeader( b"Content-Length", str(image_length).encode("UTF-8") diff --git a/tests/server.py b/tests/server.py index 3dd2cfc072..b9ccde4962 100644 --- a/tests/server.py +++ b/tests/server.py @@ -21,6 +21,7 @@ from twisted.python.failure import Failure from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock from twisted.web.http import unquote from twisted.web.http_headers import Headers +from twisted.web.resource import IResource from twisted.web.server import Site from synapse.http.site import SynapseRequest @@ -128,9 +129,21 @@ class FakeSite: site_tag = "test" access_logger = logging.getLogger("synapse.access.http.fake") + def __init__(self, resource: IResource): + """ + + Args: + resource: the resource to be used for rendering all requests + """ + self._resource = resource + + def getResourceFor(self, request): + return self._resource + def make_request( reactor, + site: Site, method, path, content=b"", @@ -145,6 +158,8 @@ def make_request( content, and return the Request and the Channel underneath. 
Args: + site: The twisted Site to associate with the Channel + method (bytes/unicode): The HTTP request method ("verb"). path (bytes/unicode): The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and such). @@ -181,7 +196,6 @@ def make_request( if isinstance(content, str): content = content.encode("utf8") - site = FakeSite() channel = FakeChannel(site, reactor) req = request(channel) diff --git a/tests/test_server.py b/tests/test_server.py index 655c918a15..300d13ac95 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -26,6 +26,7 @@ from synapse.util import Clock from tests import unittest from tests.server import ( + FakeSite, ThreadedMemoryReactorClock, make_request, render, @@ -62,7 +63,7 @@ class JsonResourceTests(unittest.TestCase): ) request, channel = make_request( - self.reactor, b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83" + self.reactor, FakeSite(res), b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83" ) render(request, res, self.reactor) @@ -83,7 +84,9 @@ class JsonResourceTests(unittest.TestCase): "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" ) - request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") + request, channel = make_request( + self.reactor, FakeSite(res), b"GET", b"/_matrix/foo" + ) render(request, res, self.reactor) self.assertEqual(channel.result["code"], b"500") @@ -108,7 +111,9 @@ class JsonResourceTests(unittest.TestCase): "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" ) - request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") + request, channel = make_request( + self.reactor, FakeSite(res), b"GET", b"/_matrix/foo" + ) render(request, res, self.reactor) self.assertEqual(channel.result["code"], b"500") @@ -127,7 +132,9 @@ class JsonResourceTests(unittest.TestCase): "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" ) - request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") + request, channel = make_request( + self.reactor, FakeSite(res), b"GET", b"/_matrix/foo" + ) render(request, res, self.reactor) self.assertEqual(channel.result["code"], b"403") @@ -150,7 +157,9 @@ class JsonResourceTests(unittest.TestCase): "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" ) - request, channel = make_request(self.reactor, b"GET", b"/_matrix/foobar") + request, channel = make_request( + self.reactor, FakeSite(res), b"GET", b"/_matrix/foobar" + ) render(request, res, self.reactor) self.assertEqual(channel.result["code"], b"400") @@ -173,7 +182,9 @@ class JsonResourceTests(unittest.TestCase): ) # The path was registered as GET, but this is a HEAD request. - request, channel = make_request(self.reactor, b"HEAD", b"/_matrix/foo") + request, channel = make_request( + self.reactor, FakeSite(res), b"HEAD", b"/_matrix/foo" + ) render(request, res, self.reactor) self.assertEqual(channel.result["code"], b"200") @@ -196,9 +207,6 @@ class OptionsResourceTests(unittest.TestCase): def _make_request(self, method, path): """Create a request from the method/path and return a channel with the response.""" - request, channel = make_request(self.reactor, method, path, shorthand=False) - request.prepath = [] # This doesn't get set properly by make_request. - # Create a site and query for the resource. 
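# (Illustrative aside, not part of the patch.) Elsewhere in this change
# the recurring pattern for rendering against an arbitrary resource is to
# wrap it in FakeSite, whose getResourceFor() simply returns the wrapped
# resource:
#
#     request, channel = make_request(
#         self.reactor, FakeSite(res), b"GET", b"/_matrix/foo"
#     )
#     render(request, res, self.reactor)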
site = SynapseSite( "test", @@ -207,6 +215,12 @@ class OptionsResourceTests(unittest.TestCase): self.resource, "1.0", ) + + request, channel = make_request( + self.reactor, site, method, path, shorthand=False + ) + request.prepath = [] # This doesn't get set properly by make_request. + request.site = site resource = site.getResourceFor(request) @@ -284,7 +298,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): res = WrapHtmlRequestHandlerTests.TestResource() res.callback = callback - request, channel = make_request(self.reactor, b"GET", b"/path") + request, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path") render(request, res, self.reactor) self.assertEqual(channel.result["code"], b"200") @@ -303,7 +317,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): res = WrapHtmlRequestHandlerTests.TestResource() res.callback = callback - request, channel = make_request(self.reactor, b"GET", b"/path") + request, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path") render(request, res, self.reactor) self.assertEqual(channel.result["code"], b"301") @@ -325,7 +339,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): res = WrapHtmlRequestHandlerTests.TestResource() res.callback = callback - request, channel = make_request(self.reactor, b"GET", b"/path") + request, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path") render(request, res, self.reactor) self.assertEqual(channel.result["code"], b"304") @@ -345,7 +359,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): res = WrapHtmlRequestHandlerTests.TestResource() res.callback = callback - request, channel = make_request(self.reactor, b"HEAD", b"/path") + request, channel = make_request(self.reactor, FakeSite(res), b"HEAD", b"/path") render(request, res, self.reactor) self.assertEqual(channel.result["code"], b"200") diff --git a/tests/unittest.py b/tests/unittest.py index 0a24c2f6b2..8c7979a7c0 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -434,6 +434,7 @@ class HomeserverTestCase(TestCase): return make_request( self.reactor, + self.site, method, path, content, -- cgit 1.5.1 From 70c0d47989b7794766ea957369c77d99664429c5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 13 Nov 2020 23:48:25 +0000 Subject: fix dict handling for make_request() --- tests/server.py | 2 ++ tests/unittest.py | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'tests') diff --git a/tests/server.py b/tests/server.py index b9ccde4962..a74fb3fc67 100644 --- a/tests/server.py +++ b/tests/server.py @@ -193,6 +193,8 @@ def make_request( if not path.startswith(b"/"): path = b"/" + path + if isinstance(content, dict): + content = json.dumps(content).encode("utf8") if isinstance(content, str): content = content.encode("utf8") diff --git a/tests/unittest.py b/tests/unittest.py index 8c7979a7c0..3e656b7b12 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -429,9 +429,6 @@ class HomeserverTestCase(TestCase): Returns: Tuple[synapse.http.site.SynapseRequest, channel] """ - if isinstance(content, dict): - content = json.dumps(content).encode("utf8") - return make_request( self.reactor, self.site, -- cgit 1.5.1 From cfd895a22e7563b6b22141d14d9c8aebf177df44 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 13 Nov 2020 23:11:43 +0000 Subject: use global make_request() directly where we have a custom Resource Where we want to render a request against a specific Resource, call the global make_request() function rather than the one in HomeserverTestCase, 
allowing us to pass in an appropriate `Site`. --- tests/app/test_frontend_proxy.py | 13 +++---- tests/app/test_openid_listener.py | 17 ++++----- tests/http/test_additional_resource.py | 13 +++---- tests/replication/test_client_reader_shard.py | 29 ++++++++++++---- tests/replication/test_multi_media_repo.py | 10 +++--- tests/replication/test_sharded_event_persister.py | 42 +++++++++++++++++------ tests/rest/admin/test_admin.py | 18 +++++++--- tests/rest/admin/test_media.py | 13 +++++-- tests/rest/client/test_consent.py | 28 +++++++++++---- tests/rest/client/v2_alpha/test_account.py | 14 ++++++-- tests/rest/media/v1/test_media_storage.py | 17 +++++++-- 11 files changed, 154 insertions(+), 60 deletions(-) (limited to 'tests') diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py index 4a301b84e1..0bac7995e8 100644 --- a/tests/app/test_frontend_proxy.py +++ b/tests/app/test_frontend_proxy.py @@ -15,6 +15,7 @@ from synapse.app.generic_worker import GenericWorkerServer +from tests.server import make_request, render from tests.unittest import HomeserverTestCase @@ -55,10 +56,10 @@ class FrontendProxyTests(HomeserverTestCase): # Grab the resource from the site that was told to listen self.assertEqual(len(self.reactor.tcpServers), 1) site = self.reactor.tcpServers[0][1] - self.resource = site.resource.children[b"_matrix"].children[b"client"] + resource = site.resource.children[b"_matrix"].children[b"client"] - request, channel = self.make_request("PUT", "presence/a/status") - self.render(request) + request, channel = make_request(self.reactor, site, "PUT", "presence/a/status") + render(request, resource, self.reactor) # 400 + unrecognised, because nothing is registered self.assertEqual(channel.code, 400) @@ -77,10 +78,10 @@ class FrontendProxyTests(HomeserverTestCase): # Grab the resource from the site that was told to listen self.assertEqual(len(self.reactor.tcpServers), 1) site = self.reactor.tcpServers[0][1] - self.resource = site.resource.children[b"_matrix"].children[b"client"] + resource = site.resource.children[b"_matrix"].children[b"client"] - request, channel = self.make_request("PUT", "presence/a/status") - self.render(request) + request, channel = make_request(self.reactor, site, "PUT", "presence/a/status") + render(request, resource, self.reactor) # 401, because the stub servlet still checks authentication self.assertEqual(channel.code, 401) diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index c2b10d2c70..1292145890 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -20,6 +20,7 @@ from synapse.app.generic_worker import GenericWorkerServer from synapse.app.homeserver import SynapseHomeServer from synapse.config.server import parse_listener_def +from tests.server import make_request, render from tests.unittest import HomeserverTestCase @@ -66,16 +67,16 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] try: - self.resource = site.resource.children[b"_matrix"].children[b"federation"] + resource = site.resource.children[b"_matrix"].children[b"federation"] except KeyError: if expectation == "no_resource": return raise - request, channel = self.make_request( - "GET", "/_matrix/federation/v1/openid/userinfo" + request, channel = make_request( + self.reactor, site, "GET", "/_matrix/federation/v1/openid/userinfo" ) - self.render(request) + render(request, resource, self.reactor) 
self.assertEqual(channel.code, 401) @@ -115,15 +116,15 @@ class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] try: - self.resource = site.resource.children[b"_matrix"].children[b"federation"] + resource = site.resource.children[b"_matrix"].children[b"federation"] except KeyError: if expectation == "no_resource": return raise - request, channel = self.make_request( - "GET", "/_matrix/federation/v1/openid/userinfo" + request, channel = make_request( + self.reactor, site, "GET", "/_matrix/federation/v1/openid/userinfo" ) - self.render(request) + render(request, resource, self.reactor) self.assertEqual(channel.code, 401) diff --git a/tests/http/test_additional_resource.py b/tests/http/test_additional_resource.py index 62d36c2906..e835512a41 100644 --- a/tests/http/test_additional_resource.py +++ b/tests/http/test_additional_resource.py @@ -17,6 +17,7 @@ from synapse.http.additional_resource import AdditionalResource from synapse.http.server import respond_with_json +from tests.server import FakeSite, make_request, render from tests.unittest import HomeserverTestCase @@ -43,20 +44,20 @@ class AdditionalResourceTests(HomeserverTestCase): def test_async(self): handler = _AsyncTestCustomEndpoint({}, None).handle_request - self.resource = AdditionalResource(self.hs, handler) + resource = AdditionalResource(self.hs, handler) - request, channel = self.make_request("GET", "/") - self.render(request) + request, channel = make_request(self.reactor, FakeSite(resource), "GET", "/") + render(request, resource, self.reactor) self.assertEqual(request.code, 200) self.assertEqual(channel.json_body, {"some_key": "some_value_async"}) def test_sync(self): handler = _SyncTestCustomEndpoint({}, None).handle_request - self.resource = AdditionalResource(self.hs, handler) + resource = AdditionalResource(self.hs, handler) - request, channel = self.make_request("GET", "/") - self.render(request) + request, channel = make_request(self.reactor, FakeSite(resource), "GET", "/") + render(request, resource, self.reactor) self.assertEqual(request.code, 200) self.assertEqual(channel.json_body, {"some_key": "some_value_sync"}) diff --git a/tests/replication/test_client_reader_shard.py b/tests/replication/test_client_reader_shard.py index 86c03fd89c..90172bd377 100644 --- a/tests/replication/test_client_reader_shard.py +++ b/tests/replication/test_client_reader_shard.py @@ -20,7 +20,7 @@ from synapse.rest.client.v2_alpha import register from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.rest.client.v2_alpha.test_auth import DummyRecaptchaChecker -from tests.server import FakeChannel +from tests.server import FakeChannel, make_request logger = logging.getLogger(__name__) @@ -46,8 +46,11 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase): """Test that registration works when using a single client reader worker. 
""" worker_hs = self.make_worker_hs("synapse.app.client_reader") + site = self._hs_to_site[worker_hs] - request_1, channel_1 = self.make_request( + request_1, channel_1 = make_request( + self.reactor, + site, "POST", "register", {"username": "user", "type": "m.login.password", "password": "bar"}, @@ -59,8 +62,12 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase): session = channel_1.json_body["session"] # also complete the dummy auth - request_2, channel_2 = self.make_request( - "POST", "register", {"auth": {"session": session, "type": "m.login.dummy"}} + request_2, channel_2 = make_request( + self.reactor, + site, + "POST", + "register", + {"auth": {"session": session, "type": "m.login.dummy"}}, ) # type: SynapseRequest, FakeChannel self.render_on_worker(worker_hs, request_2) self.assertEqual(request_2.code, 200) @@ -74,7 +81,10 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase): worker_hs_1 = self.make_worker_hs("synapse.app.client_reader") worker_hs_2 = self.make_worker_hs("synapse.app.client_reader") - request_1, channel_1 = self.make_request( + site_1 = self._hs_to_site[worker_hs_1] + request_1, channel_1 = make_request( + self.reactor, + site_1, "POST", "register", {"username": "user", "type": "m.login.password", "password": "bar"}, @@ -86,8 +96,13 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase): session = channel_1.json_body["session"] # also complete the dummy auth - request_2, channel_2 = self.make_request( - "POST", "register", {"auth": {"session": session, "type": "m.login.dummy"}} + site_2 = self._hs_to_site[worker_hs_2] + request_2, channel_2 = make_request( + self.reactor, + site_2, + "POST", + "register", + {"auth": {"session": session, "type": "m.login.dummy"}}, ) # type: SynapseRequest, FakeChannel self.render_on_worker(worker_hs_2, request_2) self.assertEqual(request_2.code, 200) diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index 77c261dbf7..a9ac4aeec1 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -28,7 +28,7 @@ from synapse.server import HomeServer from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file from tests.replication._base import BaseMultiWorkerStreamTestCase -from tests.server import FakeChannel, FakeTransport +from tests.server import FakeChannel, FakeSite, FakeTransport, make_request logger = logging.getLogger(__name__) @@ -67,14 +67,16 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): The channel for the *client* request and the *outbound* request for the media which the caller should respond to. 
""" - - request, channel = self.make_request( + resource = hs.get_media_repository_resource().children[b"download"] + request, channel = make_request( + self.reactor, + FakeSite(resource), "GET", "/{}/{}".format(target, media_id), shorthand=False, access_token=self.access_token, ) - request.render(hs.get_media_repository_resource().children[b"download"]) + request.render(resource) self.pump() clients = self.reactor.tcpClients diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py index 82cf033d4e..2820dd622f 100644 --- a/tests/replication/test_sharded_event_persister.py +++ b/tests/replication/test_sharded_event_persister.py @@ -22,6 +22,7 @@ from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import sync from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.server import make_request from tests.utils import USE_POSTGRES_FOR_TESTS logger = logging.getLogger(__name__) @@ -148,6 +149,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): sync_hs = self.make_worker_hs( "synapse.app.generic_worker", {"worker_name": "sync"}, ) + sync_hs_site = self._hs_to_site[sync_hs] # Specially selected room IDs that get persisted on different workers. room_id1 = "!foo:test" @@ -178,7 +180,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): ) # Do an initial sync so that we're up to date. - request, channel = self.make_request("GET", "/sync", access_token=access_token) + request, channel = make_request( + self.reactor, sync_hs_site, "GET", "/sync", access_token=access_token + ) self.render_on_worker(sync_hs, request) next_batch = channel.json_body["next_batch"] @@ -203,8 +207,12 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): # Check that syncing still gets the new event, despite the gap in the # stream IDs. - request, channel = self.make_request( - "GET", "/sync?since={}".format(next_batch), access_token=access_token + request, channel = make_request( + self.reactor, + sync_hs_site, + "GET", + "/sync?since={}".format(next_batch), + access_token=access_token, ) self.render_on_worker(sync_hs, request) @@ -230,7 +238,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): response = self.helper.send(room_id2, body="Hi!", tok=self.other_access_token) first_event_in_room2 = response["event_id"] - request, channel = self.make_request( + request, channel = make_request( + self.reactor, + sync_hs_site, "GET", "/sync?since={}".format(vector_clock_token), access_token=access_token, @@ -254,8 +264,12 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): self.helper.send(room_id1, body="Hi again!", tok=self.other_access_token) self.helper.send(room_id2, body="Hi again!", tok=self.other_access_token) - request, channel = self.make_request( - "GET", "/sync?since={}".format(next_batch), access_token=access_token + request, channel = make_request( + self.reactor, + sync_hs_site, + "GET", + "/sync?since={}".format(next_batch), + access_token=access_token, ) self.render_on_worker(sync_hs, request) @@ -269,7 +283,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): # Paginating back in the first room should not produce any results, as # no events have happened in it. This tests that we are correctly # filtering results based on the vector clock portion. 
- request, channel = self.make_request( + request, channel = make_request( + self.reactor, + sync_hs_site, "GET", "/rooms/{}/messages?from={}&to={}&dir=b".format( room_id1, prev_batch1, vector_clock_token @@ -281,7 +297,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): # Paginating back on the second room should produce the first event # again. This tests that pagination isn't completely broken. - request, channel = self.make_request( + request, channel = make_request( + self.reactor, + sync_hs_site, "GET", "/rooms/{}/messages?from={}&to={}&dir=b".format( room_id2, prev_batch2, vector_clock_token @@ -295,7 +313,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): ) # Paginating forwards should give the same results - request, channel = self.make_request( + request, channel = make_request( + self.reactor, + sync_hs_site, "GET", "/rooms/{}/messages?from={}&to={}&dir=f".format( room_id1, vector_clock_token, prev_batch1 @@ -305,7 +325,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): self.render_on_worker(sync_hs, request) self.assertListEqual([], channel.json_body["chunk"]) - request, channel = self.make_request( + request, channel = make_request( + self.reactor, + sync_hs_site, "GET", "/rooms/{}/messages?from={}&to={}&dir=f".format( room_id2, vector_clock_token, prev_batch2, diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 0f1144fe1e..64b6016729 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -30,6 +30,7 @@ from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import groups from tests import unittest +from tests.server import FakeSite, make_request class VersionTestCase(unittest.HomeserverTestCase): @@ -222,8 +223,13 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): def _ensure_quarantined(self, admin_user_tok, server_and_media_id): """Ensure a piece of media is quarantined when trying to access it.""" - request, channel = self.make_request( - "GET", server_and_media_id, shorthand=False, access_token=admin_user_tok, + request, channel = make_request( + self.reactor, + FakeSite(self.download_resource), + "GET", + server_and_media_id, + shorthand=False, + access_token=admin_user_tok, ) request.render(self.download_resource) self.pump(1.0) @@ -287,7 +293,9 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): server_name, media_id = server_name_and_media_id.split("/") # Attempt to access the media - request, channel = self.make_request( + request, channel = make_request( + self.reactor, + FakeSite(self.download_resource), "GET", server_name_and_media_id, shorthand=False, @@ -462,7 +470,9 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): self._ensure_quarantined(admin_user_tok, server_and_media_id_1) # Attempt to access each piece of media - request, channel = self.make_request( + request, channel = make_request( + self.reactor, + FakeSite(self.download_resource), "GET", server_and_media_id_2, shorthand=False, diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 721fa1ed51..36e07f1b36 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -23,6 +23,7 @@ from synapse.rest.client.v1 import login, profile, room from synapse.rest.media.v1.filepath import MediaFilePaths from tests import unittest +from tests.server import FakeSite, make_request class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): @@ -124,7 +125,9 @@ class 
DeleteMediaByIDTestCase(unittest.HomeserverTestCase): self.assertEqual(server_name, self.server_name) # Attempt to access media - request, channel = self.make_request( + request, channel = make_request( + self.reactor, + FakeSite(download_resource), "GET", server_and_media_id, shorthand=False, @@ -161,7 +164,9 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): ) # Attempt to access media - request, channel = self.make_request( + request, channel = make_request( + self.reactor, + FakeSite(download_resource), "GET", server_and_media_id, shorthand=False, @@ -535,7 +540,9 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): media_id = server_and_media_id.split("/")[1] local_path = self.filepaths.local_media_filepath(media_id) - request, channel = self.make_request( + request, channel = make_request( + self.reactor, + FakeSite(download_resource), "GET", server_and_media_id, shorthand=False, diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py index 6803b372ac..2931859f25 100644 --- a/tests/rest/client/test_consent.py +++ b/tests/rest/client/test_consent.py @@ -21,7 +21,7 @@ from synapse.rest.client.v1 import login, room from synapse.rest.consent import consent_resource from tests import unittest -from tests.server import render +from tests.server import FakeSite, make_request, render class ConsentResourceTestCase(unittest.HomeserverTestCase): @@ -61,7 +61,9 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): def test_render_public_consent(self): """You can observe the terms form without specifying a user""" resource = consent_resource.ConsentResource(self.hs) - request, channel = self.make_request("GET", "/consent?v=1", shorthand=False) + request, channel = make_request( + self.reactor, FakeSite(resource), "GET", "/consent?v=1", shorthand=False + ) render(request, resource, self.reactor) self.assertEqual(channel.code, 200) @@ -81,8 +83,13 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): uri_builder.build_user_consent_uri(user_id).replace("_matrix/", "") + "&u=user" ) - request, channel = self.make_request( - "GET", consent_uri, access_token=access_token, shorthand=False + request, channel = make_request( + self.reactor, + FakeSite(resource), + "GET", + consent_uri, + access_token=access_token, + shorthand=False, ) render(request, resource, self.reactor) self.assertEqual(channel.code, 200) @@ -92,7 +99,9 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): self.assertEqual(consented, "False") # POST to the consent page, saying we've agreed - request, channel = self.make_request( + request, channel = make_request( + self.reactor, + FakeSite(resource), "POST", consent_uri + "&v=" + version, access_token=access_token, @@ -103,8 +112,13 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): # Fetch the consent page, to get the consent version -- it should have # changed - request, channel = self.make_request( - "GET", consent_uri, access_token=access_token, shorthand=False + request, channel = make_request( + self.reactor, + FakeSite(resource), + "GET", + consent_uri, + access_token=access_token, + shorthand=False, ) render(request, resource, self.reactor) self.assertEqual(channel.code, 200) diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index 66ac4dbe85..94a627b0a6 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -31,6 +31,7 @@ from synapse.rest.client.v2_alpha import account, register 
from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource from tests import unittest +from tests.server import FakeSite, make_request from tests.unittest import override_config @@ -255,9 +256,16 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): path = link.replace("https://example.com", "") # Load the password reset confirmation page - request, channel = self.make_request("GET", path, shorthand=False) + request, channel = make_request( + self.reactor, + FakeSite(self.submit_token_resource), + "GET", + path, + shorthand=False, + ) request.render(self.submit_token_resource) self.pump() + self.assertEquals(200, channel.code, channel.result) # Now POST to the same endpoint, mimicking the same behaviour as clicking the @@ -271,7 +279,9 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): form_args.append(arg) # Confirm the password reset - request, channel = self.make_request( + request, channel = make_request( + self.reactor, + FakeSite(self.submit_token_resource), "POST", path, content=urlencode(form_args).encode("utf8"), diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 5f897d49cf..0fd31a0096 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -36,6 +36,7 @@ from synapse.rest.media.v1.media_storage import MediaStorage from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend from tests import unittest +from tests.server import FakeSite, make_request class MediaStorageTests(unittest.HomeserverTestCase): @@ -227,7 +228,13 @@ class MediaRepoTests(unittest.HomeserverTestCase): def _req(self, content_disposition): - request, channel = self.make_request("GET", self.media_id, shorthand=False) + request, channel = make_request( + self.reactor, + FakeSite(self.download_resource), + "GET", + self.media_id, + shorthand=False, + ) request.render(self.download_resource) self.pump() @@ -317,8 +324,12 @@ class MediaRepoTests(unittest.HomeserverTestCase): def _test_thumbnail(self, method, expected_body, expected_found): params = "?width=32&height=32&method=" + method - request, channel = self.make_request( - "GET", self.media_id + params, shorthand=False + request, channel = make_request( + self.reactor, + FakeSite(self.thumbnail_resource), + "GET", + self.media_id + params, + shorthand=False, ) request.render(self.thumbnail_resource) self.pump() -- cgit 1.5.1 From ebc405446e6615d6187a2e29cb33f27dd5bd0841 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 16 Nov 2020 14:45:22 +0000 Subject: Add a `custom_headers` param to `make_request` (#8760) Some tests want to set some custom HTTP request headers, so provide a way to do that before calling requestReceived(). --- changelog.d/8760.misc | 1 + tests/rest/client/v1/utils.py | 10 ++++++---- tests/server.py | 11 ++++++++++- tests/storage/test_client_ips.py | 13 +++++++------ 4 files changed, 24 insertions(+), 11 deletions(-) create mode 100644 changelog.d/8760.misc (limited to 'tests') diff --git a/changelog.d/8760.misc b/changelog.d/8760.misc new file mode 100644 index 0000000000..54502e9b90 --- /dev/null +++ b/changelog.d/8760.misc @@ -0,0 +1 @@ +Refactor test utilities for injecting HTTP requests. 
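The `custom_headers` parameter accepts an iterable of (name, value) pairs, each of which `make_request` adds to the request via `requestHeaders.addRawHeader(name, value)` just before `requestReceived` fires, so tests no longer need to mutate the headers on the returned request object. A minimal caller-side sketch, mirroring the upload helper changed below (`path`, `image_data`, `image_length` and `tok` are illustrative):

    request, channel = make_request(
        self.hs.get_reactor(),
        "POST",
        path,
        content=image_data,
        access_token=tok,
        # (name, value) pairs; per the type hint, bytes or str both work
        custom_headers=[(b"Content-Length", str(image_length))],
    )
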
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index afaf9f7b85..1b2d0497a6 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -296,10 +296,12 @@ class RestHelper: image_length = len(image_data) path = "/_matrix/media/r0/upload?filename=%s" % (filename,) request, channel = make_request( - self.hs.get_reactor(), "POST", path, content=image_data, access_token=tok - ) - request.requestHeaders.addRawHeader( - b"Content-Length", str(image_length).encode("UTF-8") + self.hs.get_reactor(), + "POST", + path, + content=image_data, + access_token=tok, + custom_headers=[(b"Content-Length", str(image_length))], ) request.render(resource) self.hs.get_reactor().pump([100]) diff --git a/tests/server.py b/tests/server.py index 3dd2cfc072..ef03109a6c 100644 --- a/tests/server.py +++ b/tests/server.py @@ -2,7 +2,7 @@ import json import logging from collections import deque from io import SEEK_END, BytesIO -from typing import Callable +from typing import Callable, Iterable, Optional, Tuple, Union import attr from typing_extensions import Deque @@ -139,6 +139,9 @@ def make_request( shorthand=True, federation_auth_origin=None, content_is_form=False, + custom_headers: Optional[ + Iterable[Tuple[Union[bytes, str], Union[bytes, str]]] + ] = None, ): """ Make a web request using the given method and path, feed it the @@ -157,6 +160,8 @@ def make_request( content_is_form: Whether the content is URL encoded form data. Adds the 'Content-Type': 'application/x-www-form-urlencoded' header. + custom_headers: (name, value) pairs to add as request headers + Returns: Tuple[synapse.http.site.SynapseRequest, channel] """ @@ -211,6 +216,10 @@ def make_request( # Assume the body is JSON req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") + if custom_headers: + for k, v in custom_headers: + req.requestHeaders.addRawHeader(k, v) + req.requestReceived(method, path, b"1.1") return req, channel diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index e96ca1c8ca..efca43ec78 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -21,6 +21,7 @@ from synapse.http.site import XForwardedForRequest from synapse.rest.client.v1 import login from tests import unittest +from tests.server import make_request from tests.test_utils import make_awaitable from tests.unittest import override_config @@ -408,17 +409,17 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): # Advance to a known time self.reactor.advance(123456 - self.reactor.seconds()) - request, channel = self.make_request( + headers1 = {b"User-Agent": b"Mozzila pizza"} + headers1.update(headers) + + request, channel = make_request( + self.reactor, "GET", "/_matrix/client/r0/admin/users/" + self.user_id, access_token=access_token, + custom_headers=headers1.items(), **make_request_args, ) - request.requestHeaders.addRawHeader(b"User-Agent", b"Mozzila pizza") - - # Add the optional headers - for h, v in headers.items(): - request.requestHeaders.addRawHeader(h, v) self.render(request) # Advance so the save loop occurs -- cgit 1.5.1 From 791d7cd6f065e166576538b9cba3d90febf83ea4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 16 Nov 2020 14:45:52 +0000 Subject: Rename `create_test_json_resource` to `create_test_resource` (#8759) The root resource isn't necessarily a JsonResource, so rename this method accordingly, and update a couple of test classes to use the method rather than directly 
manipulating self.resource. --- changelog.d/8759.misc | 1 + tests/replication/_base.py | 4 ++-- tests/rest/admin/test_admin.py | 2 +- tests/rest/key/v2/test_remote_key_resource.py | 2 +- tests/rest/test_health.py | 6 ++---- tests/rest/test_well_known.py | 6 ++---- tests/unittest.py | 18 +++++++----------- 7 files changed, 16 insertions(+), 23 deletions(-) create mode 100644 changelog.d/8759.misc (limited to 'tests') diff --git a/changelog.d/8759.misc b/changelog.d/8759.misc new file mode 100644 index 0000000000..54502e9b90 --- /dev/null +++ b/changelog.d/8759.misc @@ -0,0 +1 @@ +Refactor test utilities for injecting HTTP requests. diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 5c633ac6df..bc56b13dcd 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -240,8 +240,8 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): lambda: self._handle_http_replication_attempt(self.hs, 8765), ) - def create_test_json_resource(self): - """Overrides `HomeserverTestCase.create_test_json_resource`. + def create_test_resource(self): + """Overrides `HomeserverTestCase.create_test_resource`. """ # We override this so that it automatically registers all the HTTP # replication servlets, without having to explicitly do that in all diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 0f1144fe1e..6804f9337f 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -35,7 +35,7 @@ from tests import unittest class VersionTestCase(unittest.HomeserverTestCase): url = "/_synapse/admin/v1/server_version" - def create_test_json_resource(self): + def create_test_resource(self): resource = JsonResource(self.hs) VersionServlet(self.hs).register(resource) return resource diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py index 6850c666be..6671cbd32d 100644 --- a/tests/rest/key/v2/test_remote_key_resource.py +++ b/tests/rest/key/v2/test_remote_key_resource.py @@ -41,7 +41,7 @@ class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase): self.http_client = Mock() return self.setup_test_homeserver(http_client=self.http_client) - def create_test_json_resource(self): + def create_test_resource(self): return create_resource_tree( {"/_matrix/key/v2": KeyApiV2Resource(self.hs)}, root_resource=NoResource() ) diff --git a/tests/rest/test_health.py b/tests/rest/test_health.py index 2d021f6565..f4d06e2200 100644 --- a/tests/rest/test_health.py +++ b/tests/rest/test_health.py @@ -20,11 +20,9 @@ from tests import unittest class HealthCheckTests(unittest.HomeserverTestCase): - def setUp(self): - super().setUp() - + def create_test_resource(self): # replace the JsonResource with a HealthResource. 
- self.resource = HealthResource() + return HealthResource() def test_health(self): request, channel = self.make_request("GET", "/health", shorthand=False) diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index dcd65c2a50..a3746e7130 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -20,11 +20,9 @@ from tests import unittest class WellKnownTests(unittest.HomeserverTestCase): - def setUp(self): - super().setUp() - + def create_test_resource(self): # replace the JsonResource with a WellKnownResource - self.resource = WellKnownResource(self.hs) + return WellKnownResource(self.hs) def test_well_known(self): self.hs.config.public_baseurl = "https://tesths" diff --git a/tests/unittest.py b/tests/unittest.py index e36ac89196..c630760e51 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -30,6 +30,7 @@ from twisted.internet.defer import Deferred, ensureDeferred, succeed from twisted.python.failure import Failure from twisted.python.threadpool import ThreadPool from twisted.trial import unittest +from twisted.web.resource import Resource from synapse.api.constants import EventTypes, Membership from synapse.config.homeserver import HomeServerConfig @@ -239,10 +240,8 @@ class HomeserverTestCase(TestCase): if not isinstance(self.hs, HomeServer): raise Exception("A homeserver wasn't returned, but %r" % (self.hs,)) - # Register the resources - self.resource = self.create_test_json_resource() - - # create a site to wrap the resource. + # create the root resource, and a site to wrap it. + self.resource = self.create_test_resource() self.site = SynapseSite( logger_name="synapse.access.http.fake", site_tag=self.hs.config.server.server_name, @@ -323,15 +322,12 @@ class HomeserverTestCase(TestCase): hs = self.setup_test_homeserver() return hs - def create_test_json_resource(self): + def create_test_resource(self) -> Resource: """ - Create a test JsonResource, with the relevant servlets registerd to it - - The default implementation calls each function in `servlets` to do the - registration. + Create a the root resource for the test server. - Returns: - JsonResource: + The default implementation creates a JsonResource and calls each function in + `servlets` to register servletes against it """ resource = JsonResource(self.hs) -- cgit 1.5.1 From c3e3552ec4f677ff51e784f5ac28823eea989ec4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 16 Nov 2020 15:51:47 +0000 Subject: fixup test --- tests/storage/test_client_ips.py | 1 + 1 file changed, 1 insertion(+) (limited to 'tests') diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index efca43ec78..583addb5b5 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -414,6 +414,7 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): request, channel = make_request( self.reactor, + self.site, "GET", "/_matrix/client/r0/admin/users/" + self.user_id, access_token=access_token, -- cgit 1.5.1 From f125895475aeee9447f3988ecbd8bfd1836545bf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 16 Nov 2020 18:21:47 +0000 Subject: Move `wait_until_result` into `FakeChannel` (#8758) FakeChannel has everything we need, and this more accurately models the real flow. 
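With the wait logic living on the channel, callers that previously imported the module-level helper can poll the channel they already hold: the loop runs the reactor, nudges any registered producer, and raises `TimedOutException` after a fixed number of iterations. The call-site change, in the shape used by the remote-key tests below:

    # before: module-level helper, passed the reactor and the request
    wait_until_result(self.reactor, req)

    # after: the FakeChannel waits on itself
    channel.await_result()
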
--- changelog.d/8758.misc | 1 + tests/rest/key/v2/test_remote_key_resource.py | 6 ++-- tests/server.py | 42 +++++++++++++-------------- 3 files changed, 24 insertions(+), 25 deletions(-) create mode 100644 changelog.d/8758.misc (limited to 'tests') diff --git a/changelog.d/8758.misc b/changelog.d/8758.misc new file mode 100644 index 0000000000..54502e9b90 --- /dev/null +++ b/changelog.d/8758.misc @@ -0,0 +1 @@ +Refactor test utilities for injecting HTTP requests. diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py index 6671cbd32d..fbcf8d5b86 100644 --- a/tests/rest/key/v2/test_remote_key_resource.py +++ b/tests/rest/key/v2/test_remote_key_resource.py @@ -32,7 +32,7 @@ from synapse.util.httpresourcetree import create_resource_tree from synapse.util.stringutils import random_string from tests import unittest -from tests.server import FakeChannel, wait_until_result +from tests.server import FakeChannel from tests.utils import default_config @@ -94,7 +94,7 @@ class RemoteKeyResourceTestCase(BaseRemoteKeyResourceTestCase): % (server_name.encode("utf-8"), key_id.encode("utf-8")), b"1.1", ) - wait_until_result(self.reactor, req) + channel.await_result() self.assertEqual(channel.code, 200) resp = channel.json_body return resp @@ -190,7 +190,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): req.requestReceived( b"POST", path.encode("utf-8"), b"1.1", ) - wait_until_result(self.reactor, req) + channel.await_result() self.assertEqual(channel.code, 200) resp = channel.json_body return resp diff --git a/tests/server.py b/tests/server.py index ef03109a6c..18cb8b2d72 100644 --- a/tests/server.py +++ b/tests/server.py @@ -117,6 +117,25 @@ class FakeChannel: def transport(self): return self + def await_result(self, timeout: int = 100) -> None: + """ + Wait until the request is finished. + """ + self._reactor.run() + x = 0 + + while not self.result.get("done"): + # If there's a producer, tell it to resume producing so we get content + if self._producer: + self._producer.resumeProducing() + + x += 1 + + if x > timeout: + raise TimedOutException("Timed out waiting for request to finish.") + + self._reactor.advance(0.1) + class FakeSite: """ @@ -225,30 +244,9 @@ def make_request( return req, channel -def wait_until_result(clock, request, timeout=100): - """ - Wait until the request is finished. 
- """ - clock.run() - x = 0 - - while not request.finished: - - # If there's a producer, tell it to resume producing so we get content - if request._channel._producer: - request._channel._producer.resumeProducing() - - x += 1 - - if x > timeout: - raise TimedOutException("Timed out waiting for request to finish.") - - clock.advance(0.1) - - def render(request, resource, clock): request.render(resource) - wait_until_result(clock, request) + request._channel.await_result() @implementer(IReactorPluggableNameResolver) -- cgit 1.5.1 From 1f41422c98967c969fe8fc093c04308164bde594 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sat, 14 Nov 2020 01:29:16 +0000 Subject: Fix the URL in the URL preview tests the preview resource is mointed at preview_url, not url_preview --- tests/rest/media/v1/test_url_preview.py | 41 ++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 19 deletions(-) (limited to 'tests') diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index c00a7b9114..e00ad61231 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -133,11 +133,14 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.reactor.nameResolver = Resolver() + def create_test_resource(self): + return self.hs.get_media_repository_resource() + def test_cache_returns_correct_type(self): self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] request, channel = self.make_request( - "GET", "url_preview?url=http://matrix.org", shorthand=False + "GET", "preview_url?url=http://matrix.org", shorthand=False, ) request.render(self.preview_url) self.pump() @@ -160,7 +163,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Check the cache returns the correct response request, channel = self.make_request( - "GET", "url_preview?url=http://matrix.org", shorthand=False + "GET", "preview_url?url=http://matrix.org", shorthand=False ) request.render(self.preview_url) self.pump() @@ -178,7 +181,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Check the database cache returns the correct response request, channel = self.make_request( - "GET", "url_preview?url=http://matrix.org", shorthand=False + "GET", "preview_url?url=http://matrix.org", shorthand=False ) request.render(self.preview_url) self.pump() @@ -201,7 +204,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) request, channel = self.make_request( - "GET", "url_preview?url=http://matrix.org", shorthand=False + "GET", "preview_url?url=http://matrix.org", shorthand=False, ) request.render(self.preview_url) self.pump() @@ -234,7 +237,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) request, channel = self.make_request( - "GET", "url_preview?url=http://matrix.org", shorthand=False + "GET", "preview_url?url=http://matrix.org", shorthand=False ) request.render(self.preview_url) self.pump() @@ -267,7 +270,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) request, channel = self.make_request( - "GET", "url_preview?url=http://matrix.org", shorthand=False + "GET", "preview_url?url=http://matrix.org", shorthand=False ) request.render(self.preview_url) self.pump() @@ -298,7 +301,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv4Address, "10.1.2.3")] request, channel = self.make_request( - "GET", "url_preview?url=http://example.com", shorthand=False + "GET", "preview_url?url=http://example.com", shorthand=False ) request.render(self.preview_url) self.pump() @@ -326,7 +329,7 @@ class 
URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv4Address, "192.168.1.1")] request, channel = self.make_request( - "GET", "url_preview?url=http://example.com", shorthand=False + "GET", "preview_url?url=http://example.com", shorthand=False ) request.render(self.preview_url) self.pump() @@ -349,7 +352,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv4Address, "1.1.1.2")] request, channel = self.make_request( - "GET", "url_preview?url=http://example.com", shorthand=False + "GET", "preview_url?url=http://example.com", shorthand=False ) request.render(self.preview_url) self.pump() @@ -368,7 +371,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): Blacklisted IP addresses, accessed directly, are not spidered. """ request, channel = self.make_request( - "GET", "url_preview?url=http://192.168.1.1", shorthand=False + "GET", "preview_url?url=http://192.168.1.1", shorthand=False ) request.render(self.preview_url) self.pump() @@ -389,7 +392,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): Blacklisted IP ranges, accessed directly, are not spidered. """ request, channel = self.make_request( - "GET", "url_preview?url=http://1.1.1.2", shorthand=False + "GET", "preview_url?url=http://1.1.1.2", shorthand=False ) request.render(self.preview_url) self.pump() @@ -411,7 +414,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv4Address, "1.1.1.1")] request, channel = self.make_request( - "GET", "url_preview?url=http://example.com", shorthand=False + "GET", "preview_url?url=http://example.com", shorthand=False ) request.render(self.preview_url) self.pump() @@ -446,7 +449,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): ] request, channel = self.make_request( - "GET", "url_preview?url=http://example.com", shorthand=False + "GET", "preview_url?url=http://example.com", shorthand=False ) request.render(self.preview_url) self.pump() @@ -468,7 +471,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): ] request, channel = self.make_request( - "GET", "url_preview?url=http://example.com", shorthand=False + "GET", "preview_url?url=http://example.com", shorthand=False ) request.render(self.preview_url) self.pump() @@ -491,7 +494,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv6Address, "2001:800::1")] request, channel = self.make_request( - "GET", "url_preview?url=http://example.com", shorthand=False + "GET", "preview_url?url=http://example.com", shorthand=False ) request.render(self.preview_url) self.pump() @@ -510,7 +513,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): OPTIONS returns the OPTIONS. 
""" request, channel = self.make_request( - "OPTIONS", "url_preview?url=http://example.com", shorthand=False + "OPTIONS", "preview_url?url=http://example.com", shorthand=False ) request.render(self.preview_url) self.pump() @@ -525,7 +528,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Build and make a request to the server request, channel = self.make_request( - "GET", "url_preview?url=http://example.com", shorthand=False + "GET", "preview_url?url=http://example.com", shorthand=False ) request.render(self.preview_url) self.pump() @@ -598,7 +601,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", - "url_preview?url=http://twitter.com/matrixdotorg/status/12345", + "preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, ) request.render(self.preview_url) @@ -663,7 +666,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", - "url_preview?url=http://twitter.com/matrixdotorg/status/12345", + "preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, ) request.render(self.preview_url) -- cgit 1.5.1 From 129ae841e5aebb34a980dd7d118140d08b0ff81d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sun, 15 Nov 2020 22:47:54 +0000 Subject: Make `make_request` actually render the request remove the stubbing out of `request.process`, so that `requestReceived` also renders the request via the appropriate resource. Replace render() with a stub for now. --- tests/replication/test_multi_media_repo.py | 4 +- tests/rest/admin/test_admin.py | 6 --- tests/rest/admin/test_media.py | 6 --- tests/rest/client/v1/utils.py | 4 +- tests/rest/client/v2_alpha/test_account.py | 4 -- tests/rest/media/v1/test_media_storage.py | 7 ++-- tests/rest/media/v1/test_url_preview.py | 66 ++++++++++++++---------------- tests/server.py | 20 +++++---- tests/unittest.py | 8 ++++ 9 files changed, 57 insertions(+), 68 deletions(-) (limited to 'tests') diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index a9ac4aeec1..48b574ccbe 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -68,15 +68,15 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): the media which the caller should respond to. 
""" resource = hs.get_media_repository_resource().children[b"download"] - request, channel = make_request( + _, channel = make_request( self.reactor, FakeSite(resource), "GET", "/{}/{}".format(target, media_id), shorthand=False, access_token=self.access_token, + await_result=False, ) - request.render(resource) self.pump() clients = self.reactor.tcpClients diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 9e4b0bca53..961a5732b3 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -231,8 +231,6 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): shorthand=False, access_token=admin_user_tok, ) - request.render(self.download_resource) - self.pump(1.0) # Should be quarantined self.assertEqual( @@ -301,8 +299,6 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): shorthand=False, access_token=non_admin_user_tok, ) - request.render(self.download_resource) - self.pump(1.0) # Should be successful self.assertEqual(200, int(channel.code), msg=channel.result["body"]) @@ -478,8 +474,6 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): shorthand=False, access_token=non_admin_user_tok, ) - request.render(self.download_resource) - self.pump(1.0) # Shouldn't be quarantined self.assertEqual( diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 36e07f1b36..64b7aa53ee 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -133,8 +133,6 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): shorthand=False, access_token=self.admin_user_tok, ) - request.render(download_resource) - self.pump(1.0) # Should be successful self.assertEqual( @@ -172,8 +170,6 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): shorthand=False, access_token=self.admin_user_tok, ) - request.render(download_resource) - self.pump(1.0) self.assertEqual( 404, channel.code, @@ -548,8 +544,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): shorthand=False, access_token=self.admin_user_tok, ) - request.render(download_resource) - self.pump(1.0) if expect_success: self.assertEqual( diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index 900852f85b..040a92d6f0 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -310,7 +310,7 @@ class RestHelper: """ image_length = len(image_data) path = "/_matrix/media/r0/upload?filename=%s" % (filename,) - request, channel = make_request( + _, channel = make_request( self.hs.get_reactor(), FakeSite(resource), "POST", @@ -319,8 +319,6 @@ class RestHelper: access_token=tok, custom_headers=[(b"Content-Length", str(image_length))], ) - request.render(resource) - self.hs.get_reactor().pump([100]) assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % ( expect_code, diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index 94a627b0a6..b871200909 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -263,8 +263,6 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): path, shorthand=False, ) - request.render(self.submit_token_resource) - self.pump() self.assertEquals(200, channel.code, channel.result) @@ -288,8 +286,6 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): shorthand=False, content_is_form=True, ) - request.render(self.submit_token_resource) - self.pump() self.assertEquals(200, channel.code, channel.result) def 
_get_link_from_email(self): diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 0fd31a0096..2a3b2a8f27 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -234,8 +234,8 @@ class MediaRepoTests(unittest.HomeserverTestCase): "GET", self.media_id, shorthand=False, + await_result=False, ) - request.render(self.download_resource) self.pump() # We've made one fetch, to example.com, using the media URL, and asking @@ -330,8 +330,8 @@ class MediaRepoTests(unittest.HomeserverTestCase): "GET", self.media_id + params, shorthand=False, + await_result=False, ) - request.render(self.thumbnail_resource) self.pump() headers = { @@ -359,7 +359,6 @@ class MediaRepoTests(unittest.HomeserverTestCase): channel.json_body, { "errcode": "M_NOT_FOUND", - "error": "Not found [b'example.com', b'12345?width=32&height=32&method=%s']" - % method, + "error": "Not found [b'example.com', b'12345']", }, ) diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index e00ad61231..ccdc8c2ecf 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -140,9 +140,11 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] request, channel = self.make_request( - "GET", "preview_url?url=http://matrix.org", shorthand=False, + "GET", + "preview_url?url=http://matrix.org", + shorthand=False, + await_result=False, ) - request.render(self.preview_url) self.pump() client = self.reactor.tcpClients[0][2].buildProtocol(None) @@ -165,8 +167,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "preview_url?url=http://matrix.org", shorthand=False ) - request.render(self.preview_url) - self.pump() # Check the cache response has the same content self.assertEqual(channel.code, 200) @@ -183,8 +183,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "preview_url?url=http://matrix.org", shorthand=False ) - request.render(self.preview_url) - self.pump() # Check the cache response has the same content self.assertEqual(channel.code, 200) @@ -204,9 +202,11 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) request, channel = self.make_request( - "GET", "preview_url?url=http://matrix.org", shorthand=False, + "GET", + "preview_url?url=http://matrix.org", + shorthand=False, + await_result=False, ) - request.render(self.preview_url) self.pump() client = self.reactor.tcpClients[0][2].buildProtocol(None) @@ -237,9 +237,11 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) request, channel = self.make_request( - "GET", "preview_url?url=http://matrix.org", shorthand=False + "GET", + "preview_url?url=http://matrix.org", + shorthand=False, + await_result=False, ) - request.render(self.preview_url) self.pump() client = self.reactor.tcpClients[0][2].buildProtocol(None) @@ -270,9 +272,11 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) request, channel = self.make_request( - "GET", "preview_url?url=http://matrix.org", shorthand=False + "GET", + "preview_url?url=http://matrix.org", + shorthand=False, + await_result=False, ) - request.render(self.preview_url) self.pump() client = self.reactor.tcpClients[0][2].buildProtocol(None) @@ -301,9 +305,11 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv4Address, "10.1.2.3")] request, channel = self.make_request( - "GET", 
"preview_url?url=http://example.com", shorthand=False + "GET", + "preview_url?url=http://example.com", + shorthand=False, + await_result=False, ) - request.render(self.preview_url) self.pump() client = self.reactor.tcpClients[0][2].buildProtocol(None) @@ -331,8 +337,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "preview_url?url=http://example.com", shorthand=False ) - request.render(self.preview_url) - self.pump() # No requests made. self.assertEqual(len(self.reactor.tcpClients), 0) @@ -354,8 +358,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "preview_url?url=http://example.com", shorthand=False ) - request.render(self.preview_url) - self.pump() self.assertEqual(channel.code, 502) self.assertEqual( @@ -373,8 +375,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "preview_url?url=http://192.168.1.1", shorthand=False ) - request.render(self.preview_url) - self.pump() # No requests made. self.assertEqual(len(self.reactor.tcpClients), 0) @@ -394,8 +394,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "preview_url?url=http://1.1.1.2", shorthand=False ) - request.render(self.preview_url) - self.pump() self.assertEqual(channel.code, 403) self.assertEqual( @@ -414,9 +412,11 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv4Address, "1.1.1.1")] request, channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "preview_url?url=http://example.com", + shorthand=False, + await_result=False, ) - request.render(self.preview_url) self.pump() client = self.reactor.tcpClients[0][2].buildProtocol(None) @@ -451,8 +451,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "preview_url?url=http://example.com", shorthand=False ) - request.render(self.preview_url) - self.pump() self.assertEqual(channel.code, 502) self.assertEqual( channel.json_body, @@ -473,8 +471,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "preview_url?url=http://example.com", shorthand=False ) - request.render(self.preview_url) - self.pump() # No requests made. 
self.assertEqual(len(self.reactor.tcpClients), 0) @@ -496,8 +492,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "preview_url?url=http://example.com", shorthand=False ) - request.render(self.preview_url) - self.pump() self.assertEqual(channel.code, 502) self.assertEqual( @@ -515,8 +509,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): request, channel = self.make_request( "OPTIONS", "preview_url?url=http://example.com", shorthand=False ) - request.render(self.preview_url) - self.pump() self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body, {}) @@ -528,9 +520,11 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Build and make a request to the server request, channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "preview_url?url=http://example.com", + shorthand=False, + await_result=False, ) - request.render(self.preview_url) self.pump() # Extract Synapse's tcp client @@ -603,8 +597,8 @@ class URLPreviewTests(unittest.HomeserverTestCase): "GET", "preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, + await_result=False, ) - request.render(self.preview_url) self.pump() client = self.reactor.tcpClients[0][2].buildProtocol(None) @@ -668,8 +662,8 @@ class URLPreviewTests(unittest.HomeserverTestCase): "GET", "preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, + await_result=False, ) - request.render(self.preview_url) self.pump() client = self.reactor.tcpClients[0][2].buildProtocol(None) diff --git a/tests/server.py b/tests/server.py index 5a1583a3e7..de7cb1d8b3 100644 --- a/tests/server.py +++ b/tests/server.py @@ -171,16 +171,18 @@ def make_request( shorthand=True, federation_auth_origin=None, content_is_form=False, + await_result: bool = True, custom_headers: Optional[ Iterable[Tuple[Union[bytes, str], Union[bytes, str]]] ] = None, ): """ - Make a web request using the given method and path, feed it the - content, and return the Request and the Channel underneath. + Make a web request using the given method, path and content, and render it. + + Returns the Request and the Channel underneath. Args: - site: The twisted Site to associate with the Channel + site: The twisted Site to use to render the request method (bytes/unicode): The HTTP request method ("verb"). path (bytes/unicode): The HTTP path, suitably URL encoded (e.g. @@ -196,6 +198,10 @@ def make_request( custom_headers: (name, value) pairs to add as request headers + await_result: whether to wait for the request to complete rendering. If true, + will pump the reactor until the renderer tells the channel the request + is finished. + Returns: Tuple[synapse.http.site.SynapseRequest, channel] """ @@ -225,11 +231,9 @@ def make_request( channel = FakeChannel(site, reactor) req = request(channel) - req.process = lambda: b"" req.content = BytesIO(content) # Twisted expects to be at the end of the content when parsing the request.
req.content.seek(SEEK_END) - req.postpath = list(map(unquote, path[1:].split(b"/"))) if access_token: req.requestHeaders.addRawHeader( @@ -257,12 +261,14 @@ def make_request( req.requestReceived(method, path, b"1.1") + if await_result: + channel.await_result() + return req, channel def render(request, resource, clock): - request.render(resource) - request._channel.await_result() + pass @implementer(IReactorPluggableNameResolver) diff --git a/tests/unittest.py b/tests/unittest.py index e39cb8dec9..9c7eca3b6e 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -377,6 +377,7 @@ class HomeserverTestCase(TestCase): shorthand: bool = True, federation_auth_origin: str = None, content_is_form: bool = False, + await_result: bool = True, ) -> Tuple[SynapseRequest, FakeChannel]: ... @@ -391,6 +392,7 @@ class HomeserverTestCase(TestCase): shorthand: bool = True, federation_auth_origin: str = None, content_is_form: bool = False, + await_result: bool = True, ) -> Tuple[T, FakeChannel]: ... @@ -404,6 +406,7 @@ class HomeserverTestCase(TestCase): shorthand: bool = True, federation_auth_origin: str = None, content_is_form: bool = False, + await_result: bool = True, ) -> Tuple[T, FakeChannel]: """ Create a SynapseRequest at the path using the method and containing the @@ -422,6 +425,10 @@ class HomeserverTestCase(TestCase): content_is_form: Whether the content is URL encoded form data. Adds the 'Content-Type': 'application/x-www-form-urlencoded' header. + await_result: whether to wait for the request to complete rendering. If + true (the default), will pump the test reactor until the renderer + tells the channel the request is finished. + Returns: Tuple[synapse.http.site.SynapseRequest, channel] """ @@ -436,6 +443,7 @@ class HomeserverTestCase(TestCase): shorthand, federation_auth_origin, content_is_form, + await_result, ) def render(self, request): -- cgit 1.5.1 From be8fa65d0baddcc0a64954e21d38a854e4ee00d7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sun, 15 Nov 2020 22:49:21 +0000 Subject: Remove redundant calls to `render()` --- tests/app/test_frontend_proxy.py | 10 ++--- tests/app/test_openid_listener.py | 12 +++--- tests/http/test_additional_resource.py | 4 +- tests/replication/_base.py | 5 +-- tests/replication/test_client_reader_shard.py | 4 -- tests/replication/test_sharded_event_persister.py | 8 ---- tests/rest/client/test_consent.py | 6 +-- tests/rest/client/v1/utils.py | 16 +++----- tests/rest/client/v2_alpha/test_sync.py | 6 +-- tests/server.py | 5 --- tests/storage/test_client_ips.py | 3 +- tests/test_server.py | 49 +++++------------------ tests/unittest.py | 10 +---- 13 files changed, 32 insertions(+), 106 deletions(-) (limited to 'tests') diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py index 0bac7995e8..40abe9d72d 100644 --- a/tests/app/test_frontend_proxy.py +++ b/tests/app/test_frontend_proxy.py @@ -15,7 +15,7 @@ from synapse.app.generic_worker import GenericWorkerServer -from tests.server import make_request, render +from tests.server import make_request from tests.unittest import HomeserverTestCase @@ -56,10 +56,8 @@ class FrontendProxyTests(HomeserverTestCase): # Grab the resource from the site that was told to listen self.assertEqual(len(self.reactor.tcpServers), 1) site = self.reactor.tcpServers[0][1] - resource = site.resource.children[b"_matrix"].children[b"client"] - request, channel = make_request(self.reactor, site, "PUT", "presence/a/status") - render(request, resource, self.reactor) + _, channel =
make_request(self.reactor, site, "PUT", "presence/a/status") # 400 + unrecognised, because nothing is registered self.assertEqual(channel.code, 400) @@ -78,10 +76,8 @@ class FrontendProxyTests(HomeserverTestCase): # Grab the resource from the site that was told to listen self.assertEqual(len(self.reactor.tcpServers), 1) site = self.reactor.tcpServers[0][1] - resource = site.resource.children[b"_matrix"].children[b"client"] - request, channel = make_request(self.reactor, site, "PUT", "presence/a/status") - render(request, resource, self.reactor) + _, channel = make_request(self.reactor, site, "PUT", "presence/a/status") # 401, because the stub servlet still checks authentication self.assertEqual(channel.code, 401) diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 1292145890..ea3be95cf1 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -20,7 +20,7 @@ from synapse.app.generic_worker import GenericWorkerServer from synapse.app.homeserver import SynapseHomeServer from synapse.config.server import parse_listener_def -from tests.server import make_request, render +from tests.server import make_request from tests.unittest import HomeserverTestCase @@ -67,16 +67,15 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] try: - resource = site.resource.children[b"_matrix"].children[b"federation"] + site.resource.children[b"_matrix"].children[b"federation"] except KeyError: if expectation == "no_resource": return raise - request, channel = make_request( + _, channel = make_request( self.reactor, site, "GET", "/_matrix/federation/v1/openid/userinfo" ) - render(request, resource, self.reactor) self.assertEqual(channel.code, 401) @@ -116,15 +115,14 @@ class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] try: - resource = site.resource.children[b"_matrix"].children[b"federation"] + site.resource.children[b"_matrix"].children[b"federation"] except KeyError: if expectation == "no_resource": return raise - request, channel = make_request( + _, channel = make_request( self.reactor, site, "GET", "/_matrix/federation/v1/openid/userinfo" ) - render(request, resource, self.reactor) self.assertEqual(channel.code, 401) diff --git a/tests/http/test_additional_resource.py b/tests/http/test_additional_resource.py index e835512a41..05e9c449be 100644 --- a/tests/http/test_additional_resource.py +++ b/tests/http/test_additional_resource.py @@ -17,7 +17,7 @@ from synapse.http.additional_resource import AdditionalResource from synapse.http.server import respond_with_json -from tests.server import FakeSite, make_request, render +from tests.server import FakeSite, make_request from tests.unittest import HomeserverTestCase @@ -47,7 +47,6 @@ class AdditionalResourceTests(HomeserverTestCase): resource = AdditionalResource(self.hs, handler) request, channel = make_request(self.reactor, FakeSite(resource), "GET", "/") - render(request, resource, self.reactor) self.assertEqual(request.code, 200) self.assertEqual(channel.json_body, {"some_key": "some_value_async"}) @@ -57,7 +56,6 @@ class AdditionalResourceTests(HomeserverTestCase): resource = AdditionalResource(self.hs, handler) request, channel = make_request(self.reactor, FakeSite(resource), "GET", "/") - render(request, resource, self.reactor) self.assertEqual(request.code, 200) 
self.assertEqual(channel.json_body, {"some_key": "some_value_sync"}) diff --git a/tests/replication/_base.py b/tests/replication/_base.py index bc56b13dcd..516db4c30a 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -36,7 +36,7 @@ from synapse.server import HomeServer from synapse.util import Clock from tests import unittest -from tests.server import FakeTransport, render +from tests.server import FakeTransport try: import hiredis @@ -347,9 +347,6 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): config["worker_replication_http_port"] = "8765" return config - def render_on_worker(self, worker_hs: HomeServer, request: SynapseRequest): - render(request, self._hs_to_site[worker_hs].resource, self.reactor) - def replicate(self): """Tell the master side of replication that something has happened, and then wait for the replication to occur. diff --git a/tests/replication/test_client_reader_shard.py b/tests/replication/test_client_reader_shard.py index 90172bd377..96801db473 100644 --- a/tests/replication/test_client_reader_shard.py +++ b/tests/replication/test_client_reader_shard.py @@ -55,7 +55,6 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase): "register", {"username": "user", "type": "m.login.password", "password": "bar"}, ) # type: SynapseRequest, FakeChannel - self.render_on_worker(worker_hs, request_1) self.assertEqual(request_1.code, 401) # Grab the session @@ -69,7 +68,6 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase): "register", {"auth": {"session": session, "type": "m.login.dummy"}}, ) # type: SynapseRequest, FakeChannel - self.render_on_worker(worker_hs, request_2) self.assertEqual(request_2.code, 200) # We're given a registered user. @@ -89,7 +87,6 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase): "register", {"username": "user", "type": "m.login.password", "password": "bar"}, ) # type: SynapseRequest, FakeChannel - self.render_on_worker(worker_hs_1, request_1) self.assertEqual(request_1.code, 401) # Grab the session @@ -104,7 +101,6 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase): "register", {"auth": {"session": session, "type": "m.login.dummy"}}, ) # type: SynapseRequest, FakeChannel - self.render_on_worker(worker_hs_2, request_2) self.assertEqual(request_2.code, 200) # We're given a registered user. 
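The client_reader hunks above show the shape of this refactor for worker tests: with `render_on_worker` removed, `make_request` both builds the request against a worker's site and pumps the reactor until the response is ready. A minimal sketch of the resulting pattern, assuming a `BaseMultiWorkerStreamTestCase` subclass in which `make_worker_hs` and the `_hs_to_site` map are available as in `tests/replication/_base.py` (the worker app name and registration body here are illustrative, not taken from any one test):

    def test_register_on_worker(self):
        # Start a separate client_reader worker and fetch the fake site
        # that make_request should render against.
        worker_hs = self.make_worker_hs("synapse.app.client_reader")
        site = self._hs_to_site[worker_hs]

        # make_request now renders the request itself (await_result
        # defaults to True), so no separate render_on_worker() step.
        request, channel = make_request(
            self.reactor,
            site,
            "POST",
            "register",
            {"username": "user", "type": "m.login.password", "password": "bar"},
        )

        # User-interactive auth: the first hit returns a 401 with a session.
        self.assertEqual(request.code, 401)
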
diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py index 2820dd622f..77fc3856d5 100644 --- a/tests/replication/test_sharded_event_persister.py +++ b/tests/replication/test_sharded_event_persister.py @@ -183,7 +183,6 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): request, channel = make_request( self.reactor, sync_hs_site, "GET", "/sync", access_token=access_token ) - self.render_on_worker(sync_hs, request) next_batch = channel.json_body["next_batch"] # We now gut wrench into the events stream MultiWriterIdGenerator on @@ -214,7 +213,6 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): "/sync?since={}".format(next_batch), access_token=access_token, ) - self.render_on_worker(sync_hs, request) # We should only see the new event and nothing else self.assertIn(room_id1, channel.json_body["rooms"]["join"]) @@ -245,7 +243,6 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): "/sync?since={}".format(vector_clock_token), access_token=access_token, ) - self.render_on_worker(sync_hs, request) self.assertNotIn(room_id1, channel.json_body["rooms"]["join"]) self.assertIn(room_id2, channel.json_body["rooms"]["join"]) @@ -271,7 +268,6 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): "/sync?since={}".format(next_batch), access_token=access_token, ) - self.render_on_worker(sync_hs, request) prev_batch1 = channel.json_body["rooms"]["join"][room_id1]["timeline"][ "prev_batch" @@ -292,7 +288,6 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): ), access_token=access_token, ) - self.render_on_worker(sync_hs, request) self.assertListEqual([], channel.json_body["chunk"]) # Paginating back on the second room should produce the first event @@ -306,7 +301,6 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): ), access_token=access_token, ) - self.render_on_worker(sync_hs, request) self.assertEqual(len(channel.json_body["chunk"]), 1) self.assertEqual( channel.json_body["chunk"][0]["event_id"], first_event_in_room2 @@ -322,7 +316,6 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): ), access_token=access_token, ) - self.render_on_worker(sync_hs, request) self.assertListEqual([], channel.json_body["chunk"]) request, channel = make_request( @@ -334,7 +327,6 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): ), access_token=access_token, ) - self.render_on_worker(sync_hs, request) self.assertEqual(len(channel.json_body["chunk"]), 1) self.assertEqual( channel.json_body["chunk"][0]["event_id"], first_event_in_room2 diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py index 2931859f25..e2e6a5e16d 100644 --- a/tests/rest/client/test_consent.py +++ b/tests/rest/client/test_consent.py @@ -21,7 +21,7 @@ from synapse.rest.client.v1 import login, room from synapse.rest.consent import consent_resource from tests import unittest -from tests.server import FakeSite, make_request, render +from tests.server import FakeSite, make_request class ConsentResourceTestCase(unittest.HomeserverTestCase): @@ -64,7 +64,6 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): request, channel = make_request( self.reactor, FakeSite(resource), "GET", "/consent?v=1", shorthand=False ) - render(request, resource, self.reactor) self.assertEqual(channel.code, 200) def test_accept_consent(self): @@ -91,7 +90,6 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): access_token=access_token, 
shorthand=False, ) - render(request, resource, self.reactor) self.assertEqual(channel.code, 200) # Get the version from the body, and whether we've consented @@ -107,7 +105,6 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): access_token=access_token, shorthand=False, ) - render(request, resource, self.reactor) self.assertEqual(channel.code, 200) # Fetch the consent page, to get the consent version -- it should have @@ -120,7 +117,6 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): access_token=access_token, shorthand=False, ) - render(request, resource, self.reactor) self.assertEqual(channel.code, 200) # Get the version from the body, and check that it's the version we diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index 040a92d6f0..b58768675b 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -27,7 +27,7 @@ from twisted.web.server import Site from synapse.api.constants import Membership -from tests.server import FakeSite, make_request, render +from tests.server import FakeSite, make_request @attr.s @@ -52,14 +52,13 @@ class RestHelper: if tok: path = path + "?access_token=%s" % tok - request, channel = make_request( + _, channel = make_request( self.hs.get_reactor(), self.site, "POST", path, json.dumps(content).encode("utf8"), ) - render(request, self.site.resource, self.hs.get_reactor()) assert channel.result["code"] == b"%d" % expect_code, channel.result self.auth_user_id = temp_id @@ -129,7 +128,7 @@ class RestHelper: data = {"membership": membership} data.update(extra_data) - request, channel = make_request( + _, channel = make_request( self.hs.get_reactor(), self.site, "PUT", @@ -137,8 +136,6 @@ class RestHelper: json.dumps(data).encode("utf8"), ) - render(request, self.site.resource, self.hs.get_reactor()) - assert int(channel.result["code"]) == expect_code, ( "Expected: %d, got: %d, resp: %r" % (expect_code, int(channel.result["code"]), channel.result["body"]) @@ -166,14 +163,13 @@ class RestHelper: if tok: path = path + "?access_token=%s" % tok - request, channel = make_request( + _, channel = make_request( self.hs.get_reactor(), self.site, "PUT", path, json.dumps(content).encode("utf8"), ) - render(request, self.site.resource, self.hs.get_reactor()) assert int(channel.result["code"]) == expect_code, ( "Expected: %d, got: %d, resp: %r" @@ -223,12 +219,10 @@ class RestHelper: if body is not None: content = json.dumps(body).encode("utf8") - request, channel = make_request( + _, channel = make_request( self.hs.get_reactor(), self.site, method, path, content ) - render(request, self.site.resource, self.hs.get_reactor()) - assert int(channel.result["code"]) == expect_code, ( "Expected: %d, got: %d, resp: %r" % (expect_code, int(channel.result["code"]), channel.result["body"]) diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index a31e44c97e..f74d611943 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -320,10 +320,8 @@ class SyncTypingTests(unittest.HomeserverTestCase): typing._reset() # Now it SHOULD fail as it never completes! 
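# Context for the hunk below: make_request() now renders the request itself,
# pumping the reactor until the channel reports completion when await_result
# is True (the default). A /sync long-poll that never completes therefore
# raises TimedOutException inside make_request itself, so the assertion has
# to wrap the make_request call rather than a later render() step. A sketch
# of the resulting idiom (sync_url, access_token and next_batch are the
# values set up earlier in this test):
#
#     with self.assertRaises(TimedOutException):
#         self.make_request("GET", sync_url % (access_token, next_batch))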
- request, channel = self.make_request( - "GET", sync_url % (access_token, next_batch) - ) - self.assertRaises(TimedOutException, self.render, request) + with self.assertRaises(TimedOutException): + self.make_request("GET", sync_url % (access_token, next_batch)) class UnreadMessagesTestCase(unittest.HomeserverTestCase): diff --git a/tests/server.py b/tests/server.py index de7cb1d8b3..a51ad0c14e 100644 --- a/tests/server.py +++ b/tests/server.py @@ -19,7 +19,6 @@ from twisted.internet.interfaces import ( ) from twisted.python.failure import Failure from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock -from twisted.web.http import unquote from twisted.web.http_headers import Headers from twisted.web.resource import IResource from twisted.web.server import Site @@ -267,10 +266,6 @@ def make_request( return req, channel -def render(request, resource, clock): - pass - - @implementer(IReactorPluggableNameResolver) class ThreadedMemoryReactorClock(MemoryReactorClock): """ diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 583addb5b5..6bdde1a2ba 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -412,7 +412,7 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): headers1 = {b"User-Agent": b"Mozzila pizza"} headers1.update(headers) - request, channel = make_request( + make_request( self.reactor, self.site, "GET", @@ -421,7 +421,6 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): custom_headers=headers1.items(), **make_request_args, ) - self.render(request) # Advance so the save loop occurs self.reactor.advance(100) diff --git a/tests/test_server.py b/tests/test_server.py index 300d13ac95..c387a85f2e 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -29,7 +29,6 @@ from tests.server import ( FakeSite, ThreadedMemoryReactorClock, make_request, - render, setup_test_homeserver, ) @@ -65,7 +64,6 @@ class JsonResourceTests(unittest.TestCase): request, channel = make_request( self.reactor, FakeSite(res), b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83" ) - render(request, res, self.reactor) self.assertEqual(request.args, {b"a": ["\N{SNOWMAN}".encode("utf8")]}) self.assertEqual(got_kwargs, {"room_id": "\N{SNOWMAN}"}) @@ -84,10 +82,7 @@ class JsonResourceTests(unittest.TestCase): "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" ) - request, channel = make_request( - self.reactor, FakeSite(res), b"GET", b"/_matrix/foo" - ) - render(request, res, self.reactor) + _, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/_matrix/foo") self.assertEqual(channel.result["code"], b"500") @@ -111,10 +106,7 @@ class JsonResourceTests(unittest.TestCase): "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" ) - request, channel = make_request( - self.reactor, FakeSite(res), b"GET", b"/_matrix/foo" - ) - render(request, res, self.reactor) + _, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/_matrix/foo") self.assertEqual(channel.result["code"], b"500") @@ -132,10 +124,7 @@ class JsonResourceTests(unittest.TestCase): "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" ) - request, channel = make_request( - self.reactor, FakeSite(res), b"GET", b"/_matrix/foo" - ) - render(request, res, self.reactor) + _, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/_matrix/foo") self.assertEqual(channel.result["code"], b"403") self.assertEqual(channel.json_body["error"], "Forbidden!!one!") @@ -157,10 +146,9 @@ class 
JsonResourceTests(unittest.TestCase): "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" ) - request, channel = make_request( + _, channel = make_request( self.reactor, FakeSite(res), b"GET", b"/_matrix/foobar" ) - render(request, res, self.reactor) self.assertEqual(channel.result["code"], b"400") self.assertEqual(channel.json_body["error"], "Unrecognized request") @@ -182,10 +170,7 @@ class JsonResourceTests(unittest.TestCase): ) # The path was registered as GET, but this is a HEAD request. - request, channel = make_request( - self.reactor, FakeSite(res), b"HEAD", b"/_matrix/foo" - ) - render(request, res, self.reactor) + _, channel = make_request(self.reactor, FakeSite(res), b"HEAD", b"/_matrix/foo") self.assertEqual(channel.result["code"], b"200") self.assertNotIn("body", channel.result) @@ -216,16 +201,8 @@ class OptionsResourceTests(unittest.TestCase): "1.0", ) - request, channel = make_request( - self.reactor, site, method, path, shorthand=False - ) - request.prepath = [] # This doesn't get set properly by make_request. - - request.site = site - resource = site.getResourceFor(request) - - # Finally, render the resource and return the channel. - render(request, resource, self.reactor) + # render the request and return the channel + _, channel = make_request(self.reactor, site, method, path, shorthand=False) return channel def test_unknown_options_request(self): @@ -298,8 +275,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): res = WrapHtmlRequestHandlerTests.TestResource() res.callback = callback - request, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path") - render(request, res, self.reactor) + _, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path") self.assertEqual(channel.result["code"], b"200") body = channel.result["body"] @@ -317,8 +293,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): res = WrapHtmlRequestHandlerTests.TestResource() res.callback = callback - request, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path") - render(request, res, self.reactor) + _, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path") self.assertEqual(channel.result["code"], b"301") headers = channel.result["headers"] @@ -339,8 +314,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): res = WrapHtmlRequestHandlerTests.TestResource() res.callback = callback - request, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path") - render(request, res, self.reactor) + _, channel = make_request(self.reactor, FakeSite(res), b"GET", b"/path") self.assertEqual(channel.result["code"], b"304") headers = channel.result["headers"] @@ -359,8 +333,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): res = WrapHtmlRequestHandlerTests.TestResource() res.callback = callback - request, channel = make_request(self.reactor, FakeSite(res), b"HEAD", b"/path") - render(request, res, self.reactor) + _, channel = make_request(self.reactor, FakeSite(res), b"HEAD", b"/path") self.assertEqual(channel.result["code"], b"200") self.assertNotIn("body", channel.result) diff --git a/tests/unittest.py b/tests/unittest.py index 9c7eca3b6e..8a49bb5262 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -48,13 +48,7 @@ from synapse.server import HomeServer from synapse.types import UserID, create_requester from synapse.util.ratelimitutils import FederationRateLimiter -from tests.server import ( - FakeChannel, - get_clock, - make_request, - render, - setup_test_homeserver, -) +from tests.server import 
FakeChannel, get_clock, make_request, setup_test_homeserver from tests.test_utils import event_injection, setup_awaitable_errors from tests.test_utils.logging_setup import setup_logging from tests.utils import default_config, setupdb @@ -454,7 +448,7 @@ class HomeserverTestCase(TestCase): Args: request (synapse.http.site.SynapseRequest): The request to render. """ - render(request, self.resource, self.reactor) + pass def setup_test_homeserver(self, *args, **kwargs): """ -- cgit 1.5.1 From acfe3b3065bf134e01439e24753bb8bb2ad9e1c2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sun, 15 Nov 2020 22:58:47 +0000 Subject: Remove redundant `HomeserverTestCase.render` --- tests/federation/test_complexity.py | 2 - tests/federation/test_federation_server.py | 3 - tests/federation/transport/test_server.py | 2 - tests/handlers/test_directory.py | 6 -- tests/handlers/test_message.py | 1 - tests/handlers/test_typing.py | 1 - tests/handlers/test_user_directory.py | 2 - tests/rest/admin/test_admin.py | 13 --- tests/rest/admin/test_device.py | 35 ------- tests/rest/admin/test_event_reports.py | 27 ------ tests/rest/admin/test_media.py | 23 ----- tests/rest/admin/test_room.py | 46 --------- tests/rest/admin/test_statistics.py | 27 ------ tests/rest/admin/test_user.py | 95 ------------------ tests/rest/client/test_ephemeral_message.py | 1 - tests/rest/client/test_identity.py | 2 - tests/rest/client/test_redactions.py | 2 - tests/rest/client/test_retention.py | 1 - tests/rest/client/test_shadow_banned.py | 8 -- tests/rest/client/test_third_party_rules.py | 5 - tests/rest/client/v1/test_directory.py | 4 - tests/rest/client/v1/test_events.py | 4 - tests/rest/client/v1/test_login.py | 32 ------ tests/rest/client/v1/test_presence.py | 2 - tests/rest/client/v1/test_profile.py | 7 -- tests/rest/client/v1/test_push_rule_attrs.py | 33 ------- tests/rest/client/v1/test_rooms.py | 107 --------------------- tests/rest/client/v1/test_typing.py | 4 - tests/rest/client/v2_alpha/test_account.py | 19 ---- tests/rest/client/v2_alpha/test_auth.py | 6 -- tests/rest/client/v2_alpha/test_capabilities.py | 4 - tests/rest/client/v2_alpha/test_filter.py | 7 -- tests/rest/client/v2_alpha/test_password_policy.py | 8 -- tests/rest/client/v2_alpha/test_register.py | 31 ------ tests/rest/client/v2_alpha/test_relations.py | 18 ---- tests/rest/client/v2_alpha/test_shared_rooms.py | 1 - tests/rest/client/v2_alpha/test_sync.py | 12 --- tests/rest/test_health.py | 1 - tests/rest/test_well_known.py | 2 - tests/server_notices/test_consent.py | 3 - .../test_resource_limits_server_notices.py | 3 - tests/test_mau.py | 2 - tests/test_terms_auth.py | 3 - tests/unittest.py | 14 --- 44 files changed, 629 deletions(-) (limited to 'tests') diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 1471cc1a28..0187f56e21 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -51,7 +51,6 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) - self.render(request) self.assertEquals(200, channel.code) complexity = channel.json_body["v1"] self.assertTrue(complexity > 0, complexity) @@ -64,7 +63,6 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) - self.render(request) self.assertEquals(200, channel.code) complexity = 
channel.json_body["v1"] self.assertEqual(complexity, 1.23) diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index da933ecd75..3009fbb6c4 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -51,7 +51,6 @@ class FederationServerTests(unittest.FederatingHomeserverTestCase): "/_matrix/federation/v1/get_missing_events/%s" % (room_1,), query_content, ) - self.render(request) self.assertEquals(400, channel.code, channel.result) self.assertEqual(channel.json_body["errcode"], "M_NOT_JSON") @@ -99,7 +98,6 @@ class StateQueryTests(unittest.FederatingHomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/federation/v1/state/%s" % (room_1,) ) - self.render(request) self.assertEquals(200, channel.code, channel.result) self.assertEqual( @@ -132,7 +130,6 @@ class StateQueryTests(unittest.FederatingHomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/federation/v1/state/%s" % (room_1,) ) - self.render(request) self.assertEquals(403, channel.code, channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") diff --git a/tests/federation/transport/test_server.py b/tests/federation/transport/test_server.py index 72e22d655f..f9e3c7a51f 100644 --- a/tests/federation/transport/test_server.py +++ b/tests/federation/transport/test_server.py @@ -40,7 +40,6 @@ class RoomDirectoryFederationTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/federation/v1/publicRooms" ) - self.render(request) self.assertEquals(403, channel.code) @override_config({"allow_public_rooms_over_federation": True}) @@ -48,5 +47,4 @@ class RoomDirectoryFederationTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/federation/v1/publicRooms" ) - self.render(request) self.assertEquals(200, channel.code) diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 2ce6dc9528..ee6ef5e6fa 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -412,7 +412,6 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): b"directory/room/%23test%3Atest", ('{"room_id":"%s"}' % (room_id,)).encode("ascii"), ) - self.render(request) self.assertEquals(403, channel.code, channel.result) def test_allowed(self): @@ -423,7 +422,6 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): b"directory/room/%23unofficial_test%3Atest", ('{"room_id":"%s"}' % (room_id,)).encode("ascii"), ) - self.render(request) self.assertEquals(200, channel.code, channel.result) @@ -438,7 +436,6 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", b"directory/list/room/%s" % (room_id.encode("ascii"),), b"{}" ) - self.render(request) self.assertEquals(200, channel.code, channel.result) self.room_list_handler = hs.get_room_list_handler() @@ -452,7 +449,6 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): # Room list is enabled so we should get some results request, channel = self.make_request("GET", b"publicRooms") - self.render(request) self.assertEquals(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["chunk"]) > 0) @@ -461,7 +457,6 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): # Room list disabled so we should get no results request, channel = self.make_request("GET", b"publicRooms") - self.render(request) self.assertEquals(200, channel.code, channel.result) 
self.assertTrue(len(channel.json_body["chunk"]) == 0) @@ -470,5 +465,4 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", b"directory/list/room/%s" % (room_id.encode("ascii"),), b"{}" ) - self.render(request) self.assertEquals(403, channel.code, channel.result) diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py index 8b57081cbe..af42775815 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -209,5 +209,4 @@ class ServerAclValidationTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", path, content={}, access_token=self.access_token ) - self.render(request) self.assertEqual(int(channel.result["code"]), 403) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 16ff2e22d2..abbdf2d524 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -228,7 +228,6 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ), federation_auth_origin=b"farm", ) - self.render(request) self.assertEqual(channel.code, 200) self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 87be94111f..98e5af2072 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -537,7 +537,6 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", b"user_directory/search", b'{"search_term":"user2"}' ) - self.render(request) self.assertEquals(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["results"]) > 0) @@ -546,6 +545,5 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", b"user_directory/search", b'{"search_term":"user2"}' ) - self.render(request) self.assertEquals(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["results"]) == 0) diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 961a5732b3..898e43411e 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -43,7 +43,6 @@ class VersionTestCase(unittest.HomeserverTestCase): def test_version_string(self): request, channel = self.make_request("GET", self.url, shorthand=False) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual( @@ -76,7 +75,6 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): content={"localpart": "test"}, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) group_id = channel.json_body["group_id"] @@ -89,14 +87,12 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", url.encode("ascii"), access_token=self.admin_user_tok, content={} ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) url = "/groups/%s/self/accept_invite" % (group_id,) request, channel = self.make_request( "PUT", url.encode("ascii"), access_token=self.other_user_token, content={} ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Check other user knows they're in the group @@ -112,7 +108,6 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): content={"localpart": "test"}, ) - self.render(request) self.assertEqual(200, 
int(channel.result["code"]), msg=channel.result["body"]) # Check group returns 404 @@ -132,7 +127,6 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): "GET", url.encode("ascii"), access_token=self.admin_user_tok ) - self.render(request) self.assertEqual( expect_code, int(channel.result["code"]), msg=channel.result["body"] ) @@ -144,7 +138,6 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): "GET", "/joined_groups".encode("ascii"), access_token=access_token ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) return channel.json_body["groups"] @@ -251,7 +244,6 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", url.encode("ascii"), access_token=non_admin_user_tok, ) - self.render(request) # Expect a forbidden error self.assertEqual( @@ -265,7 +257,6 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", url.encode("ascii"), access_token=non_admin_user_tok, ) - self.render(request) # Expect a forbidden error self.assertEqual( @@ -309,7 +300,6 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): urllib.parse.quote(media_id), ) request, channel = self.make_request("POST", url, access_token=admin_user_tok,) - self.render(request) self.pump(1.0) self.assertEqual(200, int(channel.code), msg=channel.result["body"]) @@ -362,7 +352,6 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): room_id ) request, channel = self.make_request("POST", url, access_token=admin_user_tok,) - self.render(request) self.pump(1.0) self.assertEqual(200, int(channel.code), msg=channel.result["body"]) self.assertEqual( @@ -409,7 +398,6 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", url.encode("ascii"), access_token=admin_user_tok, ) - self.render(request) self.pump(1.0) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual( @@ -452,7 +440,6 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", url.encode("ascii"), access_token=admin_user_tok, ) - self.render(request) self.pump(1.0) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual( diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py index d89eb90cfe..cf3a007598 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py @@ -51,19 +51,16 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): Try to get a device of an user without authentication. 
""" request, channel = self.make_request("GET", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) request, channel = self.make_request("PUT", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) request, channel = self.make_request("DELETE", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -75,7 +72,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.other_user_token, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -83,7 +79,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", self.url, access_token=self.other_user_token, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -91,7 +86,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "DELETE", self.url, access_token=self.other_user_token, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -108,7 +102,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -116,7 +109,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -124,7 +116,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "DELETE", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -141,7 +132,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", channel.json_body["error"]) @@ -149,7 +139,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", channel.json_body["error"]) @@ -157,7 +146,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "DELETE", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", 
channel.json_body["error"]) @@ -173,7 +161,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -181,14 +168,12 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) request, channel = self.make_request( "DELETE", url, access_token=self.admin_user_tok, ) - self.render(request) # Delete unknown device returns status 200 self.assertEqual(200, channel.code, msg=channel.json_body) @@ -218,7 +203,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.TOO_LARGE, channel.json_body["errcode"]) @@ -227,7 +211,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("new display", channel.json_body["display_name"]) @@ -247,7 +230,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) @@ -255,7 +237,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("new display", channel.json_body["display_name"]) @@ -272,7 +253,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) @@ -280,7 +260,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("new displayname", channel.json_body["display_name"]) @@ -292,7 +271,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(self.other_user, channel.json_body["user_id"]) @@ -316,7 +294,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "DELETE", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) @@ -347,7 +324,6 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): Try to list devices of an user without authentication. 
""" request, channel = self.make_request("GET", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -361,7 +337,6 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=other_user_token, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -374,7 +349,6 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -388,7 +362,6 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", channel.json_body["error"]) @@ -403,7 +376,6 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) @@ -422,7 +394,6 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(number_devices, channel.json_body["total"]) @@ -461,7 +432,6 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): Try to delete devices of an user without authentication. 
""" request, channel = self.make_request("POST", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -475,7 +445,6 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", self.url, access_token=other_user_token, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -488,7 +457,6 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -502,7 +470,6 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", channel.json_body["error"]) @@ -518,7 +485,6 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) # Delete unknown devices returns status 200 self.assertEqual(200, channel.code, msg=channel.json_body) @@ -550,7 +516,6 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index 303622217f..11b72c10f7 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -75,7 +75,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): Try to get an event report without authentication. 
""" request, channel = self.make_request("GET", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -88,7 +87,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.other_user_tok, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -101,7 +99,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -117,7 +114,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -133,7 +129,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -149,7 +144,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=5&limit=10", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -167,7 +161,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): self.url + "?room_id=%s" % self.room_id1, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 10) @@ -188,7 +181,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): self.url + "?user_id=%s" % self.other_user, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 10) @@ -209,7 +201,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): self.url + "?user_id=%s&room_id=%s" % (self.other_user, self.room_id1), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 5) @@ -230,7 +221,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?dir=b", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -247,7 +237,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?dir=f", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -268,7 +257,6 @@ class 
EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?dir=bar", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -282,7 +270,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=-5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -295,7 +282,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=-5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -310,7 +296,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=20", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -322,7 +307,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=21", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -334,7 +318,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=19", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -347,7 +330,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=19", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -366,7 +348,6 @@ class EventReportsTestCase(unittest.HomeserverTestCase): json.dumps({"score": -100, "reason": "this makes me sad"}), access_token=user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) def _check_fields(self, content): @@ -419,7 +400,6 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): Try to get event report without authentication. 
""" request, channel = self.make_request("GET", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -432,7 +412,6 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.other_user_tok, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -445,7 +424,6 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self._check_fields(channel.json_body) @@ -461,7 +439,6 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): "/_synapse/admin/v1/event_reports/-123", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -476,7 +453,6 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): "/_synapse/admin/v1/event_reports/abcdef", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -491,7 +467,6 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): "/_synapse/admin/v1/event_reports/", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -510,7 +485,6 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): "/_synapse/admin/v1/event_reports/123", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -528,7 +502,6 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): json.dumps({"score": -100, "reason": "this makes me sad"}), access_token=user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) def _check_fields(self, content): diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 64b7aa53ee..2a65ab33bd 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -51,7 +51,6 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, "12345") request, channel = self.make_request("DELETE", url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -68,7 +67,6 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "DELETE", url, access_token=self.other_user_token, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -82,7 +80,6 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "DELETE", url, 
access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -96,7 +93,6 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "DELETE", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only delete local media", channel.json_body["error"]) @@ -153,7 +149,6 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "DELETE", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) @@ -211,7 +206,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): """ request, channel = self.make_request("POST", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -226,7 +220,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", self.url, access_token=self.other_user_token, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -240,7 +233,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", url + "?before_ts=1234", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only delete local media", channel.json_body["error"]) @@ -252,7 +244,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) @@ -267,7 +258,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", self.url + "?before_ts=-1234", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -281,7 +271,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=1234&size_gt=-1234", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -295,7 +284,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=1234&keep_profiles=not_bool", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) @@ -326,7 +314,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( @@ -351,7 +338,6 @@ class 
DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) @@ -364,7 +350,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( @@ -388,7 +373,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&size_gt=67", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) @@ -400,7 +384,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&size_gt=66", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( @@ -425,7 +408,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): content=json.dumps({"avatar_url": "mxc://%s" % (server_and_media_id,)}), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) now_ms = self.clock.time_msec() @@ -434,7 +416,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=true", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) @@ -446,7 +427,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=false", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( @@ -472,7 +452,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): content=json.dumps({"url": "mxc://%s" % (server_and_media_id,)}), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) now_ms = self.clock.time_msec() @@ -481,7 +460,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=true", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) @@ -493,7 +471,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=false", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 535d68f284..54824a5410 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -85,7 +85,6 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): json.dumps({"new_room_user_id": self.admin_user}), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), 
msg=channel.result["body"]) @@ -110,7 +109,6 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): json.dumps({"history_visibility": "world_readable"}), access_token=self.other_user_token, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Test that the admin can still send shutdown @@ -121,7 +119,6 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): json.dumps({"new_room_user_id": self.admin_user}), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) @@ -136,7 +133,6 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok ) - self.render(request) self.assertEqual( expect_code, int(channel.result["code"]), msg=channel.result["body"] ) @@ -145,7 +141,6 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok ) - self.render(request) self.assertEqual( expect_code, int(channel.result["code"]), msg=channel.result["body"] ) @@ -192,7 +187,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", self.url, json.dumps({}), access_token=self.other_user_tok, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -206,7 +200,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", url, json.dumps({}), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -220,7 +213,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", url, json.dumps({}), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual( @@ -239,7 +231,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertIn("new_room_id", channel.json_body) @@ -259,7 +250,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual( @@ -278,7 +268,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) @@ -295,7 +284,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) @@ -322,7 +310,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) 
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(None, channel.json_body["new_room_id"]) @@ -356,7 +343,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(None, channel.json_body["new_room_id"]) @@ -391,7 +377,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(None, channel.json_body["new_room_id"]) @@ -439,7 +424,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): json.dumps({"new_room_user_id": self.admin_user}), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(self.other_user, channel.json_body["kicked_users"][0]) @@ -470,7 +454,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): json.dumps({"history_visibility": "world_readable"}), access_token=self.other_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Test that room is not purged @@ -488,7 +471,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): json.dumps({"new_room_user_id": self.admin_user}), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(self.other_user, channel.json_body["kicked_users"][0]) @@ -551,7 +533,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok ) - self.render(request) self.assertEqual( expect_code, int(channel.result["code"]), msg=channel.result["body"] ) @@ -560,7 +541,6 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok ) - self.render(request) self.assertEqual( expect_code, int(channel.result["code"]), msg=channel.result["body"] ) @@ -595,7 +575,6 @@ class PurgeRoomTestCase(unittest.HomeserverTestCase): {"room_id": room_id}, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) @@ -647,7 +626,6 @@ class RoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) - self.render(request) # Check request completed successfully self.assertEqual(200, int(channel.code), msg=channel.json_body) @@ -729,7 +707,6 @@ class RoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual( 200, int(channel.result["code"]), msg=channel.result["body"] ) @@ -770,7 +747,6 @@ class RoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) def test_correct_room_attributes(self): @@ -794,7 +770,6 @@ class RoomTestCase(unittest.HomeserverTestCase): {"room_id": room_id}, access_token=self.admin_user_tok, ) - 
self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Set this new alias as the canonical alias for this room @@ -822,7 +797,6 @@ class RoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Check that rooms were returned @@ -867,7 +841,6 @@ class RoomTestCase(unittest.HomeserverTestCase): {"room_id": room_id}, access_token=admin_user_tok, ) - self.render(request) self.assertEqual( 200, int(channel.result["code"]), msg=channel.result["body"] ) @@ -905,7 +878,6 @@ class RoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) # Check that rooms were returned @@ -1042,7 +1014,6 @@ class RoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(expected_http_code, channel.code, msg=channel.json_body) if expected_http_code != 200: @@ -1104,7 +1075,6 @@ class RoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertIn("room_id", channel.json_body) @@ -1152,7 +1122,6 @@ class RoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertCountEqual( @@ -1164,7 +1133,6 @@ class RoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertCountEqual( @@ -1208,7 +1176,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.second_tok, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -1225,7 +1192,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) @@ -1242,7 +1208,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -1259,7 +1224,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual( @@ -1280,7 +1244,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) 
self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("No known servers", channel.json_body["error"]) @@ -1298,7 +1261,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual( @@ -1318,7 +1280,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(self.public_room_id, channel.json_body["room_id"]) @@ -1328,7 +1289,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok, ) - self.render(request) self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(self.public_room_id, channel.json_body["joined_rooms"][0]) @@ -1349,7 +1309,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -1377,7 +1336,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/client/r0/joined_rooms", access_token=self.admin_user_tok, ) - self.render(request) self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0]) @@ -1392,7 +1350,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(private_room_id, channel.json_body["room_id"]) @@ -1401,7 +1358,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok, ) - self.render(request) self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0]) @@ -1422,7 +1378,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(private_room_id, channel.json_body["room_id"]) @@ -1432,7 +1387,6 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok, ) - self.render(request) self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0]) diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py index 816683a612..907b49f889 100644 --- a/tests/rest/admin/test_statistics.py +++ b/tests/rest/admin/test_statistics.py @@ -47,7 +47,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): Try to list users without authentication. 
""" request, channel = self.make_request("GET", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -59,7 +58,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, json.dumps({}), access_token=self.other_user_tok, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -72,7 +70,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?order_by=bar", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -81,7 +78,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=-5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -90,7 +86,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=-5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -99,7 +94,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from_ts=-1234", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -108,7 +102,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?until_ts=-1234", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -119,7 +112,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): self.url + "?from_ts=10&until_ts=5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -128,7 +120,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?search_term=", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -137,7 +128,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?dir=bar", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -151,7 +141,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = 
self.make_request( "GET", self.url + "?limit=5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 10) @@ -168,7 +157,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -185,7 +173,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=5&limit=10", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -206,7 +193,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=20", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], number_users) @@ -218,7 +204,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=21", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], number_users) @@ -230,7 +215,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=19", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], number_users) @@ -242,7 +226,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=19", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], number_users) @@ -258,7 +241,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) @@ -337,7 +319,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["users"][0]["media_count"], 3) @@ -346,7 +327,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from_ts=%s" % (ts1,), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 0) @@ -362,7 +342,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): self.url + "?from_ts=%s&until_ts=%s" % (ts1, ts2), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, 
int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["users"][0]["media_count"], 3) @@ -370,7 +349,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?until_ts=%s" % (ts2,), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["users"][0]["media_count"], 6) @@ -381,7 +359,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 20) @@ -391,7 +368,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): self.url + "?search_term=foo_user_1", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 11) @@ -401,7 +377,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): self.url + "?search_term=bar_user_10", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["users"][0]["displayname"], "bar_user_10") self.assertEqual(channel.json_body["total"], 1) @@ -410,7 +385,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?search_term=foobar", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], 0) @@ -476,7 +450,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], len(expected_user_list)) diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index d74efede06..5699c41eb4 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -71,7 +71,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): self.hs.config.registration_shared_secret = None request, channel = self.make_request("POST", self.url, b"{}") - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual( @@ -89,7 +88,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): self.hs.get_secrets = Mock(return_value=secrets) request, channel = self.make_request("GET", self.url) - self.render(request) self.assertEqual(channel.json_body, {"nonce": "abcd"}) @@ -99,7 +97,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): only last for SALT_TIMEOUT (60s). 
""" request, channel = self.make_request("GET", self.url) - self.render(request) nonce = channel.json_body["nonce"] # 59 seconds @@ -107,7 +104,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): body = json.dumps({"nonce": nonce}) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("username must be specified", channel.json_body["error"]) @@ -116,7 +112,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): self.reactor.advance(2) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("unrecognised nonce", channel.json_body["error"]) @@ -126,7 +121,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): Only the provided nonce can be used, as it's checked in the MAC. """ request, channel = self.make_request("GET", self.url) - self.render(request) nonce = channel.json_body["nonce"] want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) @@ -143,7 +137,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): } ) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("HMAC incorrect", channel.json_body["error"]) @@ -154,7 +147,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): user is registered. """ request, channel = self.make_request("GET", self.url) - self.render(request) nonce = channel.json_body["nonce"] want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) @@ -174,7 +166,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): } ) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["user_id"]) @@ -184,7 +175,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): A valid unrecognised nonce. 
""" request, channel = self.make_request("GET", self.url) - self.render(request) nonce = channel.json_body["nonce"] want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) @@ -201,14 +191,12 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): } ) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["user_id"]) # Now, try and reuse it request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("unrecognised nonce", channel.json_body["error"]) @@ -222,7 +210,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): def nonce(): request, channel = self.make_request("GET", self.url) - self.render(request) return channel.json_body["nonce"] # @@ -232,7 +219,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Must be present body = json.dumps({}) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("nonce must be specified", channel.json_body["error"]) @@ -244,7 +230,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Must be present body = json.dumps({"nonce": nonce()}) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("username must be specified", channel.json_body["error"]) @@ -252,7 +237,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Must be a string body = json.dumps({"nonce": nonce(), "username": 1234}) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("Invalid username", channel.json_body["error"]) @@ -260,7 +244,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Must not have null bytes body = json.dumps({"nonce": nonce(), "username": "abcd\u0000"}) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("Invalid username", channel.json_body["error"]) @@ -268,7 +251,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Must not have null bytes body = json.dumps({"nonce": nonce(), "username": "a" * 1000}) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("Invalid username", channel.json_body["error"]) @@ -280,7 +262,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Must be present body = json.dumps({"nonce": nonce(), "username": "a"}) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("password must be specified", channel.json_body["error"]) @@ -288,7 +269,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Must be a string body = json.dumps({"nonce": nonce(), "username": "a", "password": 1234}) request, channel = self.make_request("POST", 
self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("Invalid password", channel.json_body["error"]) @@ -296,7 +276,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Must not have null bytes body = json.dumps({"nonce": nonce(), "username": "a", "password": "abcd\u0000"}) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("Invalid password", channel.json_body["error"]) @@ -304,7 +283,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Super long body = json.dumps({"nonce": nonce(), "username": "a", "password": "A" * 1000}) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("Invalid password", channel.json_body["error"]) @@ -323,7 +301,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): } ) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("Invalid user type", channel.json_body["error"]) @@ -335,7 +312,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # set no displayname request, channel = self.make_request("GET", self.url) - self.render(request) nonce = channel.json_body["nonce"] want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) @@ -346,19 +322,16 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): {"nonce": nonce, "username": "bob1", "password": "abc123", "mac": want_mac} ) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob1:test", channel.json_body["user_id"]) request, channel = self.make_request("GET", "/profile/@bob1:test/displayname") - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("bob1", channel.json_body["displayname"]) # displayname is None request, channel = self.make_request("GET", self.url) - self.render(request) nonce = channel.json_body["nonce"] want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) @@ -375,19 +348,16 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): } ) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob2:test", channel.json_body["user_id"]) request, channel = self.make_request("GET", "/profile/@bob2:test/displayname") - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("bob2", channel.json_body["displayname"]) # displayname is empty request, channel = self.make_request("GET", self.url) - self.render(request) nonce = channel.json_body["nonce"] want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) @@ -404,18 +374,15 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): } ) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob3:test", channel.json_body["user_id"]) request, 
channel = self.make_request("GET", "/profile/@bob3:test/displayname") - self.render(request) self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"]) # set displayname request, channel = self.make_request("GET", self.url) - self.render(request) nonce = channel.json_body["nonce"] want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) @@ -432,13 +399,11 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): } ) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob4:test", channel.json_body["user_id"]) request, channel = self.make_request("GET", "/profile/@bob4:test/displayname") - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("Bob's Name", channel.json_body["displayname"]) @@ -465,7 +430,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Register new user with admin API request, channel = self.make_request("GET", self.url) - self.render(request) nonce = channel.json_body["nonce"] want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) @@ -485,7 +449,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): } ) request, channel = self.make_request("POST", self.url, body.encode("utf8")) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["user_id"]) @@ -511,7 +474,6 @@ class UsersListTestCase(unittest.HomeserverTestCase): Try to list users without authentication. """ request, channel = self.make_request("GET", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("M_MISSING_TOKEN", channel.json_body["errcode"]) @@ -526,7 +488,6 @@ class UsersListTestCase(unittest.HomeserverTestCase): b"{}", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(3, len(channel.json_body["users"])) @@ -562,7 +523,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.other_user_token, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("You are not a server admin", channel.json_body["error"]) @@ -570,7 +530,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", url, access_token=self.other_user_token, content=b"{}", ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("You are not a server admin", channel.json_body["error"]) @@ -585,7 +544,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): "/_synapse/admin/v2/users/@unknown_person:test", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"]) @@ -613,7 +571,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) @@ -626,7 +583,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): request, channel = 
self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) @@ -659,7 +615,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) @@ -672,7 +627,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) @@ -700,7 +654,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/sync", access_token=self.admin_user_tok ) - self.render(request) if channel.code != 200: raise HttpResponseException( @@ -729,7 +682,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) @@ -769,7 +721,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) # Admin user is not blocked by mau anymore self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) @@ -807,7 +758,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) @@ -852,7 +802,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) @@ -879,7 +828,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) @@ -897,7 +845,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@user:test", channel.json_body["name"]) @@ -907,7 +854,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url_other_user, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@user:test", channel.json_body["name"]) @@ -929,7 +875,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@user:test", channel.json_body["name"]) @@ -940,7 +885,6 @@ class 
UserRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url_other_user, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@user:test", channel.json_body["name"]) @@ -961,7 +905,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@user:test", channel.json_body["name"]) @@ -972,7 +915,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url_other_user, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@user:test", channel.json_body["name"]) @@ -990,7 +932,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=json.dumps({"deactivated": True}).encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self._is_erased("@user:test", False) d = self.store.mark_user_erased("@user:test") @@ -1004,7 +945,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=json.dumps({"deactivated": False}).encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) # Reactivate the user. @@ -1016,14 +956,12 @@ class UserRestTestCase(unittest.HomeserverTestCase): encoding="utf_8" ), ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Get user request, channel = self.make_request( "GET", self.url_other_user, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@user:test", channel.json_body["name"]) @@ -1044,7 +982,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@user:test", channel.json_body["name"]) @@ -1054,7 +991,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url_other_user, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@user:test", channel.json_body["name"]) @@ -1076,7 +1012,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) @@ -1086,7 +1021,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) @@ -1102,7 +1036,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) - self.render(request) 
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) @@ -1110,7 +1043,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) @@ -1153,7 +1085,6 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): Try to list rooms of an user without authentication. """ request, channel = self.make_request("GET", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -1167,7 +1098,6 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=other_user_token, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -1180,7 +1110,6 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -1194,7 +1123,6 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", channel.json_body["error"]) @@ -1208,7 +1136,6 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) @@ -1228,7 +1155,6 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(number_rooms, channel.json_body["total"]) @@ -1258,7 +1184,6 @@ class PushersRestTestCase(unittest.HomeserverTestCase): Try to list pushers of an user without authentication. 
""" request, channel = self.make_request("GET", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -1272,7 +1197,6 @@ class PushersRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=other_user_token, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -1285,7 +1209,6 @@ class PushersRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -1299,7 +1222,6 @@ class PushersRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", channel.json_body["error"]) @@ -1313,7 +1235,6 @@ class PushersRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) @@ -1343,7 +1264,6 @@ class PushersRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) @@ -1383,7 +1303,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): Try to list media of an user without authentication. 
""" request, channel = self.make_request("GET", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -1397,7 +1316,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=other_user_token, ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -1410,7 +1328,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @@ -1424,7 +1341,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", channel.json_body["error"]) @@ -1441,7 +1357,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], number_media) @@ -1461,7 +1376,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], number_media) @@ -1481,7 +1395,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=5&limit=10", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], number_media) @@ -1497,7 +1410,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=-5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -1510,7 +1422,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=-5", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) @@ -1529,7 +1440,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=20", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], number_media) @@ -1541,7 +1451,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=21", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, 
int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], number_media) @@ -1553,7 +1462,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?limit=19", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], number_media) @@ -1566,7 +1474,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url + "?from=19", access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(channel.json_body["total"], number_media) @@ -1582,7 +1489,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) @@ -1600,7 +1506,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) - self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(number_media, channel.json_body["total"]) diff --git a/tests/rest/client/test_ephemeral_message.py b/tests/rest/client/test_ephemeral_message.py index 5e9c07ebf3..a1ccc4ee9a 100644 --- a/tests/rest/client/test_ephemeral_message.py +++ b/tests/rest/client/test_ephemeral_message.py @@ -94,7 +94,6 @@ class EphemeralMessageTestCase(unittest.HomeserverTestCase): url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id) request, channel = self.make_request("GET", url) - self.render(request) self.assertEqual(channel.code, expected_code, channel.result) diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py index c973521907..259c6a1985 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py @@ -46,7 +46,6 @@ class IdentityTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"POST", "/createRoom", b"{}", access_token=tok ) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) room_id = channel.json_body["room_id"] @@ -60,5 +59,4 @@ class IdentityTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"POST", request_url, request_data, access_token=tok ) - self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index d2bcf256fa..c1f516cc93 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -72,7 +72,6 @@ class RedactionsTestCase(HomeserverTestCase): request, channel = self.make_request( "POST", path, content={}, access_token=access_token ) - self.render(request) self.assertEqual(int(channel.result["code"]), expect_code) return channel.json_body @@ -80,7 +79,6 @@ class RedactionsTestCase(HomeserverTestCase): request, channel = self.make_request( "GET", "sync", access_token=self.mod_access_token ) - self.render(request) self.assertEqual(channel.result["code"], b"200") room_sync = channel.json_body["rooms"]["join"][room_id] return room_sync["timeline"]["events"] diff --git a/tests/rest/client/test_retention.py 
b/tests/rest/client/test_retention.py index 7d3773ff78..f56b5d9231 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -326,7 +326,6 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id) request, channel = self.make_request("GET", url, access_token=self.token) - self.render(request) self.assertEqual(channel.code, expected_code, channel.result) diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index 6bb02b9630..94dcfb9f7c 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -95,7 +95,6 @@ class RoomTestCase(_ShadowBannedBase): {"id_server": "test", "medium": "email", "address": "test@test.test"}, access_token=self.banned_access_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.result) # This should have raised an error earlier, but double check this wasn't called. @@ -110,7 +109,6 @@ class RoomTestCase(_ShadowBannedBase): {"visibility": "public", "invite": [self.other_user_id]}, access_token=self.banned_access_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.result) room_id = channel.json_body["room_id"] @@ -166,7 +164,6 @@ class RoomTestCase(_ShadowBannedBase): {"new_version": "6"}, access_token=self.banned_access_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.result) # A new room_id should be returned. self.assertIn("replacement_room", channel.json_body) @@ -192,7 +189,6 @@ class RoomTestCase(_ShadowBannedBase): {"typing": True, "timeout": 30000}, access_token=self.banned_access_token, ) - self.render(request) self.assertEquals(200, channel.code) # There should be no typing events. @@ -208,7 +204,6 @@ class RoomTestCase(_ShadowBannedBase): {"typing": True, "timeout": 30000}, access_token=self.other_access_token, ) - self.render(request) self.assertEquals(200, channel.code) # These appear in the room. 
@@ -255,7 +250,6 @@ class ProfileTestCase(_ShadowBannedBase):
            {"displayname": new_display_name},
            access_token=self.banned_access_token,
        )
-        self.render(request)
        self.assertEquals(200, channel.code, channel.result)
        self.assertEqual(channel.json_body, {})
@@ -263,7 +257,6 @@ class ProfileTestCase(_ShadowBannedBase):
        request, channel = self.make_request(
            "GET", "/profile/%s/displayname" % (self.banned_user_id,)
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)
        self.assertEqual(channel.json_body["displayname"], new_display_name)
@@ -296,7 +289,6 @@ class ProfileTestCase(_ShadowBannedBase):
            {"membership": "join", "displayname": new_display_name},
            access_token=self.banned_access_token,
        )
-        self.render(request)
        self.assertEquals(200, channel.code, channel.result)
        self.assertIn("event_id", channel.json_body)
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py
index 0048bea54a..0e96697f9b 100644
--- a/tests/rest/client/test_third_party_rules.py
+++ b/tests/rest/client/test_third_party_rules.py
@@ -92,7 +92,6 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase):
            {},
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEquals(channel.result["code"], b"200", channel.result)

        callback.assert_called_once()
@@ -111,7 +110,6 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase):
            {},
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEquals(channel.result["code"], b"403", channel.result)

    def test_cannot_modify_event(self):
@@ -131,7 +129,6 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase):
            {"x": "x"},
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEqual(channel.result["code"], b"500", channel.result)

    def test_modify_event(self):
@@ -151,7 +148,6 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase):
            {"x": "x"},
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEqual(channel.result["code"], b"200", channel.result)
        event_id = channel.json_body["event_id"]
@@ -161,7 +157,6 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase):
            "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id),
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEqual(channel.result["code"], b"200", channel.result)
        ev = channel.json_body
        self.assertEqual(ev["content"]["x"], "y")
diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py
index ea5a7f3739..7a2c653df8 100644
--- a/tests/rest/client/v1/test_directory.py
+++ b/tests/rest/client/v1/test_directory.py
@@ -94,7 +94,6 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "POST", url, request_data, access_token=self.user_tok
        )
-        self.render(request)
        self.assertEqual(channel.code, 400, channel.result)

    def test_room_creation(self):
@@ -108,7 +107,6 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "POST", url, request_data, access_token=self.user_tok
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

    def set_alias_via_state_event(self, expected_code, alias_length=5):
@@ -123,7 +121,6 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "PUT", url, request_data, access_token=self.user_tok
        )
-        self.render(request)
        self.assertEqual(channel.code, expected_code, channel.result)

    def set_alias_via_directory(self, expected_code, alias_length=5):
@@ -134,7 +131,6 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "PUT", url, request_data, access_token=self.user_tok
        )
-        self.render(request)
        self.assertEqual(channel.code, expected_code, channel.result)

    def random_alias(self, length):
diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py
index 3397ba5579..12a93f5687 100644
--- a/tests/rest/client/v1/test_events.py
+++ b/tests/rest/client/v1/test_events.py
@@ -66,14 +66,12 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "GET", "/events?access_token=%s" % ("invalid" + self.token,)
        )
-        self.render(request)
        self.assertEquals(channel.code, 401, msg=channel.result)

        # valid token, expect content
        request, channel = self.make_request(
            "GET", "/events?access_token=%s&timeout=0" % (self.token,)
        )
-        self.render(request)
        self.assertEquals(channel.code, 200, msg=channel.result)
        self.assertTrue("chunk" in channel.json_body)
        self.assertTrue("start" in channel.json_body)
@@ -92,7 +90,6 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "GET", "/events?access_token=%s&timeout=0" % (self.token,)
        )
-        self.render(request)
        self.assertEquals(channel.code, 200, msg=channel.result)

        # We may get a presence event for ourselves down
@@ -155,5 +152,4 @@ class GetEventsTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "GET", "/events/" + event_id, access_token=self.token,
        )
-        self.render(request)
        self.assertEquals(channel.code, 200, msg=channel.result)
diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py
index 5d987a30c7..176ddf7ec9 100644
--- a/tests/rest/client/v1/test_login.py
+++ b/tests/rest/client/v1/test_login.py
@@ -64,7 +64,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
                "password": "monkey",
            }
            request, channel = self.make_request(b"POST", LOGIN_URL, params)
-            self.render(request)

            if i == 5:
                self.assertEquals(channel.result["code"], b"429", channel.result)
@@ -84,7 +83,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
            "password": "monkey",
        }
        request, channel = self.make_request(b"POST", LOGIN_URL, params)
-        self.render(request)

        self.assertEquals(channel.result["code"], b"200", channel.result)
@@ -111,7 +109,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
                "password": "monkey",
            }
            request, channel = self.make_request(b"POST", LOGIN_URL, params)
-            self.render(request)

            if i == 5:
                self.assertEquals(channel.result["code"], b"429", channel.result)
@@ -131,7 +128,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
            "password": "monkey",
        }
        request, channel = self.make_request(b"POST", LOGIN_URL, params)
-        self.render(request)

        self.assertEquals(channel.result["code"], b"200", channel.result)
@@ -158,7 +154,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
                "password": "notamonkey",
            }
            request, channel = self.make_request(b"POST", LOGIN_URL, params)
-            self.render(request)

            if i == 5:
                self.assertEquals(channel.result["code"], b"429", channel.result)
@@ -178,7 +173,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
            "password": "notamonkey",
        }
        request, channel = self.make_request(b"POST", LOGIN_URL, params)
-        self.render(request)

        self.assertEquals(channel.result["code"], b"403", channel.result)
@@ -188,7 +182,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):

        # we shouldn't be able to make requests without an access token
        request, channel = self.make_request(b"GET", TEST_URL)
-        self.render(request)
self.assertEquals(channel.result["code"], b"401", channel.result) self.assertEquals(channel.json_body["errcode"], "M_MISSING_TOKEN") @@ -199,7 +192,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): "password": "monkey", } request, channel = self.make_request(b"POST", LOGIN_URL, params) - self.render(request) self.assertEquals(channel.code, 200, channel.result) access_token = channel.json_body["access_token"] @@ -209,7 +201,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"GET", TEST_URL, access_token=access_token ) - self.render(request) self.assertEquals(channel.code, 200, channel.result) # time passes @@ -219,7 +210,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"GET", TEST_URL, access_token=access_token ) - self.render(request) self.assertEquals(channel.code, 401, channel.result) self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEquals(channel.json_body["soft_logout"], True) @@ -236,7 +226,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"GET", TEST_URL, access_token=access_token ) - self.render(request) self.assertEquals(channel.code, 401, channel.result) self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEquals(channel.json_body["soft_logout"], True) @@ -247,7 +236,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"GET", TEST_URL, access_token=access_token ) - self.render(request) self.assertEquals(channel.code, 401, channel.result) self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEquals(channel.json_body["soft_logout"], False) @@ -257,7 +245,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"DELETE", "devices/" + device_id, access_token=access_token ) - self.render(request) self.assertEquals(channel.code, 401, channel.result) # check it's a UI-Auth fail self.assertEqual( @@ -281,7 +268,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): access_token=access_token, content={"auth": auth}, ) - self.render(request) self.assertEquals(channel.code, 200, channel.result) @override_config({"session_lifetime": "24h"}) @@ -295,7 +281,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"GET", TEST_URL, access_token=access_token ) - self.render(request) self.assertEquals(channel.code, 200, channel.result) # time passes @@ -305,7 +290,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"GET", TEST_URL, access_token=access_token ) - self.render(request) self.assertEquals(channel.code, 401, channel.result) self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEquals(channel.json_body["soft_logout"], True) @@ -314,7 +298,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"POST", "/logout", access_token=access_token ) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) @override_config({"session_lifetime": "24h"}) @@ -328,7 +311,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"GET", TEST_URL, access_token=access_token ) - self.render(request) self.assertEquals(channel.code, 200, channel.result) # time passes @@ -338,7 +320,6 @@ class 
LoginRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"GET", TEST_URL, access_token=access_token ) - self.render(request) self.assertEquals(channel.code, 401, channel.result) self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEquals(channel.json_body["soft_logout"], True) @@ -347,7 +328,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"POST", "/logout/all", access_token=access_token ) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) @@ -423,7 +403,6 @@ class CASTestCase(unittest.HomeserverTestCase): # Get Synapse to call the fake CAS and serve the template. request, channel = self.make_request("GET", cas_ticket_url) - self.render(request) # Test that the response is HTML. self.assertEqual(channel.code, 200) @@ -468,7 +447,6 @@ class CASTestCase(unittest.HomeserverTestCase): # Get Synapse to call the fake CAS and serve the template. request, channel = self.make_request("GET", cas_ticket_url) - self.render(request) self.assertEqual(channel.code, 302) location_headers = channel.headers.getRawHeaders("Location") @@ -495,7 +473,6 @@ class CASTestCase(unittest.HomeserverTestCase): # Get Synapse to call the fake CAS and serve the template. request, channel = self.make_request("GET", cas_ticket_url) - self.render(request) # Because the user is deactivated they are served an error template. self.assertEqual(channel.code, 403) @@ -526,7 +503,6 @@ class JWTTestCase(unittest.HomeserverTestCase): {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)} ) request, channel = self.make_request(b"POST", LOGIN_URL, params) - self.render(request) return channel def test_login_jwt_valid_registered(self): @@ -659,7 +635,6 @@ class JWTTestCase(unittest.HomeserverTestCase): def test_login_no_token(self): params = json.dumps({"type": "org.matrix.login.jwt"}) request, channel = self.make_request(b"POST", LOGIN_URL, params) - self.render(request) self.assertEqual(channel.result["code"], b"403", channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual(channel.json_body["error"], "Token field for JWT is missing") @@ -733,7 +708,6 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase): {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)} ) request, channel = self.make_request(b"POST", LOGIN_URL, params) - self.render(request) return channel def test_login_jwt_valid(self): @@ -766,7 +740,6 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): "/_matrix/client/r0/register?access_token=%s" % (self.service.token,), {"username": username}, ) - self.render(request) def make_homeserver(self, reactor, clock): self.hs = self.setup_test_homeserver() @@ -815,7 +788,6 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): b"POST", LOGIN_URL, params, access_token=self.service.token ) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) def test_login_appservice_user_bot(self): @@ -831,7 +803,6 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): b"POST", LOGIN_URL, params, access_token=self.service.token ) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) def test_login_appservice_wrong_user(self): @@ -847,7 +818,6 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): b"POST", LOGIN_URL, params, access_token=self.service.token ) - self.render(request) 
self.assertEquals(channel.result["code"], b"403", channel.result) def test_login_appservice_wrong_as(self): @@ -863,7 +833,6 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): b"POST", LOGIN_URL, params, access_token=self.another_service.token ) - self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) def test_login_appservice_no_token(self): @@ -878,5 +847,4 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): } request, channel = self.make_request(b"POST", LOGIN_URL, params) - self.render(request) self.assertEquals(channel.result["code"], b"401", channel.result) diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py index 3c66255dac..b84f86d28c 100644 --- a/tests/rest/client/v1/test_presence.py +++ b/tests/rest/client/v1/test_presence.py @@ -53,7 +53,6 @@ class PresenceTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", "/presence/%s/status" % (self.user_id,), body ) - self.render(request) self.assertEqual(channel.code, 200) self.assertEqual(self.hs.presence_handler.set_state.call_count, 1) @@ -69,7 +68,6 @@ class PresenceTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", "/presence/%s/status" % (self.user_id,), body ) - self.render(request) self.assertEqual(channel.code, 200) self.assertEqual(self.hs.presence_handler.set_state.call_count, 0) diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index ace0a3c08d..383a9eafac 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -195,7 +195,6 @@ class ProfileTestCase(unittest.HomeserverTestCase): content=json.dumps({"displayname": "test"}), access_token=self.owner_tok, ) - self.render(request) self.assertEqual(channel.code, 200, channel.result) res = self.get_displayname() @@ -209,7 +208,6 @@ class ProfileTestCase(unittest.HomeserverTestCase): content=json.dumps({"displayname": "test" * 100}), access_token=self.owner_tok, ) - self.render(request) self.assertEqual(channel.code, 400, channel.result) res = self.get_displayname() @@ -219,7 +217,6 @@ class ProfileTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/profile/%s/displayname" % (self.owner,) ) - self.render(request) self.assertEqual(channel.code, 200, channel.result) return channel.json_body["displayname"] @@ -284,7 +281,6 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.profile_url + url_suffix, access_token=access_token ) - self.render(request) self.assertEqual(channel.code, expected_code, channel.result) def ensure_requester_left_room(self): @@ -327,7 +323,6 @@ class OwnProfileUnrestrictedTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/profile/" + self.requester, access_token=self.requester_tok ) - self.render(request) self.assertEqual(channel.code, 200, channel.result) request, channel = self.make_request( @@ -335,7 +330,6 @@ class OwnProfileUnrestrictedTestCase(unittest.HomeserverTestCase): "/profile/" + self.requester + "/displayname", access_token=self.requester_tok, ) - self.render(request) self.assertEqual(channel.code, 200, channel.result) request, channel = self.make_request( @@ -343,5 +337,4 @@ class OwnProfileUnrestrictedTestCase(unittest.HomeserverTestCase): "/profile/" + self.requester + "/avatar_url", access_token=self.requester_tok, ) - self.render(request) 
        self.assertEqual(channel.code, 200, channel.result)
diff --git a/tests/rest/client/v1/test_push_rule_attrs.py b/tests/rest/client/v1/test_push_rule_attrs.py
index 081052f6a6..7add5523c8 100644
--- a/tests/rest/client/v1/test_push_rule_attrs.py
+++ b/tests/rest/client/v1/test_push_rule_attrs.py
@@ -48,14 +48,12 @@ class PushRuleAttributesTestCase(HomeserverTestCase):
        request, channel = self.make_request(
            "PUT", "/pushrules/global/override/best.friend", body, access_token=token
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)

        # GET enabled for that new rule
        request, channel = self.make_request(
            "GET", "/pushrules/global/override/best.friend/enabled", access_token=token
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)
        self.assertEqual(channel.json_body["enabled"], True)
@@ -79,7 +77,6 @@ class PushRuleAttributesTestCase(HomeserverTestCase):
        request, channel = self.make_request(
            "PUT", "/pushrules/global/override/best.friend", body, access_token=token
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)

        # disable the rule
@@ -89,14 +86,12 @@ class PushRuleAttributesTestCase(HomeserverTestCase):
            {"enabled": False},
            access_token=token,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)

        # check rule disabled
        request, channel = self.make_request(
            "GET", "/pushrules/global/override/best.friend/enabled", access_token=token
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)
        self.assertEqual(channel.json_body["enabled"], False)
@@ -104,21 +99,18 @@ class PushRuleAttributesTestCase(HomeserverTestCase):
        request, channel = self.make_request(
            "DELETE", "/pushrules/global/override/best.friend", access_token=token
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)

        # PUT a new rule
        request, channel = self.make_request(
            "PUT", "/pushrules/global/override/best.friend", body, access_token=token
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)

        # GET enabled for that new rule
        request, channel = self.make_request(
            "GET", "/pushrules/global/override/best.friend/enabled", access_token=token
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)
        self.assertEqual(channel.json_body["enabled"], True)
@@ -141,7 +133,6 @@ class PushRuleAttributesTestCase(HomeserverTestCase):
        request, channel = self.make_request(
            "PUT", "/pushrules/global/override/best.friend", body, access_token=token
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)

        # disable the rule
@@ -151,14 +142,12 @@ class PushRuleAttributesTestCase(HomeserverTestCase):
            {"enabled": False},
            access_token=token,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)

        # check rule disabled
        request, channel = self.make_request(
            "GET", "/pushrules/global/override/best.friend/enabled", access_token=token
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)
        self.assertEqual(channel.json_body["enabled"], False)
@@ -169,14 +158,12 @@ class PushRuleAttributesTestCase(HomeserverTestCase):
            {"enabled": True},
            access_token=token,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)

        # check rule enabled
        request, channel = self.make_request(
            "GET", "/pushrules/global/override/best.friend/enabled", access_token=token
        )
-        self.render(request)
        self.assertEqual(channel.code, 200)
        self.assertEqual(channel.json_body["enabled"], True)
@@ -198,7 +185,6 @@ class PushRuleAttributesTestCase(HomeserverTestCase):
        request, channel = self.make_request(
            "GET", "/pushrules/global/override/best.friend/enabled", access_token=token
        )
-        self.render(request)
        self.assertEqual(channel.code, 404)
self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) @@ -206,28 +192,24 @@ class PushRuleAttributesTestCase(HomeserverTestCase): request, channel = self.make_request( "PUT", "/pushrules/global/override/best.friend", body, access_token=token ) - self.render(request) self.assertEqual(channel.code, 200) # GET enabled for that new rule request, channel = self.make_request( "GET", "/pushrules/global/override/best.friend/enabled", access_token=token ) - self.render(request) self.assertEqual(channel.code, 200) # DELETE the rule request, channel = self.make_request( "DELETE", "/pushrules/global/override/best.friend", access_token=token ) - self.render(request) self.assertEqual(channel.code, 200) # check 404 for deleted rule request, channel = self.make_request( "GET", "/pushrules/global/override/best.friend/enabled", access_token=token ) - self.render(request) self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) @@ -242,7 +224,6 @@ class PushRuleAttributesTestCase(HomeserverTestCase): request, channel = self.make_request( "GET", "/pushrules/global/override/.m.muahahaha/enabled", access_token=token ) - self.render(request) self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) @@ -260,7 +241,6 @@ class PushRuleAttributesTestCase(HomeserverTestCase): {"enabled": True}, access_token=token, ) - self.render(request) self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) @@ -278,7 +258,6 @@ class PushRuleAttributesTestCase(HomeserverTestCase): {"enabled": True}, access_token=token, ) - self.render(request) self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) @@ -300,14 +279,12 @@ class PushRuleAttributesTestCase(HomeserverTestCase): request, channel = self.make_request( "PUT", "/pushrules/global/override/best.friend", body, access_token=token ) - self.render(request) self.assertEqual(channel.code, 200) # GET actions for that new rule request, channel = self.make_request( "GET", "/pushrules/global/override/best.friend/actions", access_token=token ) - self.render(request) self.assertEqual(channel.code, 200) self.assertEqual( channel.json_body["actions"], ["notify", {"set_tweak": "highlight"}] @@ -331,7 +308,6 @@ class PushRuleAttributesTestCase(HomeserverTestCase): request, channel = self.make_request( "PUT", "/pushrules/global/override/best.friend", body, access_token=token ) - self.render(request) self.assertEqual(channel.code, 200) # change the rule actions @@ -341,14 +317,12 @@ class PushRuleAttributesTestCase(HomeserverTestCase): {"actions": ["dont_notify"]}, access_token=token, ) - self.render(request) self.assertEqual(channel.code, 200) # GET actions for that new rule request, channel = self.make_request( "GET", "/pushrules/global/override/best.friend/actions", access_token=token ) - self.render(request) self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["actions"], ["dont_notify"]) @@ -370,7 +344,6 @@ class PushRuleAttributesTestCase(HomeserverTestCase): request, channel = self.make_request( "GET", "/pushrules/global/override/best.friend/enabled", access_token=token ) - self.render(request) self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) @@ -378,21 +351,18 @@ class PushRuleAttributesTestCase(HomeserverTestCase): request, channel = self.make_request( "PUT", "/pushrules/global/override/best.friend", body, access_token=token ) - 
self.render(request) self.assertEqual(channel.code, 200) # DELETE the rule request, channel = self.make_request( "DELETE", "/pushrules/global/override/best.friend", access_token=token ) - self.render(request) self.assertEqual(channel.code, 200) # check 404 for deleted rule request, channel = self.make_request( "GET", "/pushrules/global/override/best.friend/enabled", access_token=token ) - self.render(request) self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) @@ -407,7 +377,6 @@ class PushRuleAttributesTestCase(HomeserverTestCase): request, channel = self.make_request( "GET", "/pushrules/global/override/.m.muahahaha/actions", access_token=token ) - self.render(request) self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) @@ -425,7 +394,6 @@ class PushRuleAttributesTestCase(HomeserverTestCase): {"actions": ["dont_notify"]}, access_token=token, ) - self.render(request) self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) @@ -443,6 +411,5 @@ class PushRuleAttributesTestCase(HomeserverTestCase): {"actions": ["dont_notify"]}, access_token=token, ) - self.render(request) self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 9ba5f9d943..49f1073c88 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -86,7 +86,6 @@ class RoomPermissionsTestCase(RoomBase): request, channel = self.make_request( "PUT", self.created_rmid_msg_path, b'{"msgtype":"m.text","body":"test msg"}' ) - self.render(request) self.assertEquals(200, channel.code, channel.result) # set topic for public room @@ -95,7 +94,6 @@ class RoomPermissionsTestCase(RoomBase): ("rooms/%s/state/m.room.topic" % self.created_public_rmid).encode("ascii"), b'{"topic":"Public Room Topic"}', ) - self.render(request) self.assertEquals(200, channel.code, channel.result) # auth as user_id now @@ -118,12 +116,10 @@ class RoomPermissionsTestCase(RoomBase): "/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,), msg_content, ) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) # send message in created room not joined (no state), expect 403 request, channel = self.make_request("PUT", send_msg_path(), msg_content) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) # send message in created room and invited, expect 403 @@ -131,19 +127,16 @@ class RoomPermissionsTestCase(RoomBase): room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) request, channel = self.make_request("PUT", send_msg_path(), msg_content) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) # send message in created room and joined, expect 200 self.helper.join(room=self.created_rmid, user=self.user_id) request, channel = self.make_request("PUT", send_msg_path(), msg_content) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) # send message in created room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) request, channel = self.make_request("PUT", send_msg_path(), msg_content) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) def test_topic_perms(self): @@ -154,20 +147,16 @@ class RoomPermissionsTestCase(RoomBase): request, channel = 
self.make_request( "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content ) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) request, channel = self.make_request( "GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid ) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room not joined, expect 403 request, channel = self.make_request("PUT", topic_path, topic_content) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) request, channel = self.make_request("GET", topic_path) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) # set topic in created PRIVATE room and invited, expect 403 @@ -175,12 +164,10 @@ class RoomPermissionsTestCase(RoomBase): room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) request, channel = self.make_request("PUT", topic_path, topic_content) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) # get topic in created PRIVATE room and invited, expect 403 request, channel = self.make_request("GET", topic_path) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room and joined, expect 200 @@ -189,29 +176,24 @@ class RoomPermissionsTestCase(RoomBase): # Only room ops can set topic by default self.helper.auth_user_id = self.rmcreator_id request, channel = self.make_request("PUT", topic_path, topic_content) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) self.helper.auth_user_id = self.user_id request, channel = self.make_request("GET", topic_path) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(topic_content.decode("utf8")), channel.json_body) # set/get topic in created PRIVATE room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) request, channel = self.make_request("PUT", topic_path, topic_content) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) request, channel = self.make_request("GET", topic_path) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) # get topic in PUBLIC room, not joined, expect 403 request, channel = self.make_request( "GET", "/rooms/%s/state/m.room.topic" % self.created_public_rmid ) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) # set topic in PUBLIC room, not joined, expect 403 @@ -220,14 +202,12 @@ class RoomPermissionsTestCase(RoomBase): "/rooms/%s/state/m.room.topic" % self.created_public_rmid, topic_content, ) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) def _test_get_membership(self, room=None, members=[], expect_code=None): for member in members: path = "/rooms/%s/state/m.room.member/%s" % (room, member) request, channel = self.make_request("GET", path) - self.render(request) self.assertEquals(expect_code, channel.code) def test_membership_basic_room_perms(self): @@ -400,18 +380,15 @@ class RoomsMemberListTestCase(RoomBase): def test_get_member_list(self): room_id = self.helper.create_room_as(self.user_id) request, channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) def test_get_member_list_no_room(self): request, channel 
= self.make_request("GET", "/rooms/roomdoesnotexist/members") - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission(self): room_id = self.helper.create_room_as("@some_other_guy:red") request, channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) def test_get_member_list_mixed_memberships(self): @@ -421,19 +398,16 @@ class RoomsMemberListTestCase(RoomBase): self.helper.invite(room=room_id, src=room_creator, targ=self.user_id) # can't see list if you're just invited. request, channel = self.make_request("GET", room_path) - self.render(request) self.assertEquals(403, channel.code, msg=channel.result["body"]) self.helper.join(room=room_id, user=self.user_id) # can see list now joined request, channel = self.make_request("GET", room_path) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) self.helper.leave(room=room_id, user=self.user_id) # can see old list once left request, channel = self.make_request("GET", room_path) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) @@ -446,7 +420,6 @@ class RoomsCreateTestCase(RoomBase): # POST with no config keys, expect new room id request, channel = self.make_request("POST", "/createRoom", "{}") - self.render(request) self.assertEquals(200, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) @@ -455,7 +428,6 @@ class RoomsCreateTestCase(RoomBase): request, channel = self.make_request( "POST", "/createRoom", b'{"visibility":"private"}' ) - self.render(request) self.assertEquals(200, channel.code) self.assertTrue("room_id" in channel.json_body) @@ -464,7 +436,6 @@ class RoomsCreateTestCase(RoomBase): request, channel = self.make_request( "POST", "/createRoom", b'{"custom":"stuff"}' ) - self.render(request) self.assertEquals(200, channel.code) self.assertTrue("room_id" in channel.json_body) @@ -473,18 +444,15 @@ class RoomsCreateTestCase(RoomBase): request, channel = self.make_request( "POST", "/createRoom", b'{"visibility":"private","custom":"things"}' ) - self.render(request) self.assertEquals(200, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_invalid_content(self): # POST with invalid content / paths, expect 400 request, channel = self.make_request("POST", "/createRoom", b'{"visibili') - self.render(request) self.assertEquals(400, channel.code) request, channel = self.make_request("POST", "/createRoom", b'["hello"]') - self.render(request) self.assertEquals(400, channel.code) def test_post_room_invitees_invalid_mxid(self): @@ -493,7 +461,6 @@ class RoomsCreateTestCase(RoomBase): request, channel = self.make_request( "POST", "/createRoom", b'{"invite":["@alice:example.com "]}' ) - self.render(request) self.assertEquals(400, channel.code) @@ -510,52 +477,42 @@ class RoomTopicTestCase(RoomBase): def test_invalid_puts(self): # missing keys or invalid json request, channel = self.make_request("PUT", self.path, "{}") - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) request, channel = self.make_request("PUT", self.path, '{"_name":"bo"}') - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) request, channel = self.make_request("PUT", self.path, '{"nao') - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) request, channel = self.make_request( "PUT", 
self.path, '[{"_name":"bo"},{"_name":"jill"}]' ) - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) request, channel = self.make_request("PUT", self.path, "text only") - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) request, channel = self.make_request("PUT", self.path, "") - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) # valid key, wrong type content = '{"topic":["Topic name"]}' request, channel = self.make_request("PUT", self.path, content) - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) def test_rooms_topic(self): # nothing should be there request, channel = self.make_request("GET", self.path) - self.render(request) self.assertEquals(404, channel.code, msg=channel.result["body"]) # valid put content = '{"topic":"Topic name"}' request, channel = self.make_request("PUT", self.path, content) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) # valid get request, channel = self.make_request("GET", self.path) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) @@ -563,12 +520,10 @@ class RoomTopicTestCase(RoomBase): # valid put with extra keys content = '{"topic":"Seasons","subtopic":"Summer"}' request, channel = self.make_request("PUT", self.path, content) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) # valid get request, channel = self.make_request("GET", self.path) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) @@ -585,29 +540,23 @@ class RoomMemberStateTestCase(RoomBase): path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id) # missing keys or invalid json request, channel = self.make_request("PUT", path, "{}") - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) request, channel = self.make_request("PUT", path, '{"_name":"bo"}') - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) request, channel = self.make_request("PUT", path, '{"nao') - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) request, channel = self.make_request( "PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]' ) - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) request, channel = self.make_request("PUT", path, "text only") - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) request, channel = self.make_request("PUT", path, "") - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) # valid keys, wrong types @@ -617,7 +566,6 @@ class RoomMemberStateTestCase(RoomBase): Membership.LEAVE, ) request, channel = self.make_request("PUT", path, content.encode("ascii")) - self.render(request) self.assertEquals(400, channel.code, msg=channel.result["body"]) def test_rooms_members_self(self): @@ -629,11 +577,9 @@ class RoomMemberStateTestCase(RoomBase): # valid join message (NOOP since we made the room) content = '{"membership":"%s"}' % Membership.JOIN request, channel = self.make_request("PUT", path, content.encode("ascii")) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) request, channel = self.make_request("GET", path, None) - 
self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) expected_response = {"membership": Membership.JOIN} @@ -649,11 +595,9 @@ class RoomMemberStateTestCase(RoomBase): # valid invite message content = '{"membership":"%s"}' % Membership.INVITE request, channel = self.make_request("PUT", path, content) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) request, channel = self.make_request("GET", path, None) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assertEquals(json.loads(content), channel.json_body) @@ -670,11 +614,9 @@ class RoomMemberStateTestCase(RoomBase): "Join us!", ) request, channel = self.make_request("PUT", path, content) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) request, channel = self.make_request("GET", path, None) - self.render(request) self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assertEquals(json.loads(content), channel.json_body) @@ -725,7 +667,6 @@ class RoomJoinRatelimitTestCase(RoomBase): # Update the display name for the user. path = "/_matrix/client/r0/profile/%s/displayname" % self.user_id request, channel = self.make_request("PUT", path, {"displayname": "John Doe"}) - self.render(request) self.assertEquals(channel.code, 200, channel.json_body) # Check that all the rooms have been sent a profile update into. @@ -736,7 +677,6 @@ class RoomJoinRatelimitTestCase(RoomBase): ) request, channel = self.make_request("GET", path) - self.render(request) self.assertEquals(channel.code, 200) self.assertIn("displayname", channel.json_body) @@ -761,7 +701,6 @@ class RoomJoinRatelimitTestCase(RoomBase): # if all of these requests ended up joining the user to a room. 
        for i in range(4):
            request, channel = self.make_request("POST", path % room_id, {})
-            self.render(request)
            self.assertEquals(channel.code, 200)
@@ -777,29 +716,23 @@ class RoomMessagesTestCase(RoomBase):
        path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id))
        # missing keys or invalid json
        request, channel = self.make_request("PUT", path, b"{}")
-        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        request, channel = self.make_request("PUT", path, b'{"_name":"bo"}')
-        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        request, channel = self.make_request("PUT", path, b'{"nao')
-        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        request, channel = self.make_request(
            "PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]'
        )
-        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        request, channel = self.make_request("PUT", path, b"text only")
-        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        request, channel = self.make_request("PUT", path, b"")
-        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

    def test_rooms_messages_sent(self):
@@ -807,20 +740,17 @@ class RoomMessagesTestCase(RoomBase):
        content = b'{"body":"test","msgtype":{"type":"a"}}'
        request, channel = self.make_request("PUT", path, content)
-        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        # custom message types
        content = b'{"body":"test","msgtype":"test.custom.text"}'
        request, channel = self.make_request("PUT", path, content)
-        self.render(request)
        self.assertEquals(200, channel.code, msg=channel.result["body"])

        # m.text message type
        path = "/rooms/%s/send/m.room.message/mid2" % (urlparse.quote(self.room_id))
        content = b'{"body":"test2","msgtype":"m.text"}'
        request, channel = self.make_request("PUT", path, content)
-        self.render(request)
        self.assertEquals(200, channel.code, msg=channel.result["body"])
@@ -837,7 +767,6 @@ class RoomInitialSyncTestCase(RoomBase):
        request, channel = self.make_request(
            "GET", "/rooms/%s/initialSync" % self.room_id
        )
-        self.render(request)
        self.assertEquals(200, channel.code)

        self.assertEquals(self.room_id, channel.json_body["room_id"])
@@ -881,7 +810,6 @@ class RoomMessageListTestCase(RoomBase):
        request, channel = self.make_request(
            "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)
        )
-        self.render(request)
        self.assertEquals(200, channel.code)
        self.assertTrue("start" in channel.json_body)
        self.assertEquals(token, channel.json_body["start"])
@@ -893,7 +821,6 @@ class RoomMessageListTestCase(RoomBase):
        request, channel = self.make_request(
            "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)
        )
-        self.render(request)
        self.assertEquals(200, channel.code)
        self.assertTrue("start" in channel.json_body)
        self.assertEquals(token, channel.json_body["start"])
@@ -933,7 +860,6 @@ class RoomMessageListTestCase(RoomBase):
                json.dumps({"types": [EventTypes.Message]}),
            ),
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.json_body)

        chunk = channel.json_body["chunk"]
@@ -962,7 +888,6 @@ class RoomMessageListTestCase(RoomBase):
                json.dumps({"types": [EventTypes.Message]}),
            ),
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.json_body)

        chunk = channel.json_body["chunk"]
@@ -980,7 +905,6 @@ class RoomMessageListTestCase(RoomBase):
                json.dumps({"types": [EventTypes.Message]}),
            ),
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.json_body)

        chunk = channel.json_body["chunk"]
@@ -1040,7 +964,6 @@ class RoomSearchTestCase(unittest.HomeserverTestCase):
                }
            },
        )
-        self.render(request)

        # Check we get the results we expect -- one search result, of the sent
        # messages
@@ -1074,7 +997,6 @@ class RoomSearchTestCase(unittest.HomeserverTestCase):
                }
            },
        )
-        self.render(request)

        # Check we get the results we expect -- one search result, of the sent
        # messages
@@ -1111,7 +1033,6 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase):

    def test_restricted_no_auth(self):
        request, channel = self.make_request("GET", self.url)
-        self.render(request)
        self.assertEqual(channel.code, 401, channel.result)

    def test_restricted_auth(self):
@@ -1119,7 +1040,6 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase):
        tok = self.login("user", "pass")

        request, channel = self.make_request("GET", self.url, access_token=tok)
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)
@@ -1153,7 +1073,6 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase):
            request_data,
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
@@ -1168,7 +1087,6 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase):
            request_data,
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)
        event_id = channel.json_body["event_id"]
@@ -1177,7 +1095,6 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase):
            "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id),
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        res_displayname = channel.json_body["content"]["displayname"]
@@ -1212,7 +1129,6 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
            content={"reason": reason},
            access_token=self.second_tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        self._check_for_reason(reason)
@@ -1227,7 +1143,6 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
            content={"reason": reason},
            access_token=self.second_tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        self._check_for_reason(reason)
@@ -1242,7 +1157,6 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
            content={"reason": reason, "user_id": self.second_user_id},
            access_token=self.second_tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        self._check_for_reason(reason)
@@ -1257,7 +1171,6 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
            content={"reason": reason, "user_id": self.second_user_id},
            access_token=self.creator_tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        self._check_for_reason(reason)
@@ -1270,7 +1183,6 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
            content={"reason": reason, "user_id": self.second_user_id},
            access_token=self.creator_tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        self._check_for_reason(reason)
@@ -1283,7 +1195,6 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
            content={"reason": reason, "user_id": self.second_user_id},
            access_token=self.creator_tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        self._check_for_reason(reason)
@@ -1303,7 +1214,6 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
            content={"reason": reason},
            access_token=self.second_tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        self._check_for_reason(reason)
@@ -1316,7 +1226,6 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
            ),
            access_token=self.creator_tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        event_content = channel.json_body
@@ -1365,7 +1274,6 @@ class LabelsTestCase(unittest.HomeserverTestCase):
            % (self.room_id, event_id, json.dumps(self.FILTER_LABELS)),
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        events_before = channel.json_body["events_before"]
@@ -1396,7 +1304,6 @@ class LabelsTestCase(unittest.HomeserverTestCase):
            % (self.room_id, event_id, json.dumps(self.FILTER_NOT_LABELS)),
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        events_before = channel.json_body["events_before"]
@@ -1432,7 +1339,6 @@ class LabelsTestCase(unittest.HomeserverTestCase):
            % (self.room_id, event_id, json.dumps(self.FILTER_LABELS_NOT_LABELS)),
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        events_before = channel.json_body["events_before"]
@@ -1460,7 +1366,6 @@ class LabelsTestCase(unittest.HomeserverTestCase):
            "/rooms/%s/messages?access_token=%s&from=%s&filter=%s"
            % (self.room_id, self.tok, token, json.dumps(self.FILTER_LABELS)),
        )
-        self.render(request)

        events = channel.json_body["chunk"]
@@ -1478,7 +1383,6 @@ class LabelsTestCase(unittest.HomeserverTestCase):
            "/rooms/%s/messages?access_token=%s&from=%s&filter=%s"
            % (self.room_id, self.tok, token, json.dumps(self.FILTER_NOT_LABELS)),
        )
-        self.render(request)

        events = channel.json_body["chunk"]
@@ -1507,7 +1411,6 @@ class LabelsTestCase(unittest.HomeserverTestCase):
                json.dumps(self.FILTER_LABELS_NOT_LABELS),
            ),
        )
-        self.render(request)

        events = channel.json_body["chunk"]
@@ -1532,7 +1435,6 @@ class LabelsTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "POST", "/search?access_token=%s" % self.tok, request_data
        )
-        self.render(request)

        results = channel.json_body["search_categories"]["room_events"]["results"]
@@ -1568,7 +1470,6 @@ class LabelsTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "POST", "/search?access_token=%s" % self.tok, request_data
        )
-        self.render(request)

        results = channel.json_body["search_categories"]["room_events"]["results"]
@@ -1616,7 +1517,6 @@ class LabelsTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "POST", "/search?access_token=%s" % self.tok, request_data
        )
-        self.render(request)

        results = channel.json_body["search_categories"]["room_events"]["results"]
@@ -1741,7 +1641,6 @@ class ContextTestCase(unittest.HomeserverTestCase):
            % (self.room_id, event_id),
            access_token=self.tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        events_before = channel.json_body["events_before"]
@@ -1806,7 +1705,6 @@ class ContextTestCase(unittest.HomeserverTestCase):
            % (self.room_id, event_id),
            access_token=invited_tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, 200, channel.result)

        events_before = channel.json_body["events_before"]
@@ -1897,7 +1795,6 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase):
            % (self.room_id,),
            access_token=access_token,
        )
-        self.render(request)
        self.assertEqual(channel.code, expected_code, channel.result)

        res = channel.json_body
        self.assertIsInstance(res, dict)
@@ -1916,7 +1813,6 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "PUT", url, request_data, access_token=self.room_owner_tok
        )
-        self.render(request)
        self.assertEqual(channel.code, expected_code, channel.result)
@@ -1947,7 +1843,6 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase):
        request, channel = self.make_request(
            "PUT", url, request_data, access_token=self.room_owner_tok
        )
-        self.render(request)
        self.assertEqual(channel.code, expected_code, channel.result)

    def _get_canonical_alias(self, expected_code: int = 200) -> JsonDict:
@@ -1957,7 +1852,6 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase):
            "rooms/%s/state/m.room.canonical_alias" % (self.room_id,),
            access_token=self.room_owner_tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, expected_code, channel.result)
        res = channel.json_body
        self.assertIsInstance(res, dict)
@@ -1971,7 +1865,6 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase):
            json.dumps(content),
            access_token=self.room_owner_tok,
        )
-        self.render(request)
        self.assertEqual(channel.code, expected_code, channel.result)
        res = channel.json_body
        self.assertIsInstance(res, dict)
diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py
index cd58ee7792..bbd30f594b 100644
--- a/tests/rest/client/v1/test_typing.py
+++ b/tests/rest/client/v1/test_typing.py
@@ -99,7 +99,6 @@ class RoomTypingTestCase(unittest.HomeserverTestCase):
            "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
            b'{"typing": true, "timeout": 30000}',
        )
-        self.render(request)
        self.assertEquals(200, channel.code)

        self.assertEquals(self.event_source.get_current_key(), 1)
@@ -123,7 +122,6 @@ class RoomTypingTestCase(unittest.HomeserverTestCase):
            "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
            b'{"typing": false}',
        )
-        self.render(request)
        self.assertEquals(200, channel.code)

    def test_typing_timeout(self):
@@ -132,7 +130,6 @@ class RoomTypingTestCase(unittest.HomeserverTestCase):
            "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
            b'{"typing": true, "timeout": 30000}',
        )
-        self.render(request)
        self.assertEquals(200, channel.code)

        self.assertEquals(self.event_source.get_current_key(), 1)
@@ -146,7 +143,6 @@ class RoomTypingTestCase(unittest.HomeserverTestCase):
            "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
            b'{"typing": true, "timeout": 30000}',
        )
-        self.render(request)
        self.assertEquals(200, channel.code)

        self.assertEquals(self.event_source.get_current_key(), 3)
diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py
index b871200909..2ac1ecb7d3 100644
--- a/tests/rest/client/v2_alpha/test_account.py
+++ b/tests/rest/client/v2_alpha/test_account.py
@@ -246,7 +246,6 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
            b"account/password/email/requestToken",
            {"client_secret": client_secret, "email": email, "send_attempt": 1},
        )
-        self.render(request)
        self.assertEquals(200, channel.code, channel.result)

        return channel.json_body["sid"]
@@ -325,7 +324,6 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
                },
            },
        )
-        self.render(request)

        self.assertEquals(expected_code, channel.code, channel.result)
@@ -355,7 +353,6 @@ class DeactivateTestCase(unittest.HomeserverTestCase):

        # Check that this access token has been invalidated.
request, channel = self.make_request("GET", "account/whoami") - self.render(request) self.assertEqual(request.code, 401) def test_pending_invites(self): @@ -413,7 +410,6 @@ class DeactivateTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", "account/deactivate", request_data, access_token=tok ) - self.render(request) self.assertEqual(request.code, 200) @@ -548,7 +544,6 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): }, access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -556,7 +551,6 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url_3pid, access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) @@ -581,14 +575,12 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): {"medium": "email", "address": self.email}, access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Get user request, channel = self.make_request( "GET", self.url_3pid, access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) @@ -615,7 +607,6 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): {"medium": "email", "address": self.email}, access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -624,7 +615,6 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url_3pid, access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) @@ -653,7 +643,6 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): }, access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) @@ -661,7 +650,6 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url_3pid, access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) @@ -688,7 +676,6 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): }, access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) @@ -696,7 +683,6 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url_3pid, access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) @@ -801,7 +787,6 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( 
"POST", b"account/3pid/email/requestToken", body, ) - self.render(request) self.assertEquals(expect_code, channel.code, channel.result) return channel.json_body.get("sid") @@ -814,7 +799,6 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): b"account/3pid/email/requestToken", {"client_secret": client_secret, "email": email, "send_attempt": 1}, ) - self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(expected_errcode, channel.json_body["errcode"]) self.assertEqual(expected_error, channel.json_body["error"]) @@ -824,7 +808,6 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): path = link.replace("https://example.com", "") request, channel = self.make_request("GET", path, shorthand=False) - self.render(request) self.assertEquals(200, channel.code, channel.result) def _get_link_from_email(self): @@ -873,14 +856,12 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Get user request, channel = self.make_request( "GET", self.url_3pid, access_token=self.user_id_tok, ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index 86184f0d2e..f684c37db5 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -72,7 +72,6 @@ class FallbackAuthTests(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", "register", body ) # type: SynapseRequest, FakeChannel - self.render(request) self.assertEqual(request.code, expected_response) return channel @@ -87,7 +86,6 @@ class FallbackAuthTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "auth/m.login.recaptcha/fallback/web?session=" + session ) # type: SynapseRequest, FakeChannel - self.render(request) self.assertEqual(request.code, 200) request, channel = self.make_request( @@ -96,7 +94,6 @@ class FallbackAuthTests(unittest.HomeserverTestCase): + post_session + "&g-recaptcha-response=a", ) - self.render(request) self.assertEqual(request.code, expected_post_response) # The recaptcha handler is called with the response given @@ -177,7 +174,6 @@ class UIAuthTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "devices", access_token=self.user_tok, ) # type: SynapseRequest, FakeChannel - self.render(request) # Get the ID of the device. self.assertEqual(request.code, 200) @@ -190,7 +186,6 @@ class UIAuthTests(unittest.HomeserverTestCase): request, channel = self.make_request( "DELETE", "devices/" + device, body, access_token=self.user_tok ) # type: SynapseRequest, FakeChannel - self.render(request) # Ensure the response is sane. self.assertEqual(request.code, expected_response) @@ -204,7 +199,6 @@ class UIAuthTests(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", "delete_devices", body, access_token=self.user_tok, ) # type: SynapseRequest, FakeChannel - self.render(request) # Ensure the response is sane. 
self.assertEqual(request.code, expected_response) diff --git a/tests/rest/client/v2_alpha/test_capabilities.py b/tests/rest/client/v2_alpha/test_capabilities.py index b9e01c9418..767e126875 100644 --- a/tests/rest/client/v2_alpha/test_capabilities.py +++ b/tests/rest/client/v2_alpha/test_capabilities.py @@ -37,7 +37,6 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): def test_check_auth_required(self): request, channel = self.make_request("GET", self.url) - self.render(request) self.assertEqual(channel.code, 401) @@ -46,7 +45,6 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): access_token = self.login("user", "pass") request, channel = self.make_request("GET", self.url, access_token=access_token) - self.render(request) capabilities = channel.json_body["capabilities"] self.assertEqual(channel.code, 200) @@ -65,7 +63,6 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): access_token = self.login(user, password) request, channel = self.make_request("GET", self.url, access_token=access_token) - self.render(request) capabilities = channel.json_body["capabilities"] self.assertEqual(channel.code, 200) @@ -74,7 +71,6 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): self.assertTrue(capabilities["m.change_password"]["enabled"]) self.get_success(self.store.user_set_password_hash(user, None)) request, channel = self.make_request("GET", self.url, access_token=access_token) - self.render(request) capabilities = channel.json_body["capabilities"] self.assertEqual(channel.code, 200) diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py index de00350580..231d5aefea 100644 --- a/tests/rest/client/v2_alpha/test_filter.py +++ b/tests/rest/client/v2_alpha/test_filter.py @@ -41,7 +41,6 @@ class FilterTestCase(unittest.HomeserverTestCase): "/_matrix/client/r0/user/%s/filter" % (self.user_id), self.EXAMPLE_FILTER_JSON, ) - self.render(request) self.assertEqual(channel.result["code"], b"200") self.assertEqual(channel.json_body, {"filter_id": "0"}) @@ -55,7 +54,6 @@ class FilterTestCase(unittest.HomeserverTestCase): "/_matrix/client/r0/user/%s/filter" % ("@watermelon:test"), self.EXAMPLE_FILTER_JSON, ) - self.render(request) self.assertEqual(channel.result["code"], b"403") self.assertEquals(channel.json_body["errcode"], Codes.FORBIDDEN) @@ -68,7 +66,6 @@ class FilterTestCase(unittest.HomeserverTestCase): "/_matrix/client/r0/user/%s/filter" % (self.user_id), self.EXAMPLE_FILTER_JSON, ) - self.render(request) self.hs.is_mine = _is_mine self.assertEqual(channel.result["code"], b"403") @@ -85,7 +82,6 @@ class FilterTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/%s" % (self.user_id, filter_id) ) - self.render(request) self.assertEqual(channel.result["code"], b"200") self.assertEquals(channel.json_body, self.EXAMPLE_FILTER) @@ -94,7 +90,6 @@ class FilterTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/12382148321" % (self.user_id) ) - self.render(request) self.assertEqual(channel.result["code"], b"404") self.assertEquals(channel.json_body["errcode"], Codes.NOT_FOUND) @@ -105,7 +100,6 @@ class FilterTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/foobar" % (self.user_id) ) - self.render(request) self.assertEqual(channel.result["code"], b"400") @@ -114,6 +108,5 @@ class FilterTestCase(unittest.HomeserverTestCase): request, channel 
= self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/" % (self.user_id) ) - self.render(request) self.assertEqual(channel.result["code"], b"400") diff --git a/tests/rest/client/v2_alpha/test_password_policy.py b/tests/rest/client/v2_alpha/test_password_policy.py index c57072f50c..ee86b94917 100644 --- a/tests/rest/client/v2_alpha/test_password_policy.py +++ b/tests/rest/client/v2_alpha/test_password_policy.py @@ -73,7 +73,6 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/client/r0/password_policy" ) - self.render(request) self.assertEqual(channel.code, 200, channel.result) self.assertEqual( @@ -91,7 +90,6 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): def test_password_too_short(self): request_data = json.dumps({"username": "kermit", "password": "shorty"}) request, channel = self.make_request("POST", self.register_url, request_data) - self.render(request) self.assertEqual(channel.code, 400, channel.result) self.assertEqual( @@ -101,7 +99,6 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): def test_password_no_digit(self): request_data = json.dumps({"username": "kermit", "password": "longerpassword"}) request, channel = self.make_request("POST", self.register_url, request_data) - self.render(request) self.assertEqual(channel.code, 400, channel.result) self.assertEqual( @@ -111,7 +108,6 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): def test_password_no_symbol(self): request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword"}) request, channel = self.make_request("POST", self.register_url, request_data) - self.render(request) self.assertEqual(channel.code, 400, channel.result) self.assertEqual( @@ -121,7 +117,6 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): def test_password_no_uppercase(self): request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword!"}) request, channel = self.make_request("POST", self.register_url, request_data) - self.render(request) self.assertEqual(channel.code, 400, channel.result) self.assertEqual( @@ -131,7 +126,6 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): def test_password_no_lowercase(self): request_data = json.dumps({"username": "kermit", "password": "L0NGERPASSWORD!"}) request, channel = self.make_request("POST", self.register_url, request_data) - self.render(request) self.assertEqual(channel.code, 400, channel.result) self.assertEqual( @@ -141,7 +135,6 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): def test_password_compliant(self): request_data = json.dumps({"username": "kermit", "password": "L0ngerpassword!"}) request, channel = self.make_request("POST", self.register_url, request_data) - self.render(request) # Getting a 401 here means the password has passed validation and the server has # responded with a list of registration flows. 
@@ -173,7 +166,6 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): request_data, access_token=tok, ) - self.render(request) self.assertEqual(channel.code, 400, channel.result) self.assertEqual(channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 98c3887bbf..88923fcea4 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -64,7 +64,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) det_data = {"user_id": user_id, "home_server": self.hs.hostname} @@ -76,14 +75,12 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) - self.render(request) self.assertEquals(channel.result["code"], b"401", channel.result) def test_POST_bad_password(self): request_data = json.dumps({"username": "kermit", "password": 666}) request, channel = self.make_request(b"POST", self.url, request_data) - self.render(request) self.assertEquals(channel.result["code"], b"400", channel.result) self.assertEquals(channel.json_body["error"], "Invalid password") @@ -91,7 +88,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): def test_POST_bad_username(self): request_data = json.dumps({"username": 777, "password": "monkey"}) request, channel = self.make_request(b"POST", self.url, request_data) - self.render(request) self.assertEquals(channel.result["code"], b"400", channel.result) self.assertEquals(channel.json_body["error"], "Invalid username") @@ -107,7 +103,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): } request_data = json.dumps(params) request, channel = self.make_request(b"POST", self.url, request_data) - self.render(request) det_data = { "user_id": user_id, @@ -123,7 +118,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None) request, channel = self.make_request(b"POST", self.url, request_data) - self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) self.assertEquals(channel.json_body["error"], "Registration has been disabled") @@ -133,7 +127,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.hs.config.allow_guest_access = True request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") - self.render(request) det_data = {"home_server": self.hs.hostname, "device_id": "guest_device"} self.assertEquals(channel.result["code"], b"200", channel.result) @@ -143,7 +136,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.hs.config.allow_guest_access = False request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") - self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) self.assertEquals(channel.json_body["error"], "Guest access is disabled") @@ -153,7 +145,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): for i in range(0, 6): url = self.url + b"?kind=guest" request, channel = self.make_request(b"POST", url, b"{}") - self.render(request) if i == 5: self.assertEquals(channel.result["code"], b"429", channel.result) @@ -164,7 
+155,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.reactor.advance(retry_after_ms / 1000.0 + 1.0) request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) @@ -179,7 +169,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): } request_data = json.dumps(params) request, channel = self.make_request(b"POST", self.url, request_data) - self.render(request) if i == 5: self.assertEquals(channel.result["code"], b"429", channel.result) @@ -190,13 +179,11 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.reactor.advance(retry_after_ms / 1000.0 + 1.0) request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) def test_advertised_flows(self): request, channel = self.make_request(b"POST", self.url, b"{}") - self.render(request) self.assertEquals(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] @@ -220,7 +207,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ) def test_advertised_flows_captcha_and_terms_and_3pids(self): request, channel = self.make_request(b"POST", self.url, b"{}") - self.render(request) self.assertEquals(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] @@ -253,7 +239,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ) def test_advertised_flows_no_msisdn_email_required(self): request, channel = self.make_request(b"POST", self.url, b"{}") - self.render(request) self.assertEquals(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] @@ -298,7 +283,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): b"register/email/requestToken", {"client_secret": "foobar", "email": email, "send_attempt": 1}, ) - self.render(request) self.assertEquals(200, channel.code, channel.result) self.assertIsNotNone(channel.json_body.get("sid")) @@ -334,14 +318,12 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): # The specific endpoint doesn't matter, all we need is an authenticated # endpoint. request, channel = self.make_request(b"GET", "/sync", access_token=tok) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) self.reactor.advance(datetime.timedelta(weeks=1).total_seconds()) request, channel = self.make_request(b"GET", "/sync", access_token=tok) - self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) self.assertEquals( @@ -366,13 +348,11 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"POST", url, request_data, access_token=admin_tok ) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) # The specific endpoint doesn't matter, all we need is an authenticated # endpoint. request, channel = self.make_request(b"GET", "/sync", access_token=tok) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) def test_manual_expire(self): @@ -392,13 +372,11 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"POST", url, request_data, access_token=admin_tok ) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) # The specific endpoint doesn't matter, all we need is an authenticated # endpoint. 
request, channel = self.make_request(b"GET", "/sync", access_token=tok) - self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) self.assertEquals( channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result @@ -421,12 +399,10 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( b"POST", url, request_data, access_token=admin_tok ) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) # Try to log the user out request, channel = self.make_request(b"POST", "/logout", access_token=tok) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) # Log the user in again (allowed for expired accounts) @@ -434,7 +410,6 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): # Try to log out all of the user's sessions request, channel = self.make_request(b"POST", "/logout/all", access_token=tok) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) @@ -509,7 +484,6 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): renewal_token = self.get_success(self.store.get_renewal_token_for_user(user_id)) url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token request, channel = self.make_request(b"GET", url) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) # Check that we're getting HTML back. @@ -530,7 +504,6 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): # succeed. self.reactor.advance(datetime.timedelta(days=3).total_seconds()) request, channel = self.make_request(b"GET", "/sync", access_token=tok) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) def test_renewal_invalid_token(self): @@ -538,7 +511,6 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): # expected, i.e. that it responds with 404 Not Found and the correct HTML. url = "/_matrix/client/unstable/account_validity/renew?token=123" request, channel = self.make_request(b"GET", url) - self.render(request) self.assertEquals(channel.result["code"], b"404", channel.result) # Check that we're getting HTML back. 
@@ -564,7 +536,6 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): "/_matrix/client/unstable/account_validity/send_mail", access_token=tok, ) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) self.assertEqual(len(self.email_attempts), 1) @@ -587,7 +558,6 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", "account/deactivate", request_data, access_token=tok ) - self.render(request) self.assertEqual(request.code, 200) self.reactor.advance(datetime.timedelta(days=8).total_seconds()) @@ -641,7 +611,6 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): "/_matrix/client/unstable/account_validity/send_mail", access_token=tok, ) - self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) self.assertEqual(len(self.email_attempts), 1) diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index 99c9f4e928..6cd4eb6624 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -65,7 +65,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): "/rooms/%s/event/%s" % (self.room, event_id), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) self.assert_dict( @@ -114,7 +113,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): % (self.room, self.parent_id), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) # We expect to get back a single pagination result, which is the full @@ -160,7 +158,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): % (self.room, self.parent_id, from_token), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) @@ -219,7 +216,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): % (self.room, self.parent_id, from_token), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) @@ -296,7 +292,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): ), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) @@ -336,7 +331,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): % (self.room, self.parent_id), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) self.assertEquals( @@ -369,7 +363,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): access_token=self.user_token, content={}, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) request, channel = self.make_request( @@ -378,7 +371,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): % (self.room, self.parent_id), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) self.assertEquals( @@ -396,7 +388,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): % (self.room, self.parent_id, RelationTypes.REPLACE), access_token=self.user_token, ) - self.render(request) self.assertEquals(400, channel.code, channel.json_body) def test_aggregation_get_event(self): @@ 
-428,7 +419,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): "/rooms/%s/event/%s" % (self.room, self.parent_id), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) self.assertEquals( @@ -465,7 +455,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): "/rooms/%s/event/%s" % (self.room, self.parent_id), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) self.assertEquals(channel.json_body["content"], new_body) @@ -523,7 +512,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): "/rooms/%s/event/%s" % (self.room, self.parent_id), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) self.assertEquals(channel.json_body["content"], new_body) @@ -567,7 +555,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): % (self.room, original_event_id), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) self.assertIn("chunk", channel.json_body) @@ -581,7 +568,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): access_token=self.user_token, content="{}", ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) # Try to check for remaining m.replace relations @@ -591,7 +577,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): % (self.room, original_event_id), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) # Check that no relations are returned @@ -624,7 +609,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): access_token=self.user_token, content="{}", ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) # Check that aggregations returns zero @@ -634,7 +618,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): % (self.room, original_event_id), access_token=self.user_token, ) - self.render(request) self.assertEquals(200, channel.code, channel.json_body) self.assertIn("chunk", channel.json_body) @@ -680,7 +663,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): json.dumps(content).encode("utf-8"), access_token=access_token, ) - self.render(request) return channel def _create_user(self, localpart): diff --git a/tests/rest/client/v2_alpha/test_shared_rooms.py b/tests/rest/client/v2_alpha/test_shared_rooms.py index 5ae72fd008..562a9c1ba4 100644 --- a/tests/rest/client/v2_alpha/test_shared_rooms.py +++ b/tests/rest/client/v2_alpha/test_shared_rooms.py @@ -47,7 +47,6 @@ class UserSharedRoomsTest(unittest.HomeserverTestCase): % other_user, access_token=token, ) - self.render(request) return request, channel def test_shared_room_list_public(self): diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index f74d611943..31ac0fccb8 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -36,7 +36,6 @@ class FilterTestCase(unittest.HomeserverTestCase): def test_sync_argless(self): request, channel = self.make_request("GET", "/sync") - self.render(request) self.assertEqual(channel.code, 200) self.assertTrue( @@ -57,7 +56,6 @@ class FilterTestCase(unittest.HomeserverTestCase): self.hs.config.use_presence = False request, channel = self.make_request("GET", "/sync") - self.render(request) self.assertEqual(channel.code, 200) self.assertTrue( @@ -199,7 +197,6 @@ class SyncFilterTestCase(unittest.HomeserverTestCase): request, channel = 
self.make_request( "GET", "/sync?filter=%s" % sync_filter, access_token=tok ) - self.render(request) self.assertEqual(channel.code, 200, channel.result) return channel.json_body["rooms"]["join"][room_id]["timeline"]["events"] @@ -253,13 +250,11 @@ class SyncTypingTests(unittest.HomeserverTestCase): typing_url % (room, other_user_id, other_access_token), b'{"typing": true, "timeout": 30000}', ) - self.render(request) self.assertEquals(200, channel.code) request, channel = self.make_request( "GET", "/sync?access_token=%s" % (access_token,) ) - self.render(request) self.assertEquals(200, channel.code) next_batch = channel.json_body["next_batch"] @@ -269,7 +264,6 @@ class SyncTypingTests(unittest.HomeserverTestCase): typing_url % (room, other_user_id, other_access_token), b'{"typing": false}', ) - self.render(request) self.assertEquals(200, channel.code) # Start typing. @@ -278,14 +272,12 @@ class SyncTypingTests(unittest.HomeserverTestCase): typing_url % (room, other_user_id, other_access_token), b'{"typing": true, "timeout": 30000}', ) - self.render(request) self.assertEquals(200, channel.code) # Should return immediately request, channel = self.make_request( "GET", sync_url % (access_token, next_batch) ) - self.render(request) self.assertEquals(200, channel.code) next_batch = channel.json_body["next_batch"] @@ -300,7 +292,6 @@ class SyncTypingTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", sync_url % (access_token, next_batch) ) - self.render(request) self.assertEquals(200, channel.code) next_batch = channel.json_body["next_batch"] @@ -311,7 +302,6 @@ class SyncTypingTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", sync_url % (access_token, next_batch) ) - self.render(request) self.assertEquals(200, channel.code) next_batch = channel.json_body["next_batch"] @@ -399,7 +389,6 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): body, access_token=self.tok, ) - self.render(request) self.assertEqual(channel.code, 200, channel.json_body) # Check that the unread counter is back to 0. 
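Every hunk in this commit follows the same mechanical shape: `make_request` now drives the request to completion itself (hence the `await_result` argument visible in its signature below), so the explicit `self.render(request)` step that used to follow it is deleted and the assertions run directly against the returned channel. A minimal before/after sketch of the test idiom, assuming the `HomeserverTestCase` helpers shown in these diffs (the endpoint and token are illustrative):

    # Before this series: rendering the request was a separate, explicit step.
    request, channel = self.make_request("GET", "/sync", access_token=tok)
    self.render(request)
    self.assertEqual(channel.code, 200)

    # After: make_request() completes the request itself, so render() is a
    # no-op (its body is just `pass`) and every call to it is simply removed.
    request, channel = self.make_request("GET", "/sync", access_token=tok)
    self.assertEqual(channel.code, 200)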
@@ -464,7 +453,6 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", self.url % self.next_batch, access_token=self.tok, ) - self.render(request) self.assertEqual(channel.code, 200, channel.json_body) diff --git a/tests/rest/test_health.py b/tests/rest/test_health.py index f4d06e2200..02a46e5fda 100644 --- a/tests/rest/test_health.py +++ b/tests/rest/test_health.py @@ -26,7 +26,6 @@ class HealthCheckTests(unittest.HomeserverTestCase): def test_health(self): request, channel = self.make_request("GET", "/health", shorthand=False) - self.render(request) self.assertEqual(request.code, 200) self.assertEqual(channel.result["body"], b"OK") diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index a3746e7130..6a930f4148 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -31,7 +31,6 @@ class WellKnownTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/.well-known/matrix/client", shorthand=False ) - self.render(request) self.assertEqual(request.code, 200) self.assertEqual( @@ -48,6 +47,5 @@ class WellKnownTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/.well-known/matrix/client", shorthand=False ) - self.render(request) self.assertEqual(request.code, 404) diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py index 872039c8f1..e0a9cd93ac 100644 --- a/tests/server_notices/test_consent.py +++ b/tests/server_notices/test_consent.py @@ -73,7 +73,6 @@ class ConsentNoticesTests(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/_matrix/client/r0/sync", access_token=self.access_token ) - self.render(request) self.assertEqual(channel.code, 200) # Get the Room ID to join @@ -85,14 +84,12 @@ class ConsentNoticesTests(unittest.HomeserverTestCase): "/_matrix/client/r0/rooms/" + room_id + "/join", access_token=self.access_token, ) - self.render(request) self.assertEqual(channel.code, 200) # Sync again, to get the message in the room request, channel = self.make_request( "GET", "/_matrix/client/r0/sync", access_token=self.access_token ) - self.render(request) self.assertEqual(channel.code, 200) # Get the message diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 6382b19dc3..9c8027a5b2 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -306,7 +306,6 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): tok = self.login("user", "password") request, channel = self.make_request("GET", "/sync?timeout=0", access_token=tok) - self.render(request) invites = channel.json_body["rooms"]["invite"] self.assertEqual(len(invites), 0, invites) @@ -320,7 +319,6 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): # Sync again to retrieve the events in the room, so we can check whether this # room has a notice in it. request, channel = self.make_request("GET", "/sync?timeout=0", access_token=tok) - self.render(request) # Scan the events in the room to search for a message from the server notices # user. @@ -358,7 +356,6 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "/sync?timeout=0", access_token=tok, ) - self.render(request) # Also retrieves the list of invites for this user. 
We don't care about that # one except if we're processing the last user, which should have received an diff --git a/tests/test_mau.py b/tests/test_mau.py index 654a6fa42d..c5ec6396a7 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -202,7 +202,6 @@ class TestMauLimit(unittest.HomeserverTestCase): ) request, channel = self.make_request("POST", "/register", request_data) - self.render(request) if channel.code != 200: raise HttpResponseException( @@ -215,7 +214,6 @@ class TestMauLimit(unittest.HomeserverTestCase): def do_sync_for_user(self, token): request, channel = self.make_request("GET", "/sync", access_token=token) - self.render(request) if channel.code != 200: raise HttpResponseException( diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index b89798336c..71580b454d 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -54,7 +54,6 @@ class TermsTestCase(unittest.HomeserverTestCase): # Do a UI auth request request_data = json.dumps({"username": "kermit", "password": "monkey"}) request, channel = self.make_request(b"POST", self.url, request_data) - self.render(request) self.assertEquals(channel.result["code"], b"401", channel.result) @@ -98,7 +97,6 @@ class TermsTestCase(unittest.HomeserverTestCase): self.registration_handler.check_username = Mock(return_value=True) request, channel = self.make_request(b"POST", self.url, request_data) - self.render(request) # We don't bother checking that the response is correct - we'll leave that to # other tests. We just want to make sure we're on the right path. @@ -116,7 +114,6 @@ class TermsTestCase(unittest.HomeserverTestCase): } ) request, channel = self.make_request(b"POST", self.url, request_data) - self.render(request) # We're interested in getting a response that looks like a successful # registration, not so much that the details are exactly what we want. diff --git a/tests/unittest.py b/tests/unittest.py index 8a49bb5262..c7c889c405 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -440,16 +440,6 @@ class HomeserverTestCase(TestCase): await_result, ) - def render(self, request): - """ - Render a request against the resources registered by the test class's - servlets. - - Args: - request (synapse.http.site.SynapseRequest): The request to render. 
- """ - pass - def setup_test_homeserver(self, *args, **kwargs): """ Set up the test homeserver, meant to be called by the overridable @@ -565,7 +555,6 @@ class HomeserverTestCase(TestCase): # Create the user request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register") - self.render(request) self.assertEqual(channel.code, 200, msg=channel.result) nonce = channel.json_body["nonce"] @@ -593,7 +582,6 @@ class HomeserverTestCase(TestCase): request, channel = self.make_request( "POST", "/_matrix/client/r0/admin/register", body.encode("utf8") ) - self.render(request) self.assertEqual(channel.code, 200, channel.json_body) user_id = channel.json_body["user_id"] @@ -612,7 +600,6 @@ class HomeserverTestCase(TestCase): request, channel = self.make_request( "POST", "/_matrix/client/r0/login", json.dumps(body).encode("utf8") ) - self.render(request) self.assertEqual(channel.code, 200, channel.result) access_token = channel.json_body["access_token"] @@ -681,7 +668,6 @@ class HomeserverTestCase(TestCase): request, channel = self.make_request( "POST", "/_matrix/client/r0/login", json.dumps(body).encode("utf8") ) - self.render(request) self.assertEqual(channel.code, 403, channel.result) def inject_room_member(self, room: str, user: str, membership: Membership) -> None: -- cgit 1.5.1 From f737368a26bb9eea401fcc3a5bdd7e0b59e91f09 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 17 Nov 2020 10:51:25 +0000 Subject: Add admin API for logging in as a user (#8617) --- changelog.d/8617.feature | 1 + docs/admin_api/user_admin_api.rst | 35 ++++ synapse/api/auth_blocking.py | 33 ++- synapse/handlers/_base.py | 4 +- synapse/handlers/auth.py | 24 ++- synapse/handlers/deactivate_account.py | 5 +- synapse/handlers/message.py | 21 +- synapse/handlers/profile.py | 8 +- synapse/handlers/register.py | 24 ++- synapse/handlers/room.py | 10 +- synapse/handlers/room_member.py | 5 +- synapse/handlers/sync.py | 4 +- synapse/module_api/__init__.py | 5 +- synapse/rest/admin/__init__.py | 2 + synapse/rest/admin/rooms.py | 4 +- synapse/rest/admin/users.py | 54 ++++- synapse/rest/client/v2_alpha/sync.py | 1 + synapse/server_notices/server_notices_manager.py | 13 +- synapse/storage/databases/main/registration.py | 2 + tests/api/test_auth.py | 6 +- tests/handlers/test_sync.py | 14 +- tests/module_api/test_api.py | 11 +- tests/rest/admin/test_user.py | 245 ++++++++++++++++++++++- tests/storage/test_cleanup_extrems.py | 30 --- tests/test_state.py | 1 + 25 files changed, 475 insertions(+), 87 deletions(-) create mode 100644 changelog.d/8617.feature (limited to 'tests') diff --git a/changelog.d/8617.feature b/changelog.d/8617.feature new file mode 100644 index 0000000000..4f1e788506 --- /dev/null +++ b/changelog.d/8617.feature @@ -0,0 +1 @@ +Add admin API for logging in as a user. diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index d4051d0257..95b3da27c4 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -424,6 +424,41 @@ The following fields are returned in the JSON response body: - ``next_token``: integer - Indication for pagination. See above. - ``total`` - integer - Total number of media. +Login as a user +=============== + +Get an access token that can be used to authenticate as that user. Useful for +when admins wish to do actions on behalf of a user. 
+ +The API is:: + + POST /_synapse/admin/v1/users/<user_id>/login + {} + +An optional ``valid_until_ms`` field can be specified in the request body as an +integer timestamp that specifies when the token should expire. By default tokens +do not expire. + +A response body like the following is returned: + +.. code:: json + + { + "access_token": "<opaque_access_token_string>" + } + + +This API does *not* generate a new device for the user, and so will not appear in +their ``/devices`` list, and in general the target user should not be able to +tell that an admin has logged in as them. + +To expire the token call the standard ``/logout`` API with the token. + +Note: The token will expire if the *admin* user calls ``/logout/all`` from any +of their devices, but the token will *not* expire if the target user does the +same. + + User devices ============ diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py index d8fafd7cb8..9c227218e0 100644 --- a/synapse/api/auth_blocking.py +++ b/synapse/api/auth_blocking.py @@ -14,10 +14,12 @@ # limitations under the License. import logging +from typing import Optional from synapse.api.constants import LimitBlockingTypes, UserTypes from synapse.api.errors import Codes, ResourceLimitError from synapse.config.server import is_threepid_reserved +from synapse.types import Requester logger = logging.getLogger(__name__) @@ -33,24 +35,47 @@ class AuthBlocking: self._max_mau_value = hs.config.max_mau_value self._limit_usage_by_mau = hs.config.limit_usage_by_mau self._mau_limits_reserved_threepids = hs.config.mau_limits_reserved_threepids + self._server_name = hs.hostname - async def check_auth_blocking(self, user_id=None, threepid=None, user_type=None): + async def check_auth_blocking( + self, + user_id: Optional[str] = None, + threepid: Optional[dict] = None, + user_type: Optional[str] = None, + requester: Optional[Requester] = None, + ): """Checks if the user should be rejected for some external reason, such as monthly active user limiting or global disable flag Args: - user_id(str|None): If present, checks for presence against existing + user_id: If present, checks for presence against existing MAU cohort - threepid(dict|None): If present, checks for presence against configured + threepid: If present, checks for presence against configured reserved threepid. Used in cases where the user is trying register with a MAU blocked server, normally they would be rejected but their threepid is on the reserved list. user_id and threepid should never be set at the same time. - user_type(str|None): If present, is used to decide whether to check against + user_type: If present, is used to decide whether to check against certain blocking reasons like MAU. + + requester: If present, and the authenticated entity is a user, checks for + presence against existing MAU cohort. Passing in both a `user_id` and + `requester` is an error. """ + if requester and user_id: + raise Exception( + "Passed in both 'user_id' and 'requester' to 'check_auth_blocking'" + ) + + if requester: + if requester.authenticated_entity.startswith("@"): + user_id = requester.authenticated_entity + elif requester.authenticated_entity == self._server_name: + # We never block the server from doing actions on behalf of + # users.
+ return # Never fail an auth check for the server notices users or support user # This can be a problem where event creation is prohibited due to blocking diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index bd8e71ae56..bb81c0e81d 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -169,7 +169,9 @@ class BaseHandler: # and having homeservers have their own users leave keeps more # of that decision-making and control local to the guest-having # homeserver. - requester = synapse.types.create_requester(target_user, is_guest=True) + requester = synapse.types.create_requester( + target_user, is_guest=True, authenticated_entity=self.server_name + ) handler = self.hs.get_room_member_handler() await handler.update_membership( requester, diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 213baea2e3..5163afd86c 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -698,8 +698,12 @@ class AuthHandler(BaseHandler): } async def get_access_token_for_user_id( - self, user_id: str, device_id: Optional[str], valid_until_ms: Optional[int] - ): + self, + user_id: str, + device_id: Optional[str], + valid_until_ms: Optional[int], + puppets_user_id: Optional[str] = None, + ) -> str: """ Creates a new access token for the user with the given user ID. @@ -725,13 +729,25 @@ class AuthHandler(BaseHandler): fmt_expiry = time.strftime( " until %Y-%m-%d %H:%M:%S", time.localtime(valid_until_ms / 1000.0) ) - logger.info("Logging in user %s on device %s%s", user_id, device_id, fmt_expiry) + + if puppets_user_id: + logger.info( + "Logging in user %s as %s%s", user_id, puppets_user_id, fmt_expiry + ) + else: + logger.info( + "Logging in user %s on device %s%s", user_id, device_id, fmt_expiry + ) await self.auth.check_auth_blocking(user_id) access_token = self.macaroon_gen.generate_access_token(user_id) await self.store.add_access_token_to_user( - user_id, access_token, device_id, valid_until_ms + user_id=user_id, + token=access_token, + device_id=device_id, + valid_until_ms=valid_until_ms, + puppets_user_id=puppets_user_id, ) # the device *should* have been registered before we got here; however, diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 4efe6c530a..e808142365 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -39,6 +39,7 @@ class DeactivateAccountHandler(BaseHandler): self._room_member_handler = hs.get_room_member_handler() self._identity_handler = hs.get_identity_handler() self.user_directory_handler = hs.get_user_directory_handler() + self._server_name = hs.hostname # Flag that indicates whether the process to part users from rooms is running self._user_parter_running = False @@ -152,7 +153,7 @@ class DeactivateAccountHandler(BaseHandler): for room in pending_invites: try: await self._room_member_handler.update_membership( - create_requester(user), + create_requester(user, authenticated_entity=self._server_name), user, room.room_id, "leave", @@ -208,7 +209,7 @@ class DeactivateAccountHandler(BaseHandler): logger.info("User parter parting %r from %r", user_id, room_id) try: await self._room_member_handler.update_membership( - create_requester(user), + create_requester(user, authenticated_entity=self._server_name), user, room_id, "leave", diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index c6791fb912..96843338ae 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -472,7 +472,7 @@ class 
EventCreationHandler: Returns: Tuple of created event, Context """ - await self.auth.check_auth_blocking(requester.user.to_string()) + await self.auth.check_auth_blocking(requester=requester) if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "": room_version = event_dict["content"]["room_version"] @@ -619,7 +619,13 @@ class EventCreationHandler: if requester.app_service is not None: return - user_id = requester.user.to_string() + user_id = requester.authenticated_entity + if not user_id.startswith("@"): + # The authenticated entity might not be a user, e.g. if it's the + # server puppetting the user. + return + + user = UserID.from_string(user_id) # exempt the system notices user if ( @@ -639,9 +645,7 @@ class EventCreationHandler: if u["consent_version"] == self.config.user_consent_version: return - consent_uri = self._consent_uri_builder.build_user_consent_uri( - requester.user.localpart - ) + consent_uri = self._consent_uri_builder.build_user_consent_uri(user.localpart) msg = self._block_events_without_consent_error % {"consent_uri": consent_uri} raise ConsentNotGivenError(msg=msg, consent_uri=consent_uri) @@ -1252,7 +1256,7 @@ class EventCreationHandler: for user_id in members: if not self.hs.is_mine_id(user_id): continue - requester = create_requester(user_id) + requester = create_requester(user_id, authenticated_entity=self.server_name) try: event, context = await self.create_event( requester, @@ -1273,11 +1277,6 @@ class EventCreationHandler: requester, event, context, ratelimit=False, ignore_shadow_ban=True, ) return True - except ConsentNotGivenError: - logger.info( - "Failed to send dummy event into room %s for user %s due to " - "lack of consent. Will try another user" % (room_id, user_id) - ) except AuthError: logger.info( "Failed to send dummy event into room %s for user %s due to " diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 74a1ddd780..dee0ef45e7 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -206,7 +206,9 @@ class ProfileHandler(BaseHandler): # the join event to update the displayname in the rooms. # This must be done by the target user himself. 
if by_admin: - requester = create_requester(target_user) + requester = create_requester( + target_user, authenticated_entity=requester.authenticated_entity, + ) await self.store.set_profile_displayname( target_user.localpart, displayname_to_set @@ -286,7 +288,9 @@ class ProfileHandler(BaseHandler): # Same like set_displayname if by_admin: - requester = create_requester(target_user) + requester = create_requester( + target_user, authenticated_entity=requester.authenticated_entity + ) await self.store.set_profile_avatar_url(target_user.localpart, new_avatar_url) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index ed1ff62599..252f700786 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -52,6 +52,7 @@ class RegistrationHandler(BaseHandler): self.ratelimiter = hs.get_registration_ratelimiter() self.macaroon_gen = hs.get_macaroon_generator() self._server_notices_mxid = hs.config.server_notices_mxid + self._server_name = hs.hostname self.spam_checker = hs.get_spam_checker() @@ -317,7 +318,8 @@ class RegistrationHandler(BaseHandler): requires_join = False if self.hs.config.registration.auto_join_user_id: fake_requester = create_requester( - self.hs.config.registration.auto_join_user_id + self.hs.config.registration.auto_join_user_id, + authenticated_entity=self._server_name, ) # If the room requires an invite, add the user to the list of invites. @@ -329,7 +331,9 @@ class RegistrationHandler(BaseHandler): # being necessary this will occur after the invite was sent. requires_join = True else: - fake_requester = create_requester(user_id) + fake_requester = create_requester( + user_id, authenticated_entity=self._server_name + ) # Choose whether to federate the new room. if not self.hs.config.registration.autocreate_auto_join_rooms_federated: @@ -362,7 +366,9 @@ class RegistrationHandler(BaseHandler): # created it, then ensure the first user joins it. if requires_join: await room_member_handler.update_membership( - requester=create_requester(user_id), + requester=create_requester( + user_id, authenticated_entity=self._server_name + ), target=UserID.from_string(user_id), room_id=info["room_id"], # Since it was just created, there are no remote hosts. @@ -370,11 +376,6 @@ class RegistrationHandler(BaseHandler): action="join", ratelimit=False, ) - - except ConsentNotGivenError as e: - # Technically not necessary to pull out this error though - # moving away from bare excepts is a good thing to do. - logger.error("Failed to join new user to %r: %r", r, e) except Exception as e: logger.error("Failed to join new user to %r: %r", r, e) @@ -426,7 +427,8 @@ class RegistrationHandler(BaseHandler): if requires_invite: await room_member_handler.update_membership( requester=create_requester( - self.hs.config.registration.auto_join_user_id + self.hs.config.registration.auto_join_user_id, + authenticated_entity=self._server_name, ), target=UserID.from_string(user_id), room_id=room_id, @@ -437,7 +439,9 @@ class RegistrationHandler(BaseHandler): # Send the join. 
await room_member_handler.update_membership( - requester=create_requester(user_id), + requester=create_requester( + user_id, authenticated_entity=self._server_name + ), target=UserID.from_string(user_id), room_id=room_id, remote_room_hosts=remote_room_hosts, diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index e73031475f..930047e730 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -587,7 +587,7 @@ class RoomCreationHandler(BaseHandler): """ user_id = requester.user.to_string() - await self.auth.check_auth_blocking(user_id) + await self.auth.check_auth_blocking(requester=requester) if ( self._server_notices_mxid is not None @@ -1257,7 +1257,9 @@ class RoomShutdownHandler: 400, "User must be our own: %s" % (new_room_user_id,) ) - room_creator_requester = create_requester(new_room_user_id) + room_creator_requester = create_requester( + new_room_user_id, authenticated_entity=requester_user_id + ) info, stream_id = await self._room_creation_handler.create_room( room_creator_requester, @@ -1297,7 +1299,9 @@ class RoomShutdownHandler: try: # Kick users from room - target_requester = create_requester(user_id) + target_requester = create_requester( + user_id, authenticated_entity=requester_user_id + ) _, stream_id = await self.room_member_handler.update_membership( requester=target_requester, target=target_requester.user, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index fd85e08973..70f8966267 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -965,6 +965,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): self.distributor = hs.get_distributor() self.distributor.declare("user_left_room") + self._server_name = hs.hostname async def _is_remote_room_too_complex( self, room_id: str, remote_room_hosts: List[str] @@ -1059,7 +1060,9 @@ class RoomMemberMasterHandler(RoomMemberHandler): return event_id, stream_id # The room is too large. Leave. - requester = types.create_requester(user, None, False, False, None) + requester = types.create_requester( + user, authenticated_entity=self._server_name + ) await self.update_membership( requester=requester, target=user, room_id=room_id, action="leave" ) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 32e53c2d25..9827c7eb8d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -31,6 +31,7 @@ from synapse.types import ( Collection, JsonDict, MutableStateMap, + Requester, RoomStreamToken, StateMap, StreamToken, @@ -260,6 +261,7 @@ class SyncHandler: async def wait_for_sync_for_user( self, + requester: Requester, sync_config: SyncConfig, since_token: Optional[StreamToken] = None, timeout: int = 0, @@ -273,7 +275,7 @@ class SyncHandler: # not been exceeded (if not part of the group by this point, almost certain # auth_blocking will occur) user_id = sync_config.user.to_string() - await self.auth.check_auth_blocking(user_id) + await self.auth.check_auth_blocking(requester=requester) res = await self.response_cache.wrap( sync_config.request_key, diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 0142542852..72ab5750cc 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -49,6 +49,7 @@ class ModuleApi: self._store = hs.get_datastore() self._auth = hs.get_auth() self._auth_handler = auth_handler + self._server_name = hs.hostname # We expose these as properties below in order to attach a helpful docstring. 
self._http_client = hs.get_simple_http_client() # type: SimpleHttpClient @@ -336,7 +337,9 @@ class ModuleApi: SynapseError if the event was not allowed. """ # Create a requester object - requester = create_requester(event_dict["sender"]) + requester = create_requester( + event_dict["sender"], authenticated_entity=self._server_name + ) # Create and send the event ( diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 2a4f7a1740..7a3a5c46ca 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -61,6 +61,7 @@ from synapse.rest.admin.users import ( UserRestServletV2, UsersRestServlet, UsersRestServletV2, + UserTokenRestServlet, WhoisRestServlet, ) from synapse.types import RoomStreamToken @@ -223,6 +224,7 @@ def register_servlets(hs, http_server): UserAdminServlet(hs).register(http_server) UserMediaRestServlet(hs).register(http_server) UserMembershipRestServlet(hs).register(http_server) + UserTokenRestServlet(hs).register(http_server) UserRestServletV2(hs).register(http_server) UsersRestServletV2(hs).register(http_server) DeviceRestServlet(hs).register(http_server) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index f5304ff43d..ee345e12ce 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -309,7 +309,9 @@ class JoinRoomAliasServlet(RestServlet): 400, "%s was not legal room ID or room alias" % (room_identifier,) ) - fake_requester = create_requester(target_user) + fake_requester = create_requester( + target_user, authenticated_entity=requester.authenticated_entity + ) # send invite if room has "JoinRules.INVITE" room_state = await self.state_handler.get_current_state(room_id) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 3638e219f2..fa8d8e6d91 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -16,7 +16,7 @@ import hashlib import hmac import logging from http import HTTPStatus -from typing import Tuple +from typing import TYPE_CHECKING, Tuple from synapse.api.constants import UserTypes from synapse.api.errors import Codes, NotFoundError, SynapseError @@ -37,6 +37,9 @@ from synapse.rest.admin._base import ( ) from synapse.types import JsonDict, UserID +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) _GET_PUSHERS_ALLOWED_KEYS = { @@ -828,3 +831,52 @@ class UserMediaRestServlet(RestServlet): ret["next_token"] = start + len(media) return 200, ret + + +class UserTokenRestServlet(RestServlet): + """An admin API for logging in as a user. 
+ + Example: + + POST /_synapse/admin/v1/users/@test:example.com/login + {} + + 200 OK + { + "access_token": "" + } + """ + + PATTERNS = admin_patterns("/users/(?P[^/]*)/login$") + + def __init__(self, hs: "HomeServer"): + self.hs = hs + self.store = hs.get_datastore() + self.auth = hs.get_auth() + self.auth_handler = hs.get_auth_handler() + + async def on_POST(self, request, user_id): + requester = await self.auth.get_user_by_req(request) + await assert_user_is_admin(self.auth, requester.user) + auth_user = requester.user + + if not self.hs.is_mine_id(user_id): + raise SynapseError(400, "Only local users can be logged in as") + + body = parse_json_object_from_request(request, allow_empty_body=True) + + valid_until_ms = body.get("valid_until_ms") + if valid_until_ms and not isinstance(valid_until_ms, int): + raise SynapseError(400, "'valid_until_ms' parameter must be an int") + + if auth_user.to_string() == user_id: + raise SynapseError(400, "Cannot use admin API to login as self") + + token = await self.auth_handler.get_access_token_for_user_id( + user_id=auth_user.to_string(), + device_id=None, + valid_until_ms=valid_until_ms, + puppets_user_id=user_id, + ) + + return 200, {"access_token": token} diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 2b84eb89c0..8e52e4cca4 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -171,6 +171,7 @@ class SyncRestServlet(RestServlet): ) with context: sync_result = await self.sync_handler.wait_for_sync_for_user( + requester, sync_config, since_token=since_token, timeout=timeout, diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index d464c75c03..100dbd5e2c 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -39,6 +39,7 @@ class ServerNoticesManager: self._room_member_handler = hs.get_room_member_handler() self._event_creation_handler = hs.get_event_creation_handler() self._is_mine_id = hs.is_mine_id + self._server_name = hs.hostname self._notifier = hs.get_notifier() self.server_notices_mxid = self._config.server_notices_mxid @@ -72,7 +73,9 @@ class ServerNoticesManager: await self.maybe_invite_user_to_room(user_id, room_id) system_mxid = self._config.server_notices_mxid - requester = create_requester(system_mxid) + requester = create_requester( + system_mxid, authenticated_entity=self._server_name + ) logger.info("Sending server notice to %s", user_id) @@ -145,7 +148,9 @@ class ServerNoticesManager: "avatar_url": self._config.server_notices_mxid_avatar_url, } - requester = create_requester(self.server_notices_mxid) + requester = create_requester( + self.server_notices_mxid, authenticated_entity=self._server_name + ) info, _ = await self._room_creation_handler.create_room( requester, config={ @@ -174,7 +179,9 @@ class ServerNoticesManager: user_id: The ID of the user to invite. room_id: The ID of the room to invite the user to. """ - requester = create_requester(self.server_notices_mxid) + requester = create_requester( + self.server_notices_mxid, authenticated_entity=self._server_name + ) # Check whether the user has already joined or been invited to this room. If # that's the case, there is no need to re-invite them. 
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index e5d07ce72a..fedb8a6c26 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1110,6 +1110,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): token: str, device_id: Optional[str], valid_until_ms: Optional[int], + puppets_user_id: Optional[str] = None, ) -> int: """Adds an access token for the given user. @@ -1133,6 +1134,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): "token": token, "device_id": device_id, "valid_until_ms": valid_until_ms, + "puppets_user_id": puppets_user_id, }, desc="add_access_token_to_user", ) diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 0fd55f428a..ee5217b074 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -282,7 +282,11 @@ class AuthTestCase(unittest.TestCase): ) ) self.store.add_access_token_to_user.assert_called_with( - USER_ID, token, "DEVICE", None + user_id=USER_ID, + token=token, + device_id="DEVICE", + valid_until_ms=None, + puppets_user_id=None, ) def get_user(tok): diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index e178d7765b..e62586142e 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -16,7 +16,7 @@ from synapse.api.errors import Codes, ResourceLimitError from synapse.api.filtering import DEFAULT_FILTER_COLLECTION from synapse.handlers.sync import SyncConfig -from synapse.types import UserID +from synapse.types import UserID, create_requester import tests.unittest import tests.utils @@ -38,6 +38,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): user_id1 = "@user1:test" user_id2 = "@user2:test" sync_config = self._generate_sync_config(user_id1) + requester = create_requester(user_id1) self.reactor.advance(100) # So we get not 0 time self.auth_blocking._limit_usage_by_mau = True @@ -45,21 +46,26 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # Check that the happy case does not throw errors self.get_success(self.store.upsert_monthly_active_user(user_id1)) - self.get_success(self.sync_handler.wait_for_sync_for_user(sync_config)) + self.get_success( + self.sync_handler.wait_for_sync_for_user(requester, sync_config) + ) # Test that global lock works self.auth_blocking._hs_disabled = True e = self.get_failure( - self.sync_handler.wait_for_sync_for_user(sync_config), ResourceLimitError + self.sync_handler.wait_for_sync_for_user(requester, sync_config), + ResourceLimitError, ) self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.auth_blocking._hs_disabled = False sync_config = self._generate_sync_config(user_id2) + requester = create_requester(user_id2) e = self.get_failure( - self.sync_handler.wait_for_sync_for_user(sync_config), ResourceLimitError + self.sync_handler.wait_for_sync_for_user(requester, sync_config), + ResourceLimitError, ) self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 9b573ac24d..27206ca3db 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -94,12 +94,13 @@ class ModuleApiTestCase(HomeserverTestCase): self.assertFalse(hasattr(event, "state_key")) self.assertDictEqual(event.content, content) + expected_requester = create_requester( + user_id, authenticated_entity=self.hs.hostname + ) + # Check that the event was sent 
self.event_creation_handler.create_and_send_nonmember_event.assert_called_with( - create_requester(user_id), - event_dict, - ratelimit=False, - ignore_shadow_ban=True, + expected_requester, event_dict, ratelimit=False, ignore_shadow_ban=True, ) # Create and send a state event @@ -128,7 +129,7 @@ class ModuleApiTestCase(HomeserverTestCase): # Check that the event was sent self.event_creation_handler.create_and_send_nonmember_event.assert_called_with( - create_requester(user_id), + expected_requester, { "type": "m.room.power_levels", "content": content, diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index d74efede06..94cd45066c 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -24,8 +24,8 @@ from mock import Mock import synapse.rest.admin from synapse.api.constants import UserTypes from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError -from synapse.rest.client.v1 import login, profile, room -from synapse.rest.client.v2_alpha import sync +from synapse.rest.client.v1 import login, logout, profile, room +from synapse.rest.client.v2_alpha import devices, sync from tests import unittest from tests.test_utils import make_awaitable @@ -1638,3 +1638,244 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): self.assertIn("last_access_ts", m) self.assertIn("quarantined_by", m) self.assertIn("safe_from_quarantine", m) + + +class UserTokenRestTestCase(unittest.HomeserverTestCase): + """Test for /_synapse/admin/v1/users//login + """ + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + sync.register_servlets, + room.register_servlets, + devices.register_servlets, + logout.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + self.url = "/_synapse/admin/v1/users/%s/login" % urllib.parse.quote( + self.other_user + ) + + def _get_token(self) -> str: + request, channel = self.make_request( + "POST", self.url, b"{}", access_token=self.admin_user_tok + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + return channel.json_body["access_token"] + + def test_no_auth(self): + """Try to login as a user without authentication. + """ + request, channel = self.make_request("POST", self.url, b"{}") + self.render(request) + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_not_admin(self): + """Try to login as a user as a non-admin user. + """ + request, channel = self.make_request( + "POST", self.url, b"{}", access_token=self.other_user_tok + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + + def test_send_event(self): + """Test that sending event as a user works. + """ + # Create a room. + room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_tok) + + # Login in as the user + puppet_token = self._get_token() + + # Test that sending works, and generates the event as the right user. 
+ resp = self.helper.send_event(room_id, "com.example.test", tok=puppet_token) + event_id = resp["event_id"] + event = self.get_success(self.store.get_event(event_id)) + self.assertEqual(event.sender, self.other_user) + + def test_devices(self): + """Tests that logging in as a user doesn't create a new device for them. + """ + # Login in as the user + self._get_token() + + # Check that we don't see a new device in our devices list + request, channel = self.make_request( + "GET", "devices", b"{}", access_token=self.other_user_tok + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # We should only see the one device (from the login in `prepare`) + self.assertEqual(len(channel.json_body["devices"]), 1) + + def test_logout(self): + """Test that calling `/logout` with the token works. + """ + # Login in as the user + puppet_token = self._get_token() + + # Test that we can successfully make a request + request, channel = self.make_request( + "GET", "devices", b"{}", access_token=puppet_token + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # Logout with the puppet token + request, channel = self.make_request( + "POST", "logout", b"{}", access_token=puppet_token + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # The puppet token should no longer work + request, channel = self.make_request( + "GET", "devices", b"{}", access_token=puppet_token + ) + self.render(request) + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + + # .. but the real user's tokens should still work + request, channel = self.make_request( + "GET", "devices", b"{}", access_token=self.other_user_tok + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + def test_user_logout_all(self): + """Tests that the target user calling `/logout/all` does *not* expire + the token. + """ + # Login in as the user + puppet_token = self._get_token() + + # Test that we can successfully make a request + request, channel = self.make_request( + "GET", "devices", b"{}", access_token=puppet_token + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # Logout all with the real user token + request, channel = self.make_request( + "POST", "logout/all", b"{}", access_token=self.other_user_tok + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # The puppet token should still work + request, channel = self.make_request( + "GET", "devices", b"{}", access_token=puppet_token + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # .. but the real user's tokens shouldn't + request, channel = self.make_request( + "GET", "devices", b"{}", access_token=self.other_user_tok + ) + self.render(request) + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + + def test_admin_logout_all(self): + """Tests that the admin user calling `/logout/all` does expire the + token. 
+ """ + # Login in as the user + puppet_token = self._get_token() + + # Test that we can successfully make a request + request, channel = self.make_request( + "GET", "devices", b"{}", access_token=puppet_token + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # Logout all with the admin user token + request, channel = self.make_request( + "POST", "logout/all", b"{}", access_token=self.admin_user_tok + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # The puppet token should no longer work + request, channel = self.make_request( + "GET", "devices", b"{}", access_token=puppet_token + ) + self.render(request) + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + + # .. but the real user's tokens should still work + request, channel = self.make_request( + "GET", "devices", b"{}", access_token=self.other_user_tok + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + @unittest.override_config( + { + "public_baseurl": "https://example.org/", + "user_consent": { + "version": "1.0", + "policy_name": "My Cool Privacy Policy", + "template_dir": "/", + "require_at_registration": True, + "block_events_error": "You should accept the policy", + }, + "form_secret": "123secret", + } + ) + def test_consent(self): + """Test that sending a message is not subject to the privacy policies. + """ + # Have the admin user accept the terms. + self.get_success(self.store.user_set_consent_version(self.admin_user, "1.0")) + + # First, cheekily accept the terms and create a room + self.get_success(self.store.user_set_consent_version(self.other_user, "1.0")) + room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_tok) + self.helper.send_event(room_id, "com.example.test", tok=self.other_user_tok) + + # Now unaccept it and check that we can't send an event + self.get_success(self.store.user_set_consent_version(self.other_user, "0.0")) + self.helper.send_event( + room_id, "com.example.test", tok=self.other_user_tok, expect_code=403 + ) + + # Login in as the user + puppet_token = self._get_token() + + # Sending an event on their behalf should work fine + self.helper.send_event(room_id, "com.example.test", tok=puppet_token) + + @override_config( + {"limit_usage_by_mau": True, "max_mau_value": 1, "mau_trial_days": 0} + ) + def test_mau_limit(self): + # Create a room as the admin user. This will bump the monthly active users to 1. + room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + + # Trying to join as the other user should fail due to reaching MAU limit. + self.helper.join( + room_id, user=self.other_user, tok=self.other_user_tok, expect_code=403 + ) + + # Logging in as the other user and joining a room should work, even + # though the MAU limit would stop the user doing so. 
+ puppet_token = self._get_token() + self.helper.join(room_id, user=self.other_user, tok=puppet_token) diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 5a1e5c4e66..c13a57dad1 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -309,36 +309,6 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase): ) self.assertTrue(len(latest_event_ids) < 10, len(latest_event_ids)) - @patch("synapse.handlers.message._DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY", new=0) - def test_send_dummy_event_without_consent(self): - self._create_extremity_rich_graph() - self._enable_consent_checking() - - # Pump the reactor repeatedly so that the background updates have a - # chance to run. Attempt to add dummy event with user that has not consented - # Check that dummy event send fails. - self.pump(10 * 60) - latest_event_ids = self.get_success( - self.store.get_latest_event_ids_in_room(self.room_id) - ) - self.assertTrue(len(latest_event_ids) == self.EXTREMITIES_COUNT) - - # Create new user, and add consent - user2 = self.register_user("user2", "password") - token2 = self.login("user2", "password") - self.get_success( - self.store.user_set_consent_version(user2, self.CONSENT_VERSION) - ) - self.helper.join(self.room_id, user2, tok=token2) - - # Background updates should now cause a dummy event to be added to the graph - self.pump(10 * 60) - - latest_event_ids = self.get_success( - self.store.get_latest_event_ids_in_room(self.room_id) - ) - self.assertTrue(len(latest_event_ids) < 10, len(latest_event_ids)) - @patch("synapse.handlers.message._DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY", new=250) def test_expiry_logic(self): """Simple test to ensure that _expire_rooms_to_exclude_from_dummy_event_insertion() diff --git a/tests/test_state.py b/tests/test_state.py index 80b0ccbc40..6227a3ba95 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -169,6 +169,7 @@ class StateTestCase(unittest.TestCase): "get_state_handler", "get_clock", "get_state_resolution_handler", + "hostname", ] ) hs.config = default_config("tesths", True) -- cgit 1.5.1 From ee382025b0c264701fc320133912e9fece40b021 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 17 Nov 2020 09:46:23 -0500 Subject: Abstract shared SSO code. (#8765) De-duplicates code between the SAML and OIDC implementations. --- changelog.d/8765.misc | 1 + synapse/handlers/oidc_handler.py | 92 ++++++++++++++-------------------------- synapse/handlers/saml_handler.py | 77 ++++++++++----------------------- synapse/handlers/sso.py | 90 +++++++++++++++++++++++++++++++++++++++ synapse/server.py | 5 +++ tests/handlers/test_oidc.py | 14 +++--- 6 files changed, 159 insertions(+), 120 deletions(-) create mode 100644 changelog.d/8765.misc create mode 100644 synapse/handlers/sso.py (limited to 'tests') diff --git a/changelog.d/8765.misc b/changelog.d/8765.misc new file mode 100644 index 0000000000..053f9acc9c --- /dev/null +++ b/changelog.d/8765.misc @@ -0,0 +1 @@ +Consolidate logic between the OpenID Connect and SAML code. 
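The de-duplication this commit performs has a simple shape: the OIDC and SAML handlers each carried their own copy of the "look up an existing mapping for this remote user, otherwise fall through to registration" logic, and both now delegate to one shared handler. A minimal, self-contained sketch of that shape follows; the InMemoryStore stand-in is hypothetical, whereas the real SsoHandler (added in synapse/handlers/sso.py below) reads from the homeserver's datastore:

import logging
from typing import Dict, Optional, Tuple

logger = logging.getLogger(__name__)


class InMemoryStore:
    """Hypothetical stand-in for the datastore's external-IDs table."""

    def __init__(self) -> None:
        self._external_ids: Dict[Tuple[str, str], str] = {}

    async def get_user_by_external_id(
        self, auth_provider_id: str, remote_user_id: str
    ) -> Optional[str]:
        return self._external_ids.get((auth_provider_id, remote_user_id))


class SsoHandler:
    """One shared copy of the lookup-and-log logic used by both SSO flows."""

    def __init__(self, store: InMemoryStore) -> None:
        self._store = store

    async def get_sso_user_by_remote_user_id(
        self, auth_provider_id: str, remote_user_id: str
    ) -> Optional[str]:
        logger.info(
            "Looking for existing mapping for user %s:%s",
            auth_provider_id,
            remote_user_id,
        )
        user_id = await self._store.get_user_by_external_id(
            auth_provider_id, remote_user_id
        )
        if user_id is not None:
            logger.info("Found existing mapping %s", user_id)
        return user_id

Each provider-specific handler calls get_sso_user_by_remote_user_id() first and only runs its own attribute mapping and registration when it returns None, which is the branch structure visible in the oidc_handler.py and saml_handler.py hunks below.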
diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 331d4e7e96..be8562d47b 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -34,7 +34,8 @@ from typing_extensions import TypedDict from twisted.web.client import readBody from synapse.config import ConfigError -from synapse.http.server import respond_with_html +from synapse.handlers._base import BaseHandler +from synapse.handlers.sso import MappingException from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart @@ -83,17 +84,12 @@ class OidcError(Exception): return self.error -class MappingException(Exception): - """Used to catch errors when mapping the UserInfo object - """ - - -class OidcHandler: +class OidcHandler(BaseHandler): """Handles requests related to the OpenID Connect login flow. """ def __init__(self, hs: "HomeServer"): - self.hs = hs + super().__init__(hs) self._callback_url = hs.config.oidc_callback_url # type: str self._scopes = hs.config.oidc_scopes # type: List[str] self._user_profile_method = hs.config.oidc_user_profile_method # type: str @@ -120,36 +116,13 @@ class OidcHandler: self._http_client = hs.get_proxied_http_client() self._auth_handler = hs.get_auth_handler() self._registration_handler = hs.get_registration_handler() - self._datastore = hs.get_datastore() - self._clock = hs.get_clock() - self._hostname = hs.hostname # type: str self._server_name = hs.config.server_name # type: str self._macaroon_secret_key = hs.config.macaroon_secret_key - self._error_template = hs.config.sso_error_template # identifier for the external_ids table self._auth_provider_id = "oidc" - def _render_error( - self, request, error: str, error_description: Optional[str] = None - ) -> None: - """Render the error template and respond to the request with it. - - This is used to show errors to the user. The template of this page can - be found under `synapse/res/templates/sso_error.html`. - - Args: - request: The incoming request from the browser. - We'll respond with an HTML page describing the error. - error: A technical identifier for this error. Those include - well-known OAuth2/OIDC error types like invalid_request or - access_denied. - error_description: A human-readable description of the error. - """ - html = self._error_template.render( - error=error, error_description=error_description - ) - respond_with_html(request, 400, html) + self._sso_handler = hs.get_sso_handler() def _validate_metadata(self): """Verifies the provider metadata. @@ -571,7 +544,7 @@ class OidcHandler: Since we might want to display OIDC-related errors in a user-friendly way, we don't raise SynapseError from here. Instead, we call - ``self._render_error`` which displays an HTML page for the error. + ``self._sso_handler.render_error`` which displays an HTML page for the error. Most of the OpenID Connect logic happens here: @@ -609,7 +582,7 @@ class OidcHandler: if error != "access_denied": logger.error("Error from the OIDC provider: %s %s", error, description) - self._render_error(request, error, description) + self._sso_handler.render_error(request, error, description) return # otherwise, it is presumably a successful response. 
see: @@ -619,7 +592,9 @@ class OidcHandler: session = request.getCookie(SESSION_COOKIE_NAME) # type: Optional[bytes] if session is None: logger.info("No session cookie found") - self._render_error(request, "missing_session", "No session cookie found") + self._sso_handler.render_error( + request, "missing_session", "No session cookie found" + ) return # Remove the cookie. There is a good chance that if the callback failed @@ -637,7 +612,9 @@ class OidcHandler: # Check for the state query parameter if b"state" not in request.args: logger.info("State parameter is missing") - self._render_error(request, "invalid_request", "State parameter is missing") + self._sso_handler.render_error( + request, "invalid_request", "State parameter is missing" + ) return state = request.args[b"state"][0].decode() @@ -651,17 +628,19 @@ class OidcHandler: ) = self._verify_oidc_session_token(session, state) except MacaroonDeserializationException as e: logger.exception("Invalid session") - self._render_error(request, "invalid_session", str(e)) + self._sso_handler.render_error(request, "invalid_session", str(e)) return except MacaroonInvalidSignatureException as e: logger.exception("Could not verify session") - self._render_error(request, "mismatching_session", str(e)) + self._sso_handler.render_error(request, "mismatching_session", str(e)) return # Exchange the code with the provider if b"code" not in request.args: logger.info("Code parameter is missing") - self._render_error(request, "invalid_request", "Code parameter is missing") + self._sso_handler.render_error( + request, "invalid_request", "Code parameter is missing" + ) return logger.debug("Exchanging code") @@ -670,7 +649,7 @@ class OidcHandler: token = await self._exchange_code(code) except OidcError as e: logger.exception("Could not exchange code") - self._render_error(request, e.error, e.error_description) + self._sso_handler.render_error(request, e.error, e.error_description) return logger.debug("Successfully obtained OAuth2 access token") @@ -683,7 +662,7 @@ class OidcHandler: userinfo = await self._fetch_userinfo(token) except Exception as e: logger.exception("Could not fetch userinfo") - self._render_error(request, "fetch_error", str(e)) + self._sso_handler.render_error(request, "fetch_error", str(e)) return else: logger.debug("Extracting userinfo from id_token") @@ -691,7 +670,7 @@ class OidcHandler: userinfo = await self._parse_id_token(token, nonce=nonce) except Exception as e: logger.exception("Invalid id_token") - self._render_error(request, "invalid_token", str(e)) + self._sso_handler.render_error(request, "invalid_token", str(e)) return # Pull out the user-agent and IP from the request. 
@@ -705,7 +684,7 @@ class OidcHandler: ) except MappingException as e: logger.exception("Could not map user") - self._render_error(request, "mapping_error", str(e)) + self._sso_handler.render_error(request, "mapping_error", str(e)) return # Mapping providers might not have get_extra_attributes: only call this @@ -770,7 +749,7 @@ class OidcHandler: macaroon.add_first_party_caveat( "ui_auth_session_id = %s" % (ui_auth_session_id,) ) - now = self._clock.time_msec() + now = self.clock.time_msec() expiry = now + duration_in_ms macaroon.add_first_party_caveat("time < %d" % (expiry,)) @@ -845,7 +824,7 @@ class OidcHandler: if not caveat.startswith(prefix): return False expiry = int(caveat[len(prefix) :]) - now = self._clock.time_msec() + now = self.clock.time_msec() return now < expiry async def _map_userinfo_to_user( @@ -885,20 +864,14 @@ class OidcHandler: # to be strings. remote_user_id = str(remote_user_id) - logger.info( - "Looking for existing mapping for user %s:%s", - self._auth_provider_id, - remote_user_id, - ) - - registered_user_id = await self._datastore.get_user_by_external_id( + # first of all, check if we already have a mapping for this user + previously_registered_user_id = await self._sso_handler.get_sso_user_by_remote_user_id( self._auth_provider_id, remote_user_id, ) + if previously_registered_user_id: + return previously_registered_user_id - if registered_user_id is not None: - logger.info("Found existing mapping %s", registered_user_id) - return registered_user_id - + # Otherwise, generate a new user. try: attributes = await self._user_mapping_provider.map_user_attributes( userinfo, token @@ -917,8 +890,8 @@ class OidcHandler: localpart = map_username_to_mxid_localpart(attributes["localpart"]) - user_id = UserID(localpart, self._hostname).to_string() - users = await self._datastore.get_users_by_id_case_insensitive(user_id) + user_id = UserID(localpart, self.server_name).to_string() + users = await self.store.get_users_by_id_case_insensitive(user_id) if users: if self._allow_existing_users: if len(users) == 1: @@ -942,7 +915,8 @@ class OidcHandler: default_display_name=attributes["display_name"], user_agent_ips=(user_agent, ip_address), ) - await self._datastore.record_user_external_id( + + await self.store.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id, ) return registered_user_id diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index fd6c5e9ea8..aee772239a 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -24,7 +24,8 @@ from saml2.client import Saml2Client from synapse.api.errors import SynapseError from synapse.config import ConfigError from synapse.config.saml2_config import SamlAttributeRequirement -from synapse.http.server import respond_with_html +from synapse.handlers._base import BaseHandler +from synapse.handlers.sso import MappingException from synapse.http.servlet import parse_string from synapse.http.site import SynapseRequest from synapse.module_api import ModuleApi @@ -42,10 +43,6 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -class MappingException(Exception): - """Used to catch errors when mapping the SAML2 response to a user.""" - - @attr.s(slots=True) class Saml2SessionData: """Data we track about SAML2 sessions""" @@ -57,17 +54,13 @@ class Saml2SessionData: ui_auth_session_id = attr.ib(type=Optional[str], default=None) -class SamlHandler: +class SamlHandler(BaseHandler): def __init__(self, hs: "synapse.server.HomeServer"): - self.hs = hs + 
super().__init__(hs) self._saml_client = Saml2Client(hs.config.saml2_sp_config) - self._auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() self._registration_handler = hs.get_registration_handler() - self._clock = hs.get_clock() - self._datastore = hs.get_datastore() - self._hostname = hs.hostname self._saml2_session_lifetime = hs.config.saml2_session_lifetime self._grandfathered_mxid_source_attribute = ( hs.config.saml2_grandfathered_mxid_source_attribute @@ -88,26 +81,9 @@ class SamlHandler: self._outstanding_requests_dict = {} # type: Dict[str, Saml2SessionData] # a lock on the mappings - self._mapping_lock = Linearizer(name="saml_mapping", clock=self._clock) - - def _render_error( - self, request, error: str, error_description: Optional[str] = None - ) -> None: - """Render the error template and respond to the request with it. + self._mapping_lock = Linearizer(name="saml_mapping", clock=self.clock) - This is used to show errors to the user. The template of this page can - be found under `synapse/res/templates/sso_error.html`. - - Args: - request: The incoming request from the browser. - We'll respond with an HTML page describing the error. - error: A technical identifier for this error. - error_description: A human-readable description of the error. - """ - html = self._error_template.render( - error=error, error_description=error_description - ) - respond_with_html(request, 400, html) + self._sso_handler = hs.get_sso_handler() def handle_redirect_request( self, client_redirect_url: bytes, ui_auth_session_id: Optional[str] = None @@ -130,7 +106,7 @@ class SamlHandler: # Since SAML sessions timeout it is useful to log when they were created. logger.info("Initiating a new SAML session: %s" % (reqid,)) - now = self._clock.time_msec() + now = self.clock.time_msec() self._outstanding_requests_dict[reqid] = Saml2SessionData( creation_time=now, ui_auth_session_id=ui_auth_session_id, ) @@ -171,12 +147,12 @@ class SamlHandler: # in the (user-visible) exception message, so let's log the exception here # so we can track down the session IDs later. logger.warning(str(e)) - self._render_error( + self._sso_handler.render_error( request, "unsolicited_response", "Unexpected SAML2 login." ) return except Exception as e: - self._render_error( + self._sso_handler.render_error( request, "invalid_response", "Unable to parse SAML2 response: %s." % (e,), @@ -184,7 +160,7 @@ class SamlHandler: return if saml2_auth.not_signed: - self._render_error( + self._sso_handler.render_error( request, "unsigned_respond", "SAML2 response was not signed." ) return @@ -210,7 +186,7 @@ class SamlHandler: # attributes. for requirement in self._saml2_attribute_requirements: if not _check_attribute_requirement(saml2_auth.ava, requirement): - self._render_error( + self._sso_handler.render_error( request, "unauthorised", "You are not authorised to log in here." ) return @@ -226,7 +202,7 @@ class SamlHandler: ) except MappingException as e: logger.exception("Could not map user") - self._render_error(request, "mapping_error", str(e)) + self._sso_handler.render_error(request, "mapping_error", str(e)) return # Complete the interactive auth session or the login. 
@@ -274,17 +250,11 @@ class SamlHandler: with (await self._mapping_lock.queue(self._auth_provider_id)): # first of all, check if we already have a mapping for this user - logger.info( - "Looking for existing mapping for user %s:%s", - self._auth_provider_id, - remote_user_id, + previously_registered_user_id = await self._sso_handler.get_sso_user_by_remote_user_id( + self._auth_provider_id, remote_user_id, ) - registered_user_id = await self._datastore.get_user_by_external_id( - self._auth_provider_id, remote_user_id - ) - if registered_user_id is not None: - logger.info("Found existing mapping %s", registered_user_id) - return registered_user_id + if previously_registered_user_id: + return previously_registered_user_id # backwards-compatibility hack: see if there is an existing user with a # suitable mapping from the uid @@ -294,7 +264,7 @@ class SamlHandler: ): attrval = saml2_auth.ava[self._grandfathered_mxid_source_attribute][0] user_id = UserID( - map_username_to_mxid_localpart(attrval), self._hostname + map_username_to_mxid_localpart(attrval), self.server_name ).to_string() logger.info( "Looking for existing account based on mapped %s %s", @@ -302,11 +272,11 @@ class SamlHandler: user_id, ) - users = await self._datastore.get_users_by_id_case_insensitive(user_id) + users = await self.store.get_users_by_id_case_insensitive(user_id) if users: registered_user_id = list(users.keys())[0] logger.info("Grandfathering mapping to %s", registered_user_id) - await self._datastore.record_user_external_id( + await self.store.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id ) return registered_user_id @@ -335,8 +305,8 @@ class SamlHandler: emails = attribute_dict.get("emails", []) # Check if this mxid already exists - if not await self._datastore.get_users_by_id_case_insensitive( - UserID(localpart, self._hostname).to_string() + if not await self.store.get_users_by_id_case_insensitive( + UserID(localpart, self.server_name).to_string() ): # This mxid is free break @@ -348,7 +318,6 @@ class SamlHandler: ) logger.info("Mapped SAML user to local part %s", localpart) - registered_user_id = await self._registration_handler.register_user( localpart=localpart, default_display_name=displayname, @@ -356,13 +325,13 @@ class SamlHandler: user_agent_ips=(user_agent, ip_address), ) - await self._datastore.record_user_external_id( + await self.store.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id ) return registered_user_id def expire_sessions(self): - expire_before = self._clock.time_msec() - self._saml2_session_lifetime + expire_before = self.clock.time_msec() - self._saml2_session_lifetime to_expire = set() for reqid, data in self._outstanding_requests_dict.items(): if data.creation_time < expire_before: diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py new file mode 100644 index 0000000000..9cb1866a71 --- /dev/null +++ b/synapse/handlers/sso.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import logging +from typing import TYPE_CHECKING, Optional + +from synapse.handlers._base import BaseHandler +from synapse.http.server import respond_with_html + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class MappingException(Exception): + """Used to catch errors when mapping the UserInfo object + """ + + +class SsoHandler(BaseHandler): + def __init__(self, hs: "HomeServer"): + super().__init__(hs) + self._error_template = hs.config.sso_error_template + + def render_error( + self, request, error: str, error_description: Optional[str] = None + ) -> None: + """Renders the error template and responds with it. + + This is used to show errors to the user. The template of this page can + be found under `synapse/res/templates/sso_error.html`. + + Args: + request: The incoming request from the browser. + We'll respond with an HTML page describing the error. + error: A technical identifier for this error. + error_description: A human-readable description of the error. + """ + html = self._error_template.render( + error=error, error_description=error_description + ) + respond_with_html(request, 400, html) + + async def get_sso_user_by_remote_user_id( + self, auth_provider_id: str, remote_user_id: str + ) -> Optional[str]: + """ + Maps the user ID of a remote IdP to a mxid for a previously seen user. + + If the user has not been seen yet, this will return None. + + Args: + auth_provider_id: A unique identifier for this SSO provider, e.g. + "oidc" or "saml". + remote_user_id: The user ID according to the remote IdP. This might + be an e-mail address, a GUID, or some other form. It must be + unique and immutable. + + Returns: + The mxid of a previously seen user. + """ + # Check if we already have a mapping for this user. + logger.info( + "Looking for existing mapping for user %s:%s", + auth_provider_id, + remote_user_id, + ) + previously_registered_user_id = await self.store.get_user_by_external_id( + auth_provider_id, remote_user_id, + ) + + # A match was found, return the user ID. + if previously_registered_user_id is not None: + logger.info("Found existing mapping %s", previously_registered_user_id) + return previously_registered_user_id + + # No match. 
+ return None diff --git a/synapse/server.py b/synapse/server.py index 21a232bbd9..12a783de17 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -89,6 +89,7 @@ from synapse.handlers.room_member import RoomMemberMasterHandler from synapse.handlers.room_member_worker import RoomMemberWorkerHandler from synapse.handlers.search import SearchHandler from synapse.handlers.set_password import SetPasswordHandler +from synapse.handlers.sso import SsoHandler from synapse.handlers.stats import StatsHandler from synapse.handlers.sync import SyncHandler from synapse.handlers.typing import FollowerTypingHandler, TypingWriterHandler @@ -390,6 +391,10 @@ class HomeServer(metaclass=abc.ABCMeta): else: return FollowerTypingHandler(self) + @cache_in_self + def get_sso_handler(self) -> SsoHandler: + return SsoHandler(self) + @cache_in_self def get_sync_handler(self) -> SyncHandler: return SyncHandler(self) diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 0d51705849..630e6da808 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -154,6 +154,9 @@ class OidcHandlerTestCase(HomeserverTestCase): ) self.handler = OidcHandler(hs) + # Mock the render error method. + self.render_error = Mock(return_value=None) + self.handler._sso_handler.render_error = self.render_error return hs @@ -161,12 +164,12 @@ class OidcHandlerTestCase(HomeserverTestCase): return patch.dict(self.handler._provider_metadata, values) def assertRenderedError(self, error, error_description=None): - args = self.handler._render_error.call_args[0] + args = self.render_error.call_args[0] self.assertEqual(args[1], error) if error_description is not None: self.assertEqual(args[2], error_description) # Reset the render_error mock - self.handler._render_error.reset_mock() + self.render_error.reset_mock() def test_config(self): """Basic config correctly sets up the callback URL and client auth correctly.""" @@ -356,7 +359,6 @@ class OidcHandlerTestCase(HomeserverTestCase): def test_callback_error(self): """Errors from the provider returned in the callback are displayed.""" - self.handler._render_error = Mock() request = Mock(args={}) request.args[b"error"] = [b"invalid_client"] self.get_success(self.handler.handle_oidc_callback(request)) @@ -387,7 +389,6 @@ class OidcHandlerTestCase(HomeserverTestCase): "preferred_username": "bar", } user_id = "@foo:domain.org" - self.handler._render_error = Mock(return_value=None) self.handler._exchange_code = simple_async_mock(return_value=token) self.handler._parse_id_token = simple_async_mock(return_value=userinfo) self.handler._fetch_userinfo = simple_async_mock(return_value=userinfo) @@ -435,7 +436,7 @@ class OidcHandlerTestCase(HomeserverTestCase): userinfo, token, user_agent, ip_address ) self.handler._fetch_userinfo.assert_not_called() - self.handler._render_error.assert_not_called() + self.render_error.assert_not_called() # Handle mapping errors self.handler._map_userinfo_to_user = simple_async_mock( @@ -469,7 +470,7 @@ class OidcHandlerTestCase(HomeserverTestCase): userinfo, token, user_agent, ip_address ) self.handler._fetch_userinfo.assert_called_once_with(token) - self.handler._render_error.assert_not_called() + self.render_error.assert_not_called() # Handle userinfo fetching error self.handler._fetch_userinfo = simple_async_mock(raises=Exception()) @@ -485,7 +486,6 @@ class OidcHandlerTestCase(HomeserverTestCase): def test_callback_session(self): """The callback verifies the session presence and validity""" - self.handler._render_error = 
Mock(return_value=None) request = Mock(spec=["args", "getCookie", "addCookie"]) # Missing cookie -- cgit 1.5.1 From 03e392f787df3fb8d23aca0964695a5caa075940 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 18 Nov 2020 15:43:11 +0000 Subject: Fix tests on develop (#8777) This was broken due to #8617 and #8761. --- changelog.d/8777.misc | 1 + tests/rest/admin/test_user.py | 16 ---------------- 2 files changed, 1 insertion(+), 16 deletions(-) create mode 100644 changelog.d/8777.misc (limited to 'tests') diff --git a/changelog.d/8777.misc b/changelog.d/8777.misc new file mode 100644 index 0000000000..e6da7d038d --- /dev/null +++ b/changelog.d/8777.misc @@ -0,0 +1 @@ + Refactor test utilities for injecting HTTP requests. diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index cc8a70be04..9661af7e79 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1574,7 +1574,6 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", self.url, b"{}", access_token=self.admin_user_tok ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) return channel.json_body["access_token"] @@ -1582,7 +1581,6 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): """Try to login as a user without authentication. """ request, channel = self.make_request("POST", self.url, b"{}") - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -1593,7 +1591,6 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", self.url, b"{}", access_token=self.other_user_tok ) - self.render(request) self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) @@ -1622,7 +1619,6 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "devices", b"{}", access_token=self.other_user_tok ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # We should only see the one device (from the login in `prepare`) @@ -1638,28 +1634,24 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "devices", b"{}", access_token=puppet_token ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Logout with the puppet token request, channel = self.make_request( "POST", "logout", b"{}", access_token=puppet_token ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # The puppet token should no longer work request, channel = self.make_request( "GET", "devices", b"{}", access_token=puppet_token ) - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) # .. 
but the real user's tokens should still work request, channel = self.make_request( "GET", "devices", b"{}", access_token=self.other_user_tok ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) def test_user_logout_all(self): @@ -1673,28 +1665,24 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "devices", b"{}", access_token=puppet_token ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Logout all with the real user token request, channel = self.make_request( "POST", "logout/all", b"{}", access_token=self.other_user_tok ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # The puppet token should still work request, channel = self.make_request( "GET", "devices", b"{}", access_token=puppet_token ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # .. but the real user's tokens shouldn't request, channel = self.make_request( "GET", "devices", b"{}", access_token=self.other_user_tok ) - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) def test_admin_logout_all(self): @@ -1708,28 +1696,24 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", "devices", b"{}", access_token=puppet_token ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Logout all with the admin user token request, channel = self.make_request( "POST", "logout/all", b"{}", access_token=self.admin_user_tok ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # The puppet token should no longer work request, channel = self.make_request( "GET", "devices", b"{}", access_token=puppet_token ) - self.render(request) self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) # .. but the real user's tokens should still work request, channel = self.make_request( "GET", "devices", b"{}", access_token=self.other_user_tok ) - self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) @unittest.override_config( -- cgit 1.5.1 From 950bb0305fc3c114f456eb4e2a806014150545d2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 19 Nov 2020 10:05:33 +0000 Subject: Consistently use room_id from federation request body (#8776) * Consistently use room_id from federation request body Some federation APIs have a redundant `room_id` path param (see https://github.com/matrix-org/matrix-doc/issues/2330). We should make sure we consistently use either the path param or the body param, and the body param is easier. * Kill off some references to "context" Once upon a time, "rooms" were known as "contexts". I think this kills of the last references to "contexts". 
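The mechanical change applied across the hunks below is always the same: stop trusting the room_id captured from the URI, require it in the request body instead, and validate its presence up front. A short sketch of the pattern; the assert helper here is a simplified stand-in for synapse.http.servlet.assert_params_in_dict (which raises a 400 SynapseError rather than a ValueError):

from typing import Any, Dict, List


def assert_params_in_dict(body: Dict[str, Any], required: List[str]) -> None:
    # Simplified stand-in; the real helper lives in synapse.http.servlet.
    absent = [k for k in required if k not in body]
    if absent:
        raise ValueError("Missing params: %r" % (absent,))


async def on_send_leave_request(origin: str, content: Dict[str, Any]) -> dict:
    # The room ID now comes only from the event body, so the redundant URI
    # path parameter can no longer disagree with it.
    assert_params_in_dict(content, ["room_id"])
    room_id = content["room_id"]
    # ... fetch the room version for room_id and process the PDU ...
    return {}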
--- changelog.d/8776.bugfix | 1 + synapse/federation/federation_server.py | 23 +++++------ synapse/federation/transport/server.py | 68 ++++++++++++++++----------------- synapse/handlers/federation.py | 10 ++--- tests/handlers/test_federation.py | 1 - 5 files changed, 49 insertions(+), 54 deletions(-) create mode 100644 changelog.d/8776.bugfix (limited to 'tests') diff --git a/changelog.d/8776.bugfix b/changelog.d/8776.bugfix new file mode 100644 index 0000000000..dd7ebbeb86 --- /dev/null +++ b/changelog.d/8776.bugfix @@ -0,0 +1 @@ +Fix a bug in some federation APIs which could lead to unexpected behaviour if different parameters were set in the URI and the request body. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 23278e36b7..4b6ab470d0 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -49,6 +49,7 @@ from synapse.federation.federation_base import FederationBase, event_from_pdu_js from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction from synapse.http.endpoint import parse_server_name +from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( make_deferred_yieldable, nested_logging_context, @@ -391,7 +392,7 @@ class FederationServer(FederationBase): TRANSACTION_CONCURRENCY_LIMIT, ) - async def on_context_state_request( + async def on_room_state_request( self, origin: str, room_id: str, event_id: str ) -> Tuple[int, Dict[str, Any]]: origin_host, _ = parse_server_name(origin) @@ -514,11 +515,12 @@ class FederationServer(FederationBase): return {"event": ret_pdu.get_pdu_json(time_now)} async def on_send_join_request( - self, origin: str, content: JsonDict, room_id: str + self, origin: str, content: JsonDict ) -> Dict[str, Any]: logger.debug("on_send_join_request: content: %s", content) - room_version = await self.store.get_room_version(room_id) + assert_params_in_dict(content, ["room_id"]) + room_version = await self.store.get_room_version(content["room_id"]) pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) @@ -547,12 +549,11 @@ class FederationServer(FederationBase): time_now = self._clock.time_msec() return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} - async def on_send_leave_request( - self, origin: str, content: JsonDict, room_id: str - ) -> dict: + async def on_send_leave_request(self, origin: str, content: JsonDict) -> dict: logger.debug("on_send_leave_request: content: %s", content) - room_version = await self.store.get_room_version(room_id) + assert_params_in_dict(content, ["room_id"]) + room_version = await self.store.get_room_version(content["room_id"]) pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) @@ -748,12 +749,8 @@ class FederationServer(FederationBase): ) return ret - async def on_exchange_third_party_invite_request( - self, room_id: str, event_dict: Dict - ): - ret = await self.handler.on_exchange_third_party_invite_request( - room_id, event_dict - ) + async def on_exchange_third_party_invite_request(self, event_dict: Dict): + ret = await self.handler.on_exchange_third_party_invite_request(event_dict) return ret async def check_server_matches_acl(self, server_name: str, room_id: str): diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index a0933fae88..b53e7a20ec 100644 --- a/synapse/federation/transport/server.py +++ 
b/synapse/federation/transport/server.py
@@ -440,13 +440,13 @@ class FederationEventServlet(BaseFederationServlet):
 
 
 class FederationStateV1Servlet(BaseFederationServlet):
-    PATH = "/state/(?P<context>[^/]*)/?"
+    PATH = "/state/(?P<room_id>[^/]*)/?"
 
-    # This is when someone asks for all data for a given context.
-    async def on_GET(self, origin, content, query, context):
-        return await self.handler.on_context_state_request(
+    # This is when someone asks for all data for a given room.
+    async def on_GET(self, origin, content, query, room_id):
+        return await self.handler.on_room_state_request(
             origin,
-            context,
+            room_id,
             parse_string_from_args(query, "event_id", None, required=False),
         )
 
@@ -463,16 +463,16 @@ class FederationStateIdsServlet(BaseFederationServlet):
 
 
 class FederationBackfillServlet(BaseFederationServlet):
-    PATH = "/backfill/(?P<context>[^/]*)/?"
+    PATH = "/backfill/(?P<room_id>[^/]*)/?"
 
-    async def on_GET(self, origin, content, query, context):
+    async def on_GET(self, origin, content, query, room_id):
         versions = [x.decode("ascii") for x in query[b"v"]]
         limit = parse_integer_from_args(query, "limit", None)
 
         if not limit:
             return 400, {"error": "Did not include limit param"}
 
-        return await self.handler.on_backfill_request(origin, context, versions, limit)
+        return await self.handler.on_backfill_request(origin, room_id, versions, limit)
 
 
 class FederationQueryServlet(BaseFederationServlet):
@@ -487,9 +487,9 @@ class FederationQueryServlet(BaseFederationServlet):
 
 
 class FederationMakeJoinServlet(BaseFederationServlet):
-    PATH = "/make_join/(?P<context>[^/]*)/(?P<user_id>[^/]*)"
+    PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
 
-    async def on_GET(self, origin, _content, query, context, user_id):
+    async def on_GET(self, origin, _content, query, room_id, user_id):
         """
         Args:
             origin (unicode): The authenticated server_name of the calling server
@@ -511,16 +511,16 @@ class FederationMakeJoinServlet(BaseFederationServlet):
             supported_versions = ["1"]
 
         content = await self.handler.on_make_join_request(
-            origin, context, user_id, supported_versions=supported_versions
+            origin, room_id, user_id, supported_versions=supported_versions
         )
         return 200, content
 
 
 class FederationMakeLeaveServlet(BaseFederationServlet):
-    PATH = "/make_leave/(?P<context>[^/]*)/(?P<user_id>[^/]*)"
+    PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
 
-    async def on_GET(self, origin, content, query, context, user_id):
-        content = await self.handler.on_make_leave_request(origin, context, user_id)
+    async def on_GET(self, origin, content, query, room_id, user_id):
+        content = await self.handler.on_make_leave_request(origin, room_id, user_id)
         return 200, content
 
 
@@ -528,7 +528,7 @@ class FederationV1SendLeaveServlet(BaseFederationServlet):
     PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
 
     async def on_PUT(self, origin, content, query, room_id, event_id):
-        content = await self.handler.on_send_leave_request(origin, content, room_id)
+        content = await self.handler.on_send_leave_request(origin, content)
         return 200, (200, content)
 
 
@@ -538,43 +538,43 @@ class FederationV2SendLeaveServlet(BaseFederationServlet):
     PREFIX = FEDERATION_V2_PREFIX
 
     async def on_PUT(self, origin, content, query, room_id, event_id):
-        content = await self.handler.on_send_leave_request(origin, content, room_id)
+        content = await self.handler.on_send_leave_request(origin, content)
         return 200, content
 
 
 class FederationEventAuthServlet(BaseFederationServlet):
-    PATH = "/event_auth/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
+    PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
 
-    async def on_GET(self, origin, content, query, context, event_id):
-        return await self.handler.on_event_auth(origin, context, event_id)
+    async def on_GET(self, origin, content, query, room_id, event_id):
+        return await self.handler.on_event_auth(origin, room_id, event_id)
 
 
 class FederationV1SendJoinServlet(BaseFederationServlet):
-    PATH = "/send_join/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
+    PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
 
-    async def on_PUT(self, origin, content, query, context, event_id):
-        # TODO(paul): assert that context/event_id parsed from path actually
+    async def on_PUT(self, origin, content, query, room_id, event_id):
+        # TODO(paul): assert that room_id/event_id parsed from path actually
         #   match those given in content
-        content = await self.handler.on_send_join_request(origin, content, context)
+        content = await self.handler.on_send_join_request(origin, content)
         return 200, (200, content)
 
 
 class FederationV2SendJoinServlet(BaseFederationServlet):
-    PATH = "/send_join/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
+    PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
 
     PREFIX = FEDERATION_V2_PREFIX
 
-    async def on_PUT(self, origin, content, query, context, event_id):
-        # TODO(paul): assert that context/event_id parsed from path actually
+    async def on_PUT(self, origin, content, query, room_id, event_id):
+        # TODO(paul): assert that room_id/event_id parsed from path actually
         #   match those given in content
-        content = await self.handler.on_send_join_request(origin, content, context)
+        content = await self.handler.on_send_join_request(origin, content)
         return 200, content
 
 
 class FederationV1InviteServlet(BaseFederationServlet):
-    PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
+    PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
 
-    async def on_PUT(self, origin, content, query, context, event_id):
+    async def on_PUT(self, origin, content, query, room_id, event_id):
         # We don't get a room version, so we have to assume its EITHER v1 or
         # v2. This is "fine" as the only difference between V1 and V2 is the
         # state resolution algorithm, and we don't use that for processing
@@ -589,12 +589,12 @@ class FederationV1InviteServlet(BaseFederationServlet):
 
 
 class FederationV2InviteServlet(BaseFederationServlet):
-    PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
+    PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
 
     PREFIX = FEDERATION_V2_PREFIX
 
-    async def on_PUT(self, origin, content, query, context, event_id):
-        # TODO(paul): assert that context/event_id parsed from path actually
+    async def on_PUT(self, origin, content, query, room_id, event_id):
+        # TODO(paul): assert that room_id/event_id parsed from path actually
         #   match those given in content
 
         room_version = content["room_version"]
 
@@ -616,9 +616,7 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
     PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
 
     async def on_PUT(self, origin, content, query, room_id):
-        content = await self.handler.on_exchange_third_party_invite_request(
-            room_id, content
-        )
+        content = await self.handler.on_exchange_third_party_invite_request(content)
         return 200, content
 
 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 69bc5ba44d..b9799090f7 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -55,6 +55,7 @@ from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.events.validator import EventValidator
 from synapse.handlers._base import BaseHandler
+from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import (
     make_deferred_yieldable,
     nested_logging_context,
@@ -2688,7 +2689,7 @@ class FederationHandler(BaseHandler):
         )
 
     async def on_exchange_third_party_invite_request(
-        self, room_id: str, event_dict: JsonDict
+        self, event_dict: JsonDict
     ) -> None:
         """Handle an exchange_third_party_invite request from a remote server
 
@@ -2696,12 +2697,11 @@ class FederationHandler(BaseHandler):
         into a normal m.room.member invite.
 
         Args:
-            room_id: The ID of the room.
-
-            event_dict (dict[str, Any]): Dictionary containing the event body.
+            event_dict: Dictionary containing the event body.
         """
-        room_version = await self.store.get_room_version_id(room_id)
+        assert_params_in_dict(event_dict, ["room_id"])
+        room_version = await self.store.get_room_version_id(event_dict["room_id"])
 
         # NB: event_dict has a particular specced format we might need to fudge
         # if we change event formats too much.
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 9ef80fe502..bf866dacf3 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -59,7 +59,6 @@ class FederationTestCase(unittest.HomeserverTestCase):
         )
 
         d = self.handler.on_exchange_third_party_invite_request(
-            room_id=room_id,
             event_dict={
                 "type": EventTypes.Member,
                 "room_id": room_id,
--
cgit 1.5.1


From 79bfe966e08a2212cc2fae2b00f5efb2c2185543 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 19 Nov 2020 14:25:17 -0500
Subject: Improve error checking for OIDC/SAML mapping providers (#8774)

Checks that the localpart returned by mapping providers for SAML and OIDC are
valid before registering new users.

Extends the OIDC tests for existing users and invalid data.
---
 UPGRADE.rst                      | 30 ++++++++++++++
 changelog.d/8774.misc            |  1 +
 docs/sso_mapping_providers.md    |  9 +++-
 synapse/handlers/oidc_handler.py | 25 ++++++++---
 synapse/handlers/saml_handler.py |  6 +++
 synapse/types.py                 |  6 +--
 tests/handlers/test_oidc.py      | 89 +++++++++++++++++++++++++++++++---------
 7 files changed, 137 insertions(+), 29 deletions(-)
 create mode 100644 changelog.d/8774.misc

(limited to 'tests')

diff --git a/UPGRADE.rst b/UPGRADE.rst
index 7c19cf2a70..4de1bb5841 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -75,6 +75,36 @@ for example:
 
     wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+Upgrading to v1.24.0
+====================
+
+Custom OpenID Connect mapping provider breaking change
+------------------------------------------------------
+
+This release allows the OpenID Connect mapping provider to perform normalisation
+of the localpart of the Matrix ID. This allows the mapping provider to
+specify different algorithms, instead of the `default way
+<https://matrix.org/docs/spec/appendices#mapping-from-other-character-sets>`_.
+
+If your Synapse configuration uses a custom mapping provider
+(`oidc_config.user_mapping_provider.module` is specified and not equal to
+`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`) then you *must* ensure
+that `map_user_attributes` of the mapping provider performs some normalisation
+of the `localpart` returned. To match previous behaviour you can use the
+`map_username_to_mxid_localpart` function provided by Synapse. An example is
+shown below:
+
+.. code-block:: python
+
+    from synapse.types import map_username_to_mxid_localpart
+
+    class MyMappingProvider:
+        def map_user_attributes(self, userinfo, token):
+            # ... your custom logic ...
+            sso_user_id = ...
+            localpart = map_username_to_mxid_localpart(sso_user_id)
+
+            return {"localpart": localpart}
+
 Upgrading to v1.23.0
 ====================
 
diff --git a/changelog.d/8774.misc b/changelog.d/8774.misc
new file mode 100644
index 0000000000..57cca8fee5
--- /dev/null
+++ b/changelog.d/8774.misc
@@ -0,0 +1 @@
+Add additional error checking for OpenID Connect and SAML mapping providers.
diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md
index 32b06aa2c5..707dd73978 100644
--- a/docs/sso_mapping_providers.md
+++ b/docs/sso_mapping_providers.md
@@ -15,8 +15,15 @@ where SAML mapping providers come into play.
 SSO mapping providers are currently supported for OpenID and SAML SSO
 configurations. Please see the details below for how to implement your own.
 
+It is the responsibility of the mapping provider to normalise the SSO attributes
+and map them to a valid Matrix ID. The
+[specification for Matrix IDs](https://matrix.org/docs/spec/appendices#user-identifiers)
+has some information about what is considered valid. Alternatively, an easy way to
+ensure it is valid is to use a Synapse utility function:
+`synapse.types.map_username_to_mxid_localpart`.
+
 External mapping providers are provided to Synapse in the form of an external
-Python module. You can retrieve this module from [PyPi](https://pypi.org) or elsewhere,
+Python module. You can retrieve this module from [PyPI](https://pypi.org) or elsewhere,
 but it must be importable via Synapse (e.g. it must be in the same virtualenv
 as Synapse). The Synapse config is then modified to point to the mapping provider
 (and optionally provide additional configuration for it).
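
As a rough guide to the validity rules referenced in the documentation hunk
above, `contains_invalid_mxid_characters` behaves as follows (illustrative
values only; the last case mirrors the test added further down):

    from synapse.types import contains_invalid_mxid_characters

    contains_invalid_mxid_characters("alice")  # False: lowercase ASCII is allowed
    contains_invalid_mxid_characters("Alice")  # True: capital letters are not allowed
    contains_invalid_mxid_characters("föö")    # True: the handlers below turn
                                               # this into a MappingException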
diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index be8562d47b..4bfd8d5617 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -38,7 +38,12 @@ from synapse.handlers._base import BaseHandler from synapse.handlers.sso import MappingException from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable -from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart +from synapse.types import ( + JsonDict, + UserID, + contains_invalid_mxid_characters, + map_username_to_mxid_localpart, +) from synapse.util import json_decoder if TYPE_CHECKING: @@ -885,10 +890,12 @@ class OidcHandler(BaseHandler): "Retrieved user attributes from user mapping provider: %r", attributes ) - if not attributes["localpart"]: - raise MappingException("localpart is empty") - - localpart = map_username_to_mxid_localpart(attributes["localpart"]) + localpart = attributes["localpart"] + if not localpart: + raise MappingException( + "Error parsing OIDC response: OIDC mapping provider plugin " + "did not return a localpart value" + ) user_id = UserID(localpart, self.server_name).to_string() users = await self.store.get_users_by_id_case_insensitive(user_id) @@ -908,6 +915,11 @@ class OidcHandler(BaseHandler): # This mxid is taken raise MappingException("mxid '{}' is already taken".format(user_id)) else: + # Since the localpart is provided via a potentially untrusted module, + # ensure the MXID is valid before registering. + if contains_invalid_mxid_characters(localpart): + raise MappingException("localpart is invalid: %s" % (localpart,)) + # It's the first time this user is logging in and the mapped mxid was # not taken, register the user registered_user_id = await self._registration_handler.register_user( @@ -1076,6 +1088,9 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): ) -> UserAttribute: localpart = self._config.localpart_template.render(user=userinfo).strip() + # Ensure only valid characters are included in the MXID. + localpart = map_username_to_mxid_localpart(localpart) + display_name = None # type: Optional[str] if self._config.display_name_template is not None: display_name = self._config.display_name_template.render( diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 9bf430b656..5d9b555b13 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -31,6 +31,7 @@ from synapse.http.site import SynapseRequest from synapse.module_api import ModuleApi from synapse.types import ( UserID, + contains_invalid_mxid_characters, map_username_to_mxid_localpart, mxid_localpart_allowed_characters, ) @@ -318,6 +319,11 @@ class SamlHandler(BaseHandler): "Unable to generate a Matrix ID from the SAML response" ) + # Since the localpart is provided via a potentially untrusted module, + # ensure the MXID is valid before registering. 
+ if contains_invalid_mxid_characters(localpart): + raise MappingException("localpart is invalid: %s" % (localpart,)) + logger.info("Mapped SAML user to local part %s", localpart) registered_user_id = await self._registration_handler.register_user( localpart=localpart, diff --git a/synapse/types.py b/synapse/types.py index 66bb5bac8d..3ab6bdbe06 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -317,14 +317,14 @@ mxid_localpart_allowed_characters = set( ) -def contains_invalid_mxid_characters(localpart): +def contains_invalid_mxid_characters(localpart: str) -> bool: """Check for characters not allowed in an mxid or groupid localpart Args: - localpart (basestring): the localpart to be checked + localpart: the localpart to be checked Returns: - bool: True if there are any naughty characters + True if there are any naughty characters """ return any(c not in mxid_localpart_allowed_characters for c in localpart) diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 630e6da808..b4fa02acc4 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import json from urllib.parse import parse_qs, urlparse @@ -24,12 +23,8 @@ import pymacaroons from twisted.python.failure import Failure from twisted.web._newclient import ResponseDone -from synapse.handlers.oidc_handler import ( - MappingException, - OidcError, - OidcHandler, - OidcMappingProvider, -) +from synapse.handlers.oidc_handler import OidcError, OidcHandler, OidcMappingProvider +from synapse.handlers.sso import MappingException from synapse.types import UserID from tests.unittest import HomeserverTestCase, override_config @@ -132,14 +127,13 @@ class OidcHandlerTestCase(HomeserverTestCase): config = self.default_config() config["public_baseurl"] = BASE_URL - oidc_config = {} - oidc_config["enabled"] = True - oidc_config["client_id"] = CLIENT_ID - oidc_config["client_secret"] = CLIENT_SECRET - oidc_config["issuer"] = ISSUER - oidc_config["scopes"] = SCOPES - oidc_config["user_mapping_provider"] = { - "module": __name__ + ".TestMappingProvider", + oidc_config = { + "enabled": True, + "client_id": CLIENT_ID, + "client_secret": CLIENT_SECRET, + "issuer": ISSUER, + "scopes": SCOPES, + "user_mapping_provider": {"module": __name__ + ".TestMappingProvider"}, } # Update this config with what's in the default config so that @@ -705,13 +699,13 @@ class OidcHandlerTestCase(HomeserverTestCase): def test_map_userinfo_to_existing_user(self): """Existing users can log in with OpenID Connect when allow_existing_users is True.""" store = self.hs.get_datastore() - user4 = UserID.from_string("@test_user_4:test") + user = UserID.from_string("@test_user:test") self.get_success( - store.register_user(user_id=user4.to_string(), password_hash=None) + store.register_user(user_id=user.to_string(), password_hash=None) ) userinfo = { - "sub": "test4", - "username": "test_user_4", + "sub": "test", + "username": "test_user", } token = {} mxid = self.get_success( @@ -719,4 +713,59 @@ class OidcHandlerTestCase(HomeserverTestCase): userinfo, token, "user-agent", "10.10.10.10" ) ) - self.assertEqual(mxid, "@test_user_4:test") + self.assertEqual(mxid, "@test_user:test") + + # Register some non-exact matching cases. 
+ user2 = UserID.from_string("@TEST_user_2:test") + self.get_success( + store.register_user(user_id=user2.to_string(), password_hash=None) + ) + user2_caps = UserID.from_string("@test_USER_2:test") + self.get_success( + store.register_user(user_id=user2_caps.to_string(), password_hash=None) + ) + + # Attempting to login without matching a name exactly is an error. + userinfo = { + "sub": "test2", + "username": "TEST_USER_2", + } + e = self.get_failure( + self.handler._map_userinfo_to_user( + userinfo, token, "user-agent", "10.10.10.10" + ), + MappingException, + ) + self.assertTrue( + str(e.value).startswith( + "Attempted to login as '@TEST_USER_2:test' but it matches more than one user inexactly:" + ) + ) + + # Logging in when matching a name exactly should work. + user2 = UserID.from_string("@TEST_USER_2:test") + self.get_success( + store.register_user(user_id=user2.to_string(), password_hash=None) + ) + + mxid = self.get_success( + self.handler._map_userinfo_to_user( + userinfo, token, "user-agent", "10.10.10.10" + ) + ) + self.assertEqual(mxid, "@TEST_USER_2:test") + + def test_map_userinfo_to_invalid_localpart(self): + """If the mapping provider generates an invalid localpart it should be rejected.""" + userinfo = { + "sub": "test2", + "username": "föö", + } + token = {} + e = self.get_failure( + self.handler._map_userinfo_to_user( + userinfo, token, "user-agent", "10.10.10.10" + ), + MappingException, + ) + self.assertEqual(str(e.value), "localpart is invalid: föö") -- cgit 1.5.1 From 4fd222ad704767e08c41a60690c4b499ed788b63 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 25 Nov 2020 10:04:22 -0500 Subject: Support trying multiple localparts for OpenID Connect. (#8801) Abstracts the SAML and OpenID Connect code which attempts to regenerate the localpart of a matrix ID if it is already in use. --- changelog.d/8801.feature | 1 + docs/sso_mapping_providers.md | 11 ++- synapse/handlers/oidc_handler.py | 120 +++++++++++++----------------- synapse/handlers/saml_handler.py | 91 +++++++---------------- synapse/handlers/sso.py | 155 ++++++++++++++++++++++++++++++++++++++- tests/handlers/test_oidc.py | 88 +++++++++++++++++++++- 6 files changed, 330 insertions(+), 136 deletions(-) create mode 100644 changelog.d/8801.feature (limited to 'tests') diff --git a/changelog.d/8801.feature b/changelog.d/8801.feature new file mode 100644 index 0000000000..77f7fe4e5d --- /dev/null +++ b/changelog.d/8801.feature @@ -0,0 +1 @@ +Add support for re-trying generation of a localpart for OpenID Connect mapping providers. diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md index 707dd73978..dee53b5d40 100644 --- a/docs/sso_mapping_providers.md +++ b/docs/sso_mapping_providers.md @@ -63,13 +63,22 @@ A custom mapping provider must specify the following methods: information from. - This method must return a string, which is the unique identifier for the user. Commonly the ``sub`` claim of the response. -* `map_user_attributes(self, userinfo, token)` +* `map_user_attributes(self, userinfo, token, failures)` - This method must be async. - Arguments: - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user information from. - `token` - A dictionary which includes information necessary to make further requests to the OpenID provider. + - `failures` - An `int` that represents the amount of times the returned + mxid localpart mapping has failed. This should be used + to create a deduplicated mxid localpart which should be + returned instead. 
For example, if this method returns + `john.doe` as the value of `localpart` in the returned + dict, and that is already taken on the homeserver, this + method will be called again with the same parameters but + with failures=1. The method should then return a different + `localpart` value, such as `john.doe1`. - Returns a dictionary with two keys: - localpart: A required string, used to generate the Matrix ID. - displayname: An optional string, the display name for the user. diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 34de9109ea..78c4e94a9d 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import inspect import logging from typing import TYPE_CHECKING, Dict, Generic, List, Optional, Tuple, TypeVar from urllib.parse import urlencode @@ -35,15 +36,10 @@ from twisted.web.client import readBody from synapse.config import ConfigError from synapse.handlers._base import BaseHandler -from synapse.handlers.sso import MappingException +from synapse.handlers.sso import MappingException, UserAttributes from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable -from synapse.types import ( - JsonDict, - UserID, - contains_invalid_mxid_characters, - map_username_to_mxid_localpart, -) +from synapse.types import JsonDict, map_username_to_mxid_localpart from synapse.util import json_decoder if TYPE_CHECKING: @@ -869,73 +865,51 @@ class OidcHandler(BaseHandler): # to be strings. remote_user_id = str(remote_user_id) - # first of all, check if we already have a mapping for this user - previously_registered_user_id = await self._sso_handler.get_sso_user_by_remote_user_id( - self._auth_provider_id, remote_user_id, + # Older mapping providers don't accept the `failures` argument, so we + # try and detect support. + mapper_signature = inspect.signature( + self._user_mapping_provider.map_user_attributes ) - if previously_registered_user_id: - return previously_registered_user_id + supports_failures = "failures" in mapper_signature.parameters - # Otherwise, generate a new user. - try: - attributes = await self._user_mapping_provider.map_user_attributes( - userinfo, token - ) - except Exception as e: - raise MappingException( - "Could not extract user attributes from OIDC response: " + str(e) - ) + async def oidc_response_to_user_attributes(failures: int) -> UserAttributes: + """ + Call the mapping provider to map the OIDC userinfo and token to user attributes. - logger.debug( - "Retrieved user attributes from user mapping provider: %r", attributes - ) + This is backwards compatibility for abstraction for the SSO handler. + """ + if supports_failures: + attributes = await self._user_mapping_provider.map_user_attributes( + userinfo, token, failures + ) + else: + # If the mapping provider does not support processing failures, + # do not continually generate the same Matrix ID since it will + # continue to already be in use. Note that the error raised is + # arbitrary and will get turned into a MappingException. 
+ if failures: + raise RuntimeError( + "Mapping provider does not support de-duplicating Matrix IDs" + ) - localpart = attributes["localpart"] - if not localpart: - raise MappingException( - "Error parsing OIDC response: OIDC mapping provider plugin " - "did not return a localpart value" - ) + attributes = await self._user_mapping_provider.map_user_attributes( # type: ignore + userinfo, token + ) - user_id = UserID(localpart, self.server_name).to_string() - users = await self.store.get_users_by_id_case_insensitive(user_id) - if users: - if self._allow_existing_users: - if len(users) == 1: - registered_user_id = next(iter(users)) - elif user_id in users: - registered_user_id = user_id - else: - raise MappingException( - "Attempted to login as '{}' but it matches more than one user inexactly: {}".format( - user_id, list(users.keys()) - ) - ) - else: - # This mxid is taken - raise MappingException("mxid '{}' is already taken".format(user_id)) - else: - # Since the localpart is provided via a potentially untrusted module, - # ensure the MXID is valid before registering. - if contains_invalid_mxid_characters(localpart): - raise MappingException("localpart is invalid: %s" % (localpart,)) - - # It's the first time this user is logging in and the mapped mxid was - # not taken, register the user - registered_user_id = await self._registration_handler.register_user( - localpart=localpart, - default_display_name=attributes["display_name"], - user_agent_ips=[(user_agent, ip_address)], - ) + return UserAttributes(**attributes) - await self.store.record_user_external_id( - self._auth_provider_id, remote_user_id, registered_user_id, + return await self._sso_handler.get_mxid_from_sso( + self._auth_provider_id, + remote_user_id, + user_agent, + ip_address, + oidc_response_to_user_attributes, + self._allow_existing_users, ) - return registered_user_id -UserAttribute = TypedDict( - "UserAttribute", {"localpart": str, "display_name": Optional[str]} +UserAttributeDict = TypedDict( + "UserAttributeDict", {"localpart": str, "display_name": Optional[str]} ) C = TypeVar("C") @@ -978,13 +952,15 @@ class OidcMappingProvider(Generic[C]): raise NotImplementedError() async def map_user_attributes( - self, userinfo: UserInfo, token: Token - ) -> UserAttribute: + self, userinfo: UserInfo, token: Token, failures: int + ) -> UserAttributeDict: """Map a `UserInfo` object into user attributes. Args: userinfo: An object representing the user given by the OIDC provider token: A dict with the tokens returned by the provider + failures: How many times a call to this function with this + UserInfo has resulted in a failure. Returns: A dict containing the ``localpart`` and (optionally) the ``display_name`` @@ -1084,13 +1060,17 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): return userinfo[self._config.subject_claim] async def map_user_attributes( - self, userinfo: UserInfo, token: Token - ) -> UserAttribute: + self, userinfo: UserInfo, token: Token, failures: int + ) -> UserAttributeDict: localpart = self._config.localpart_template.render(user=userinfo).strip() # Ensure only valid characters are included in the MXID. localpart = map_username_to_mxid_localpart(localpart) + # Append suffix integer if last call to this function failed to produce + # a usable mxid. 
+ localpart += str(failures) if failures else "" + display_name = None # type: Optional[str] if self._config.display_name_template is not None: display_name = self._config.display_name_template.render( @@ -1100,7 +1080,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): if display_name == "": display_name = None - return UserAttribute(localpart=localpart, display_name=display_name) + return UserAttributeDict(localpart=localpart, display_name=display_name) async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict: extras = {} # type: Dict[str, str] diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 37ab42f050..34db10ffe4 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -25,13 +25,12 @@ from synapse.api.errors import SynapseError from synapse.config import ConfigError from synapse.config.saml2_config import SamlAttributeRequirement from synapse.handlers._base import BaseHandler -from synapse.handlers.sso import MappingException +from synapse.handlers.sso import MappingException, UserAttributes from synapse.http.servlet import parse_string from synapse.http.site import SynapseRequest from synapse.module_api import ModuleApi from synapse.types import ( UserID, - contains_invalid_mxid_characters, map_username_to_mxid_localpart, mxid_localpart_allowed_characters, ) @@ -250,14 +249,26 @@ class SamlHandler(BaseHandler): "Failed to extract remote user id from SAML response" ) - with (await self._mapping_lock.queue(self._auth_provider_id)): - # first of all, check if we already have a mapping for this user - previously_registered_user_id = await self._sso_handler.get_sso_user_by_remote_user_id( - self._auth_provider_id, remote_user_id, + async def saml_response_to_remapped_user_attributes( + failures: int, + ) -> UserAttributes: + """ + Call the mapping provider to map a SAML response to user attributes and coerce the result into the standard form. + + This is backwards compatibility for abstraction for the SSO handler. + """ + # Call the mapping provider. + result = self._user_mapping_provider.saml_response_to_user_attributes( + saml2_auth, failures, client_redirect_url + ) + # Remap some of the results. 
+ return UserAttributes( + localpart=result.get("mxid_localpart"), + display_name=result.get("displayname"), + emails=result.get("emails"), ) - if previously_registered_user_id: - return previously_registered_user_id + with (await self._mapping_lock.queue(self._auth_provider_id)): # backwards-compatibility hack: see if there is an existing user with a # suitable mapping from the uid if ( @@ -284,59 +295,13 @@ class SamlHandler(BaseHandler): ) return registered_user_id - # Map saml response to user attributes using the configured mapping provider - for i in range(1000): - attribute_dict = self._user_mapping_provider.saml_response_to_user_attributes( - saml2_auth, i, client_redirect_url=client_redirect_url, - ) - - logger.debug( - "Retrieved SAML attributes from user mapping provider: %s " - "(attempt %d)", - attribute_dict, - i, - ) - - localpart = attribute_dict.get("mxid_localpart") - if not localpart: - raise MappingException( - "Error parsing SAML2 response: SAML mapping provider plugin " - "did not return a mxid_localpart value" - ) - - displayname = attribute_dict.get("displayname") - emails = attribute_dict.get("emails", []) - - # Check if this mxid already exists - if not await self.store.get_users_by_id_case_insensitive( - UserID(localpart, self.server_name).to_string() - ): - # This mxid is free - break - else: - # Unable to generate a username in 1000 iterations - # Break and return error to the user - raise MappingException( - "Unable to generate a Matrix ID from the SAML response" - ) - - # Since the localpart is provided via a potentially untrusted module, - # ensure the MXID is valid before registering. - if contains_invalid_mxid_characters(localpart): - raise MappingException("localpart is invalid: %s" % (localpart,)) - - logger.debug("Mapped SAML user to local part %s", localpart) - registered_user_id = await self._registration_handler.register_user( - localpart=localpart, - default_display_name=displayname, - bind_emails=emails, - user_agent_ips=[(user_agent, ip_address)], - ) - - await self.store.record_user_external_id( - self._auth_provider_id, remote_user_id, registered_user_id + return await self._sso_handler.get_mxid_from_sso( + self._auth_provider_id, + remote_user_id, + user_agent, + ip_address, + saml_response_to_remapped_user_attributes, ) - return registered_user_id def expire_sessions(self): expire_before = self.clock.time_msec() - self._saml2_session_lifetime @@ -451,11 +416,11 @@ class DefaultSamlMappingProvider: ) # Use the configured mapper for this mxid_source - base_mxid_localpart = self._mxid_mapper(mxid_source) + localpart = self._mxid_mapper(mxid_source) # Append suffix integer if last call to this function failed to produce - # a usable mxid - localpart = base_mxid_localpart + (str(failures) if failures else "") + # a usable mxid. + localpart += str(failures) if failures else "" # Retrieve the display name from the saml response # If displayname is None, the mxid_localpart will be used instead diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index cf7cb7754a..d963082210 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -13,10 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional + +import attr from synapse.handlers._base import BaseHandler from synapse.http.server import respond_with_html +from synapse.types import UserID, contains_invalid_mxid_characters if TYPE_CHECKING: from synapse.server import HomeServer @@ -29,9 +32,20 @@ class MappingException(Exception): """ +@attr.s +class UserAttributes: + localpart = attr.ib(type=str) + display_name = attr.ib(type=Optional[str], default=None) + emails = attr.ib(type=List[str], default=attr.Factory(list)) + + class SsoHandler(BaseHandler): + # The number of attempts to ask the mapping provider for when generating an MXID. + _MAP_USERNAME_RETRIES = 1000 + def __init__(self, hs: "HomeServer"): super().__init__(hs) + self._registration_handler = hs.get_registration_handler() self._error_template = hs.config.sso_error_template def render_error( @@ -94,3 +108,142 @@ class SsoHandler(BaseHandler): # No match. return None + + async def get_mxid_from_sso( + self, + auth_provider_id: str, + remote_user_id: str, + user_agent: str, + ip_address: str, + sso_to_matrix_id_mapper: Callable[[int], Awaitable[UserAttributes]], + allow_existing_users: bool = False, + ) -> str: + """ + Given an SSO ID, retrieve the user ID for it and possibly register the user. + + This first checks if the SSO ID has previously been linked to a matrix ID, + if it has that matrix ID is returned regardless of the current mapping + logic. + + The mapping function is called (potentially multiple times) to generate + a localpart for the user. + + If an unused localpart is generated, the user is registered from the + given user-agent and IP address and the SSO ID is linked to this matrix + ID for subsequent calls. + + If allow_existing_users is true the mapping function is only called once + and results in: + + 1. The use of a previously registered matrix ID. In this case, the + SSO ID is linked to the matrix ID. (Note it is possible that + other SSO IDs are linked to the same matrix ID.) + 2. An unused localpart, in which case the user is registered (as + discussed above). + 3. An error if the generated localpart matches multiple pre-existing + matrix IDs. Generally this should not happen. + + Args: + auth_provider_id: A unique identifier for this SSO provider, e.g. + "oidc" or "saml". + remote_user_id: The unique identifier from the SSO provider. + user_agent: The user agent of the client making the request. + ip_address: The IP address of the client making the request. + sso_to_matrix_id_mapper: A callable to generate the user attributes. + The only parameter is an integer which represents the amount of + times the returned mxid localpart mapping has failed. + allow_existing_users: True if the localpart returned from the + mapping provider can be linked to an existing matrix ID. + + Returns: + The user ID associated with the SSO response. + + Raises: + MappingException if there was a problem mapping the response to a user. + RedirectException: some mapping providers may raise this if they need + to redirect to an interstitial page. + + """ + # first of all, check if we already have a mapping for this user + previously_registered_user_id = await self.get_sso_user_by_remote_user_id( + auth_provider_id, remote_user_id, + ) + if previously_registered_user_id: + return previously_registered_user_id + + # Otherwise, generate a new user. 
+        for i in range(self._MAP_USERNAME_RETRIES):
+            try:
+                attributes = await sso_to_matrix_id_mapper(i)
+            except Exception as e:
+                raise MappingException(
+                    "Could not extract user attributes from SSO response: " + str(e)
+                )
+
+            logger.debug(
+                "Retrieved user attributes from user mapping provider: %r (attempt %d)",
+                attributes,
+                i,
+            )
+
+            if not attributes.localpart:
+                raise MappingException(
+                    "Error parsing SSO response: SSO mapping provider plugin "
+                    "did not return a localpart value"
+                )
+
+            # Check if this mxid already exists
+            user_id = UserID(attributes.localpart, self.server_name).to_string()
+            users = await self.store.get_users_by_id_case_insensitive(user_id)
+            # Note, if allow_existing_users is true then the loop is guaranteed
+            # to end on the first iteration: either by matching an existing user,
+            # raising an error, or registering a new user. See the docstring for
+            # a more in-depth explanation.
+            if users and allow_existing_users:
+                # If an existing matrix ID is returned, then use it.
+                if len(users) == 1:
+                    previously_registered_user_id = next(iter(users))
+                elif user_id in users:
+                    previously_registered_user_id = user_id
+                else:
+                    # Do not attempt to continue generating Matrix IDs.
+                    raise MappingException(
+                        "Attempted to login as '{}' but it matches more than one user inexactly: {}".format(
+                            user_id, users
+                        )
+                    )
+
+                # Future logins should also match this user ID.
+                await self.store.record_user_external_id(
+                    auth_provider_id, remote_user_id, previously_registered_user_id
+                )
+
+                return previously_registered_user_id
+
+            elif not users:
+                # This mxid is free
+                break
+        else:
+            # Unable to generate a username in 1000 iterations
+            # Break and return error to the user
+            raise MappingException(
+                "Unable to generate a Matrix ID from the SSO response"
+            )
+
+        # Since the localpart is provided via a potentially untrusted module,
+        # ensure the MXID is valid before registering.
+        if contains_invalid_mxid_characters(attributes.localpart):
+            raise MappingException("localpart is invalid: %s" % (attributes.localpart,))
+
+        logger.debug("Mapped SSO user to local part %s", attributes.localpart)
+        registered_user_id = await self._registration_handler.register_user(
+            localpart=attributes.localpart,
+            default_display_name=attributes.display_name,
+            bind_emails=attributes.emails,
+            user_agent_ips=[(user_agent, ip_address)],
+        )
+
+        await self.store.record_user_external_id(
+            auth_provider_id, remote_user_id, registered_user_id
+        )
+        return registered_user_id
diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py
index b4fa02acc4..e880d32be6 100644
--- a/tests/handlers/test_oidc.py
+++ b/tests/handlers/test_oidc.py
@@ -89,6 +89,14 @@ class TestMappingProviderExtra(TestMappingProvider):
         return {"phone": userinfo["phone"]}
 
 
+class TestMappingProviderFailures(TestMappingProvider):
+    async def map_user_attributes(self, userinfo, token, failures):
+        return {
+            "localpart": userinfo["username"] + (str(failures) if failures else ""),
+            "display_name": None,
+        }
+
+
 def simple_async_mock(return_value=None, raises=None):
     # AsyncMock is not available in python3.5, this mimics part of its behaviour
     async def cb(*args, **kwargs):
@@ -152,6 +160,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
         self.render_error = Mock(return_value=None)
         self.handler._sso_handler.render_error = self.render_error
 
+        # Reduce the number of attempts when generating MXIDs.
+ self.handler._sso_handler._MAP_USERNAME_RETRIES = 3 + return hs def metadata_edit(self, values): @@ -693,7 +704,10 @@ class OidcHandlerTestCase(HomeserverTestCase): ), MappingException, ) - self.assertEqual(str(e.value), "mxid '@test_user_3:test' is already taken") + self.assertEqual( + str(e.value), + "Could not extract user attributes from SSO response: Mapping provider does not support de-duplicating Matrix IDs", + ) @override_config({"oidc_config": {"allow_existing_users": True}}) def test_map_userinfo_to_existing_user(self): @@ -703,6 +717,8 @@ class OidcHandlerTestCase(HomeserverTestCase): self.get_success( store.register_user(user_id=user.to_string(), password_hash=None) ) + + # Map a user via SSO. userinfo = { "sub": "test", "username": "test_user", @@ -715,6 +731,23 @@ class OidcHandlerTestCase(HomeserverTestCase): ) self.assertEqual(mxid, "@test_user:test") + # Note that a second SSO user can be mapped to the same Matrix ID. (This + # requires a unique sub, but something that maps to the same matrix ID, + # in this case we'll just use the same username. A more realistic example + # would be subs which are email addresses, and mapping from the localpart + # of the email, e.g. bob@foo.com and bob@bar.com -> @bob:test.) + userinfo = { + "sub": "test1", + "username": "test_user", + } + token = {} + mxid = self.get_success( + self.handler._map_userinfo_to_user( + userinfo, token, "user-agent", "10.10.10.10" + ) + ) + self.assertEqual(mxid, "@test_user:test") + # Register some non-exact matching cases. user2 = UserID.from_string("@TEST_user_2:test") self.get_success( @@ -762,6 +795,7 @@ class OidcHandlerTestCase(HomeserverTestCase): "username": "föö", } token = {} + e = self.get_failure( self.handler._map_userinfo_to_user( userinfo, token, "user-agent", "10.10.10.10" @@ -769,3 +803,55 @@ class OidcHandlerTestCase(HomeserverTestCase): MappingException, ) self.assertEqual(str(e.value), "localpart is invalid: föö") + + @override_config( + { + "oidc_config": { + "user_mapping_provider": { + "module": __name__ + ".TestMappingProviderFailures" + } + } + } + ) + def test_map_userinfo_to_user_retries(self): + """The mapping provider can retry generating an MXID if the MXID is already in use.""" + store = self.hs.get_datastore() + self.get_success( + store.register_user(user_id="@test_user:test", password_hash=None) + ) + userinfo = { + "sub": "test", + "username": "test_user", + } + token = {} + mxid = self.get_success( + self.handler._map_userinfo_to_user( + userinfo, token, "user-agent", "10.10.10.10" + ) + ) + # test_user is already taken, so test_user1 gets registered instead. + self.assertEqual(mxid, "@test_user1:test") + + # Register all of the potential users for a particular username. + self.get_success( + store.register_user(user_id="@tester:test", password_hash=None) + ) + for i in range(1, 3): + self.get_success( + store.register_user(user_id="@tester%d:test" % i, password_hash=None) + ) + + # Now attempt to map to a username, this will fail since all potential usernames are taken. 
+        userinfo = {
+            "sub": "tester",
+            "username": "tester",
+        }
+        e = self.get_failure(
+            self.handler._map_userinfo_to_user(
+                userinfo, token, "user-agent", "10.10.10.10"
+            ),
+            MappingException,
+        )
+        self.assertEqual(
+            str(e.value), "Unable to generate a Matrix ID from the SSO response"
+        )
--
cgit 1.5.1


From 3f0ff53158cc07b481c701077357d9d09254845b Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Wed, 25 Nov 2020 22:26:11 +0100
Subject: Remove deprecated `/_matrix/client/*/admin` endpoints (#8785)

These are now only available via `/_synapse/admin/v1`.
---
 UPGRADE.rst                                  |  22 ++++++
 changelog.d/8785.removal                     |   1 +
 docs/admin_api/user_admin_api.rst            |   7 ++
 synapse/_scripts/register_new_matrix_user.py |   2 +-
 synapse/rest/admin/__init__.py               |  12 +--
 synapse/rest/admin/_base.py                  |  22 ------
 synapse/rest/admin/groups.py                 |   7 +-
 synapse/rest/admin/media.py                  |  15 ++--
 synapse/rest/admin/rooms.py                  |   3 +-
 synapse/rest/admin/users.py                  |  25 +++---
 tests/rest/admin/test_admin.py               |   2 +-
 tests/rest/admin/test_room.py                |   4 +-
 tests/rest/admin/test_user.py                | 110 ++++++++++++++++++++++++++-
 tests/rest/client/v2_alpha/test_register.py  |   6 +-
 tests/storage/test_client_ips.py             |   2 +-
 tests/unittest.py                            |   4 +-
 16 files changed, 176 insertions(+), 68 deletions(-)
 create mode 100644 changelog.d/8785.removal

(limited to 'tests')

diff --git a/UPGRADE.rst b/UPGRADE.rst
index 4de1bb5841..6825b567e9 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -105,6 +105,28 @@ shown below:
 
     return {"localpart": localpart}
 
+Removal of historical Synapse Admin API
+---------------------------------------
+
+Historically, the Synapse Admin API has been accessible under:
+
+* ``/_matrix/client/api/v1/admin``
+* ``/_matrix/client/unstable/admin``
+* ``/_matrix/client/r0/admin``
+* ``/_synapse/admin/v1``
+
+The endpoints with ``/_matrix/client/*`` prefixes have been removed as of v1.24.0.
+The Admin API is now only accessible under:
+
+* ``/_synapse/admin/v1``
+
+The only exception is the `/admin/whois` endpoint, which is
+`also available via the client-server API
+<https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_.
+
+The deprecation of the old endpoints was announced with Synapse 1.20.0 (released
+on 2020-09-22) and makes it easier for homeserver admins to lock down external
+access to the Admin API endpoints.
+
 Upgrading to v1.23.0
 ====================
 
diff --git a/changelog.d/8785.removal b/changelog.d/8785.removal
new file mode 100644
index 0000000000..ee8ee32598
--- /dev/null
+++ b/changelog.d/8785.removal
@@ -0,0 +1 @@
+Remove old `/_matrix/client/*/admin` endpoints which were deprecated since Synapse 1.20.0.
\ No newline at end of file
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 84863296e3..1473a3d4e3 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -176,6 +176,13 @@ The api is::
 
     GET /_synapse/admin/v1/whois/<user_id>
 
+and::
+
+    GET /_matrix/client/r0/admin/whois/<user_id>
+
+See also: `Client Server API Whois
+<https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_
+
 To use it, you will need to authenticate by providing an ``access_token`` for a
 server admin: see `README.rst <README.rst>`_.
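
For reference, querying the surviving whois endpoint from a script might look
like the sketch below (the homeserver URL and access token are placeholders;
the `user_id` and `devices` response keys match the assertions in the new
tests further down):

    import requests

    resp = requests.get(
        "https://homeserver.example.com/_synapse/admin/v1/whois/@user:example.com",
        headers={"Authorization": "Bearer <admin_access_token>"},
    )
    resp.raise_for_status()
    whois = resp.json()
    print(whois["user_id"], whois["devices"])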
diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py
index da0996edbc..d37ccccd5b 100644
--- a/synapse/_scripts/register_new_matrix_user.py
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -37,7 +37,7 @@ def request_registration(
     exit=sys.exit,
 ):
 
-    url = "%s/_matrix/client/r0/admin/register" % (server_location,)
+    url = "%s/_synapse/admin/v1/register" % (server_location,)
 
     # Get the nonce
     r = requests.get(url, verify=False)
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 7a3a5c46ca..55ddebb4fe 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -21,11 +21,7 @@ import synapse
 from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.server import JsonResource
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.rest.admin._base import (
-    admin_patterns,
-    assert_requester_is_admin,
-    historical_admin_path_patterns,
-)
+from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
 from synapse.rest.admin.devices import (
     DeleteDevicesRestServlet,
     DeviceRestServlet,
@@ -84,7 +80,7 @@ class VersionServlet(RestServlet):
 
 
 class PurgeHistoryRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns(
+    PATTERNS = admin_patterns(
         "/purge_history/(?P<room_id>[^/]*)(/(?P<event_id>[^/]+))?"
     )
 
@@ -169,9 +165,7 @@ class PurgeHistoryRestServlet(RestServlet):
 
 
 class PurgeHistoryStatusRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns(
-        "/purge_history_status/(?P<purge_id>[^/]+)"
-    )
+    PATTERNS = admin_patterns("/purge_history_status/(?P<purge_id>[^/]+)")
 
     def __init__(self, hs):
         """
diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py
index db9fea263a..e09234c644 100644
--- a/synapse/rest/admin/_base.py
+++ b/synapse/rest/admin/_base.py
@@ -22,28 +22,6 @@ from synapse.api.errors import AuthError
 from synapse.types import UserID
 
 
-def historical_admin_path_patterns(path_regex):
-    """Returns the list of patterns for an admin endpoint, including historical ones
-
-    This is a backwards-compatibility hack. Previously, the Admin API was exposed at
-    various paths under /_matrix/client. This function returns a list of patterns
-    matching those paths (as well as the new one), so that existing scripts which rely
-    on the endpoints being available there are not broken.
-
-    Note that this should only be used for existing endpoints: new ones should just
-    register for the /_synapse/admin path.
- """ - return [ - re.compile(prefix + path_regex) - for prefix in ( - "^/_synapse/admin/v1", - "^/_matrix/client/api/v1/admin", - "^/_matrix/client/unstable/admin", - "^/_matrix/client/r0/admin", - ) - ] - - def admin_patterns(path_regex: str, version: str = "v1"): """Returns the list of patterns for an admin endpoint diff --git a/synapse/rest/admin/groups.py b/synapse/rest/admin/groups.py index 0b54ca09f4..d0c86b204a 100644 --- a/synapse/rest/admin/groups.py +++ b/synapse/rest/admin/groups.py @@ -16,10 +16,7 @@ import logging from synapse.api.errors import SynapseError from synapse.http.servlet import RestServlet -from synapse.rest.admin._base import ( - assert_user_is_admin, - historical_admin_path_patterns, -) +from synapse.rest.admin._base import admin_patterns, assert_user_is_admin logger = logging.getLogger(__name__) @@ -28,7 +25,7 @@ class DeleteGroupAdminRestServlet(RestServlet): """Allows deleting of local groups """ - PATTERNS = historical_admin_path_patterns("/delete_group/(?P[^/]*)") + PATTERNS = admin_patterns("/delete_group/(?P[^/]*)") def __init__(self, hs): self.group_server = hs.get_groups_server_handler() diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index ba50cb876d..c82b4f87d6 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -22,7 +22,6 @@ from synapse.rest.admin._base import ( admin_patterns, assert_requester_is_admin, assert_user_is_admin, - historical_admin_path_patterns, ) logger = logging.getLogger(__name__) @@ -34,10 +33,10 @@ class QuarantineMediaInRoom(RestServlet): """ PATTERNS = ( - historical_admin_path_patterns("/room/(?P[^/]+)/media/quarantine") + admin_patterns("/room/(?P[^/]+)/media/quarantine") + # This path kept around for legacy reasons - historical_admin_path_patterns("/quarantine_media/(?P[^/]+)") + admin_patterns("/quarantine_media/(?P[^/]+)") ) def __init__(self, hs): @@ -63,9 +62,7 @@ class QuarantineMediaByUser(RestServlet): this server. """ - PATTERNS = historical_admin_path_patterns( - "/user/(?P[^/]+)/media/quarantine" - ) + PATTERNS = admin_patterns("/user/(?P[^/]+)/media/quarantine") def __init__(self, hs): self.store = hs.get_datastore() @@ -90,7 +87,7 @@ class QuarantineMediaByID(RestServlet): it via this server. """ - PATTERNS = historical_admin_path_patterns( + PATTERNS = admin_patterns( "/media/quarantine/(?P[^/]+)/(?P[^/]+)" ) @@ -116,7 +113,7 @@ class ListMediaInRoom(RestServlet): """Lists all of the media in a given room. """ - PATTERNS = historical_admin_path_patterns("/room/(?P[^/]+)/media") + PATTERNS = admin_patterns("/room/(?P[^/]+)/media") def __init__(self, hs): self.store = hs.get_datastore() @@ -134,7 +131,7 @@ class ListMediaInRoom(RestServlet): class PurgeMediaCacheRestServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/purge_media_cache") + PATTERNS = admin_patterns("/purge_media_cache") def __init__(self, hs): self.media_repository = hs.get_media_repository() diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index ee345e12ce..353151169a 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -29,7 +29,6 @@ from synapse.rest.admin._base import ( admin_patterns, assert_requester_is_admin, assert_user_is_admin, - historical_admin_path_patterns, ) from synapse.storage.databases.main.room import RoomSortOrder from synapse.types import RoomAlias, RoomID, UserID, create_requester @@ -44,7 +43,7 @@ class ShutdownRoomRestServlet(RestServlet): joined to the new room. 
""" - PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P[^/]+)") + PATTERNS = admin_patterns("/shutdown_room/(?P[^/]+)") def __init__(self, hs): self.hs = hs diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index fa8d8e6d91..b0ff5e1ead 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -33,8 +33,8 @@ from synapse.rest.admin._base import ( admin_patterns, assert_requester_is_admin, assert_user_is_admin, - historical_admin_path_patterns, ) +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import JsonDict, UserID if TYPE_CHECKING: @@ -55,7 +55,7 @@ _GET_PUSHERS_ALLOWED_KEYS = { class UsersRestServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/users/(?P[^/]*)$") + PATTERNS = admin_patterns("/users/(?P[^/]*)$") def __init__(self, hs): self.hs = hs @@ -338,7 +338,7 @@ class UserRegisterServlet(RestServlet): nonce to the time it was generated, in int seconds. """ - PATTERNS = historical_admin_path_patterns("/register") + PATTERNS = admin_patterns("/register") NONCE_TIMEOUT = 60 def __init__(self, hs): @@ -461,7 +461,14 @@ class UserRegisterServlet(RestServlet): class WhoisRestServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/whois/(?P[^/]*)") + path_regex = "/whois/(?P[^/]*)$" + PATTERNS = ( + admin_patterns(path_regex) + + + # URL for spec reason + # https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid + client_patterns("/admin" + path_regex, v1=True) + ) def __init__(self, hs): self.hs = hs @@ -485,7 +492,7 @@ class WhoisRestServlet(RestServlet): class DeactivateAccountRestServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/deactivate/(?P[^/]*)") + PATTERNS = admin_patterns("/deactivate/(?P[^/]*)") def __init__(self, hs): self._deactivate_account_handler = hs.get_deactivate_account_handler() @@ -516,7 +523,7 @@ class DeactivateAccountRestServlet(RestServlet): class AccountValidityRenewServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/account_validity/validity$") + PATTERNS = admin_patterns("/account_validity/validity$") def __init__(self, hs): """ @@ -559,9 +566,7 @@ class ResetPasswordRestServlet(RestServlet): 200 OK with empty object if success otherwise an error. """ - PATTERNS = historical_admin_path_patterns( - "/reset_password/(?P[^/]*)" - ) + PATTERNS = admin_patterns("/reset_password/(?P[^/]*)") def __init__(self, hs): self.store = hs.get_datastore() @@ -603,7 +608,7 @@ class SearchUsersRestServlet(RestServlet): 200 OK with json object {list[dict[str, Any]], count} or empty object. 
""" - PATTERNS = historical_admin_path_patterns("/search_users/(?P[^/]*)") + PATTERNS = admin_patterns("/search_users/(?P[^/]*)") def __init__(self, hs): self.hs = hs diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 898e43411e..4f76f8f768 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -100,7 +100,7 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): self.assertIn(group_id, self._get_groups_user_is_in(self.other_user_token)) # Now delete the group - url = "/admin/delete_group/" + group_id + url = "/_synapse/admin/v1/delete_group/" + group_id request, channel = self.make_request( "POST", url.encode("ascii"), diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 54824a5410..46933a0493 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -78,7 +78,7 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): ) # Test that the admin can still send shutdown - url = "admin/shutdown_room/" + room_id + url = "/_synapse/admin/v1/shutdown_room/" + room_id request, channel = self.make_request( "POST", url.encode("ascii"), @@ -112,7 +112,7 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Test that the admin can still send shutdown - url = "admin/shutdown_room/" + room_id + url = "/_synapse/admin/v1/shutdown_room/" + room_id request, channel = self.make_request( "POST", url.encode("ascii"), diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 9661af7e79..54d46f4bd3 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -41,7 +41,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): - self.url = "/_matrix/client/r0/admin/register" + self.url = "/_synapse/admin/v1/register" self.registration_handler = Mock() self.identity_handler = Mock() @@ -1768,3 +1768,111 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): # though the MAU limit would stop the user doing so. puppet_token = self._get_token() self.helper.join(room_id, user=self.other_user, tok=puppet_token) + + +class WhoisRestTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.url1 = "/_synapse/admin/v1/whois/%s" % urllib.parse.quote(self.other_user) + self.url2 = "/_matrix/client/r0/admin/whois/%s" % urllib.parse.quote( + self.other_user + ) + + def test_no_auth(self): + """ + Try to get information of an user without authentication. + """ + request, channel = self.make_request("GET", self.url1, b"{}") + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + request, channel = self.make_request("GET", self.url2, b"{}") + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_not_admin(self): + """ + If the user is not a server admin, an error is returned. 
+ """ + self.register_user("user2", "pass") + other_user2_token = self.login("user2", "pass") + + request, channel = self.make_request( + "GET", self.url1, access_token=other_user2_token, + ) + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + request, channel = self.make_request( + "GET", self.url2, access_token=other_user2_token, + ) + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_user_is_not_local(self): + """ + Tests that a lookup for a user that is not a local returns a 400 + """ + url1 = "/_synapse/admin/v1/whois/@unknown_person:unknown_domain" + url2 = "/_matrix/client/r0/admin/whois/@unknown_person:unknown_domain" + + request, channel = self.make_request( + "GET", url1, access_token=self.admin_user_tok, + ) + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual("Can only whois a local user", channel.json_body["error"]) + + request, channel = self.make_request( + "GET", url2, access_token=self.admin_user_tok, + ) + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual("Can only whois a local user", channel.json_body["error"]) + + def test_get_whois_admin(self): + """ + The lookup should succeed for an admin. + """ + request, channel = self.make_request( + "GET", self.url1, access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(self.other_user, channel.json_body["user_id"]) + self.assertIn("devices", channel.json_body) + + request, channel = self.make_request( + "GET", self.url2, access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(self.other_user, channel.json_body["user_id"]) + self.assertIn("devices", channel.json_body) + + def test_get_whois_user(self): + """ + The lookup should succeed for a normal user looking up their own information. 
+ """ + other_user_token = self.login("user", "pass") + + request, channel = self.make_request( + "GET", self.url1, access_token=other_user_token, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(self.other_user, channel.json_body["user_id"]) + self.assertIn("devices", channel.json_body) + + request, channel = self.make_request( + "GET", self.url2, access_token=other_user_token, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(self.other_user, channel.json_body["user_id"]) + self.assertIn("devices", channel.json_body) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 88923fcea4..699a40c3df 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -342,7 +342,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): self.register_user("admin", "adminpassword", admin=True) admin_tok = self.login("admin", "adminpassword") - url = "/_matrix/client/unstable/admin/account_validity/validity" + url = "/_synapse/admin/v1/account_validity/validity" params = {"user_id": user_id} request_data = json.dumps(params) request, channel = self.make_request( @@ -362,7 +362,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): self.register_user("admin", "adminpassword", admin=True) admin_tok = self.login("admin", "adminpassword") - url = "/_matrix/client/unstable/admin/account_validity/validity" + url = "/_synapse/admin/v1/account_validity/validity" params = { "user_id": user_id, "expiration_ts": 0, @@ -389,7 +389,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): self.register_user("admin", "adminpassword", admin=True) admin_tok = self.login("admin", "adminpassword") - url = "/_matrix/client/unstable/admin/account_validity/validity" + url = "/_synapse/admin/v1/account_validity/validity" params = { "user_id": user_id, "expiration_ts": 0, diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 6bdde1a2ba..a69117c5a9 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -416,7 +416,7 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): self.reactor, self.site, "GET", - "/_matrix/client/r0/admin/users/" + self.user_id, + "/_synapse/admin/v1/users/" + self.user_id, access_token=access_token, custom_headers=headers1.items(), **make_request_args, diff --git a/tests/unittest.py b/tests/unittest.py index c7c889c405..a9d59e31f7 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -554,7 +554,7 @@ class HomeserverTestCase(TestCase): self.hs.config.registration_shared_secret = "shared" # Create the user - request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register") + request, channel = self.make_request("GET", "/_synapse/admin/v1/register") self.assertEqual(channel.code, 200, msg=channel.result) nonce = channel.json_body["nonce"] @@ -580,7 +580,7 @@ class HomeserverTestCase(TestCase): } ) request, channel = self.make_request( - "POST", "/_matrix/client/r0/admin/register", body.encode("utf8") + "POST", "/_synapse/admin/v1/register", body.encode("utf8") ) self.assertEqual(channel.code, 200, channel.json_body) -- cgit 1.5.1 From ca60822b34416e524a80c5689734d870ceb6e130 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Mon, 30 Nov 2020 19:28:44 +0100 Subject: Simplify the way the `HomeServer` object caches its internal attributes. (#8565) Changes `@cache_in_self` to use underscore-prefixed attributes. 
--- changelog.d/8565.misc | 1 + synapse/handlers/identity.py | 3 ++- synapse/rest/client/v2_alpha/account.py | 6 +++--- synapse/rest/client/v2_alpha/register.py | 4 ++-- synapse/rest/key/v2/local_key_resource.py | 2 +- synapse/server.py | 27 +++++++++++++-------------- tests/handlers/test_auth.py | 6 +++--- tests/replication/_base.py | 2 +- tests/rest/client/v1/test_presence.py | 15 +++++++++------ tests/rest/client/v2_alpha/test_register.py | 6 +++--- tests/utils.py | 2 +- 11 files changed, 39 insertions(+), 35 deletions(-) create mode 100644 changelog.d/8565.misc (limited to 'tests') diff --git a/changelog.d/8565.misc b/changelog.d/8565.misc new file mode 100644 index 0000000000..7bef422618 --- /dev/null +++ b/changelog.d/8565.misc @@ -0,0 +1 @@ +Simplify the way the `HomeServer` object caches its internal attributes. diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index bc3e9607ca..9b3c6b4551 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -354,7 +354,8 @@ class IdentityHandler(BaseHandler): raise SynapseError(500, "An error was encountered when sending the email") token_expires = ( - self.hs.clock.time_msec() + self.hs.config.email_validation_token_lifetime + self.hs.get_clock().time_msec() + + self.hs.config.email_validation_token_lifetime ) await self.store.start_or_continue_validation_session( diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index a54e1011f7..eebee44a44 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -115,7 +115,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. - await self.hs.clock.sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep(random.randint(1, 10) / 10) return 200, {"sid": random_string(16)} raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) @@ -387,7 +387,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. - await self.hs.clock.sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep(random.randint(1, 10) / 10) return 200, {"sid": random_string(16)} raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) @@ -466,7 +466,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. - await self.hs.clock.sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep(random.randint(1, 10) / 10) return 200, {"sid": random_string(16)} raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index ea68114026..a89ae6ddf9 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -135,7 +135,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. 
- await self.hs.clock.sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep(random.randint(1, 10) / 10) return 200, {"sid": random_string(16)} raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) @@ -214,7 +214,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. - await self.hs.clock.sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep(random.randint(1, 10) / 10) return 200, {"sid": random_string(16)} raise SynapseError( diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py index c16280f668..d8e8e48c1c 100644 --- a/synapse/rest/key/v2/local_key_resource.py +++ b/synapse/rest/key/v2/local_key_resource.py @@ -66,7 +66,7 @@ class LocalKey(Resource): def __init__(self, hs): self.config = hs.config - self.clock = hs.clock + self.clock = hs.get_clock() self.update_response_body(self.clock.time_msec()) Resource.__init__(self) diff --git a/synapse/server.py b/synapse/server.py index c82d8f9fad..b017e3489f 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -147,7 +147,8 @@ def cache_in_self(builder: T) -> T: "@cache_in_self can only be used on functions starting with `get_`" ) - depname = builder.__name__[len("get_") :] + # get_attr -> _attr + depname = builder.__name__[len("get") :] building = [False] @@ -235,15 +236,6 @@ class HomeServer(metaclass=abc.ABCMeta): self._instance_id = random_string(5) self._instance_name = config.worker_name or "master" - self.clock = Clock(reactor) - self.distributor = Distributor() - - self.registration_ratelimiter = Ratelimiter( - clock=self.clock, - rate_hz=config.rc_registration.per_second, - burst_count=config.rc_registration.burst_count, - ) - self.version_string = version_string self.datastores = None # type: Optional[Databases] @@ -301,8 +293,9 @@ class HomeServer(metaclass=abc.ABCMeta): def is_mine_id(self, string: str) -> bool: return string.split(":", 1)[1] == self.hostname + @cache_in_self def get_clock(self) -> Clock: - return self.clock + return Clock(self._reactor) def get_datastore(self) -> DataStore: if not self.datastores: @@ -319,11 +312,17 @@ class HomeServer(metaclass=abc.ABCMeta): def get_config(self) -> HomeServerConfig: return self.config + @cache_in_self def get_distributor(self) -> Distributor: - return self.distributor + return Distributor() + @cache_in_self def get_registration_ratelimiter(self) -> Ratelimiter: - return self.registration_ratelimiter + return Ratelimiter( + clock=self.get_clock(), + rate_hz=self.config.rc_registration.per_second, + burst_count=self.config.rc_registration.burst_count, + ) @cache_in_self def get_federation_client(self) -> FederationClient: @@ -687,7 +686,7 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_federation_ratelimiter(self) -> FederationRateLimiter: - return FederationRateLimiter(self.clock, config=self.config.rc_federation) + return FederationRateLimiter(self.get_clock(), config=self.config.rc_federation) @cache_in_self def get_module_api(self) -> ModuleApi: diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index b5055e018c..e24ce81284 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -52,7 +52,7 @@ class AuthTestCase(unittest.TestCase): self.fail("some_user was not in %s" % macaroon.inspect()) def test_macaroon_caveats(self): - self.hs.clock.now = 5000 + 
self.hs.get_clock().now = 5000 token = self.macaroon_generator.generate_access_token("a_user") macaroon = pymacaroons.Macaroon.deserialize(token) @@ -78,7 +78,7 @@ class AuthTestCase(unittest.TestCase): @defer.inlineCallbacks def test_short_term_login_token_gives_user_id(self): - self.hs.clock.now = 1000 + self.hs.get_clock().now = 1000 token = self.macaroon_generator.generate_short_term_login_token("a_user", 5000) user_id = yield defer.ensureDeferred( @@ -87,7 +87,7 @@ class AuthTestCase(unittest.TestCase): self.assertEqual("a_user", user_id) # when we advance the clock, the token should be rejected - self.hs.clock.now = 6000 + self.hs.get_clock().now = 6000 with self.assertRaises(synapse.api.errors.AuthError): yield defer.ensureDeferred( self.auth_handler.validate_short_term_login_token_and_get_user_id(token) diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 516db4c30a..295c5d58a6 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -78,7 +78,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): self.worker_hs.get_datastore().db_pool = hs.get_datastore().db_pool self.test_handler = self._build_replication_data_handler() - self.worker_hs.replication_data_handler = self.test_handler + self.worker_hs._replication_data_handler = self.test_handler repl_handler = ReplicationCommandHandler(self.worker_hs) self.client = ClientReplicationStreamProtocol( diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py index b84f86d28c..5d5c24d01c 100644 --- a/tests/rest/client/v1/test_presence.py +++ b/tests/rest/client/v1/test_presence.py @@ -33,13 +33,16 @@ class PresenceTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): + presence_handler = Mock() + presence_handler.set_state.return_value = defer.succeed(None) + hs = self.setup_test_homeserver( - "red", http_client=None, federation_client=Mock() + "red", + http_client=None, + federation_client=Mock(), + presence_handler=presence_handler, ) - hs.presence_handler = Mock() - hs.presence_handler.set_state.return_value = defer.succeed(None) - return hs def test_put_presence(self): @@ -55,7 +58,7 @@ class PresenceTestCase(unittest.HomeserverTestCase): ) self.assertEqual(channel.code, 200) - self.assertEqual(self.hs.presence_handler.set_state.call_count, 1) + self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 1) def test_put_presence_disabled(self): """ @@ -70,4 +73,4 @@ class PresenceTestCase(unittest.HomeserverTestCase): ) self.assertEqual(channel.code, 200) - self.assertEqual(self.hs.presence_handler.set_state.call_count, 0) + self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 0) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 699a40c3df..8f0c2430e8 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -569,7 +569,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): tok = self.login("kermit", "monkey") # We need to manually add an email address otherwise the handler will do # nothing. - now = self.hs.clock.time_msec() + now = self.hs.get_clock().time_msec() self.get_success( self.store.user_add_threepid( user_id=user_id, @@ -587,7 +587,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): # We need to manually add an email address otherwise the handler will do # nothing. 
- now = self.hs.clock.time_msec() + now = self.hs.get_clock().time_msec() self.get_success( self.store.user_add_threepid( user_id=user_id, @@ -646,7 +646,7 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): self.hs.config.account_validity.startup_job_max_delta = self.max_delta - now_ms = self.hs.clock.time_msec() + now_ms = self.hs.get_clock().time_msec() self.get_success(self.store._set_expiration_date_when_missing()) res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) diff --git a/tests/utils.py b/tests/utils.py index acec74e9e9..c8d3ffbaba 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -271,7 +271,7 @@ def setup_test_homeserver( # Install @cache_in_self attributes for key, val in kwargs.items(): - setattr(hs, key, val) + setattr(hs, "_" + key, val) # Mock TLS hs.tls_server_context_factory = Mock() -- cgit 1.5.1 From 17fa58bdd1c23b9019d080fd98873aa5182f56c0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 30 Nov 2020 18:43:54 +0000 Subject: Add a config option to change whether unread push notification counts are per-message or per-room (#8820) This PR adds a new config option to the `push` section of the homeserver config, `group_unread_count_by_room`. By default Synapse will group push notifications by room (so if you have 1000 unread messages, if they lie in 55 rooms, you'll see an unread count on your phone of 55). However, it is also useful to be able to send out the true count of unread messages if desired. If `group_unread_count_by_room` is set to `false`, then with the above example, one would see an unread count of 1000 (email anyone?). --- changelog.d/8820.feature | 1 + docs/sample_config.yaml | 10 +++ synapse/config/push.py | 13 ++++ synapse/push/httppusher.py | 13 +++- synapse/push/push_tools.py | 16 +++-- tests/push/test_http.py | 163 ++++++++++++++++++++++++++++++++++++++++++++- 6 files changed, 207 insertions(+), 9 deletions(-) create mode 100644 changelog.d/8820.feature (limited to 'tests') diff --git a/changelog.d/8820.feature b/changelog.d/8820.feature new file mode 100644 index 0000000000..9e35861b11 --- /dev/null +++ b/changelog.d/8820.feature @@ -0,0 +1 @@ +Add a config option, `push.group_by_unread_count`, which controls whether unread message counts in push notifications are defined as "the number of rooms with unread messages" or "total unread messages". diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index df0f3e1d8e..394eb9a3ff 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -2271,6 +2271,16 @@ push: # #include_content: false + # When a push notification is received, an unread count is also sent. + # This number can either be calculated as the number of unread messages + # for the user, or the number of *rooms* the user has unread messages in. + # + # The default value is "true", meaning push clients will see the number of + # rooms with unread messages in them. Uncomment to instead send the number + # of unread messages. 
+ # + #group_unread_count_by_room: false + # Spam checkers are third-party modules that can block specific actions # of local users, such as creating rooms and registering undesirable diff --git a/synapse/config/push.py b/synapse/config/push.py index a71baac89c..3adbfb73e6 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -23,6 +23,9 @@ class PushConfig(Config): def read_config(self, config, **kwargs): push_config = config.get("push") or {} self.push_include_content = push_config.get("include_content", True) + self.push_group_unread_count_by_room = push_config.get( + "group_unread_count_by_room", True + ) pusher_instances = config.get("pusher_instances") or [] self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances) @@ -68,4 +71,14 @@ class PushConfig(Config): # include the event ID and room ID in push notification payloads. # #include_content: false + + # When a push notification is received, an unread count is also sent. + # This number can either be calculated as the number of unread messages + # for the user, or the number of *rooms* the user has unread messages in. + # + # The default value is "true", meaning push clients will see the number of + # rooms with unread messages in them. Uncomment to instead send the number + # of unread messages. + # + #group_unread_count_by_room: false """ diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 793d0db2d9..eff0975b6a 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -75,6 +75,7 @@ class HttpPusher: self.failing_since = pusherdict["failing_since"] self.timed_call = None self._is_processing = False + self._group_unread_count_by_room = hs.config.push_group_unread_count_by_room # This is the highest stream ordering we know it's safe to process. # When new events arrive, we'll be given a window of new events: we @@ -136,7 +137,11 @@ class HttpPusher: async def _update_badge(self): # XXX as per https://github.com/matrix-org/matrix-doc/issues/2627, this seems # to be largely redundant. perhaps we can remove it. - badge = await push_tools.get_badge_count(self.hs.get_datastore(), self.user_id) + badge = await push_tools.get_badge_count( + self.hs.get_datastore(), + self.user_id, + group_by_room=self._group_unread_count_by_room, + ) await self._send_badge(badge) def on_timer(self): @@ -283,7 +288,11 @@ class HttpPusher: return True tweaks = push_rule_evaluator.tweaks_for_actions(push_action["actions"]) - badge = await push_tools.get_badge_count(self.hs.get_datastore(), self.user_id) + badge = await push_tools.get_badge_count( + self.hs.get_datastore(), + self.user_id, + group_by_room=self._group_unread_count_by_room, + ) event = await self.store.get_event(push_action["event_id"], allow_none=True) if event is None: diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index d0145666bf..6e7c880dc0 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -12,12 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
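# Toy illustration of the behavioural difference introduced in the hunk
# below (numbers invented, not from the changeset): for per-room notify
# counts of [3, 0, 1],
#
#     notify_counts = [3, 0, 1]
#     badge_by_room = sum(1 for n in notify_counts if n)  # -> 2 rooms
#     badge_by_message = sum(notify_counts)               # -> 4 messages
#
# i.e. group_by_room=True reports a badge of 2, group_by_room=False reports 4.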
- from synapse.push.presentable_names import calculate_room_name, name_from_member_event from synapse.storage import Storage +from synapse.storage.databases.main import DataStore -async def get_badge_count(store, user_id): +async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int: invites = await store.get_invited_rooms_for_local_user(user_id) joins = await store.get_rooms_for_user(user_id) @@ -34,9 +34,15 @@ async def get_badge_count(store, user_id): room_id, user_id, last_unread_event_id ) ) - # return one badge count per conversation, as count per - # message is so noisy as to be almost useless - badge += 1 if notifs["notify_count"] else 0 + if notifs["notify_count"] == 0: + continue + + if group_by_room: + # return one badge count per conversation + badge += 1 + else: + # increment the badge count by the number of unread messages in the room + badge += notifs["notify_count"] return badge diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 8571924b29..f118430309 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from mock import Mock from twisted.internet.defer import Deferred @@ -20,8 +19,9 @@ from twisted.internet.defer import Deferred import synapse.rest.admin from synapse.logging.context import make_deferred_yieldable from synapse.rest.client.v1 import login, room +from synapse.rest.client.v2_alpha import receipts -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, override_config class HTTPPusherTests(HomeserverTestCase): @@ -29,6 +29,7 @@ class HTTPPusherTests(HomeserverTestCase): synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, login.register_servlets, + receipts.register_servlets, ] user_id = True hijack_auth = False @@ -499,3 +500,161 @@ class HTTPPusherTests(HomeserverTestCase): # check that this is low-priority self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low") + + def test_push_unread_count_group_by_room(self): + """ + The HTTP pusher will group unread count by number of unread rooms. + """ + # Carry out common push count tests and setup + self._test_push_unread_count() + + # Carry out our option-value specific test + # + # This push should still only contain an unread count of 1 (for 1 unread room) + self.assertEqual( + self.push_attempts[5][2]["notification"]["counts"]["unread"], 1 + ) + + @override_config({"push": {"group_unread_count_by_room": False}}) + def test_push_unread_count_message_count(self): + """ + The HTTP pusher will send the total unread message count. 
+ """ + # Carry out common push count tests and setup + self._test_push_unread_count() + + # Carry out our option-value specific test + # + # We're counting every unread message, so there should now be 4 since the + # last read receipt + self.assertEqual( + self.push_attempts[5][2]["notification"]["counts"]["unread"], 4 + ) + + def _test_push_unread_count(self): + """ + Tests that the correct unread count appears in sent push notifications + + Note that: + * Sending messages will cause push notifications to go out to relevant users + * Sending a read receipt will cause a "badge update" notification to go out to + the user that sent the receipt + """ + # Register the user who gets notified + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Register the user who sends the message + other_user_id = self.register_user("other_user", "pass") + other_access_token = self.login("other_user", "pass") + + # Create a room (as other_user) + room_id = self.helper.create_room_as(other_user_id, tok=other_access_token) + + # The user to get notified joins + self.helper.join(room=room_id, user=user_id, tok=access_token) + + # Register the pusher + user_tuple = self.get_success( + self.hs.get_datastore().get_user_by_access_token(access_token) + ) + token_id = user_tuple.token_id + + self.get_success( + self.hs.get_pusherpool().add_pusher( + user_id=user_id, + access_token=token_id, + kind="http", + app_id="m.http", + app_display_name="HTTP Push Notifications", + device_display_name="pushy push", + pushkey="a@example.com", + lang=None, + data={"url": "example.com"}, + ) + ) + + # Send a message + response = self.helper.send( + room_id, body="Hello there!", tok=other_access_token + ) + # To get an unread count, the user who is getting notified has to have a read + # position in the room. 
We'll set the read position to this event in a moment + first_message_event_id = response["event_id"] + + # Advance time a bit (so the pusher will register something has happened) and + # make the push succeed + self.push_attempts[0][0].callback({}) + self.pump() + + # Check our push made it + self.assertEqual(len(self.push_attempts), 1) + self.assertEqual(self.push_attempts[0][1], "example.com") + + # Check that the unread count for the room is 0 + # + # The unread count is zero as the user has no read receipt in the room yet + self.assertEqual( + self.push_attempts[0][2]["notification"]["counts"]["unread"], 0 + ) + + # Now set the user's read receipt position to the first event + # + # This will actually trigger a new notification to be sent out so that + # even if the user does not receive another message, their unread + # count goes down + request, channel = self.make_request( + "POST", + "/rooms/%s/receipt/m.read/%s" % (room_id, first_message_event_id), + {}, + access_token=access_token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Advance time and make the push succeed + self.push_attempts[1][0].callback({}) + self.pump() + + # Unread count is still zero as we've read the only message in the room + self.assertEqual(len(self.push_attempts), 2) + self.assertEqual( + self.push_attempts[1][2]["notification"]["counts"]["unread"], 0 + ) + + # Send another message + self.helper.send( + room_id, body="How's the weather today?", tok=other_access_token + ) + + # Advance time and make the push succeed + self.push_attempts[2][0].callback({}) + self.pump() + + # This push should contain an unread count of 1 as there's now been one + # message since our last read receipt + self.assertEqual(len(self.push_attempts), 3) + self.assertEqual( + self.push_attempts[2][2]["notification"]["counts"]["unread"], 1 + ) + + # Since we're grouping by room, sending more messages shouldn't increase the + # unread count, as they're all being sent in the same room + self.helper.send(room_id, body="Hello?", tok=other_access_token) + + # Advance time and make the push succeed + self.pump() + self.push_attempts[3][0].callback({}) + + self.helper.send(room_id, body="Hello??", tok=other_access_token) + + # Advance time and make the push succeed + self.pump() + self.push_attempts[4][0].callback({}) + + self.helper.send(room_id, body="HELLO???", tok=other_access_token) + + # Advance time and make the push succeed + self.pump() + self.push_attempts[5][0].callback({}) + + self.assertEqual(len(self.push_attempts), 6) -- cgit 1.5.1 From 09ac0569fe910ce4599a07e385522380ce1af6e1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 1 Dec 2020 11:04:57 +0000 Subject: Fix broken testcase (#8851) This test was broken by #8565. It doesn't need to set set `self.clock` here anyway - that is done by `setUp`. --- changelog.d/8851.misc | 1 + tests/rest/admin/test_media.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/8851.misc (limited to 'tests') diff --git a/changelog.d/8851.misc b/changelog.d/8851.misc new file mode 100644 index 0000000000..7bef422618 --- /dev/null +++ b/changelog.d/8851.misc @@ -0,0 +1 @@ +Simplify the way the `HomeServer` object caches its internal attributes. 
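(Context for the one-line fix below: since #8565 the `HomeServer` no longer
exposes a `clock` attribute at all, so fixtures have to go through the
accessor. A minimal sketch of the replacement pattern, were the clock still
needed here:

    clock = hs.get_clock()      # replaces the removed `hs.clock`
    now_ms = clock.time_msec()

In this testcase the assignment is simply dropped, since `setUp` already
provides `self.clock`.)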
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 2a65ab33bd..dadf9db660 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -192,7 +192,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.handler = hs.get_device_handler() self.media_repo = hs.get_media_repository_resource() self.server_name = hs.hostname - self.clock = hs.clock self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") -- cgit 1.5.1 From ddc43436838e19a7dd16860389bd76c74578dae7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 1 Dec 2020 11:10:42 +0000 Subject: Add some tests for `password_auth_providers` (#8819) These things seemed to be completely untested, so I added a load of tests for them. --- changelog.d/8819.misc | 1 + mypy.ini | 1 + tests/handlers/test_password_providers.py | 486 ++++++++++++++++++++++++++++++ 3 files changed, 488 insertions(+) create mode 100644 changelog.d/8819.misc create mode 100644 tests/handlers/test_password_providers.py (limited to 'tests') diff --git a/changelog.d/8819.misc b/changelog.d/8819.misc new file mode 100644 index 0000000000..a5793273a5 --- /dev/null +++ b/changelog.d/8819.misc @@ -0,0 +1 @@ +Add tests for `password_auth_provider`s. diff --git a/mypy.ini b/mypy.ini index a5503abe26..3c8d303064 100644 --- a/mypy.ini +++ b/mypy.ini @@ -80,6 +80,7 @@ files = synapse/util/metrics.py, tests/replication, tests/test_utils, + tests/handlers/test_password_providers.py, tests/rest/client/v2_alpha/test_auth.py, tests/util/test_stream_change_cache.py diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py new file mode 100644 index 0000000000..edfab8a13a --- /dev/null +++ b/tests/handlers/test_password_providers.py @@ -0,0 +1,486 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
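# (For orientation: the `providers_config` helper defined further down builds
# the equivalent of this homeserver.yaml stanza, which is how a deployment
# would enable such a module -- `my_package.MyAuthProvider` is a placeholder,
# not a real module:
#
#     password_providers:
#       - module: my_package.MyAuthProvider
#         config: {}
# )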
+ +"""Tests for the password_auth_provider interface""" + +from typing import Any, Type, Union + +from mock import Mock + +from twisted.internet import defer + +import synapse +from synapse.rest.client.v1 import login +from synapse.rest.client.v2_alpha import devices +from synapse.types import JsonDict + +from tests import unittest +from tests.server import FakeChannel +from tests.unittest import override_config + +# (possibly experimental) login flows we expect to appear in the list after the normal +# ones +ADDITIONAL_LOGIN_FLOWS = [{"type": "uk.half-shot.msc2778.login.application_service"}] + +# a mock instance which the dummy auth providers delegate to, so we can see what's going +# on +mock_password_provider = Mock() + + +class PasswordOnlyAuthProvider: + """A password_provider which only implements `check_password`.""" + + @staticmethod + def parse_config(self): + pass + + def __init__(self, config, account_handler): + pass + + def check_password(self, *args): + return mock_password_provider.check_password(*args) + + +class CustomAuthProvider: + """A password_provider which implements a custom login type.""" + + @staticmethod + def parse_config(self): + pass + + def __init__(self, config, account_handler): + pass + + def get_supported_login_types(self): + return {"test.login_type": ["test_field"]} + + def check_auth(self, *args): + return mock_password_provider.check_auth(*args) + + +def providers_config(*providers: Type[Any]) -> dict: + """Returns a config dict that will enable the given password auth providers""" + return { + "password_providers": [ + {"module": "%s.%s" % (__name__, provider.__qualname__), "config": {}} + for provider in providers + ] + } + + +class PasswordAuthProviderTests(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + devices.register_servlets, + ] + + def setUp(self): + # we use a global mock device, so make sure we are starting with a clean slate + mock_password_provider.reset_mock() + super().setUp() + + @override_config(providers_config(PasswordOnlyAuthProvider)) + def test_password_only_auth_provider_login(self): + # login flows should only have m.login.password + flows = self._get_login_flows() + self.assertEqual(flows, [{"type": "m.login.password"}] + ADDITIONAL_LOGIN_FLOWS) + + # check_password must return an awaitable + mock_password_provider.check_password.return_value = defer.succeed(True) + channel = self._send_password_login("u", "p") + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual("@u:test", channel.json_body["user_id"]) + mock_password_provider.check_password.assert_called_once_with("@u:test", "p") + mock_password_provider.reset_mock() + + # login with mxid should work too + channel = self._send_password_login("@u:bz", "p") + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual("@u:bz", channel.json_body["user_id"]) + mock_password_provider.check_password.assert_called_once_with("@u:bz", "p") + mock_password_provider.reset_mock() + + # try a weird username / pass. 
Honestly it's unclear what we *expect* to happen + # in these cases, but at least we can guard against the API changing + # unexpectedly + channel = self._send_password_login(" USER🙂NAME ", " pASS\U0001F622word ") + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual("@ USER🙂NAME :test", channel.json_body["user_id"]) + mock_password_provider.check_password.assert_called_once_with( + "@ USER🙂NAME :test", " pASS😢word " + ) + + @override_config(providers_config(PasswordOnlyAuthProvider)) + def test_password_only_auth_provider_ui_auth(self): + """UI Auth should delegate correctly to the password provider""" + + # create the user, otherwise access doesn't work + module_api = self.hs.get_module_api() + self.get_success(module_api.register_user("u")) + + # log in twice, to get two devices + mock_password_provider.check_password.return_value = defer.succeed(True) + tok1 = self.login("u", "p") + self.login("u", "p", device_id="dev2") + mock_password_provider.reset_mock() + + # have the auth provider deny the request to start with + mock_password_provider.check_password.return_value = defer.succeed(False) + + # make the initial request which returns a 401 + session = self._start_delete_device_session(tok1, "dev2") + mock_password_provider.check_password.assert_not_called() + + # Make another request providing the UI auth flow. + channel = self._authed_delete_device(tok1, "dev2", session, "u", "p") + self.assertEqual(channel.code, 401) # XXX why not a 403? + self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") + mock_password_provider.check_password.assert_called_once_with("@u:test", "p") + mock_password_provider.reset_mock() + + # Finally, check the request goes through when we allow it + mock_password_provider.check_password.return_value = defer.succeed(True) + channel = self._authed_delete_device(tok1, "dev2", session, "u", "p") + self.assertEqual(channel.code, 200) + mock_password_provider.check_password.assert_called_once_with("@u:test", "p") + + @override_config(providers_config(PasswordOnlyAuthProvider)) + def test_local_user_fallback_login(self): + """rejected login should fall back to local db""" + self.register_user("localuser", "localpass") + + # check_password must return an awaitable + mock_password_provider.check_password.return_value = defer.succeed(False) + channel = self._send_password_login("u", "p") + self.assertEqual(channel.code, 403, channel.result) + + channel = self._send_password_login("localuser", "localpass") + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual("@localuser:test", channel.json_body["user_id"]) + + @override_config(providers_config(PasswordOnlyAuthProvider)) + def test_local_user_fallback_ui_auth(self): + """rejected login should fall back to local db""" + self.register_user("localuser", "localpass") + + # have the auth provider deny the request + mock_password_provider.check_password.return_value = defer.succeed(False) + + # log in twice, to get two devices + tok1 = self.login("localuser", "localpass") + self.login("localuser", "localpass", device_id="dev2") + mock_password_provider.check_password.reset_mock() + + # first delete should give a 401 + session = self._start_delete_device_session(tok1, "dev2") + mock_password_provider.check_password.assert_not_called() + + # Wrong password + channel = self._authed_delete_device(tok1, "dev2", session, "localuser", "xxx") + self.assertEqual(channel.code, 401) # XXX why not a 403? 
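# (Likely because this is user-interactive auth: on a failed stage the
# servlet replies 401 along with the remaining flows so the client can
# retry within the same session, rather than a terminal 403.)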
+ self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") + mock_password_provider.check_password.assert_called_once_with( + "@localuser:test", "xxx" + ) + mock_password_provider.reset_mock() + + # Right password + channel = self._authed_delete_device( + tok1, "dev2", session, "localuser", "localpass" + ) + self.assertEqual(channel.code, 200) + mock_password_provider.check_password.assert_called_once_with( + "@localuser:test", "localpass" + ) + + @override_config( + { + **providers_config(PasswordOnlyAuthProvider), + "password_config": {"localdb_enabled": False}, + } + ) + def test_no_local_user_fallback_login(self): + """localdb_enabled can block login with the local password + """ + self.register_user("localuser", "localpass") + + # check_password must return an awaitable + mock_password_provider.check_password.return_value = defer.succeed(False) + channel = self._send_password_login("localuser", "localpass") + self.assertEqual(channel.code, 403) + self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") + mock_password_provider.check_password.assert_called_once_with( + "@localuser:test", "localpass" + ) + + @override_config( + { + **providers_config(PasswordOnlyAuthProvider), + "password_config": {"localdb_enabled": False}, + } + ) + def test_no_local_user_fallback_ui_auth(self): + """localdb_enabled can block ui auth with the local password + """ + self.register_user("localuser", "localpass") + + # allow login via the auth provider + mock_password_provider.check_password.return_value = defer.succeed(True) + + # log in twice, to get two devices + tok1 = self.login("localuser", "p") + self.login("localuser", "p", device_id="dev2") + mock_password_provider.check_password.reset_mock() + + # first delete should give a 401 + session = self._start_delete_device_session(tok1, "dev2") + mock_password_provider.check_password.assert_not_called() + + # now try deleting with the local password + mock_password_provider.check_password.return_value = defer.succeed(False) + channel = self._authed_delete_device( + tok1, "dev2", session, "localuser", "localpass" + ) + self.assertEqual(channel.code, 401) # XXX why not a 403? + self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") + mock_password_provider.check_password.assert_called_once_with( + "@localuser:test", "localpass" + ) + + @override_config( + { + **providers_config(PasswordOnlyAuthProvider), + "password_config": {"enabled": False}, + } + ) + def test_password_auth_disabled(self): + """password auth doesn't work if it's disabled across the board""" + # login flows should be empty + flows = self._get_login_flows() + self.assertEqual(flows, ADDITIONAL_LOGIN_FLOWS) + + # login shouldn't work and should be rejected with a 400 ("unknown login type") + channel = self._send_password_login("u", "p") + self.assertEqual(channel.code, 400, channel.result) + mock_password_provider.check_password.assert_not_called() + + @override_config(providers_config(CustomAuthProvider)) + def test_custom_auth_provider_login(self): + # login flows should have the custom flow and m.login.password, since we + # haven't disabled local password lookup. 
+ # (password must come first, because reasons) + flows = self._get_login_flows() + self.assertEqual( + flows, + [{"type": "m.login.password"}, {"type": "test.login_type"}] + + ADDITIONAL_LOGIN_FLOWS, + ) + + # login with missing param should be rejected + channel = self._send_login("test.login_type", "u") + self.assertEqual(channel.code, 400, channel.result) + mock_password_provider.check_auth.assert_not_called() + + mock_password_provider.check_auth.return_value = defer.succeed("@user:bz") + channel = self._send_login("test.login_type", "u", test_field="y") + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual("@user:bz", channel.json_body["user_id"]) + mock_password_provider.check_auth.assert_called_once_with( + "u", "test.login_type", {"test_field": "y"} + ) + mock_password_provider.reset_mock() + + # try a weird username. Again, it's unclear what we *expect* to happen + # in these cases, but at least we can guard against the API changing + # unexpectedly + mock_password_provider.check_auth.return_value = defer.succeed( + "@ MALFORMED! :bz" + ) + channel = self._send_login("test.login_type", " USER🙂NAME ", test_field=" abc ") + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual("@ MALFORMED! :bz", channel.json_body["user_id"]) + mock_password_provider.check_auth.assert_called_once_with( + " USER🙂NAME ", "test.login_type", {"test_field": " abc "} + ) + + @override_config(providers_config(CustomAuthProvider)) + def test_custom_auth_provider_ui_auth(self): + # register the user and log in twice, to get two devices + self.register_user("localuser", "localpass") + tok1 = self.login("localuser", "localpass") + self.login("localuser", "localpass", device_id="dev2") + + # make the initial request which returns a 401 + channel = self._delete_device(tok1, "dev2") + self.assertEqual(channel.code, 401) + # Ensure that flows are what is expected. + self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"]) + self.assertIn({"stages": ["test.login_type"]}, channel.json_body["flows"]) + session = channel.json_body["session"] + + # missing param + body = { + "auth": { + "type": "test.login_type", + "identifier": {"type": "m.id.user", "user": "localuser"}, + # FIXME "identifier" is ignored + # https://github.com/matrix-org/synapse/issues/5665 + "user": "localuser", + "session": session, + }, + } + + channel = self._delete_device(tok1, "dev2", body) + self.assertEqual(channel.code, 400) + # there's a perfectly good M_MISSING_PARAM errcode, but heaven forfend we should + # use it... 
+ self.assertIn("Missing parameters", channel.json_body["error"]) + mock_password_provider.check_auth.assert_not_called() + mock_password_provider.reset_mock() + + # right params, but authing as the wrong user + mock_password_provider.check_auth.return_value = defer.succeed("@user:bz") + body["auth"]["test_field"] = "foo" + channel = self._delete_device(tok1, "dev2", body) + self.assertEqual(channel.code, 403) + self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") + mock_password_provider.check_auth.assert_called_once_with( + "localuser", "test.login_type", {"test_field": "foo"} + ) + mock_password_provider.reset_mock() + + # and finally, succeed + mock_password_provider.check_auth.return_value = defer.succeed( + "@localuser:test" + ) + channel = self._delete_device(tok1, "dev2", body) + self.assertEqual(channel.code, 200) + mock_password_provider.check_auth.assert_called_once_with( + "localuser", "test.login_type", {"test_field": "foo"} + ) + + @override_config(providers_config(CustomAuthProvider)) + def test_custom_auth_provider_callback(self): + callback = Mock(return_value=defer.succeed(None)) + + mock_password_provider.check_auth.return_value = defer.succeed( + ("@user:bz", callback) + ) + channel = self._send_login("test.login_type", "u", test_field="y") + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual("@user:bz", channel.json_body["user_id"]) + mock_password_provider.check_auth.assert_called_once_with( + "u", "test.login_type", {"test_field": "y"} + ) + + # check the args to the callback + callback.assert_called_once() + call_args, call_kwargs = callback.call_args + # should be one positional arg + self.assertEqual(len(call_args), 1) + self.assertEqual(call_args[0]["user_id"], "@user:bz") + for p in ["user_id", "access_token", "device_id", "home_server"]: + self.assertIn(p, call_args[0]) + + @override_config( + {**providers_config(CustomAuthProvider), "password_config": {"enabled": False}} + ) + def test_custom_auth_password_disabled(self): + """Test login with a custom auth provider where password login is disabled""" + self.register_user("localuser", "localpass") + + flows = self._get_login_flows() + self.assertEqual(flows, [{"type": "test.login_type"}] + ADDITIONAL_LOGIN_FLOWS) + + # login shouldn't work and should be rejected with a 400 ("unknown login type") + channel = self._send_password_login("localuser", "localpass") + self.assertEqual(channel.code, 400, channel.result) + mock_password_provider.check_auth.assert_not_called() + + @override_config( + { + **providers_config(CustomAuthProvider), + "password_config": {"localdb_enabled": False}, + } + ) + def test_custom_auth_no_local_user_fallback(self): + """Test login with a custom auth provider where the local db is disabled""" + self.register_user("localuser", "localpass") + + flows = self._get_login_flows() + self.assertEqual(flows, [{"type": "test.login_type"}] + ADDITIONAL_LOGIN_FLOWS) + + # password login shouldn't work and should be rejected with a 400 + # ("unknown login type") + channel = self._send_password_login("localuser", "localpass") + self.assertEqual(channel.code, 400, channel.result) + + test_custom_auth_no_local_user_fallback.skip = "currently broken" + + def _get_login_flows(self) -> JsonDict: + _, channel = self.make_request("GET", "/_matrix/client/r0/login") + self.assertEqual(channel.code, 200, channel.result) + return channel.json_body["flows"] + + def _send_password_login(self, user: str, password: str) -> FakeChannel: + return self._send_login(type="m.login.password", 
user=user, password=password) + + def _send_login(self, type, user, **params) -> FakeChannel: + params.update({"user": user, "type": type}) + _, channel = self.make_request("POST", "/_matrix/client/r0/login", params) + return channel + + def _start_delete_device_session(self, access_token, device_id) -> str: + """Make an initial delete device request, and return the UI Auth session ID""" + channel = self._delete_device(access_token, device_id) + self.assertEqual(channel.code, 401) + # Ensure that flows are what is expected. + self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"]) + return channel.json_body["session"] + + def _authed_delete_device( + self, + access_token: str, + device_id: str, + session: str, + user_id: str, + password: str, + ) -> FakeChannel: + """Make a delete device request, authenticating with the given uid/password""" + return self._delete_device( + access_token, + device_id, + { + "auth": { + "type": "m.login.password", + "identifier": {"type": "m.id.user", "user": user_id}, + # FIXME "identifier" is ignored + # https://github.com/matrix-org/synapse/issues/5665 + "user": user_id, + "password": password, + "session": session, + }, + }, + ) + + def _delete_device( + self, access_token: str, device: str, body: Union[JsonDict, bytes] = b"", + ) -> FakeChannel: + """Delete an individual device.""" + _, channel = self.make_request( + "DELETE", "devices/" + device, body, access_token=access_token + ) + return channel -- cgit 1.5.1 From 89f79307306ed117d9dcfe46a31a3fe1a1a5ceae Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 1 Dec 2020 13:04:03 +0000 Subject: Don't offer password login when it is disabled (#8835) Fix a minor bug where we would offer "m.login.password" login if a custom auth provider supported it, even if password login was disabled. --- changelog.d/8835.bugfix | 1 + synapse/handlers/auth.py | 10 ++- tests/handlers/test_password_providers.py | 108 +++++++++++++++++++++++++++++- 3 files changed, 115 insertions(+), 4 deletions(-) create mode 100644 changelog.d/8835.bugfix (limited to 'tests') diff --git a/changelog.d/8835.bugfix b/changelog.d/8835.bugfix new file mode 100644 index 0000000000..446d04aa55 --- /dev/null +++ b/changelog.d/8835.bugfix @@ -0,0 +1 @@ +Fix minor long-standing bug in login, where we would offer the `password` login type if a custom auth provider supported it, even if password login was disabled. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 5163afd86c..588d3a60df 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -205,15 +205,23 @@ class AuthHandler(BaseHandler): # type in the list. (NB that the spec doesn't require us to do so and # clients which favour types that they don't understand over those that # they do are technically broken) + + # start out by assuming PASSWORD is enabled; we will remove it later if not. login_types = [] - if self._password_enabled: + if hs.config.password_localdb_enabled: login_types.append(LoginType.PASSWORD) + for provider in self.password_providers: if hasattr(provider, "get_supported_login_types"): for t in provider.get_supported_login_types().keys(): if t not in login_types: login_types.append(t) + + if not self._password_enabled: + login_types.remove(LoginType.PASSWORD) + self._supported_login_types = login_types + # Login types and UI Auth types have a heavy overlap, but are not # necessarily identical. 
Login types have SSO (and other login types) # added in the rest layer, see synapse.rest.client.v1.login.LoginRestServerlet.on_GET. diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index edfab8a13a..dfbc4ee07e 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -70,6 +70,24 @@ class CustomAuthProvider: return mock_password_provider.check_auth(*args) +class PasswordCustomAuthProvider: + """A password_provider which implements password login via `check_auth`, as well + as a custom type.""" + + @staticmethod + def parse_config(self): + pass + + def __init__(self, config, account_handler): + pass + + def get_supported_login_types(self): + return {"m.login.password": ["password"], "test.login_type": ["test_field"]} + + def check_auth(self, *args): + return mock_password_provider.check_auth(*args) + + def providers_config(*providers: Type[Any]) -> dict: """Returns a config dict that will enable the given password auth providers""" return { @@ -246,7 +264,11 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.check_password.reset_mock() # first delete should give a 401 - session = self._start_delete_device_session(tok1, "dev2") + channel = self._delete_device(tok1, "dev2") + self.assertEqual(channel.code, 401) + # there are no valid flows here! + self.assertEqual(channel.json_body["flows"], []) + session = channel.json_body["session"] mock_password_provider.check_password.assert_not_called() # now try deleting with the local password @@ -410,6 +432,88 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, 400, channel.result) mock_password_provider.check_auth.assert_not_called() + @override_config( + { + **providers_config(PasswordCustomAuthProvider), + "password_config": {"enabled": False}, + } + ) + def test_password_custom_auth_password_disabled_login(self): + """log in with a custom auth provider which implements password, but password + login is disabled""" + self.register_user("localuser", "localpass") + + flows = self._get_login_flows() + self.assertEqual(flows, [{"type": "test.login_type"}] + ADDITIONAL_LOGIN_FLOWS) + + # login shouldn't work and should be rejected with a 400 ("unknown login type") + channel = self._send_password_login("localuser", "localpass") + self.assertEqual(channel.code, 400, channel.result) + mock_password_provider.check_auth.assert_not_called() + + @override_config( + { + **providers_config(PasswordCustomAuthProvider), + "password_config": {"enabled": False}, + } + ) + def test_password_custom_auth_password_disabled_ui_auth(self): + """UI Auth with a custom auth provider which implements password, but password + login is disabled""" + # register the user and log in twice via the test login type to get two devices, + self.register_user("localuser", "localpass") + mock_password_provider.check_auth.return_value = defer.succeed( + "@localuser:test" + ) + channel = self._send_login("test.login_type", "localuser", test_field="") + self.assertEqual(channel.code, 200, channel.result) + tok1 = channel.json_body["access_token"] + + channel = self._send_login( + "test.login_type", "localuser", test_field="", device_id="dev2" + ) + self.assertEqual(channel.code, 200, channel.result) + + # make the initial request which returns a 401 + channel = self._delete_device(tok1, "dev2") + self.assertEqual(channel.code, 401) + # Ensure that flows are what is expected. 
In particular, "password" should *not* + # be present. + self.assertIn({"stages": ["test.login_type"]}, channel.json_body["flows"]) + session = channel.json_body["session"] + + mock_password_provider.reset_mock() + + # check that auth with password is rejected + body = { + "auth": { + "type": "m.login.password", + "identifier": {"type": "m.id.user", "user": "localuser"}, + # FIXME "identifier" is ignored + # https://github.com/matrix-org/synapse/issues/5665 + "user": "localuser", + "password": "localpass", + "session": session, + }, + } + + channel = self._delete_device(tok1, "dev2", body) + self.assertEqual(channel.code, 400) + self.assertEqual( + "Password login has been disabled.", channel.json_body["error"] + ) + mock_password_provider.check_auth.assert_not_called() + mock_password_provider.reset_mock() + + # successful auth + body["auth"]["type"] = "test.login_type" + body["auth"]["test_field"] = "x" + channel = self._delete_device(tok1, "dev2", body) + self.assertEqual(channel.code, 200) + mock_password_provider.check_auth.assert_called_once_with( + "localuser", "test.login_type", {"test_field": "x"} + ) + @override_config( { **providers_config(CustomAuthProvider), @@ -428,8 +532,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): channel = self._send_password_login("localuser", "localpass") self.assertEqual(channel.code, 400, channel.result) - test_custom_auth_no_local_user_fallback.skip = "currently broken" - def _get_login_flows(self) -> JsonDict: _, channel = self.make_request("GET", "/_matrix/client/r0/login") self.assertEqual(channel.code, 200, channel.result) -- cgit 1.5.1 From 4d9496559d25ba36eaea45d73e67e79b9d936450 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 1 Dec 2020 17:42:26 +0000 Subject: Support "identifier" dicts in UIA (#8848) The spec requires synapse to support `identifier` dicts for `m.login.password` user-interactive auth, which it did not (instead, it required an undocumented `user` parameter.) To fix this properly, we need to pull the code that interprets `identifier` into `AuthHandler.validate_login` so that it can be called from the UIA code. Fixes #5665. --- changelog.d/8848.bugfix | 1 + synapse/handlers/auth.py | 185 ++++++++++++++++++++++++++---- synapse/rest/client/v1/login.py | 107 +---------------- tests/handlers/test_password_providers.py | 11 +- tests/rest/client/v2_alpha/test_auth.py | 33 ++++-- 5 files changed, 190 insertions(+), 147 deletions(-) create mode 100644 changelog.d/8848.bugfix (limited to 'tests') diff --git a/changelog.d/8848.bugfix b/changelog.d/8848.bugfix new file mode 100644 index 0000000000..499e66f05b --- /dev/null +++ b/changelog.d/8848.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug which caused Synapse to require unspecified parameters during user-interactive authentication. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 588d3a60df..8815f685b9 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -238,6 +238,13 @@ class AuthHandler(BaseHandler): burst_count=self.hs.config.rc_login_failed_attempts.burst_count, ) + # Ratelimitier for failed /login attempts + self._failed_login_attempts_ratelimiter = Ratelimiter( + clock=hs.get_clock(), + rate_hz=self.hs.config.rc_login_failed_attempts.per_second, + burst_count=self.hs.config.rc_login_failed_attempts.burst_count, + ) + self._clock = self.hs.get_clock() # Expire old UI auth sessions after a period of time. 
@@ -650,14 +657,8 @@ class AuthHandler(BaseHandler): res = await checker.check_auth(authdict, clientip=clientip) return res - # build a v1-login-style dict out of the authdict and fall back to the - # v1 code - user_id = authdict.get("user") - - if user_id is None: - raise SynapseError(400, "", Codes.MISSING_PARAM) - - (canonical_id, callback) = await self.validate_login(user_id, authdict) + # fall back to the v1 login flow + canonical_id, _ = await self.validate_login(authdict) return canonical_id def _get_params_recaptcha(self) -> dict: @@ -832,17 +833,17 @@ class AuthHandler(BaseHandler): return self._supported_login_types async def validate_login( - self, username: str, login_submission: Dict[str, Any] + self, login_submission: Dict[str, Any], ratelimit: bool = False, ) -> Tuple[str, Optional[Callable[[Dict[str, str]], None]]]: """Authenticates the user for the /login API - Also used by the user-interactive auth flow to validate - m.login.password auth types. + Also used by the user-interactive auth flow to validate auth types which don't + have an explicit UIA handler, including m.login.password. Args: - username: username supplied by the user login_submission: the whole of the login submission (including 'type' and other relevant fields) + ratelimit: whether to apply the failed_login_attempt ratelimiter Returns: A tuple of the canonical user id, and optional callback to be called once the access token and device id are issued @@ -851,29 +852,161 @@ class AuthHandler(BaseHandler): SynapseError if there was a problem with the request LoginError if there was an authentication problem. """ - - if username.startswith("@"): - qualified_user_id = username - else: - qualified_user_id = UserID(username, self.hs.hostname).to_string() - login_type = login_submission.get("type") - known_login_type = False + + # ideally, we wouldn't be checking the identifier unless we know we have a login + # method which uses it (https://github.com/matrix-org/synapse/issues/8836) + # + # But the auth providers' check_auth interface requires a username, so in + # practice we can only support login methods which we can map to a username + # anyway. # special case to check for "password" for the check_password interface # for the auth providers password = login_submission.get("password") - if login_type == LoginType.PASSWORD: if not self._password_enabled: raise SynapseError(400, "Password login has been disabled.") - if not password: - raise SynapseError(400, "Missing parameter: password") + if not isinstance(password, str): + raise SynapseError(400, "Bad parameter: password", Codes.INVALID_PARAM) + + # map old-school login fields into new-school "identifier" fields. + identifier_dict = convert_client_dict_legacy_fields_to_identifier( + login_submission + ) + + # convert phone type identifiers to generic threepids + if identifier_dict["type"] == "m.id.phone": + identifier_dict = login_id_phone_to_thirdparty(identifier_dict) + + # convert threepid identifiers to user IDs + if identifier_dict["type"] == "m.id.thirdparty": + address = identifier_dict.get("address") + medium = identifier_dict.get("medium") + + if medium is None or address is None: + raise SynapseError(400, "Invalid thirdparty identifier") + + # For emails, canonicalise the address. + # We store all email addresses canonicalised in the DB.
+ # (See add_threepid in synapse/handlers/auth.py) + if medium == "email": + try: + address = canonicalise_email(address) + except ValueError as e: + raise SynapseError(400, str(e)) + + # We also apply account rate limiting using the 3PID as a key, as + # otherwise using 3PID bypasses the ratelimiting based on user ID. + if ratelimit: + self._failed_login_attempts_ratelimiter.ratelimit( + (medium, address), update=False + ) + + # Check for login providers that support 3pid login types + if login_type == LoginType.PASSWORD: + # we've already checked that there is a (valid) password field + assert isinstance(password, str) + ( + canonical_user_id, + callback_3pid, + ) = await self.check_password_provider_3pid(medium, address, password) + if canonical_user_id: + # Authentication through password provider and 3pid succeeded + return canonical_user_id, callback_3pid + + # No password providers were able to handle this 3pid + # Check local store + user_id = await self.hs.get_datastore().get_user_id_by_threepid( + medium, address + ) + if not user_id: + logger.warning( + "unknown 3pid identifier medium %s, address %r", medium, address + ) + # We mark that we've failed to log in here, as + # `check_password_provider_3pid` might have returned `None` due + # to an incorrect password, rather than the account not + # existing. + # + # If it returned None but the 3PID was bound then we won't hit + # this code path, which is fine as then the per-user ratelimit + # will kick in below. + if ratelimit: + self._failed_login_attempts_ratelimiter.can_do_action( + (medium, address) + ) + raise LoginError(403, "", errcode=Codes.FORBIDDEN) + + identifier_dict = {"type": "m.id.user", "user": user_id} + + # by this point, the identifier should be an m.id.user: if it's anything + # else, we haven't understood it. + if identifier_dict["type"] != "m.id.user": + raise SynapseError(400, "Unknown login identifier type") + + username = identifier_dict.get("user") + if not username: + raise SynapseError(400, "User identifier is missing 'user' key") + + if username.startswith("@"): + qualified_user_id = username + else: + qualified_user_id = UserID(username, self.hs.hostname).to_string() + + # Check if we've hit the failed ratelimit (but don't update it) + if ratelimit: + self._failed_login_attempts_ratelimiter.ratelimit( + qualified_user_id.lower(), update=False + ) + + try: + return await self._validate_userid_login(username, login_submission) + except LoginError: + # The user has failed to log in, so we need to update the rate + # limiter. Using `can_do_action` avoids us raising a ratelimit + # exception and masking the LoginError. The actual ratelimiting + # should have happened above. + if ratelimit: + self._failed_login_attempts_ratelimiter.can_do_action( + qualified_user_id.lower() + ) + raise + + async def _validate_userid_login( + self, username: str, login_submission: Dict[str, Any], + ) -> Tuple[str, Optional[Callable[[Dict[str, str]], None]]]: + """Helper for validate_login + + Handles login, once we've mapped 3pids onto userids + + Args: + username: the username, from the identifier dict + login_submission: the whole of the login submission + (including 'type' and other relevant fields) + Returns: + A tuple of the canonical user id, and optional callback + to be called once the access token and device id are issued + Raises: + StoreError if there was a problem accessing the database + SynapseError if there was a problem with the request + LoginError if there was an authentication problem. 
+ """ + if username.startswith("@"): + qualified_user_id = username + else: + qualified_user_id = UserID(username, self.hs.hostname).to_string() + + login_type = login_submission.get("type") + known_login_type = False for provider in self.password_providers: if hasattr(provider, "check_password") and login_type == LoginType.PASSWORD: known_login_type = True - is_valid = await provider.check_password(qualified_user_id, password) + # we've already checked that there is a (valid) password field + is_valid = await provider.check_password( + qualified_user_id, login_submission["password"] + ) if is_valid: return qualified_user_id, None @@ -914,8 +1047,12 @@ class AuthHandler(BaseHandler): if login_type == LoginType.PASSWORD and self.hs.config.password_localdb_enabled: known_login_type = True + # we've already checked that there is a (valid) password field + password = login_submission["password"] + assert isinstance(password, str) + canonical_user_id = await self._check_local_password( - qualified_user_id, password # type: ignore + qualified_user_id, password ) if canonical_user_id: diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 074bdd66c9..d7ae148214 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -19,10 +19,6 @@ from typing import Awaitable, Callable, Dict, Optional from synapse.api.errors import Codes, LoginError, SynapseError from synapse.api.ratelimiting import Ratelimiter from synapse.appservice import ApplicationService -from synapse.handlers.auth import ( - convert_client_dict_legacy_fields_to_identifier, - login_id_phone_to_thirdparty, -) from synapse.http.server import finish_request from synapse.http.servlet import ( RestServlet, @@ -33,7 +29,6 @@ from synapse.http.site import SynapseRequest from synapse.rest.client.v2_alpha._base import client_patterns from synapse.rest.well_known import WellKnownBuilder from synapse.types import JsonDict, UserID -from synapse.util.threepids import canonicalise_email logger = logging.getLogger(__name__) @@ -78,11 +73,6 @@ class LoginRestServlet(RestServlet): rate_hz=self.hs.config.rc_login_account.per_second, burst_count=self.hs.config.rc_login_account.burst_count, ) - self._failed_attempts_ratelimiter = Ratelimiter( - clock=hs.get_clock(), - rate_hz=self.hs.config.rc_login_failed_attempts.per_second, - burst_count=self.hs.config.rc_login_failed_attempts.burst_count, - ) def on_GET(self, request: SynapseRequest): flows = [] @@ -140,17 +130,6 @@ class LoginRestServlet(RestServlet): result["well_known"] = well_known_data return 200, result - def _get_qualified_user_id(self, identifier): - if identifier["type"] != "m.id.user": - raise SynapseError(400, "Unknown login identifier type") - if "user" not in identifier: - raise SynapseError(400, "User identifier is missing 'user' key") - - if identifier["user"].startswith("@"): - return identifier["user"] - else: - return UserID(identifier["user"], self.hs.hostname).to_string() - async def _do_appservice_login( self, login_submission: JsonDict, appservice: ApplicationService ): @@ -201,91 +180,9 @@ class LoginRestServlet(RestServlet): login_submission.get("address"), login_submission.get("user"), ) - identifier = convert_client_dict_legacy_fields_to_identifier(login_submission) - - # convert phone type identifiers to generic threepids - if identifier["type"] == "m.id.phone": - identifier = login_id_phone_to_thirdparty(identifier) - - # convert threepid identifiers to user IDs - if identifier["type"] == "m.id.thirdparty": - address = 
identifier.get("address") - medium = identifier.get("medium") - - if medium is None or address is None: - raise SynapseError(400, "Invalid thirdparty identifier") - - # For emails, canonicalise the address. - # We store all email addresses canonicalised in the DB. - # (See add_threepid in synapse/handlers/auth.py) - if medium == "email": - try: - address = canonicalise_email(address) - except ValueError as e: - raise SynapseError(400, str(e)) - - # We also apply account rate limiting using the 3PID as a key, as - # otherwise using 3PID bypasses the ratelimiting based on user ID. - self._failed_attempts_ratelimiter.ratelimit((medium, address), update=False) - - # Check for login providers that support 3pid login types - ( - canonical_user_id, - callback_3pid, - ) = await self.auth_handler.check_password_provider_3pid( - medium, address, login_submission["password"] - ) - if canonical_user_id: - # Authentication through password provider and 3pid succeeded - - result = await self._complete_login( - canonical_user_id, login_submission, callback_3pid - ) - return result - - # No password providers were able to handle this 3pid - # Check local store - user_id = await self.hs.get_datastore().get_user_id_by_threepid( - medium, address - ) - if not user_id: - logger.warning( - "unknown 3pid identifier medium %s, address %r", medium, address - ) - # We mark that we've failed to log in here, as - # `check_password_provider_3pid` might have returned `None` due - # to an incorrect password, rather than the account not - # existing. - # - # If it returned None but the 3PID was bound then we won't hit - # this code path, which is fine as then the per-user ratelimit - # will kick in below. - self._failed_attempts_ratelimiter.can_do_action((medium, address)) - raise LoginError(403, "", errcode=Codes.FORBIDDEN) - - identifier = {"type": "m.id.user", "user": user_id} - - # by this point, the identifier should be an m.id.user: if it's anything - # else, we haven't understood it. - qualified_user_id = self._get_qualified_user_id(identifier) - - # Check if we've hit the failed ratelimit (but don't update it) - self._failed_attempts_ratelimiter.ratelimit( - qualified_user_id.lower(), update=False + canonical_user_id, callback = await self.auth_handler.validate_login( + login_submission, ratelimit=True ) - - try: - canonical_user_id, callback = await self.auth_handler.validate_login( - identifier["user"], login_submission - ) - except LoginError: - # The user has failed to log in, so we need to update the rate - # limiter. Using `can_do_action` avoids us raising a ratelimit - # exception and masking the LoginError. The actual ratelimiting - # should have happened above. 
- self._failed_attempts_ratelimiter.can_do_action(qualified_user_id.lower()) - raise - result = await self._complete_login( canonical_user_id, login_submission, callback ) diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index dfbc4ee07e..22b9a11dc0 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -358,9 +358,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): "auth": { "type": "test.login_type", "identifier": {"type": "m.id.user", "user": "localuser"}, - # FIXME "identifier" is ignored - # https://github.com/matrix-org/synapse/issues/5665 - "user": "localuser", "session": session, }, } @@ -489,9 +486,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): "auth": { "type": "m.login.password", "identifier": {"type": "m.id.user", "user": "localuser"}, - # FIXME "identifier" is ignored - # https://github.com/matrix-org/synapse/issues/5665 - "user": "localuser", "password": "localpass", "session": session, }, @@ -541,7 +535,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): return self._send_login(type="m.login.password", user=user, password=password) def _send_login(self, type, user, **params) -> FakeChannel: - params.update({"user": user, "type": type}) + params.update({"identifier": {"type": "m.id.user", "user": user}, "type": type}) _, channel = self.make_request("POST", "/_matrix/client/r0/login", params) return channel @@ -569,9 +563,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): "auth": { "type": "m.login.password", "identifier": {"type": "m.id.user", "user": user_id}, - # FIXME "identifier" is ignored - # https://github.com/matrix-org/synapse/issues/5665 - "user": user_id, "password": password, "session": session, }, diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index f684c37db5..77246e478f 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -38,11 +38,6 @@ class DummyRecaptchaChecker(UserInteractiveAuthChecker): return succeed(True) -class DummyPasswordChecker(UserInteractiveAuthChecker): - def check_auth(self, authdict, clientip): - return succeed(authdict["identifier"]["user"]) - - class FallbackAuthTests(unittest.HomeserverTestCase): servlets = [ @@ -162,9 +157,6 @@ class UIAuthTests(unittest.HomeserverTestCase): ] def prepare(self, reactor, clock, hs): - auth_handler = hs.get_auth_handler() - auth_handler.checkers[LoginType.PASSWORD] = DummyPasswordChecker(hs) - self.user_pass = "pass" self.user = self.register_user("test", self.user_pass) self.user_tok = self.login("test", self.user_pass) @@ -234,6 +226,31 @@ class UIAuthTests(unittest.HomeserverTestCase): }, ) + def test_grandfathered_identifier(self): + """Check behaviour without "identifier" dict + + Synapse used to require clients to submit a "user" field for m.login.password + UIA - check that still works. + """ + + device_id = self.get_device_ids()[0] + channel = self.delete_device(device_id, 401) + session = channel.json_body["session"] + + # Make another request providing the UI auth flow. + self.delete_device( + device_id, + 200, + { + "auth": { + "type": "m.login.password", + "user": self.user, + "password": self.user_pass, + "session": session, + }, + }, + ) + def test_can_change_body(self): """ The client dict can be modified during the user interactive authentication session. 
-- cgit 1.5.1 From edb3d3f82716c2b5c903ddb4d0df155e06c5c9e9 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 2 Dec 2020 10:38:18 +0000 Subject: Allow specifying room version in 'RestHelper.create_room_as' and add typing (#8854) This PR adds a `room_version` argument to the `RestHelper`'s `create_room_as` function for tests. I plan to use this for testing knocking, which currently uses an unstable room version. --- changelog.d/8854.misc | 1 + tests/rest/client/v1/utils.py | 27 +++++++++++++++++++++++++-- 2 files changed, 26 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8854.misc (limited to 'tests') diff --git a/changelog.d/8854.misc b/changelog.d/8854.misc new file mode 100644 index 0000000000..5895df2d5c --- /dev/null +++ b/changelog.d/8854.misc @@ -0,0 +1 @@ +Allow for specifying a room version when creating a room in unit tests via `RestHelper.create_room_as`. \ No newline at end of file diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index b58768675b..737c38c396 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -41,14 +41,37 @@ class RestHelper: auth_user_id = attr.ib() def create_room_as( - self, room_creator=None, is_public=True, tok=None, expect_code=200, - ): + self, + room_creator: str = None, + is_public: bool = True, + room_version: str = None, + tok: str = None, + expect_code: int = 200, + ) -> str: + """ + Create a room. + + Args: + room_creator: The user ID to create the room with. + is_public: If True, the `visibility` parameter will be set to the + default (public). Otherwise, the `visibility` parameter will be set + to "private". + room_version: The room version to create the room as. Defaults to Synapse's + default room version. + tok: The access token to use in the request. + expect_code: The expected HTTP response code. + + Returns: + The ID of the newly created room. + """ temp_id = self.auth_user_id self.auth_user_id = room_creator path = "/_matrix/client/r0/createRoom" content = {} if not is_public: content["visibility"] = "private" + if room_version: + content["room_version"] = room_version if tok: path = path + "?access_token=%s" % tok -- cgit 1.5.1 From d3ed93504bb6bb8ad138e356e3c74b6a7286299b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 2 Dec 2020 10:38:50 +0000 Subject: Create a `PasswordProvider` wrapper object (#8849) The idea here is to abstract out all the conditional code which tests which methods a given password provider has, to provide a consistent interface. --- changelog.d/8849.misc | 1 + synapse/handlers/auth.py | 203 ++++++++++++++++++++++-------- tests/handlers/test_password_providers.py | 5 +- 3 files changed, 152 insertions(+), 57 deletions(-) create mode 100644 changelog.d/8849.misc (limited to 'tests') diff --git a/changelog.d/8849.misc b/changelog.d/8849.misc new file mode 100644 index 0000000000..3dd496ce61 --- /dev/null +++ b/changelog.d/8849.misc @@ -0,0 +1 @@ +Refactor `password_auth_provider` support code. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 8815f685b9..c7dc07008a 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd +# Copyright 2019 - 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,6 +26,7 @@ from typing import ( Dict, Iterable, List, + Mapping, Optional, Tuple, Union, @@ -181,17 +183,12 @@ class AuthHandler(BaseHandler): # better way to break the loop account_handler = ModuleApi(hs, self) - self.password_providers = [] - for module, config in hs.config.password_providers: - try: - self.password_providers.append( - module(config=config, account_handler=account_handler) - ) - except Exception as e: - logger.error("Error while initializing %r: %s", module, e) - raise + self.password_providers = [ + PasswordProvider.load(module, config, account_handler) + for module, config in hs.config.password_providers + ] - logger.info("Extra password_providers: %r", self.password_providers) + logger.info("Extra password_providers: %s", self.password_providers) self.hs = hs # FIXME better possibility to access registrationHandler later? self.macaroon_gen = hs.get_macaroon_generator() @@ -853,6 +850,8 @@ class AuthHandler(BaseHandler): LoginError if there was an authentication problem. """ login_type = login_submission.get("type") + if not isinstance(login_type, str): + raise SynapseError(400, "Bad parameter: type", Codes.INVALID_PARAM) # ideally, we wouldn't be checking the identifier unless we know we have a login # method which uses it (https://github.com/matrix-org/synapse/issues/8836) @@ -998,24 +997,12 @@ class AuthHandler(BaseHandler): qualified_user_id = UserID(username, self.hs.hostname).to_string() login_type = login_submission.get("type") + # we already checked that we have a valid login type + assert isinstance(login_type, str) + known_login_type = False for provider in self.password_providers: - if hasattr(provider, "check_password") and login_type == LoginType.PASSWORD: - known_login_type = True - # we've already checked that there is a (valid) password field - is_valid = await provider.check_password( - qualified_user_id, login_submission["password"] - ) - if is_valid: - return qualified_user_id, None - - if not hasattr(provider, "get_supported_login_types") or not hasattr( - provider, "check_auth" - ): - # this password provider doesn't understand custom login types - continue - supported_login_types = provider.get_supported_login_types() if login_type not in supported_login_types: # this password provider doesn't understand this login type @@ -1040,8 +1027,6 @@ class AuthHandler(BaseHandler): result = await provider.check_auth(username, login_type, login_dict) if result: - if isinstance(result, str): - result = (result, None) return result if login_type == LoginType.PASSWORD and self.hs.config.password_localdb_enabled: @@ -1083,19 +1068,9 @@ class AuthHandler(BaseHandler): unsuccessful, `user_id` and `callback` are both `None`. 
""" for provider in self.password_providers: - if hasattr(provider, "check_3pid_auth"): - # This function is able to return a deferred that either - # resolves None, meaning authentication failure, or upon - # success, to a str (which is the user_id) or a tuple of - # (user_id, callback_func), where callback_func should be run - # after we've finished everything else - result = await provider.check_3pid_auth(medium, address, password) - if result: - # Check if the return value is a str or a tuple - if isinstance(result, str): - # If it's a str, set callback function to None - result = (result, None) - return result + result = await provider.check_3pid_auth(medium, address, password) + if result: + return result return None, None @@ -1153,16 +1128,11 @@ class AuthHandler(BaseHandler): # see if any of our auth providers want to know about this for provider in self.password_providers: - if hasattr(provider, "on_logged_out"): - # This might return an awaitable, if it does block the log out - # until it completes. - result = provider.on_logged_out( - user_id=user_info.user_id, - device_id=user_info.device_id, - access_token=access_token, - ) - if inspect.isawaitable(result): - await result + await provider.on_logged_out( + user_id=user_info.user_id, + device_id=user_info.device_id, + access_token=access_token, + ) # delete pushers associated with this access token if user_info.token_id is not None: @@ -1191,11 +1161,10 @@ class AuthHandler(BaseHandler): # see if any of our auth providers want to know about this for provider in self.password_providers: - if hasattr(provider, "on_logged_out"): - for token, token_id, device_id in tokens_and_devices: - await provider.on_logged_out( - user_id=user_id, device_id=device_id, access_token=token - ) + for token, token_id, device_id in tokens_and_devices: + await provider.on_logged_out( + user_id=user_id, device_id=device_id, access_token=token + ) # delete pushers associated with the access tokens await self.hs.get_pusherpool().remove_pushers_by_access_token( @@ -1519,3 +1488,127 @@ class MacaroonGenerator: macaroon.add_first_party_caveat("gen = 1") macaroon.add_first_party_caveat("user_id = %s" % (user_id,)) return macaroon + + +class PasswordProvider: + """Wrapper for a password auth provider module + + This class abstracts out all of the backwards-compatibility hacks for + password providers, to provide a consistent interface. + """ + + @classmethod + def load(cls, module, config, module_api: ModuleApi) -> "PasswordProvider": + try: + pp = module(config=config, account_handler=module_api) + except Exception as e: + logger.error("Error while initializing %r: %s", module, e) + raise + return cls(pp, module_api) + + def __init__(self, pp, module_api: ModuleApi): + self._pp = pp + self._module_api = module_api + + self._supported_login_types = {} + + # grandfather in check_password support + if hasattr(self._pp, "check_password"): + self._supported_login_types[LoginType.PASSWORD] = ("password",) + + g = getattr(self._pp, "get_supported_login_types", None) + if g: + self._supported_login_types.update(g()) + + def __str__(self): + return str(self._pp) + + def get_supported_login_types(self) -> Mapping[str, Iterable[str]]: + """Get the login types supported by this password provider + + Returns a map from a login type identifier (such as m.login.password) to an + iterable giving the fields which must be provided by the user in the submission + to the /login API. 
+ + This wrapper adds m.login.password to the list if the underlying password + provider supports the check_password() api. + """ + return self._supported_login_types + + async def check_auth( + self, username: str, login_type: str, login_dict: JsonDict + ) -> Optional[Tuple[str, Optional[Callable]]]: + """Check if the user has presented valid login credentials + + This wrapper also calls check_password() if the underlying password provider + supports the check_password() api and the login type is m.login.password. + + Args: + username: user id presented by the client. Either an MXID or an unqualified + username. + + login_type: the login type being attempted - one of the types returned by + get_supported_login_types() + + login_dict: the dictionary of login secrets passed by the client. + + Returns: (user_id, callback) where `user_id` is the fully-qualified mxid of the + user, and `callback` is an optional callback which will be called with the + result from the /login call (including access_token, device_id, etc.) + """ + # first grandfather in a call to check_password + if login_type == LoginType.PASSWORD: + g = getattr(self._pp, "check_password", None) + if g: + qualified_user_id = self._module_api.get_qualified_user_id(username) + is_valid = await self._pp.check_password( + qualified_user_id, login_dict["password"] + ) + if is_valid: + return qualified_user_id, None + + g = getattr(self._pp, "check_auth", None) + if not g: + return None + result = await g(username, login_type, login_dict) + + # Check if the return value is a str or a tuple + if isinstance(result, str): + # If it's a str, set callback function to None + return result, None + + return result + + async def check_3pid_auth( + self, medium: str, address: str, password: str + ) -> Optional[Tuple[str, Optional[Callable]]]: + g = getattr(self._pp, "check_3pid_auth", None) + if not g: + return None + + # This function is able to return a deferred that either + # resolves None, meaning authentication failure, or upon + # success, to a str (which is the user_id) or a tuple of + # (user_id, callback_func), where callback_func should be run + # after we've finished everything else + result = await g(medium, address, password) + + # Check if the return value is a str or a tuple + if isinstance(result, str): + # If it's a str, set callback function to None + return result, None + + return result + + async def on_logged_out( + self, user_id: str, device_id: Optional[str], access_token: str + ) -> None: + g = getattr(self._pp, "on_logged_out", None) + if not g: + return + + # This might return an awaitable, if it does block the log out + # until it completes. + result = g(user_id=user_id, device_id=device_id, access_token=access_token,) + if inspect.isawaitable(result): + await result diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 22b9a11dc0..ceaf0902d2 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -266,8 +266,9 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # first delete should give a 401 channel = self._delete_device(tok1, "dev2") self.assertEqual(channel.code, 401) - # there are no valid flows here! - self.assertEqual(channel.json_body["flows"], []) + # m.login.password UIA is permitted because the auth provider allows it, + # even though the localdb does not. 
+ self.assertEqual(channel.json_body["flows"], [{"stages": ["m.login.password"]}]) session = channel.json_body["session"] mock_password_provider.check_password.assert_not_called() -- cgit 1.5.1 From c21bdc813f5c21153cded05bcd0a57b5836f09fe Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Dec 2020 07:09:21 -0500 Subject: Add basic SAML tests for mapping users. (#8800) --- .buildkite/scripts/test_old_deps.sh | 2 +- changelog.d/8800.misc | 1 + synapse/handlers/saml_handler.py | 2 +- tests/handlers/test_oidc.py | 34 ++++----- tests/handlers/test_saml.py | 136 ++++++++++++++++++++++++++++++++++++ 5 files changed, 156 insertions(+), 19 deletions(-) create mode 100644 changelog.d/8800.misc create mode 100644 tests/handlers/test_saml.py (limited to 'tests') diff --git a/.buildkite/scripts/test_old_deps.sh b/.buildkite/scripts/test_old_deps.sh index cdb77b556c..9905c4bc4f 100755 --- a/.buildkite/scripts/test_old_deps.sh +++ b/.buildkite/scripts/test_old_deps.sh @@ -6,7 +6,7 @@ set -ex apt-get update -apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev tox +apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox export LANG="C.UTF-8" diff --git a/changelog.d/8800.misc b/changelog.d/8800.misc new file mode 100644 index 0000000000..57cca8fee5 --- /dev/null +++ b/changelog.d/8800.misc @@ -0,0 +1 @@ +Add additional error checking for OpenID Connect and SAML mapping providers. diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 34db10ffe4..7ffad7d8af 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -265,7 +265,7 @@ class SamlHandler(BaseHandler): return UserAttributes( localpart=result.get("mxid_localpart"), display_name=result.get("displayname"), - emails=result.get("emails"), + emails=result.get("emails", []), ) with (await self._mapping_lock.queue(self._auth_provider_id)): diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index e880d32be6..c9807a7b73 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -23,7 +23,7 @@ import pymacaroons from twisted.python.failure import Failure from twisted.web._newclient import ResponseDone -from synapse.handlers.oidc_handler import OidcError, OidcHandler, OidcMappingProvider +from synapse.handlers.oidc_handler import OidcError, OidcMappingProvider from synapse.handlers.sso import MappingException from synapse.types import UserID @@ -127,13 +127,8 @@ async def get_json(url): class OidcHandlerTestCase(HomeserverTestCase): - def make_homeserver(self, reactor, clock): - - self.http_client = Mock(spec=["get_json"]) - self.http_client.get_json.side_effect = get_json - self.http_client.user_agent = "Synapse Test" - - config = self.default_config() + def default_config(self): + config = super().default_config() config["public_baseurl"] = BASE_URL oidc_config = { "enabled": True, @@ -149,19 +144,24 @@ class OidcHandlerTestCase(HomeserverTestCase): oidc_config.update(config.get("oidc_config", {})) config["oidc_config"] = oidc_config - hs = self.setup_test_homeserver( - http_client=self.http_client, - proxied_http_client=self.http_client, - config=config, - ) + return config + + def make_homeserver(self, reactor, clock): + + self.http_client = Mock(spec=["get_json"]) + self.http_client.get_json.side_effect = get_json + self.http_client.user_agent = "Synapse Test" + + hs = self.setup_test_homeserver(proxied_http_client=self.http_client) - self.handler = OidcHandler(hs) 
+ self.handler = hs.get_oidc_handler() + sso_handler = hs.get_sso_handler() # Mock the render error method. self.render_error = Mock(return_value=None) - self.handler._sso_handler.render_error = self.render_error + sso_handler.render_error = self.render_error # Reduce the number of attempts when generating MXIDs. - self.handler._sso_handler._MAP_USERNAME_RETRIES = 3 + sso_handler._MAP_USERNAME_RETRIES = 3 return hs @@ -832,7 +832,7 @@ class OidcHandlerTestCase(HomeserverTestCase): # test_user is already taken, so test_user1 gets registered instead. self.assertEqual(mxid, "@test_user1:test") - # Register all of the potential users for a particular username. + # Register all of the potential mxids for a particular OIDC username. self.get_success( store.register_user(user_id="@tester:test", password_hash=None) ) diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py new file mode 100644 index 0000000000..79fd47036f --- /dev/null +++ b/tests/handlers/test_saml.py @@ -0,0 +1,136 @@ +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import attr + +from synapse.handlers.sso import MappingException + +from tests.unittest import HomeserverTestCase + +# These are a few constants that are used as config parameters in the tests. +BASE_URL = "https://synapse/" + + +@attr.s +class FakeAuthnResponse: + ava = attr.ib(type=dict) + + +class TestMappingProvider: + def __init__(self, config, module): + pass + + @staticmethod + def parse_config(config): + return + + @staticmethod + def get_saml_attributes(config): + return {"uid"}, {"displayName"} + + def get_remote_user_id(self, saml_response, client_redirect_url): + return saml_response.ava["uid"] + + def saml_response_to_user_attributes( + self, saml_response, failures, client_redirect_url + ): + localpart = saml_response.ava["username"] + (str(failures) if failures else "") + return {"mxid_localpart": localpart, "displayname": None} + + +class SamlHandlerTestCase(HomeserverTestCase): + def default_config(self): + config = super().default_config() + config["public_baseurl"] = BASE_URL + saml_config = { + "sp_config": {"metadata": {}}, + # Disable grandfathering. + "grandfathered_mxid_source_attribute": None, + "user_mapping_provider": {"module": __name__ + ".TestMappingProvider"}, + } + config["saml2_config"] = saml_config + + return config + + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + + self.handler = hs.get_saml_handler() + + # Reduce the number of attempts when generating MXIDs. + sso_handler = hs.get_sso_handler() + sso_handler._MAP_USERNAME_RETRIES = 3 + + return hs + + def test_map_saml_response_to_user(self): + """Ensure that mapping the SAML response returned from a provider to an MXID works properly.""" + saml_response = FakeAuthnResponse({"uid": "test_user", "username": "test_user"}) + # The redirect_url doesn't matter with the default user mapping provider. 
+ redirect_url = "" + mxid = self.get_success( + self.handler._map_saml_response_to_user( + saml_response, redirect_url, "user-agent", "10.10.10.10" + ) + ) + self.assertEqual(mxid, "@test_user:test") + + def test_map_saml_response_to_invalid_localpart(self): + """If the mapping provider generates an invalid localpart it should be rejected.""" + saml_response = FakeAuthnResponse({"uid": "test", "username": "föö"}) + redirect_url = "" + e = self.get_failure( + self.handler._map_saml_response_to_user( + saml_response, redirect_url, "user-agent", "10.10.10.10" + ), + MappingException, + ) + self.assertEqual(str(e.value), "localpart is invalid: föö") + + def test_map_saml_response_to_user_retries(self): + """The mapping provider can retry generating an MXID if the MXID is already in use.""" + store = self.hs.get_datastore() + self.get_success( + store.register_user(user_id="@test_user:test", password_hash=None) + ) + saml_response = FakeAuthnResponse({"uid": "test", "username": "test_user"}) + redirect_url = "" + mxid = self.get_success( + self.handler._map_saml_response_to_user( + saml_response, redirect_url, "user-agent", "10.10.10.10" + ) + ) + # test_user is already taken, so test_user1 gets registered instead. + self.assertEqual(mxid, "@test_user1:test") + + # Register all of the potential mxids for a particular SAML username. + self.get_success( + store.register_user(user_id="@tester:test", password_hash=None) + ) + for i in range(1, 3): + self.get_success( + store.register_user(user_id="@tester%d:test" % i, password_hash=None) + ) + + # Now attempt to map to a username, this will fail since all potential usernames are taken. + saml_response = FakeAuthnResponse({"uid": "tester", "username": "tester"}) + e = self.get_failure( + self.handler._map_saml_response_to_user( + saml_response, redirect_url, "user-agent", "10.10.10.10" + ), + MappingException, + ) + self.assertEqual( + str(e.value), "Unable to generate a Matrix ID from the SSO response" + ) -- cgit 1.5.1 From 8388384a640d3381b5858d3fb1d2ea0a8c9c059c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Dec 2020 07:45:42 -0500 Subject: Fix a regression when grandfathering SAML users. (#8855) This was broken in #8801 when abstracting code shared with OIDC. After this change both SAML and OIDC have a concept of grandfathering users, but with different implementations. --- changelog.d/8855.feature | 1 + synapse/handlers/oidc_handler.py | 30 ++++++++++++++++++-- synapse/handlers/saml_handler.py | 9 +++--- synapse/handlers/sso.py | 60 +++++++++++++--------------------------- tests/handlers/test_oidc.py | 8 ++++++ tests/handlers/test_saml.py | 34 ++++++++++++++++++++++- 6 files changed, 94 insertions(+), 48 deletions(-) create mode 100644 changelog.d/8855.feature (limited to 'tests') diff --git a/changelog.d/8855.feature b/changelog.d/8855.feature new file mode 100644 index 0000000000..77f7fe4e5d --- /dev/null +++ b/changelog.d/8855.feature @@ -0,0 +1 @@ +Add support for re-trying generation of a localpart for OpenID Connect mapping providers. 
diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 78c4e94a9d..55c4377890 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -39,7 +39,7 @@ from synapse.handlers._base import BaseHandler from synapse.handlers.sso import MappingException, UserAttributes from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable -from synapse.types import JsonDict, map_username_to_mxid_localpart +from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart from synapse.util import json_decoder if TYPE_CHECKING: @@ -898,13 +898,39 @@ class OidcHandler(BaseHandler): return UserAttributes(**attributes) + async def grandfather_existing_users() -> Optional[str]: + if self._allow_existing_users: + # If allowing existing users we want to generate a single localpart + # and attempt to match it. + attributes = await oidc_response_to_user_attributes(failures=0) + + user_id = UserID(attributes.localpart, self.server_name).to_string() + users = await self.store.get_users_by_id_case_insensitive(user_id) + if users: + # If an existing matrix ID is returned, then use it. + if len(users) == 1: + previously_registered_user_id = next(iter(users)) + elif user_id in users: + previously_registered_user_id = user_id + else: + # Do not attempt to continue generating Matrix IDs. + raise MappingException( + "Attempted to login as '{}' but it matches more than one user inexactly: {}".format( + user_id, users + ) + ) + + return previously_registered_user_id + + return None + return await self._sso_handler.get_mxid_from_sso( self._auth_provider_id, remote_user_id, user_agent, ip_address, oidc_response_to_user_attributes, - self._allow_existing_users, + grandfather_existing_users, ) diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 7ffad7d8af..76d4169fe2 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -268,7 +268,7 @@ class SamlHandler(BaseHandler): emails=result.get("emails", []), ) - with (await self._mapping_lock.queue(self._auth_provider_id)): + async def grandfather_existing_users() -> Optional[str]: # backwards-compatibility hack: see if there is an existing user with a # suitable mapping from the uid if ( @@ -290,17 +290,18 @@ class SamlHandler(BaseHandler): if users: registered_user_id = list(users.keys())[0] logger.info("Grandfathering mapping to %s", registered_user_id) - await self.store.record_user_external_id( - self._auth_provider_id, remote_user_id, registered_user_id - ) return registered_user_id + return None + + with (await self._mapping_lock.queue(self._auth_provider_id)): return await self._sso_handler.get_mxid_from_sso( self._auth_provider_id, remote_user_id, user_agent, ip_address, saml_response_to_remapped_user_attributes, + grandfather_existing_users, ) def expire_sessions(self): diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index d963082210..f42b90e1bc 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -116,7 +116,7 @@ class SsoHandler(BaseHandler): user_agent: str, ip_address: str, sso_to_matrix_id_mapper: Callable[[int], Awaitable[UserAttributes]], - allow_existing_users: bool = False, + grandfather_existing_users: Optional[Callable[[], Awaitable[Optional[str]]]], ) -> str: """ Given an SSO ID, retrieve the user ID for it and possibly register the user. 
@@ -125,6 +125,10 @@ if it has that matrix ID is returned regardless of the current mapping logic. + If a callable is provided for grandfathering users, it is called and can + potentially return a matrix ID to use. If it does, the SSO ID is linked to + this matrix ID for subsequent calls. + The mapping function is called (potentially multiple times) to generate a localpart for the user. @@ -132,17 +136,6 @@ given user-agent and IP address and the SSO ID is linked to this matrix ID for subsequent calls. - If allow_existing_users is true the mapping function is only called once - and results in: - - 1. The use of a previously registered matrix ID. In this case, the - SSO ID is linked to the matrix ID. (Note it is possible that - other SSO IDs are linked to the same matrix ID.) - 2. An unused localpart, in which case the user is registered (as - discussed above). - 3. An error if the generated localpart matches multiple pre-existing - matrix IDs. Generally this should not happen. - Args: auth_provider_id: A unique identifier for this SSO provider, e.g. "oidc" or "saml". @@ -152,8 +145,9 @@ sso_to_matrix_id_mapper: A callable to generate the user attributes. The only parameter is an integer which represents the amount of times the returned mxid localpart mapping has failed. - allow_existing_users: True if the localpart returned from the - mapping provider can be linked to an existing matrix ID. + grandfather_existing_users: A callable which can return a previously + existing matrix ID. The SSO ID is then linked to the returned + matrix ID. Returns: The user ID associated with the SSO response. @@ -171,6 +165,16 @@ if previously_registered_user_id: return previously_registered_user_id + # Check for grandfathering of users. + if grandfather_existing_users: + previously_registered_user_id = await grandfather_existing_users() + if previously_registered_user_id: + # Future logins should also match this user ID. + await self.store.record_user_external_id( + auth_provider_id, remote_user_id, previously_registered_user_id + ) + return previously_registered_user_id + + # Otherwise, generate a new user. for i in range(self._MAP_USERNAME_RETRIES): try: @@ -194,33 +198,7 @@ # Check if this mxid already exists user_id = UserID(attributes.localpart, self.server_name).to_string() - users = await self.store.get_users_by_id_case_insensitive(user_id) - # Note, if allow_existing_users is true then the loop is guaranteed - # to end on the first iteration: either by matching an existing user, - # raising an error, or registering a new user. See the docstring for - # more in-depth an explanation. - if users and allow_existing_users: - # If an existing matrix ID is returned, then use it. - if len(users) == 1: - previously_registered_user_id = next(iter(users)) - elif user_id in users: - previously_registered_user_id = user_id - else: - # Do not attempt to continue generating Matrix IDs. - raise MappingException( - "Attempted to login as '{}' but it matches more than one user inexactly: {}".format( - user_id, users - ) - ) - - # Future logins should also match this user ID.
- await self.store.record_user_external_id( - auth_provider_id, remote_user_id, previously_registered_user_id - ) - - return previously_registered_user_id - - elif not users: + if not await self.store.get_users_by_id_case_insensitive(user_id): # This mxid is free break else: diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index c9807a7b73..d485af52fd 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -731,6 +731,14 @@ class OidcHandlerTestCase(HomeserverTestCase): ) self.assertEqual(mxid, "@test_user:test") + # Subsequent calls should map to the same mxid. + mxid = self.get_success( + self.handler._map_userinfo_to_user( + userinfo, token, "user-agent", "10.10.10.10" + ) + ) + self.assertEqual(mxid, "@test_user:test") + # Note that a second SSO user can be mapped to the same Matrix ID. (This # requires a unique sub, but something that maps to the same matrix ID, # in this case we'll just use the same username. A more realistic example diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index 79fd47036f..e1e13a5faf 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -16,7 +16,7 @@ import attr from synapse.handlers.sso import MappingException -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, override_config # These are a few constants that are used as config parameters in the tests. BASE_URL = "https://synapse/" @@ -59,6 +59,10 @@ class SamlHandlerTestCase(HomeserverTestCase): "grandfathered_mxid_source_attribute": None, "user_mapping_provider": {"module": __name__ + ".TestMappingProvider"}, } + + # Update this config with what's in the default config so that + # override_config works as expected. + saml_config.update(config.get("saml2_config", {})) config["saml2_config"] = saml_config return config @@ -86,6 +90,34 @@ class SamlHandlerTestCase(HomeserverTestCase): ) self.assertEqual(mxid, "@test_user:test") + @override_config({"saml2_config": {"grandfathered_mxid_source_attribute": "mxid"}}) + def test_map_saml_response_to_existing_user(self): + """Existing users can log in with SAML account.""" + store = self.hs.get_datastore() + self.get_success( + store.register_user(user_id="@test_user:test", password_hash=None) + ) + + # Map a user via SSO. + saml_response = FakeAuthnResponse( + {"uid": "tester", "mxid": ["test_user"], "username": "test_user"} + ) + redirect_url = "" + mxid = self.get_success( + self.handler._map_saml_response_to_user( + saml_response, redirect_url, "user-agent", "10.10.10.10" + ) + ) + self.assertEqual(mxid, "@test_user:test") + + # Subsequent calls should map to the same mxid. + mxid = self.get_success( + self.handler._map_saml_response_to_user( + saml_response, redirect_url, "user-agent", "10.10.10.10" + ) + ) + self.assertEqual(mxid, "@test_user:test") + def test_map_saml_response_to_invalid_localpart(self): """If the mapping provider generates an invalid localpart it should be rejected.""" saml_response = FakeAuthnResponse({"uid": "test", "username": "föö"}) -- cgit 1.5.1 From 22c6c19f91d7325c82eddfada696826adad69e5b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 4 Dec 2020 08:25:15 -0500 Subject: Fix a regression that mapping providers should be able to redirect users. (#8878) This was broken in #8801. 
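To make the restored behaviour concrete before the diff: a minimal sketch, not part of the patch, of a SAML mapping provider that redirects instead of mapping. It mirrors the TestRedirectMappingProvider added in the tests below; the redirect URL is a placeholder.

from synapse.api.errors import RedirectException

class UsernamePickerMappingProvider:
    """Sketch only: sends SSO users to a custom page rather than mapping them
    directly. The provider is then responsible for completing registration
    (or redirecting back to client_redirect_url) itself."""

    def __init__(self, config, module):
        pass

    @staticmethod
    def parse_config(config):
        return config

    @staticmethod
    def get_saml_attributes(config):
        return {"uid"}, set()

    def get_remote_user_id(self, saml_response, client_redirect_url):
        return saml_response.ava["uid"]

    def saml_response_to_user_attributes(
        self, saml_response, failures, client_redirect_url
    ):
        # Placeholder URL: any page that collects the missing information.
        raise RedirectException(b"https://example.com/pick-a-username")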
--- changelog.d/8878.bugfix | 1 + docs/sso_mapping_providers.md | 7 +++++++ synapse/handlers/oidc_handler.py | 2 +- synapse/handlers/sso.py | 27 ++++++++++++++++++++++----- tests/handlers/test_oidc.py | 3 +-- tests/handlers/test_saml.py | 28 ++++++++++++++++++++++++++++ 6 files changed, 60 insertions(+), 8 deletions(-) create mode 100644 changelog.d/8878.bugfix (limited to 'tests') diff --git a/changelog.d/8878.bugfix b/changelog.d/8878.bugfix new file mode 100644 index 0000000000..e53005ee1c --- /dev/null +++ b/changelog.d/8878.bugfix @@ -0,0 +1 @@ +Fix a regression in v1.24.0rc1 which prevented SAML mapping providers from redirecting users to an additional page. diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md index dee53b5d40..ab2a648910 100644 --- a/docs/sso_mapping_providers.md +++ b/docs/sso_mapping_providers.md @@ -168,6 +168,13 @@ A custom mapping provider must specify the following methods: the value of `mxid_localpart`. * `emails` - A list of emails for the new user. If not provided, will default to an empty list. + + Alternatively it can raise a `synapse.api.errors.RedirectException` to + redirect the user to another page. This is useful to prompt the user for + additional information, e.g. if you want them to provide their own username. + It is the responsibility of the mapping provider to either redirect back + to `client_redirect_url` (including any additional information) or to + complete registration using methods from the `ModuleApi`. ### Default SAML Mapping Provider diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 55c4377890..c605f7082a 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -888,7 +888,7 @@ class OidcHandler(BaseHandler): # continue to already be in use. Note that the error raised is # arbitrary and will get turned into a MappingException. if failures: - raise RuntimeError( + raise MappingException( "Mapping provider does not support de-duplicating Matrix IDs" ) diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index f42b90e1bc..47ad96f97e 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional import attr +from synapse.api.errors import RedirectException from synapse.handlers._base import BaseHandler from synapse.http.server import respond_with_html from synapse.types import UserID, contains_invalid_mxid_characters @@ -28,7 +29,9 @@ logger = logging.getLogger(__name__) class MappingException(Exception): - """Used to catch errors when mapping the UserInfo object + """Used to catch errors when mapping an SSO response to user attributes. + + Note that the msg that is raised is shown to end-users. """ @@ -145,6 +148,14 @@ class SsoHandler(BaseHandler): sso_to_matrix_id_mapper: A callable to generate the user attributes. The only parameter is an integer which represents the amount of times the returned mxid localpart mapping has failed. + + It is expected that the mapper can raise two exceptions, which + will get passed through to the caller: + + MappingException if there was a problem mapping the response + to the user. + RedirectException to redirect to an additional page (e.g. + to prompt the user for more information). grandfather_existing_users: A callable which can return a previously existing matrix ID. The SSO ID is then linked to the returned matrix ID.
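As context for the hunks above and below: get_mxid_from_sso calls the mapper repeatedly, passing how many generated localparts were already taken (up to _MAP_USERNAME_RETRIES). A minimal sketch, not part of the patch, of the suffixing convention that the default and test providers use for that counter:

def localpart_for_attempt(base: str, failures: int) -> str:
    # `failures` is the number of earlier candidates that were already taken.
    return base + (str(failures) if failures else "")

assert localpart_for_attempt("test_user", 0) == "test_user"
assert localpart_for_attempt("test_user", 1) == "test_user1"  # as in the tests above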
@@ -154,8 +165,8 @@ class SsoHandler(BaseHandler): Raises: MappingException if there was a problem mapping the response to a user. - RedirectException: some mapping providers may raise this if they need - to redirect to an interstitial page. + RedirectException: if the mapping provider needs to redirect the user + to an additional page. (e.g. to prompt for more information) """ # first of all, check if we already have a mapping for this user @@ -179,10 +190,16 @@ class SsoHandler(BaseHandler): for i in range(self._MAP_USERNAME_RETRIES): try: attributes = await sso_to_matrix_id_mapper(i) + except (RedirectException, MappingException): + # Mapping providers are allowed to issue a redirect (e.g. to ask + # the user for more information) and can issue a mapping exception + # if a name cannot be generated. + raise except Exception as e: + # Any other exception is unexpected. raise MappingException( - "Could not extract user attributes from SSO response: " + str(e) - ) + "Could not extract user attributes from SSO response." + ) from e logger.debug( "Retrieved user attributes from user mapping provider: %r (attempt %d)", diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index d485af52fd..a308c46da9 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -705,8 +705,7 @@ class OidcHandlerTestCase(HomeserverTestCase): MappingException, ) self.assertEqual( - str(e.value), - "Could not extract user attributes from SSO response: Mapping provider does not support de-duplicating Matrix IDs", + str(e.value), "Mapping provider does not support de-duplicating Matrix IDs", ) @override_config({"oidc_config": {"allow_existing_users": True}}) diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index e1e13a5faf..45dc17aba5 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -14,6 +14,7 @@ import attr +from synapse.api.errors import RedirectException from synapse.handlers.sso import MappingException from tests.unittest import HomeserverTestCase, override_config @@ -49,6 +50,13 @@ class TestMappingProvider: return {"mxid_localpart": localpart, "displayname": None} +class TestRedirectMappingProvider(TestMappingProvider): + def saml_response_to_user_attributes( + self, saml_response, failures, client_redirect_url + ): + raise RedirectException(b"https://custom-saml-redirect/") + + class SamlHandlerTestCase(HomeserverTestCase): def default_config(self): config = super().default_config() @@ -166,3 +174,23 @@ class SamlHandlerTestCase(HomeserverTestCase): self.assertEqual( str(e.value), "Unable to generate a Matrix ID from the SSO response" ) + + @override_config( + { + "saml2_config": { + "user_mapping_provider": { + "module": __name__ + ".TestRedirectMappingProvider" + }, + } + } + ) + def test_map_saml_response_redirect(self): + saml_response = FakeAuthnResponse({"uid": "test", "username": "test_user"}) + redirect_url = "" + e = self.get_failure( + self.handler._map_saml_response_to_user( + saml_response, redirect_url, "user-agent", "10.10.10.10" + ), + RedirectException, + ) + self.assertEqual(e.value.location, b"https://custom-saml-redirect/") -- cgit 1.5.1
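Finally, tying back to the PasswordProvider wrapper introduced in #8849 above: a minimal sketch, not part of any patch in this series, of the optional module surface that the wrapper normalises. All names here are illustrative; the wrapper grandfathers check_password in as m.login.password, and a bare user-ID string returned from check_auth or check_3pid_auth is converted into a (user_id, None) tuple.

class ExampleAuthProvider:
    """Sketch of a password auth provider module: every hook below may be
    omitted, and PasswordProvider papers over the differences."""

    def __init__(self, config, account_handler):
        self._account_handler = account_handler

    @staticmethod
    def parse_config(config):
        return config

    def get_supported_login_types(self):
        return {"test.login_type": ["test_field"]}

    async def check_auth(self, username, login_type, login_dict):
        # Returning a bare user-ID string is still accepted; the wrapper
        # converts it into (user_id, None).
        if login_dict.get("test_field"):  # placeholder credential check
            return self._account_handler.get_qualified_user_id(username)
        return None

    async def on_logged_out(self, user_id, device_id, access_token):
        pass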