-rw-r--r--  changelog.d/6126.feature                           |   1
-rw-r--r--  changelog.d/6418.bugfix                            |   1
-rw-r--r--  changelog.d/6563.bugfix                            |   1
-rw-r--r--  changelog.d/6657.bugfix                            |   1
-rw-r--r--  synapse/federation/sender/__init__.py              |  18
-rw-r--r--  synapse/federation/sender/per_destination_queue.py |  15
-rw-r--r--  synapse/federation/sender/transaction_manager.py   |   4
-rw-r--r--  synapse/federation/transport/client.py             |   4
-rw-r--r--  synapse/handlers/message.py                        |   2
-rw-r--r--  synapse/handlers/room_list.py                      |   3
-rw-r--r--  synapse/handlers/room_member.py                    |  46
-rw-r--r--  synapse/handlers/sync.py                           |   5
-rw-r--r--  synapse/http/federation/well_known_resolver.py     |   4
-rw-r--r--  synapse/push/httppusher.py                         |   4
-rw-r--r--  synapse/replication/tcp/streams/_base.py           |   2
-rw-r--r--  synapse/rest/key/v2/remote_key_resource.py         |  30
-rw-r--r--  synapse/storage/data_stores/main/__init__.py       |   4
-rw-r--r--  synapse/storage/data_stores/main/client_ips.py     |   2
-rw-r--r--  synapse/storage/data_stores/main/search.py         |   2
-rw-r--r--  tests/rest/admin/test_admin.py                     |  41
-rw-r--r--  tests/rest/key/__init__.py                         |   0
-rw-r--r--  tests/rest/key/v2/__init__.py                      |   0
-rw-r--r--  tests/rest/key/v2/test_remote_key_resource.py      | 135
23 files changed, 276 insertions, 49 deletions
diff --git a/changelog.d/6126.feature b/changelog.d/6126.feature
new file mode 100644

index 0000000000..1207ba6206
--- /dev/null
+++ b/changelog.d/6126.feature
@@ -0,0 +1 @@
+Group events into larger federation transactions at times of high traffic.
diff --git a/changelog.d/6418.bugfix b/changelog.d/6418.bugfix
new file mode 100644
index 0000000000..a1f488d3a2
--- /dev/null
+++ b/changelog.d/6418.bugfix
@@ -0,0 +1 @@
+Fix phone home stats reporting.
diff --git a/changelog.d/6563.bugfix b/changelog.d/6563.bugfix
new file mode 100644
index 0000000000..3325fb1dcf
--- /dev/null
+++ b/changelog.d/6563.bugfix
@@ -0,0 +1 @@
+Fix GET request on /_synapse/admin/v2/users endpoint. Contributed by Awesome Technologies Innovationslabor GmbH.
\ No newline at end of file
diff --git a/changelog.d/6657.bugfix b/changelog.d/6657.bugfix
new file mode 100644
index 0000000000..94e51a9896
--- /dev/null
+++ b/changelog.d/6657.bugfix
@@ -0,0 +1 @@
+Fix incorrect signing of responses from the key server implementation.
\ No newline at end of file
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 4ebb0e8bc0..f7065517e5 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -152,9 +152,24 @@ class FederationSender(object):
     @defer.inlineCallbacks
     def _process_event_queue_loop(self):
+        loop_start_time = self.clock.time_msec()
         try:
             self._is_processing = True
             while True:
+                # if we've been going around this loop for a long time without
+                # catching up, deprioritise transaction transmission. This should mean
+                # that events get batched into fewer transactions, which is more
+                # efficient, and hence give us a chance to catch up
+                if (
+                    self.clock.time_msec() - loop_start_time > 60 * 1000
+                    and not self._transaction_manager.deprioritise_transmission
+                ):
+                    logger.warning(
+                        "Event queue is getting behind: deprioritising transaction "
+                        "transmission"
+                    )
+                    self._transaction_manager.deprioritise_transmission = True
+
                 last_token = yield self.store.get_federation_out_pos("events")
                 next_token, events = yield self.store.get_all_new_events_stream(
                     last_token, self._last_poked_id, limit=100
                 )
@@ -252,6 +267,9 @@ class FederationSender(object):
         finally:
             self._is_processing = False
+            if self._transaction_manager.deprioritise_transmission:
+                logger.info("Event queue caught up: re-prioritising transmission")
+                self._transaction_manager.deprioritise_transmission = False

     def _send_pdu(self, pdu, destinations):
         # We loop through all destinations to see whether we already have
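Note: the deprioritise_transmission flag set in this loop is consumed by each
PerDestinationQueue (two files below). As a standalone illustration of the
pattern, a processing loop that raises a shared back-pressure flag once it has
been behind for a minute and clears it on exit, here is a minimal Python
sketch with hypothetical names (not Synapse code):

    import time

    DEPRIORITISE_AFTER_MS = 60 * 1000  # same threshold as the hunk above

    class SharedFlag:
        # stands in for TransactionManager; only the flag matters here
        deprioritise_transmission = False

    def process_events(flag, fetch_batch):
        # fetch_batch is a hypothetical callable returning the next batch (or None)
        loop_start_ms = time.monotonic() * 1000
        try:
            while True:
                if (
                    time.monotonic() * 1000 - loop_start_ms > DEPRIORITISE_AFTER_MS
                    and not flag.deprioritise_transmission
                ):
                    # falling behind: ask senders to back off so events batch up
                    flag.deprioritise_transmission = True
                batch = fetch_batch()
                if not batch:
                    return
        finally:
            if flag.deprioritise_transmission:
                # caught up (or exiting): restore normal transmission priority
                flag.deprioritise_transmission = False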
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index a5b36b1827..a7c296e880 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -15,6 +15,7 @@
 # limitations under the License.

 import datetime
 import logging
+import random

 from prometheus_client import Counter
@@ -36,6 +37,8 @@ from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
 # This is defined in the Matrix spec and enforced by the receiver.
 MAX_EDUS_PER_TRANSACTION = 100

+DEPRIORITISE_SLEEP_TIME = 10
+
 logger = logging.getLogger(__name__)

@@ -189,6 +192,18 @@ class PerDestinationQueue(object):
             pending_pdus = []
             while True:
+                if self._transaction_manager.deprioritise_transmission:
+                    # if the event-processing loop has got behind, sleep to give it
+                    # a chance to catch up. Add some randomness so that the transmitters
+                    # don't all wake up in sync.
+                    sleeptime = random.uniform(
+                        DEPRIORITISE_SLEEP_TIME, DEPRIORITISE_SLEEP_TIME * 2
+                    )
+                    logger.info(
+                        "TX [%s]: sleeping for %f seconds", self._destination, sleeptime
+                    )
+                    yield self._clock.sleep(sleeptime)
+
                 # We have to keep 2 free slots for presence and rr_edus
                 limit = MAX_EDUS_PER_TRANSACTION - 2
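Note: random.uniform(T, 2 * T) above is the standard jitter trick for
desynchronising many sleepers. A quick standalone illustration (plain Python,
no Twisted):

    import random

    DEPRIORITISE_SLEEP_TIME = 10  # seconds, as defined in the hunk above

    # five hypothetical transmitters each pick a delay in [10, 20) seconds,
    # so they wake up staggered rather than all at once
    delays = [
        random.uniform(DEPRIORITISE_SLEEP_TIME, DEPRIORITISE_SLEEP_TIME * 2)
        for _ in range(5)
    ]
    print(sorted(delays))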
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index 5fed626d5b..ca558fa242 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -49,6 +49,10 @@ class TransactionManager(object):
         # HACK to get unique tx id
         self._next_txn_id = int(self.clock.time_msec())

+        # the federation sender sometimes sets this to delay transaction transmission,
+        # if the sender gets behind.
+        self.deprioritise_transmission = False
+
     @measure_func("_send_new_transaction")
     @defer.inlineCallbacks
     def send_new_transaction(self, destination, pending_pdus, pending_edus):
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 198257414b..e8bbf4816e 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -151,7 +151,7 @@ class TransportLayerClient(object):
         # generated by the json_data_callback.
         json_data = transaction.get_dict()

-        path = _create_v1_path("/send/%s", transaction.transaction_id)
+        path = _create_v1_path("/send/%s/", transaction.transaction_id)

         response = yield self.client.put_json(
             transaction.destination,
@@ -160,7 +160,7 @@ class TransportLayerClient(object):
             json_data_callback=json_data_callback,
             long_retries=True,
             backoff_on_404=True,  # If we get a 404 the other side has gone
-            try_trailing_slash_on_400=True,
+            # try_trailing_slash_on_400=True,
         )

         return response
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 8ea3aca2f4..ce4fd6d2cb 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -229,7 +229,7 @@ class MessageHandler(object):
         # If this is an AS, double check that they are allowed to see the members.
         # This can either be because the AS user is in the room or because there
         # is a user in the room that the AS is "interested in"
-        if requester.app_service and user_id not in users_with_profile:
+        if False and requester.app_service and user_id not in users_with_profile:
             for uid in users_with_profile:
                 if requester.app_service.is_interested_in_user(uid):
                     break
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index c615206df1..2252a86f77 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -43,7 +43,8 @@ class RoomListHandler(BaseHandler):
     def __init__(self, hs):
         super(RoomListHandler, self).__init__(hs)
         self.enable_room_list_search = hs.config.enable_room_list_search
-        self.response_cache = ResponseCache(hs, "room_list")
+
+        self.response_cache = ResponseCache(hs, "room_list", timeout_ms=10 * 60 * 1000)
         self.remote_response_cache = ResponseCache(
             hs, "remote_room_list", timeout_ms=30 * 1000
         )
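Note: ResponseCache normally only deduplicates requests that are in flight;
passing timeout_ms also keeps the completed result, so the public room list is
now served from cache for up to 10 minutes. A toy synchronous stand-in for the
idea (the real class is Deferred-based and its API may differ):

    import time

    class TinyResponseCache:
        # toy stand-in for Synapse's ResponseCache: keeps a computed result
        # for timeout_ms so repeated requests coalesce onto one computation
        def __init__(self, timeout_ms):
            self._timeout_s = timeout_ms / 1000.0
            self._entries = {}  # key -> (expiry_monotonic, result)

        def wrap(self, key, fn, *args):
            now = time.monotonic()
            hit = self._entries.get(key)
            if hit is not None and hit[0] > now:
                return hit[1]  # served from cache; fn not re-run
            result = fn(*args)
            self._entries[key] = (now + self._timeout_s, result)
            return result

    cache = TinyResponseCache(timeout_ms=10 * 60 * 1000)
    first = cache.wrap("room_list", lambda: {"chunk": [], "total": 0})
    second = cache.wrap("room_list", lambda: {"chunk": ["changed"]})
    assert first is second  # the second call hit the cache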
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 03bb52ccfb..28097f90c0 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -62,6 +62,7 @@ class RoomMemberHandler(object):
         self.event_creation_handler = hs.get_event_creation_handler()

         self.member_linearizer = Linearizer(name="member")
+        self.member_limiter = Linearizer(max_count=10, name="member_as_limiter")

         self.clock = hs.get_clock()
         self.spam_checker = hs.get_spam_checker()
@@ -269,19 +270,38 @@ class RoomMemberHandler(object):
     ):
         key = (room_id,)

-        with (yield self.member_linearizer.queue(key)):
-            result = yield self._update_membership(
-                requester,
-                target,
-                room_id,
-                action,
-                txn_id=txn_id,
-                remote_room_hosts=remote_room_hosts,
-                third_party_signed=third_party_signed,
-                ratelimit=ratelimit,
-                content=content,
-                require_consent=require_consent,
-            )
+        as_id = object()
+        if requester.app_service:
+            as_id = requester.app_service.id
+
+        then = self.clock.time_msec()
+
+        with (yield self.member_limiter.queue(as_id)):
+            diff = self.clock.time_msec() - then
+
+            if diff > 80 * 1000:
+                # haproxy would have timed the request out anyway...
+                raise SynapseError(504, "took too long to process")
+
+            with (yield self.member_linearizer.queue(key)):
+                diff = self.clock.time_msec() - then
+
+                if diff > 80 * 1000:
+                    # haproxy would have timed the request out anyway...
+                    raise SynapseError(504, "took too long to process")
+
+                result = yield self._update_membership(
+                    requester,
+                    target,
+                    room_id,
+                    action,
+                    txn_id=txn_id,
+                    remote_room_hosts=remote_room_hosts,
+                    third_party_signed=third_party_signed,
+                    ratelimit=ratelimit,
+                    content=content,
+                    require_consent=require_consent,
+                )

         return result
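Note: Linearizer(max_count=10) behaves like a per-key semaphore: up to ten
membership requests per appservice proceed concurrently and the rest queue
behind them, while the as_id = object() fallback gives every non-appservice
request its own unique key so ordinary users never contend. A toy asyncio
analogue (Synapse's Linearizer is Twisted-based; all names here are
illustrative):

    import asyncio
    from collections import defaultdict

    class KeyedLimiter:
        # toy analogue of Linearizer(max_count=N): at most N concurrent
        # holders per key; further callers wait their turn
        def __init__(self, max_count):
            self._semaphores = defaultdict(lambda: asyncio.Semaphore(max_count))

        def queue(self, key):
            return self._semaphores[key]

    async def handle_member_event(limiter, as_id):
        async with limiter.queue(as_id):
            await asyncio.sleep(0.01)  # stand-in for _update_membership

    async def main():
        limiter = KeyedLimiter(max_count=10)
        # 25 requests from one appservice: at most 10 run at a time
        await asyncio.gather(*(handle_member_event(limiter, "as1") for _ in range(25)))

    asyncio.run(main())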
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 2d3b8ba73c..ca5eb04735 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -40,6 +40,7 @@ logger = logging.getLogger(__name__)
 # Debug logger for https://github.com/matrix-org/synapse/issues/4422
 issue4422_logger = logging.getLogger("synapse.handler.sync.4422_debug")

+SYNC_RESPONSE_CACHE_MS = 2 * 60 * 1000

 # Counts the number of times we returned a non-empty sync. `type` is one of
 # "initial_sync", "full_state_sync" or "incremental_sync", `lazy_loaded` is
@@ -225,7 +226,9 @@ class SyncHandler(object):
         self.presence_handler = hs.get_presence_handler()
         self.event_sources = hs.get_event_sources()
         self.clock = hs.get_clock()
-        self.response_cache = ResponseCache(hs, "sync")
+        self.response_cache = ResponseCache(
+            hs, "sync", timeout_ms=SYNC_RESPONSE_CACHE_MS
+        )
         self.state = hs.get_state_handler()
         self.auth = hs.get_auth()
         self.storage = hs.get_storage()
diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py
index 7ddfad286d..b82c8a84f4 100644
--- a/synapse/http/federation/well_known_resolver.py
+++ b/synapse/http/federation/well_known_resolver.py
@@ -103,6 +103,10 @@ class WellKnownResolver(object):
         Returns:
             Deferred[WellKnownLookupResult]: The result of the lookup
         """
+
+        if server_name == b"kde.org":
+            return WellKnownLookupResult(delegated_server=b"kde.modular.im:443")
+
         try:
             prev_result, expiry, ttl = self._well_known_cache.get_with_expiry(
                 server_name
             )
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index d0879b0490..afa9ef31bf 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -103,6 +103,10 @@ class HttpPusher(object):
         if "url" not in self.data:
             raise PusherConfigException("'url' required in data for HTTP pusher")
         self.url = self.data["url"]
+        self.url = self.url.replace(
+            "https://matrix.org/_matrix/push/v1/notify",
+            "http://10.103.0.7/_matrix/push/v1/notify",
+        )
         self.http_client = hs.get_proxied_http_client()
         self.data_minus_url = {}
         self.data_minus_url.update(self.data)
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 8512923eae..1fff1f4374 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -24,7 +24,7 @@ from twisted.internet import defer
 logger = logging.getLogger(__name__)


-MAX_EVENTS_BEHIND = 10000
+MAX_EVENTS_BEHIND = 500000

 BackfillStreamRow = namedtuple(
     "BackfillStreamRow",
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index bf5e0eb844..e7fc3f0431 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -15,7 +15,6 @@
 import logging

 from canonicaljson import encode_canonical_json, json
-from signedjson.key import encode_verify_key_base64
 from signedjson.sign import sign_json

 from twisted.internet import defer
@@ -217,28 +216,15 @@ class RemoteKey(DirectServeResource):
         if cache_misses and query_remote_on_cache_miss:
             yield self.fetcher.get_keys(cache_misses)
             yield self.query_keys(request, query, query_remote_on_cache_miss=False)
-            return
-
-        signed_keys = []
-        for key_json in json_results:
-            key_json = json.loads(key_json)
-
-            # backwards-compatibility hack for #6596: if the requested key belongs
-            # to us, make sure that all of the signing keys appear in the
-            # "verify_keys" section.
-            if key_json["server_name"] == self.config.server_name:
-                verify_keys = key_json["verify_keys"]
+        else:
+            signed_keys = []
+            for key_json in json_results:
+                key_json = json.loads(key_json)
                 for signing_key in self.config.key_server_signing_keys:
-                    key_id = "%s:%s" % (signing_key.alg, signing_key.version)
-                    verify_keys[key_id] = {
-                        "key": encode_verify_key_base64(signing_key.verify_key)
-                    }
-
-            for signing_key in self.config.key_server_signing_keys:
-                key_json = sign_json(key_json, self.config.server_name, signing_key)
+                    key_json = sign_json(key_json, self.config.server_name, signing_key)

-            signed_keys.append(key_json)
+                signed_keys.append(key_json)

-        results = {"server_keys": signed_keys}
+            results = {"server_keys": signed_keys}

-        respond_with_json_bytes(request, 200, encode_canonical_json(results))
+            respond_with_json_bytes(request, 200, encode_canonical_json(results))
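Note: the restructured branch signs each cached key response with every one of
the notary's configured signing keys via signedjson's sign_json. In isolation,
the sign/verify round trip looks like this (real signedjson API; the server
names are made up):

    import signedjson.key
    from signedjson.sign import sign_json, verify_signed_json

    signing_key = signedjson.key.generate_signing_key("notary1")

    # a (heavily truncated) server_keys entry as the notary might cache it
    key_json = {"server_name": "remote.example.com", "verify_keys": {}}

    signed = sign_json(key_json, "notary.example.com", signing_key)
    assert "notary.example.com" in signed["signatures"]

    # a client checks the notary's signature with the corresponding verify key
    verify_key = signedjson.key.get_verify_key(signing_key)
    verify_signed_json(signed, "notary.example.com", verify_key)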
diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py
index c577c0df5f..2700cca822 100644
--- a/synapse/storage/data_stores/main/__init__.py
+++ b/synapse/storage/data_stores/main/__init__.py
@@ -526,9 +526,9 @@ class DataStore(
         attr_filter = {}
         if not guests:
-            attr_filter["is_guest"] = False
+            attr_filter["is_guest"] = 0

         if not deactivated:
-            attr_filter["deactivated"] = False
+            attr_filter["deactivated"] = 0

         return self.db.simple_select_list_paginate(
             desc="get_users_paginate",
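Note: this is the substance of the /_synapse/admin/v2/users fix. is_guest and
deactivated are stored as 0/1 integer columns, and filtering on Python False
binds a boolean parameter, which (at least on Postgres) likely fails with an
operator mismatch against a smallint column; 0 compares cleanly. A simplified
sketch of how the filter dict becomes a WHERE clause (hypothetical helper, not
the real simple_select_list_paginate):

    def build_where(attr_filter):
        # simplified: each filter key becomes an equality test with a bound param
        clause = " AND ".join("%s = ?" % (k,) for k in attr_filter)
        return clause, list(attr_filter.values())

    # before: params [False, False]; on Postgres this likely raises something
    # like "operator does not exist: smallint = boolean"
    # after: params [0, 0], matching the 0/1 values actually stored
    print(build_where({"is_guest": 0, "deactivated": 0}))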
diff --git a/synapse/storage/data_stores/main/client_ips.py b/synapse/storage/data_stores/main/client_ips.py
index 13f4c9c72e..68f6a85a83 100644
--- a/synapse/storage/data_stores/main/client_ips.py
+++ b/synapse/storage/data_stores/main/client_ips.py
@@ -30,7 +30,7 @@ logger = logging.getLogger(__name__)
 # Number of msec of granularity to store the user IP 'last seen' time. Smaller
 # times give more inserts into the database even for readonly API hits
 # 120 seconds == 2 minutes
-LAST_SEEN_GRANULARITY = 120 * 1000
+LAST_SEEN_GRANULARITY = 10 * 60 * 1000


 class ClientIpBackgroundUpdateStore(SQLBaseStore):
diff --git a/synapse/storage/data_stores/main/search.py b/synapse/storage/data_stores/main/search.py
index 47ebb8a214..2c50e04515 100644
--- a/synapse/storage/data_stores/main/search.py
+++ b/synapse/storage/data_stores/main/search.py
@@ -725,7 +725,7 @@ def _parse_query(database_engine, search_term):
     results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)

     if isinstance(database_engine, PostgresEngine):
-        return " & ".join(result + ":*" for result in results)
+        return " & ".join(result for result in results)
     elif isinstance(database_engine, Sqlite3Engine):
         return " & ".join(result + "*" for result in results)
     else:
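Note: the dropped :* suffix is Postgres's prefix-match operator in to_tsquery;
wor:* matches the lexeme "world", while bare wor matches only "wor". What
_parse_query emits before and after this change (simplified reimplementation
for illustration):

    import re

    def parse_query(search_term, prefix_match):
        # simplified re-implementation of _parse_query for Postgres
        results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
        if prefix_match:
            return " & ".join(result + ":*" for result in results)  # old behaviour
        return " & ".join(results)  # new behaviour: exact lexeme matches only

    print(parse_query("hello wor", prefix_match=True))   # -> hello:* & wor:*
    print(parse_query("hello wor", prefix_match=False))  # -> hello & wor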
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 0ed2594381..325bd6a608 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -341,6 +341,47 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         self.assertEqual("Invalid user type", channel.json_body["error"])


+class UsersListTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+    ]
+    url = "/_synapse/admin/v2/users"
+
+    def prepare(self, reactor, clock, hs):
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.register_user("user1", "pass1", admin=False)
+        self.register_user("user2", "pass2", admin=False)
+
+    def test_no_auth(self):
+        """
+        Try to list users without authentication.
+        """
+        request, channel = self.make_request("GET", self.url, b"{}")
+        self.render(request)
+
+        self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("M_MISSING_TOKEN", channel.json_body["errcode"])
+
+    def test_all_users(self):
+        """
+        List all users, including deactivated users.
+        """
+        request, channel = self.make_request(
+            "GET",
+            self.url + "?deactivated=true",
+            b"{}",
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(3, len(channel.json_body["users"]))
+
+
 class ShutdownRoomTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
diff --git a/tests/rest/key/__init__.py b/tests/rest/key/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/rest/key/__init__.py
diff --git a/tests/rest/key/v2/__init__.py b/tests/rest/key/v2/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/rest/key/v2/__init__.py
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index d8246b4e78..6776a56cad 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -13,25 +13,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import urllib.parse
-from io import BytesIO
+from io import BytesIO, StringIO

 from mock import Mock

 import signedjson.key
+from canonicaljson import encode_canonical_json
 from nacl.signing import SigningKey
 from signedjson.sign import sign_json

 from twisted.web.resource import NoResource

+from synapse.crypto.keyring import PerspectivesKeyFetcher
 from synapse.http.site import SynapseRequest
 from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.storage.keys import FetchKeyResult
 from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.stringutils import random_string

 from tests import unittest
 from tests.server import FakeChannel, wait_until_result
+from tests.utils import default_config


-class RemoteKeyResourceTestCase(unittest.HomeserverTestCase):
+class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
         self.http_client = Mock()
         return self.setup_test_homeserver(http_client=self.http_client)
@@ -73,6 +78,8 @@ class RemoteKeyResourceTestCase(unittest.HomeserverTestCase):

         self.http_client.get_json.side_effect = get_json

+
+class RemoteKeyResourceTestCase(BaseRemoteKeyResourceTestCase):
     def make_notary_request(self, server_name: str, key_id: str) -> dict:
         """Send a GET request to the test server requesting the given key.

@@ -125,6 +132,126 @@ class RemoteKeyResourceTestCase(unittest.HomeserverTestCase):
         oursigs = sigs[self.hs.hostname]
         self.assertEqual(len(oursigs), 2)

-        # and both keys should be present in the verify_keys section
+        # the requested key should be present in the verify_keys section
         self.assertIn("ed25519:ver1", keys[0]["verify_keys"])
-        self.assertIn("ed25519:a_lPym", keys[0]["verify_keys"])
+
+
+class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
+    """End-to-end tests of the perspectives fetch case
+
+    The idea here is to actually wire up a PerspectivesKeyFetcher to the notary
+    endpoint, to check that the two implementations are compatible.
+    """
+
+    def default_config(self, *args, **kwargs):
+        config = super().default_config(*args, **kwargs)
+
+        # replace the signing key with our own
+        self.hs_signing_key = signedjson.key.generate_signing_key("kssk")
+        strm = StringIO()
+        signedjson.key.write_signing_keys(strm, [self.hs_signing_key])
+        config["signing_key"] = strm.getvalue()
+
+        return config
+
+    def prepare(self, reactor, clock, homeserver):
+        # make a second homeserver, configured to use the first one as a key notary
+        self.http_client2 = Mock()
+        config = default_config(name="keyclient")
+        config["trusted_key_servers"] = [
+            {
+                "server_name": self.hs.hostname,
+                "verify_keys": {
+                    "ed25519:%s"
+                    % (
+                        self.hs_signing_key.version,
+                    ): signedjson.key.encode_verify_key_base64(
+                        self.hs_signing_key.verify_key
+                    )
+                },
+            }
+        ]
+        self.hs2 = self.setup_test_homeserver(
+            http_client=self.http_client2, config=config
+        )
+
+        # wire up outbound POST /key/v2/query requests from hs2 so that they
+        # will be forwarded to hs1
+        def post_json(destination, path, data):
+            self.assertEqual(destination, self.hs.hostname)
+            self.assertEqual(
+                path, "/_matrix/key/v2/query",
+            )
+
+            channel = FakeChannel(self.site, self.reactor)
+            req = SynapseRequest(channel)
+            req.content = BytesIO(encode_canonical_json(data))
+
+            req.requestReceived(
+                b"POST", path.encode("utf-8"), b"1.1",
+            )
+            wait_until_result(self.reactor, req)
+            self.assertEqual(channel.code, 200)
+            resp = channel.json_body
+            return resp
+
+        self.http_client2.post_json.side_effect = post_json
+
+    def test_get_key(self):
+        """Fetch a key belonging to a random server"""
+        # make up a key to be fetched.
+        testkey = signedjson.key.generate_signing_key("abc")
+
+        # we expect hs1 to make a regular key request to the target server
+        self.expect_outgoing_key_request("targetserver", testkey)
+        keyid = "ed25519:%s" % (testkey.version,)
+
+        fetcher = PerspectivesKeyFetcher(self.hs2)
+        d = fetcher.get_keys({"targetserver": {keyid: 1000}})
+        res = self.get_success(d)
+        self.assertIn("targetserver", res)
+        keyres = res["targetserver"][keyid]
+        assert isinstance(keyres, FetchKeyResult)
+        self.assertEqual(
+            signedjson.key.encode_verify_key_base64(keyres.verify_key),
+            signedjson.key.encode_verify_key_base64(testkey.verify_key),
+        )
+
+    def test_get_notary_key(self):
+        """Fetch a key belonging to the notary server"""
+        # make up a key to be fetched. We randomise the keyid to try to get it to
+        # appear before the key server signing key sometimes (otherwise we bail out
+        # before fetching its signature)
+        testkey = signedjson.key.generate_signing_key(random_string(5))
+
+        # we expect hs1 to make a regular key request to itself
+        self.expect_outgoing_key_request(self.hs.hostname, testkey)
+        keyid = "ed25519:%s" % (testkey.version,)
+
+        fetcher = PerspectivesKeyFetcher(self.hs2)
+        d = fetcher.get_keys({self.hs.hostname: {keyid: 1000}})
+        res = self.get_success(d)
+        self.assertIn(self.hs.hostname, res)
+        keyres = res[self.hs.hostname][keyid]
+        assert isinstance(keyres, FetchKeyResult)
+        self.assertEqual(
+            signedjson.key.encode_verify_key_base64(keyres.verify_key),
+            signedjson.key.encode_verify_key_base64(testkey.verify_key),
+        )
+
+    def test_get_notary_keyserver_key(self):
+        """Fetch the notary's keyserver key"""
+        # we expect hs1 to make a regular key request to itself
+        self.expect_outgoing_key_request(self.hs.hostname, self.hs_signing_key)
+        keyid = "ed25519:%s" % (self.hs_signing_key.version,)
+
+        fetcher = PerspectivesKeyFetcher(self.hs2)
+        d = fetcher.get_keys({self.hs.hostname: {keyid: 1000}})
+        res = self.get_success(d)
+        self.assertIn(self.hs.hostname, res)
+        keyres = res[self.hs.hostname][keyid]
+        assert isinstance(keyres, FetchKeyResult)
+        self.assertEqual(
+            signedjson.key.encode_verify_key_base64(keyres.verify_key),
+            signedjson.key.encode_verify_key_base64(self.hs_signing_key.verify_key),
+        )